From be37730906230fcc4255a9802231cc4d3fe11679 Mon Sep 17 00:00:00 2001 From: zvonand Date: Fri, 27 Jan 2023 16:23:27 +0300 Subject: [PATCH 001/722] upd --- src/Common/DateLUT.cpp | 10 +++++++++- src/Common/DateLUT.h | 24 ++++++++++++++++++++++-- src/Core/Settings.h | 1 + 3 files changed, 32 insertions(+), 3 deletions(-) diff --git a/src/Common/DateLUT.cpp b/src/Common/DateLUT.cpp index ae6127670e5..fd10b799b66 100644 --- a/src/Common/DateLUT.cpp +++ b/src/Common/DateLUT.cpp @@ -7,6 +7,8 @@ #include #include +#include +#include namespace @@ -147,7 +149,7 @@ DateLUT::DateLUT() } -const DateLUTImpl & DateLUT::getImplementation(const std::string & time_zone) const +const ALWAYS_INLINE DateLUTImpl & DateLUT::getImplementation(const std::string & time_zone) const { std::lock_guard lock(mutex); @@ -163,3 +165,9 @@ DateLUT & DateLUT::getInstance() static DateLUT ret; return ret; } + +std::string DateLUT::extractTimezoneFromContext(const DB::ContextPtr query_context) +{ + std::string ret = query_context->getSettingsRef().implicit_timezone.value; + return ret; +} diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index b7ba37c2bec..bd7969bffa6 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -5,6 +5,10 @@ #include #include +// +//#include "Interpreters/Context_fwd.h" +//#include "Interpreters/Context.h" +#include "Common/CurrentThread.h" #include #include @@ -20,16 +24,30 @@ public: static ALWAYS_INLINE const DateLUTImpl & instance() // -V1071 { const auto & date_lut = getInstance(); + + if (DB::CurrentThread::isInitialized()) + { + const auto query_context = DB::CurrentThread::get().getQueryContext(); + + if (query_context) + { + auto implicit_timezone = extractTimezoneFromContext(query_context); + + if (!implicit_timezone.empty()) + return instance(implicit_timezone); + } + } + return *date_lut.default_impl.load(std::memory_order_acquire); } /// Return singleton DateLUTImpl instance for a given time zone. static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone) { - const auto & date_lut = getInstance(); if (time_zone.empty()) - return *date_lut.default_impl.load(std::memory_order_acquire); + return instance(); + const auto & date_lut = getInstance(); return date_lut.getImplementation(time_zone); } static void setDefaultTimezone(const std::string & time_zone) @@ -45,6 +63,8 @@ protected: private: static DateLUT & getInstance(); + static std::string extractTimezoneFromContext(const DB::ContextPtr query_context); + const DateLUTImpl & getImplementation(const std::string & time_zone) const; using DateLUTImplPtr = std::unique_ptr; diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 1948a6da012..2da5791ff81 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -688,6 +688,7 @@ class IColumn; M(Float, insert_keeper_fault_injection_probability, 0.0f, "Approximate probability of failure for a keeper request during insert. Valid value is in interval [0.0f, 1.0f]", 0) \ M(UInt64, insert_keeper_fault_injection_seed, 0, "0 - random seed, otherwise the setting value", 0) \ M(Bool, force_aggregation_in_order, false, "Force use of aggregation in order on remote nodes during distributed aggregation. PLEASE, NEVER CHANGE THIS SETTING VALUE MANUALLY!", IMPORTANT) \ + M(String, implicit_timezone, "", "Use specified timezone for interpreting Date and DateTime instead of server's timezone.", 0) \ // End of COMMON_SETTINGS // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS. 
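A minimal usage sketch of the `implicit_timezone` setting introduced in the patch above. This is not part of the patch itself; the session timezone, query, and expected value are taken from the stateless test and `.reference` file re-added in the next patch, not invented here:

```clickhouse
-- The DateTime64 literal carries no explicit timezone, so it is interpreted in the
-- session's implicit_timezone (Asia/Novosibirsk) and then rendered in Europe/Zurich.
SET implicit_timezone = 'Asia/Novosibirsk';
SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich');
-- Expected result per the accompanying reference file: 1999-12-12 18:23:23.123
```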
From 537721e297e4ba94f30d5bd76ca0a7b01a080a5c Mon Sep 17 00:00:00 2001 From: zvonand Date: Fri, 27 Jan 2023 23:40:43 +0300 Subject: [PATCH 002/722] re-add tests --- .../0_stateless/02538_implicit_timezone.reference | 3 +++ tests/queries/0_stateless/02538_implicit_timezone.sql | 9 +++++++++ 2 files changed, 12 insertions(+) create mode 100644 tests/queries/0_stateless/02538_implicit_timezone.reference create mode 100644 tests/queries/0_stateless/02538_implicit_timezone.sql diff --git a/tests/queries/0_stateless/02538_implicit_timezone.reference b/tests/queries/0_stateless/02538_implicit_timezone.reference new file mode 100644 index 00000000000..8ed8024f652 --- /dev/null +++ b/tests/queries/0_stateless/02538_implicit_timezone.reference @@ -0,0 +1,3 @@ +1999-12-12 18:23:23.123 +1999-12-12 23:23:23.123 +1999-12-13 04:23:23.123 diff --git a/tests/queries/0_stateless/02538_implicit_timezone.sql b/tests/queries/0_stateless/02538_implicit_timezone.sql new file mode 100644 index 00000000000..663b218d235 --- /dev/null +++ b/tests/queries/0_stateless/02538_implicit_timezone.sql @@ -0,0 +1,9 @@ +SET implicit_timezone = 'Asia/Novosibirsk'; + +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich'); + +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS implicit_timezone = 'Europe/Zurich'; + +SET implicit_timezone = 'Europe/Zurich'; + +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); \ No newline at end of file From 010edbf2aad3508402e82b8ce62f90ce62fc9f09 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 22 Feb 2023 10:39:23 +0100 Subject: [PATCH 003/722] do another way and logs work --- src/Common/DateLUT.h | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index bd7969bffa6..6ec9cf1646d 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -5,9 +5,6 @@ #include #include -// -//#include "Interpreters/Context_fwd.h" -//#include "Interpreters/Context.h" #include "Common/CurrentThread.h" #include @@ -24,30 +21,31 @@ public: static ALWAYS_INLINE const DateLUTImpl & instance() // -V1071 { const auto & date_lut = getInstance(); - - if (DB::CurrentThread::isInitialized()) - { - const auto query_context = DB::CurrentThread::get().getQueryContext(); - - if (query_context) - { - auto implicit_timezone = extractTimezoneFromContext(query_context); - - if (!implicit_timezone.empty()) - return instance(implicit_timezone); - } - } - return *date_lut.default_impl.load(std::memory_order_acquire); } /// Return singleton DateLUTImpl instance for a given time zone. 
static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone) { - if (time_zone.empty()) - return instance(); - const auto & date_lut = getInstance(); + + if (time_zone.empty()) + { + if (DB::CurrentThread::isInitialized()) + { + const auto query_context = DB::CurrentThread::get().getQueryContext(); + + if (query_context) + { + auto implicit_timezone = extractTimezoneFromContext(query_context); + + if (!implicit_timezone.empty()) + return instance(implicit_timezone); + } + } + return *date_lut.default_impl.load(std::memory_order_acquire); + } + return date_lut.getImplementation(time_zone); } static void setDefaultTimezone(const std::string & time_zone) From 1cf6c3a9c0fa4867684dee56c651d4131aa3b0fe Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 22 Feb 2023 10:51:32 +0100 Subject: [PATCH 004/722] update test names --- ...licit_timezone.reference => 02668_implicit_timezone.reference} | 0 .../{02538_implicit_timezone.sql => 02668_implicit_timezone.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/queries/0_stateless/{02538_implicit_timezone.reference => 02668_implicit_timezone.reference} (100%) rename tests/queries/0_stateless/{02538_implicit_timezone.sql => 02668_implicit_timezone.sql} (100%) diff --git a/tests/queries/0_stateless/02538_implicit_timezone.reference b/tests/queries/0_stateless/02668_implicit_timezone.reference similarity index 100% rename from tests/queries/0_stateless/02538_implicit_timezone.reference rename to tests/queries/0_stateless/02668_implicit_timezone.reference diff --git a/tests/queries/0_stateless/02538_implicit_timezone.sql b/tests/queries/0_stateless/02668_implicit_timezone.sql similarity index 100% rename from tests/queries/0_stateless/02538_implicit_timezone.sql rename to tests/queries/0_stateless/02668_implicit_timezone.sql From 393830ecdc78cd2745cc439d7ac95c3421fe9044 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 22 Feb 2023 16:30:46 +0100 Subject: [PATCH 005/722] add docs + tiny cleanup --- docs/en/operations/settings/settings.md | 27 ++++++++++++++++++++++++ docs/ru/operations/settings/settings.md | 28 +++++++++++++++++++++++++ src/Common/DateLUT.cpp | 3 +-- 3 files changed, 56 insertions(+), 2 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 1060eae1b0e..4e105124086 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -3939,3 +3939,30 @@ Default value: `0`. :::note Use this setting only for backward compatibility if your use cases depend on old syntax. ::: + +## implicit_timezone {#implicit_timezone} + +If specified, sets a implicit timezone (instead of server-default). All DateTime/DateTime64 values (and/or functions results) that have no explicit timezone specified are treated as having this timezone instead of default. 
+Examples: +``` +SELECT timeZone(), timeZoneOf(now()) +┌─timeZone()────┬─timeZoneOf(now())─┐ +│ Europe/Berlin │ Europe/Berlin │ +└───────────────┴───────────────────┘ + +:) SELECT timeZone(), timeZoneOf(now()) SETTINGS implicit_timezone = 'Asia/Novosibirsk' +┌─timeZone()────┬─timeZoneOf(now())─┐ +│ Europe/Berlin │ Asia/Novosibirsk │ +└───────────────┴───────────────────┘ + +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS implicit_timezone = 'America/Denver'; +┌─toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich')─┐ +│ 1999-12-13 07:23:23.123 │ +└──────────────────────────────────────────────────────────────────────────────┘ +``` + +Possible values: + +- Any valid timezone in `Region/Place` notation, e.g. `Europe/Berlin` + +Default value: `''`. \ No newline at end of file diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 4025966ac21..8d3f2706585 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -4084,3 +4084,31 @@ ALTER TABLE test FREEZE SETTINGS alter_partition_verbose_result = 1; Задает символ, который интерпретируется как суффикс после результирующего набора данных формата [CustomSeparated](../../interfaces/formats.md#format-customseparated). Значение по умолчанию: `''`. + +## implicit_timezone {#implicit_timezone} + +Задаёт значение часового пояса (timezone) по умолчанию для текущей сессии вместо часового пояса сервера. То есть, все значения DateTime/DateTime64, для которых явно не задан параметр timezone, будут интерпретированы как относящиеся к указанной зоне. + +Примеры: +``` +SELECT timeZone(), timeZoneOf(now()) +┌─timeZone()────┬─timeZoneOf(now())─┐ +│ Europe/Berlin │ Europe/Berlin │ +└───────────────┴───────────────────┘ + +:) SELECT timeZone(), timeZoneOf(now()) SETTINGS implicit_timezone = 'Asia/Novosibirsk' +┌─timeZone()────┬─timeZoneOf(now())─┐ +│ Europe/Berlin │ Asia/Novosibirsk │ +└───────────────┴───────────────────┘ + +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS implicit_timezone = 'America/Denver'; +┌─toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich')─┐ +│ 1999-12-13 07:23:23.123 │ +└──────────────────────────────────────────────────────────────────────────────┘ +``` + +Возможные значения: + +- Строка вида `Регион/Город`, например `Europe/Zurich` + +Значение по умолчанию: `''`. 
\ No newline at end of file diff --git a/src/Common/DateLUT.cpp b/src/Common/DateLUT.cpp index fd10b799b66..e309b0cb28a 100644 --- a/src/Common/DateLUT.cpp +++ b/src/Common/DateLUT.cpp @@ -8,7 +8,6 @@ #include #include #include -#include namespace @@ -149,7 +148,7 @@ DateLUT::DateLUT() } -const ALWAYS_INLINE DateLUTImpl & DateLUT::getImplementation(const std::string & time_zone) const +const DateLUTImpl & DateLUT::getImplementation(const std::string & time_zone) const { std::lock_guard lock(mutex); From c61aff7cac2e5cc79dc4591d9228308e017e5b28 Mon Sep 17 00:00:00 2001 From: zvonand Date: Thu, 23 Feb 2023 13:38:13 +0100 Subject: [PATCH 006/722] Added standalone function to get server's own timezone Fix missing global_context --- src/Common/DateLUT.h | 10 ++++++++++ src/Functions/serverConstants.cpp | 19 +++++++++++++++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index b66821989e3..18ef5ee2e30 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -42,6 +42,16 @@ public: if (!implicit_timezone.empty()) return instance(implicit_timezone); } + + const auto global_context = DB::CurrentThread::get().getGlobalContext(); + if (global_context) + { + auto implicit_timezone = extractTimezoneFromContext(global_context); + + if (!implicit_timezone.empty()) + return instance(implicit_timezone); + } + } return *date_lut.default_impl.load(std::memory_order_acquire); } diff --git a/src/Functions/serverConstants.cpp b/src/Functions/serverConstants.cpp index 96615d0a4c9..a89e1564f28 100644 --- a/src/Functions/serverConstants.cpp +++ b/src/Functions/serverConstants.cpp @@ -60,13 +60,22 @@ namespace }; - /// Returns the server time zone. + /// Returns default timezone for current session. class FunctionTimezone : public FunctionConstantBase { public: static constexpr auto name = "timezone"; static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } - explicit FunctionTimezone(ContextPtr context) : FunctionConstantBase(String{DateLUT::instance().getTimeZone()}, context->isDistributed()) {} + explicit FunctionTimezone(ContextPtr context) : FunctionConstantBase(String{DateLUT::instance("").getTimeZone()}, context->isDistributed()) {} + }; + + /// Returns the server time zone (timezone in which server runs). 
+ class FunctionServerTimezone : public FunctionConstantBase + { + public: + static constexpr auto name = "serverTimezone"; + static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } + explicit FunctionServerTimezone(ContextPtr context) : FunctionConstantBase(String{DateLUT::instance().getTimeZone()}, context->isDistributed()) {} }; @@ -155,6 +164,12 @@ REGISTER_FUNCTION(Timezone) factory.registerAlias("timeZone", "timezone"); } +REGISTER_FUNCTION(ServerTimezone) +{ + factory.registerFunction(); + factory.registerAlias("serverTimeZone", "serverTimezone"); +} + REGISTER_FUNCTION(Uptime) { factory.registerFunction(); From a9d0f7e7dbb281def311b22e4ae6300c73b5e979 Mon Sep 17 00:00:00 2001 From: zvonand Date: Thu, 23 Feb 2023 19:14:49 +0100 Subject: [PATCH 007/722] Added docs for new serverTimeZone function Updated tests and docs --- docs/en/operations/settings/settings.md | 28 ++++++++++--------- .../functions/date-time-functions.md | 21 +++++++++++++- docs/ru/operations/settings/settings.md | 27 +++++++++--------- .../functions/date-time-functions.md | 21 +++++++++++++- src/Common/DateLUT.cpp | 3 +- src/Common/DateLUT.h | 8 ++++-- src/Core/Settings.h | 2 +- src/Functions/serverConstants.cpp | 23 +++++++++++++-- ...rence => 02668_timezone_setting.reference} | 0 ...imezone.sql => 02668_timezone_setting.sql} | 6 ++-- 10 files changed, 101 insertions(+), 38 deletions(-) rename tests/queries/0_stateless/{02668_implicit_timezone.reference => 02668_timezone_setting.reference} (100%) rename tests/queries/0_stateless/{02668_implicit_timezone.sql => 02668_timezone_setting.sql} (61%) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 4e105124086..c1c4483d341 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -3940,25 +3940,27 @@ Default value: `0`. Use this setting only for backward compatibility if your use cases depend on old syntax. ::: -## implicit_timezone {#implicit_timezone} +## timezone {#timezone} If specified, sets a implicit timezone (instead of server-default). All DateTime/DateTime64 values (and/or functions results) that have no explicit timezone specified are treated as having this timezone instead of default. 
Examples: + +```clickhouse +SELECT timeZone(), serverTimezone() FORMAT TSV + +Europe/Berlin Europe/Berlin ``` -SELECT timeZone(), timeZoneOf(now()) -┌─timeZone()────┬─timeZoneOf(now())─┐ -│ Europe/Berlin │ Europe/Berlin │ -└───────────────┴───────────────────┘ -:) SELECT timeZone(), timeZoneOf(now()) SETTINGS implicit_timezone = 'Asia/Novosibirsk' -┌─timeZone()────┬─timeZoneOf(now())─┐ -│ Europe/Berlin │ Asia/Novosibirsk │ -└───────────────┴───────────────────┘ +```clickhouse +SELECT timeZone(), serverTimezone() SETTINGS timezone = 'Asia/Novosibirsk' FORMAT TSV -SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS implicit_timezone = 'America/Denver'; -┌─toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich')─┐ -│ 1999-12-13 07:23:23.123 │ -└──────────────────────────────────────────────────────────────────────────────┘ +Asia/Novosibirsk Europe/Berlin +``` + +```clickhouse +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS timezone = 'America/Denver' FORMAT TSV + +1999-12-13 07:23:23.123 ``` Possible values: diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index f6af8abcbaf..8d31cb3872f 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -26,7 +26,7 @@ SELECT ## timeZone -Returns the timezone of the server. +Returns the default timezone of the server for current session. This can be modified using `SET timezone = 'New/Value'` If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value. **Syntax** @@ -43,6 +43,25 @@ Alias: `timezone`. Type: [String](../../sql-reference/data-types/string.md). +## serverTimeZone + +Returns the actual timezone in which the server runs in. +If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value. + +**Syntax** + +``` sql +timeZone() +``` + +Alias: `ServerTimezone`, `servertimezone`. + +**Returned value** + +- Timezone. + +Type: [String](../../sql-reference/data-types/string.md). + ## toTimeZone Converts time or date and time to the specified time zone. The time zone is an attribute of the `Date` and `DateTime` data types. The internal value (number of seconds) of the table field or of the resultset's column does not change, the column's type changes and its string representation changes accordingly. diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 8d3f2706585..dd1e9d98427 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -4085,26 +4085,27 @@ ALTER TABLE test FREEZE SETTINGS alter_partition_verbose_result = 1; Значение по умолчанию: `''`. -## implicit_timezone {#implicit_timezone} +## timezone {#timezone} Задаёт значение часового пояса (timezone) по умолчанию для текущей сессии вместо часового пояса сервера. То есть, все значения DateTime/DateTime64, для которых явно не задан параметр timezone, будут интерпретированы как относящиеся к указанной зоне. 
Примеры: +```clickhouse +SELECT timeZone(), serverTimezone() FORMAT TSV + +Europe/Berlin Europe/Berlin ``` -SELECT timeZone(), timeZoneOf(now()) -┌─timeZone()────┬─timeZoneOf(now())─┐ -│ Europe/Berlin │ Europe/Berlin │ -└───────────────┴───────────────────┘ -:) SELECT timeZone(), timeZoneOf(now()) SETTINGS implicit_timezone = 'Asia/Novosibirsk' -┌─timeZone()────┬─timeZoneOf(now())─┐ -│ Europe/Berlin │ Asia/Novosibirsk │ -└───────────────┴───────────────────┘ +```clickhouse +SELECT timeZone(), serverTimezone() SETTINGS timezone = 'Asia/Novosibirsk' FORMAT TSV -SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS implicit_timezone = 'America/Denver'; -┌─toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich')─┐ -│ 1999-12-13 07:23:23.123 │ -└──────────────────────────────────────────────────────────────────────────────┘ +Asia/Novosibirsk Europe/Berlin +``` + +```clickhouse +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS timezone = 'America/Denver' FORMAT TSV + +1999-12-13 07:23:23.123 ``` Возможные значения: diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 8fbcaf9568b..77188ea2797 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -26,7 +26,7 @@ SELECT ## timeZone {#timezone} -Возвращает часовой пояс сервера. +Возвращает часовой пояс сервера, считающийся умолчанием для текущей сессии. Если функция вызывается в контексте распределенной таблицы, то она генерирует обычный столбец со значениями, актуальными для каждого шарда. Иначе возвращается константа. **Синтаксис** @@ -43,6 +43,25 @@ timeZone() Тип: [String](../../sql-reference/data-types/string.md). +## serverTimeZone {#servertimezone} + +Возвращает (истинный) часовой пояс сервера, в котором тот работает. +Если функция вызывается в контексте распределенной таблицы, то она генерирует обычный столбец со значениями, актуальными для каждого шарда. Иначе возвращается константа. + +**Синтаксис** + +``` sql +serverTimeZone() +``` + +Синонимы: `servertimezone`, `serverTimezone`. + +**Возвращаемое значение** + +- Часовой пояс. + +Тип: [String](../../sql-reference/data-types/string.md). + ## toTimeZone {#totimezone} Переводит дату или дату с временем в указанный часовой пояс. Часовой пояс - это атрибут типов `Date` и `DateTime`. Внутреннее значение (количество секунд) поля таблицы или результирующего столбца не изменяется, изменяется тип поля и, соответственно, его текстовое отображение. diff --git a/src/Common/DateLUT.cpp b/src/Common/DateLUT.cpp index e309b0cb28a..3698fe45aa7 100644 --- a/src/Common/DateLUT.cpp +++ b/src/Common/DateLUT.cpp @@ -167,6 +167,5 @@ DateLUT & DateLUT::getInstance() std::string DateLUT::extractTimezoneFromContext(const DB::ContextPtr query_context) { - std::string ret = query_context->getSettingsRef().implicit_timezone.value; - return ret; + return query_context->getSettingsRef().timezone.value; } diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index 18ef5ee2e30..a9ee61dc8ab 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -17,14 +17,18 @@ class DateLUT : private boost::noncopyable { public: - /// Return singleton DateLUTImpl instance for the default time zone. + /// Return singleton DateLUTImpl instance for server's timezone (the one which server has). 
static ALWAYS_INLINE const DateLUTImpl & instance() { const auto & date_lut = getInstance(); return *date_lut.default_impl.load(std::memory_order_acquire); } - /// Return singleton DateLUTImpl instance for a given time zone. + /* + Return singleton DateLUTImpl instance for a given time zone. If timezone is an empty string, + timezone set by `timezone` setting for current session is used. If it is not set, server's timezone is used, + and return is the same as calling instance(). + */ static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone) { const auto & date_lut = getInstance(); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index e3ed1b26269..e70b8c131b3 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -712,7 +712,7 @@ class IColumn; M(Float, insert_keeper_fault_injection_probability, 0.0f, "Approximate probability of failure for a keeper request during insert. Valid value is in interval [0.0f, 1.0f]", 0) \ M(UInt64, insert_keeper_fault_injection_seed, 0, "0 - random seed, otherwise the setting value", 0) \ M(Bool, force_aggregation_in_order, false, "Force use of aggregation in order on remote nodes during distributed aggregation. PLEASE, NEVER CHANGE THIS SETTING VALUE MANUALLY!", IMPORTANT) \ - M(String, implicit_timezone, "", "Use specified timezone for interpreting Date and DateTime instead of server's timezone.", 0) \ + M(String, timezone, "", "Use specified timezone for interpreting Date and DateTime instead of server's timezone.", 0) \ // End of COMMON_SETTINGS // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS. diff --git a/src/Functions/serverConstants.cpp b/src/Functions/serverConstants.cpp index a89e1564f28..b57f7e40e07 100644 --- a/src/Functions/serverConstants.cpp +++ b/src/Functions/serverConstants.cpp @@ -160,14 +160,33 @@ REGISTER_FUNCTION(TcpPort) REGISTER_FUNCTION(Timezone) { - factory.registerFunction(); + factory.registerFunction({ + R"( +Returns the default timezone for current session. +Used as default timezone for parsing DateTime|DateTime64 without explicitly specified timezone. +Can be changed with SET timezone = 'New/Tz' + +[example:timezone] +)", + Documentation::Examples{{"serverTimezone", "SELECT timezone();"}}, + Documentation::Categories{"Constant", "Miscellaneous"} + }); factory.registerAlias("timeZone", "timezone"); } REGISTER_FUNCTION(ServerTimezone) { - factory.registerFunction(); + factory.registerFunction({ + R"( +Returns the timezone name in which server operates. 
+ +[example:serverTimezone] +)", + Documentation::Examples{{"serverTimezone", "SELECT serverTimezone();"}}, + Documentation::Categories{"Constant", "Miscellaneous"} + }); factory.registerAlias("serverTimeZone", "serverTimezone"); + factory.registerAlias("servertimezone", "serverTimezone"); } REGISTER_FUNCTION(Uptime) diff --git a/tests/queries/0_stateless/02668_implicit_timezone.reference b/tests/queries/0_stateless/02668_timezone_setting.reference similarity index 100% rename from tests/queries/0_stateless/02668_implicit_timezone.reference rename to tests/queries/0_stateless/02668_timezone_setting.reference diff --git a/tests/queries/0_stateless/02668_implicit_timezone.sql b/tests/queries/0_stateless/02668_timezone_setting.sql similarity index 61% rename from tests/queries/0_stateless/02668_implicit_timezone.sql rename to tests/queries/0_stateless/02668_timezone_setting.sql index 663b218d235..3748b536614 100644 --- a/tests/queries/0_stateless/02668_implicit_timezone.sql +++ b/tests/queries/0_stateless/02668_timezone_setting.sql @@ -1,9 +1,9 @@ -SET implicit_timezone = 'Asia/Novosibirsk'; +SET timezone = 'Asia/Novosibirsk'; SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich'); -SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS implicit_timezone = 'Europe/Zurich'; +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS timezone = 'Europe/Zurich'; -SET implicit_timezone = 'Europe/Zurich'; +SET timezone = 'Europe/Zurich'; SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); \ No newline at end of file From c3a6efe0310ec23521eb97c1b53c6616f72ba7a0 Mon Sep 17 00:00:00 2001 From: zvonand Date: Fri, 24 Feb 2023 20:51:12 +0100 Subject: [PATCH 008/722] update --- src/Client/ClientBase.cpp | 2 +- src/Common/DateLUT.h | 13 +++++++------ src/Functions/serverConstants.cpp | 2 +- src/IO/ReadHelpers.h | 8 ++++---- src/IO/WriteHelpers.h | 6 +++--- .../queries/0_stateless/02668_timezone_setting.sql | 6 +++--- 6 files changed, 19 insertions(+), 18 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index bc8c43af8c6..9ebe115d408 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -2139,7 +2139,7 @@ void ClientBase::runInteractive() initQueryIdFormats(); /// Initialize DateLUT here to avoid counting time spent here as query execution time. 
- const auto local_tz = DateLUT::instance().getTimeZone(); + const auto local_tz = DateLUT::instance("").getTimeZone(); suggest.emplace(); if (load_suggestions) diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index a9ee61dc8ab..29a4ee13d87 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -32,6 +32,7 @@ public: static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone) { const auto & date_lut = getInstance(); + std::string effective_time_zone; if (time_zone.empty()) { @@ -41,19 +42,19 @@ public: if (query_context) { - auto implicit_timezone = extractTimezoneFromContext(query_context); + effective_time_zone = extractTimezoneFromContext(query_context); - if (!implicit_timezone.empty()) - return instance(implicit_timezone); + if (!effective_time_zone.empty()) + return date_lut.getImplementation(effective_time_zone); } const auto global_context = DB::CurrentThread::get().getGlobalContext(); if (global_context) { - auto implicit_timezone = extractTimezoneFromContext(global_context); + effective_time_zone = extractTimezoneFromContext(global_context); - if (!implicit_timezone.empty()) - return instance(implicit_timezone); + if (!effective_time_zone.empty()) + return date_lut.getImplementation(effective_time_zone); } } diff --git a/src/Functions/serverConstants.cpp b/src/Functions/serverConstants.cpp index b57f7e40e07..ea74d7a89bb 100644 --- a/src/Functions/serverConstants.cpp +++ b/src/Functions/serverConstants.cpp @@ -168,7 +168,7 @@ Can be changed with SET timezone = 'New/Tz' [example:timezone] )", - Documentation::Examples{{"serverTimezone", "SELECT timezone();"}}, + Documentation::Examples{{"timezone", "SELECT timezone();"}}, Documentation::Categories{"Constant", "Miscellaneous"} }); factory.registerAlias("timeZone", "timezone"); diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index f8931a7f622..9f5358ee141 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -1032,22 +1032,22 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re return ReturnType(is_ok); } -inline void readDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline void readDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance("")) { readDateTimeTextImpl(datetime, buf, time_zone); } -inline void readDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) +inline void readDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance("")) { readDateTimeTextImpl(datetime64, scale, buf, date_lut); } -inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance("")) { return readDateTimeTextImpl(datetime, buf, time_zone); } -inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) +inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance("")) { return readDateTimeTextImpl(datetime64, scale, buf, date_lut); } diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index 1c0b48c53c3..d408e2bed42 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -755,14 
+755,14 @@ inline void writeDateTimeText(const LocalDateTime & datetime, WriteBuffer & buf) /// In the format YYYY-MM-DD HH:MM:SS, according to the specified time zone. template -inline void writeDateTimeText(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline void writeDateTimeText(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance("")) { writeDateTimeText(LocalDateTime(datetime, time_zone), buf); } /// In the format YYYY-MM-DD HH:MM:SS.NNNNNNNNN, according to the specified time zone. template -inline void writeDateTimeText(DateTime64 datetime64, UInt32 scale, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline void writeDateTimeText(DateTime64 datetime64, UInt32 scale, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance("")) { static constexpr UInt32 MaxScale = DecimalUtils::max_precision; scale = scale > MaxScale ? MaxScale : scale; @@ -796,7 +796,7 @@ inline void writeDateTimeText(DateTime64 datetime64, UInt32 scale, WriteBuffer & /// In the RFC 1123 format: "Tue, 03 Dec 2019 00:11:50 GMT". You must provide GMT DateLUT. /// This is needed for HTTP requests. -inline void writeDateTimeTextRFC1123(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline void writeDateTimeTextRFC1123(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance("")) { const auto & values = time_zone.getValues(datetime); diff --git a/tests/queries/0_stateless/02668_timezone_setting.sql b/tests/queries/0_stateless/02668_timezone_setting.sql index 3748b536614..f331ab58307 100644 --- a/tests/queries/0_stateless/02668_timezone_setting.sql +++ b/tests/queries/0_stateless/02668_timezone_setting.sql @@ -1,9 +1,9 @@ SET timezone = 'Asia/Novosibirsk'; - SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich'); - SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS timezone = 'Europe/Zurich'; SET timezone = 'Europe/Zurich'; +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); -SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); \ No newline at end of file +SET timezone = 'Абырвалг'; +select now(); -- { serverError POCO_EXCEPTION } \ No newline at end of file From 6a996f552b9cf70f88d2a6c7c8f1ef2780268666 Mon Sep 17 00:00:00 2001 From: zvonand Date: Sun, 26 Feb 2023 10:06:27 +0100 Subject: [PATCH 009/722] update undocumented funcs reference --- .../02415_all_new_functions_must_be_documented.reference | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference index e41249af54c..ce14ee871f5 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -683,7 +683,6 @@ throwIf tid timeSlot timeSlots -timezone timezoneOf timezoneOffset toBool From a69425326de20dcf5814c39a0962023cabec27ec Mon Sep 17 00:00:00 2001 From: zvonand Date: Mon, 27 Feb 2023 01:40:00 +0100 Subject: [PATCH 010/722] upd --- src/Client/ClientBase.cpp | 2 +- src/Common/DateLUT.h | 59 +++++++++++++++---------------- src/DataTypes/TimezoneMixin.h | 2 +- src/Functions/serverConstants.cpp | 4 +-- src/IO/ReadHelpers.h | 8 ++--- src/IO/WriteHelpers.h | 6 ++-- 6 files 
changed, 39 insertions(+), 42 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index a335dca0602..96aff9aa304 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -2139,7 +2139,7 @@ void ClientBase::runInteractive() initQueryIdFormats(); /// Initialize DateLUT here to avoid counting time spent here as query execution time. - const auto local_tz = DateLUT::instance("").getTimeZone(); + const auto local_tz = DateLUT::instance().getTimeZone(); suggest.emplace(); if (load_suggestions) diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index 29a4ee13d87..efbf56b59b2 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -17,49 +17,46 @@ class DateLUT : private boost::noncopyable { public: - /// Return singleton DateLUTImpl instance for server's timezone (the one which server has). + /// Return singleton DateLUTImpl instance for timezone set by `timezone` setting for current session is used. + /// If it is not set, server's timezone (the one which server has) is being used. static ALWAYS_INLINE const DateLUTImpl & instance() { + std::string effective_time_zone; const auto & date_lut = getInstance(); + + if (DB::CurrentThread::isInitialized()) + { + const auto query_context = DB::CurrentThread::get().getQueryContext(); + + if (query_context) + { + effective_time_zone = extractTimezoneFromContext(query_context); + + if (!effective_time_zone.empty()) + return date_lut.getImplementation(effective_time_zone); + } + + const auto global_context = DB::CurrentThread::get().getGlobalContext(); + if (global_context) + { + effective_time_zone = extractTimezoneFromContext(global_context); + + if (!effective_time_zone.empty()) + return date_lut.getImplementation(effective_time_zone); + } + + } return *date_lut.default_impl.load(std::memory_order_acquire); } - /* - Return singleton DateLUTImpl instance for a given time zone. If timezone is an empty string, - timezone set by `timezone` setting for current session is used. If it is not set, server's timezone is used, - and return is the same as calling instance(). - */ + /// Return singleton DateLUTImpl instance for a given time zone. If timezone is an empty string, + /// server's timezone is used. The `timezone` setting is not considered here. 
static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone) { const auto & date_lut = getInstance(); - std::string effective_time_zone; if (time_zone.empty()) - { - if (DB::CurrentThread::isInitialized()) - { - const auto query_context = DB::CurrentThread::get().getQueryContext(); - - if (query_context) - { - effective_time_zone = extractTimezoneFromContext(query_context); - - if (!effective_time_zone.empty()) - return date_lut.getImplementation(effective_time_zone); - } - - const auto global_context = DB::CurrentThread::get().getGlobalContext(); - if (global_context) - { - effective_time_zone = extractTimezoneFromContext(global_context); - - if (!effective_time_zone.empty()) - return date_lut.getImplementation(effective_time_zone); - } - - } return *date_lut.default_impl.load(std::memory_order_acquire); - } return date_lut.getImplementation(time_zone); } diff --git a/src/DataTypes/TimezoneMixin.h b/src/DataTypes/TimezoneMixin.h index 03ecde5dd0a..5b7870c7b9a 100644 --- a/src/DataTypes/TimezoneMixin.h +++ b/src/DataTypes/TimezoneMixin.h @@ -15,7 +15,7 @@ public: explicit TimezoneMixin(const String & time_zone_name = "") : has_explicit_time_zone(!time_zone_name.empty()) - , time_zone(DateLUT::instance(time_zone_name)) + , time_zone(time_zone_name.empty() ? DateLUT::instance() : DateLUT::instance(time_zone_name)) , utc_time_zone(DateLUT::instance("UTC")) { } diff --git a/src/Functions/serverConstants.cpp b/src/Functions/serverConstants.cpp index ea74d7a89bb..57a6279bd7a 100644 --- a/src/Functions/serverConstants.cpp +++ b/src/Functions/serverConstants.cpp @@ -66,7 +66,7 @@ namespace public: static constexpr auto name = "timezone"; static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } - explicit FunctionTimezone(ContextPtr context) : FunctionConstantBase(String{DateLUT::instance("").getTimeZone()}, context->isDistributed()) {} + explicit FunctionTimezone(ContextPtr context) : FunctionConstantBase(String{DateLUT::instance().getTimeZone()}, context->isDistributed()) {} }; /// Returns the server time zone (timezone in which server runs). 
@@ -75,7 +75,7 @@ namespace public: static constexpr auto name = "serverTimezone"; static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } - explicit FunctionServerTimezone(ContextPtr context) : FunctionConstantBase(String{DateLUT::instance().getTimeZone()}, context->isDistributed()) {} + explicit FunctionServerTimezone(ContextPtr context) : FunctionConstantBase(String{DateLUT::instance("").getTimeZone()}, context->isDistributed()) {} }; diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 9f5358ee141..f8931a7f622 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -1032,22 +1032,22 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re return ReturnType(is_ok); } -inline void readDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance("")) +inline void readDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { readDateTimeTextImpl(datetime, buf, time_zone); } -inline void readDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance("")) +inline void readDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) { readDateTimeTextImpl(datetime64, scale, buf, date_lut); } -inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance("")) +inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { return readDateTimeTextImpl(datetime, buf, time_zone); } -inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance("")) +inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) { return readDateTimeTextImpl(datetime64, scale, buf, date_lut); } diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index d408e2bed42..1c0b48c53c3 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -755,14 +755,14 @@ inline void writeDateTimeText(const LocalDateTime & datetime, WriteBuffer & buf) /// In the format YYYY-MM-DD HH:MM:SS, according to the specified time zone. template -inline void writeDateTimeText(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance("")) +inline void writeDateTimeText(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { writeDateTimeText(LocalDateTime(datetime, time_zone), buf); } /// In the format YYYY-MM-DD HH:MM:SS.NNNNNNNNN, according to the specified time zone. template -inline void writeDateTimeText(DateTime64 datetime64, UInt32 scale, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance("")) +inline void writeDateTimeText(DateTime64 datetime64, UInt32 scale, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { static constexpr UInt32 MaxScale = DecimalUtils::max_precision; scale = scale > MaxScale ? MaxScale : scale; @@ -796,7 +796,7 @@ inline void writeDateTimeText(DateTime64 datetime64, UInt32 scale, WriteBuffer & /// In the RFC 1123 format: "Tue, 03 Dec 2019 00:11:50 GMT". You must provide GMT DateLUT. /// This is needed for HTTP requests. 
-inline void writeDateTimeTextRFC1123(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance("")) +inline void writeDateTimeTextRFC1123(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { const auto & values = time_zone.getValues(datetime); From f3e19144d81449c1e2bdec52ebc38e85ea1e8ee9 Mon Sep 17 00:00:00 2001 From: zvonand Date: Mon, 27 Feb 2023 14:38:15 +0100 Subject: [PATCH 011/722] update --- src/Interpreters/executeQuery.cpp | 3 ++- src/Loggers/OwnPatternFormatter.cpp | 2 +- src/Loggers/OwnPatternFormatter.h | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 435401796a0..cda7ec2b0d3 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -1279,7 +1280,7 @@ void executeQuery( QueryResultDetails result_details { .query_id = context->getClientInfo().current_query_id, - .timezone = DateLUT::instance().getTimeZone(), + .timezone = DateLUT::instance("").getTimeZone(), }; std::unique_ptr compressed_buffer; diff --git a/src/Loggers/OwnPatternFormatter.cpp b/src/Loggers/OwnPatternFormatter.cpp index 02a2c2e510b..54d2b995d15 100644 --- a/src/Loggers/OwnPatternFormatter.cpp +++ b/src/Loggers/OwnPatternFormatter.cpp @@ -22,7 +22,7 @@ void OwnPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ext, const Poco::Message & msg = msg_ext.base; /// Change delimiters in date for compatibility with old logs. - DB::writeDateTimeText<'.', ':'>(msg_ext.time_seconds, wb); + DB::writeDateTimeText<'.', ':'>(msg_ext.time_seconds, wb, server_timezone); DB::writeChar('.', wb); DB::writeChar('0' + ((msg_ext.time_microseconds / 100000) % 10), wb); diff --git a/src/Loggers/OwnPatternFormatter.h b/src/Loggers/OwnPatternFormatter.h index d776b097cb2..07d0409b0ae 100644 --- a/src/Loggers/OwnPatternFormatter.h +++ b/src/Loggers/OwnPatternFormatter.h @@ -2,6 +2,7 @@ #include +#include #include "ExtendedLogChannel.h" @@ -30,5 +31,6 @@ public: virtual void formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) const; private: + const DateLUTImpl & server_timezone = DateLUT::instance(""); bool color; }; From f2fbf2d61e8ede663ba37065d8ea8fe9b430de3e Mon Sep 17 00:00:00 2001 From: zvonand Date: Mon, 6 Mar 2023 02:52:05 +0100 Subject: [PATCH 012/722] tcp protocol modification (min revision to be updated) --- src/Client/ClientBase.cpp | 4 ++++ src/Client/Connection.cpp | 5 +++++ src/Client/IServerConnection.h | 2 ++ src/Client/MultiplexedConnections.cpp | 2 ++ src/Client/Suggest.cpp | 1 + src/Common/DateLUT.h | 1 + src/Core/Protocol.h | 4 +++- src/Core/ProtocolDefines.h | 2 ++ src/Interpreters/executeQuery.cpp | 2 +- src/Server/TCPHandler.cpp | 12 ++++++++++++ src/Server/TCPHandler.h | 1 + 11 files changed, 34 insertions(+), 2 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 96aff9aa304..65d04a6bb9d 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1020,6 +1020,10 @@ bool ClientBase::receiveAndProcessPacket(ASTPtr parsed_query, bool cancelled_) onProfileEvents(packet.block); return true; + case Protocol::Server::TimezoneUpdate: + DateLUT::setDefaultTimezone(packet.server_timezone); + return true; + default: throw Exception( ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}", packet.type, connection->getDescription()); diff --git 
a/src/Client/Connection.cpp b/src/Client/Connection.cpp index eea007a8608..87e9e20e8f7 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -972,6 +972,11 @@ Packet Connection::receivePacket() res.block = receiveProfileEvents(); return res; + case Protocol::Server::TimezoneUpdate: + readStringBinary(server_timezone, *in); + res.server_timezone = server_timezone; + return res; + default: /// In unknown state, disconnect - to not leave unsynchronised connection. disconnect(); diff --git a/src/Client/IServerConnection.h b/src/Client/IServerConnection.h index cd4db8f5258..52382ff9d45 100644 --- a/src/Client/IServerConnection.h +++ b/src/Client/IServerConnection.h @@ -38,6 +38,8 @@ struct Packet ParallelReadRequest request; ParallelReadResponse response; + std::string server_timezone; + Packet() : type(Protocol::Server::Hello) {} }; diff --git a/src/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp index cc260353339..668833b2a84 100644 --- a/src/Client/MultiplexedConnections.cpp +++ b/src/Client/MultiplexedConnections.cpp @@ -258,6 +258,7 @@ Packet MultiplexedConnections::drain() switch (packet.type) { + case Protocol::Server::TimezoneUpdate: case Protocol::Server::MergeTreeAllRangesAnnounecement: case Protocol::Server::MergeTreeReadTaskRequest: case Protocol::Server::ReadTaskRequest: @@ -339,6 +340,7 @@ Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callbac switch (packet.type) { + case Protocol::Server::TimezoneUpdate: case Protocol::Server::MergeTreeAllRangesAnnounecement: case Protocol::Server::MergeTreeReadTaskRequest: case Protocol::Server::ReadTaskRequest: diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index 7027f35d21a..4a29bead540 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -158,6 +158,7 @@ void Suggest::fetch(IServerConnection & connection, const ConnectionTimeouts & t fillWordsFromBlock(packet.block); continue; + case Protocol::Server::TimezoneUpdate: case Protocol::Server::Progress: case Protocol::Server::ProfileInfo: case Protocol::Server::Totals: diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index efbf56b59b2..f17fe772dbc 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -60,6 +60,7 @@ public: return date_lut.getImplementation(time_zone); } + static void setDefaultTimezone(const std::string & time_zone) { auto & date_lut = getInstance(); diff --git a/src/Core/Protocol.h b/src/Core/Protocol.h index 86c0a851c60..97a2831ffe8 100644 --- a/src/Core/Protocol.h +++ b/src/Core/Protocol.h @@ -83,7 +83,8 @@ namespace Protocol ProfileEvents = 14, /// Packet with profile events from server. MergeTreeAllRangesAnnounecement = 15, MergeTreeReadTaskRequest = 16, /// Request from a MergeTree replica to a coordinator - MAX = MergeTreeReadTaskRequest, + TimezoneUpdate = 17, /// Receive server's (session-wide) default timezone + MAX = TimezoneUpdate, }; @@ -111,6 +112,7 @@ namespace Protocol "ProfileEvents", "MergeTreeAllRangesAnnounecement", "MergeTreeReadTaskRequest", + "TimezoneUpdate", }; return packet <= MAX ? 
data[packet] diff --git a/src/Core/ProtocolDefines.h b/src/Core/ProtocolDefines.h index 3bbfb95f020..e56ae0305cc 100644 --- a/src/Core/ProtocolDefines.h +++ b/src/Core/ProtocolDefines.h @@ -72,3 +72,5 @@ #define DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRESS 54460 #define DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES 54461 + +#define DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES 54461 diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index cda7ec2b0d3..85e623dc17d 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -1280,7 +1280,7 @@ void executeQuery( QueryResultDetails result_details { .query_id = context->getClientInfo().current_query_id, - .timezone = DateLUT::instance("").getTimeZone(), + .timezone = DateLUT::instance().getTimeZone(), }; std::unique_ptr compressed_buffer; diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index a307b472a64..9bb11f34916 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -446,6 +446,7 @@ void TCPHandler::runImpl() sendSelectProfileEvents(); sendLogs(); + return false; }; @@ -483,6 +484,9 @@ void TCPHandler::runImpl() { std::lock_guard lock(task_callback_mutex); sendLogs(); + if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES + && client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) + sendTimezone(); sendEndOfStream(); } @@ -1035,6 +1039,14 @@ void TCPHandler::sendInsertProfileEvents() sendProfileEvents(); } +void TCPHandler::sendTimezone() +{ + writeVarUInt(Protocol::Server::TimezoneUpdate, *out); + writeStringBinary(DateLUT::instance().getTimeZone(), *out); + out->next(); +} + + bool TCPHandler::receiveProxyHeader() { if (in->eof()) diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index f06b0b060b3..b19f908bc27 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -262,6 +262,7 @@ private: void sendProfileEvents(); void sendSelectProfileEvents(); void sendInsertProfileEvents(); + void sendTimezone(); /// Creates state.block_in/block_out for blocks read/write, depending on whether compression is enabled. 
void initBlockInput(); From 0706108b683ab5d67885b81a16b24a76c4d59513 Mon Sep 17 00:00:00 2001 From: zvonand Date: Mon, 6 Mar 2023 11:16:53 +0100 Subject: [PATCH 013/722] typo fix --- src/Interpreters/executeQuery.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 85e623dc17d..435401796a0 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -49,7 +49,6 @@ #include #include #include -#include #include #include #include From 57c5a637217779fbcc999cbaa5bd965f8892d092 Mon Sep 17 00:00:00 2001 From: zvonand Date: Mon, 6 Mar 2023 16:39:47 +0100 Subject: [PATCH 014/722] fix receive of timezone update on processing --- src/Client/ClientBase.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 65d04a6bb9d..7ca6bbed6ba 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1188,6 +1188,10 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de columns_description = ColumnsDescription::parse(packet.multistring_message[1]); return receiveSampleBlock(out, columns_description, parsed_query); + case Protocol::Server::TimezoneUpdate: + DateLUT::setDefaultTimezone(packet.server_timezone); + break; + default: throw NetException(ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER, "Unexpected packet from server (expected Data, Exception or Log, got {})", @@ -1533,6 +1537,10 @@ bool ClientBase::receiveEndOfQuery() onProfileEvents(packet.block); break; + case Protocol::Server::TimezoneUpdate: + DateLUT::setDefaultTimezone(packet.server_timezone); + break; + default: throw NetException(ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER, "Unexpected packet from server (expected Exception, EndOfStream, Log, Progress or ProfileEvents. Got {})", From d93937cc5e92ae4612259e9e57bca15489aabc8f Mon Sep 17 00:00:00 2001 From: zvonand Date: Mon, 6 Mar 2023 16:45:38 +0100 Subject: [PATCH 015/722] increment protocol version --- src/Core/ProtocolDefines.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Core/ProtocolDefines.h b/src/Core/ProtocolDefines.h index e56ae0305cc..5483489d5c4 100644 --- a/src/Core/ProtocolDefines.h +++ b/src/Core/ProtocolDefines.h @@ -54,7 +54,7 @@ /// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION, /// later is just a number for server version (one number instead of commit SHA) /// for simplicity (sometimes it may be more convenient in some use cases). 
-#define DBMS_TCP_PROTOCOL_VERSION 54461 +#define DBMS_TCP_PROTOCOL_VERSION 54462 #define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449 @@ -73,4 +73,4 @@ #define DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES 54461 -#define DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES 54461 +#define DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES 54462 From 3a918ae66a984451e0db0f56ffa6232b897ad62f Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 7 Mar 2023 02:33:46 +0100 Subject: [PATCH 016/722] revert protocol changes, found better way --- src/Client/ClientBase.cpp | 25 ++++++++++++------------- src/Client/Connection.cpp | 5 ----- src/Client/MultiplexedConnections.cpp | 2 -- src/Client/Suggest.cpp | 1 - src/Core/Protocol.h | 4 +--- src/Core/ProtocolDefines.h | 4 +--- src/Server/TCPHandler.cpp | 10 ---------- src/Server/TCPHandler.h | 1 - 8 files changed, 14 insertions(+), 38 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 7ca6bbed6ba..09c510f01f3 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1020,10 +1020,6 @@ bool ClientBase::receiveAndProcessPacket(ASTPtr parsed_query, bool cancelled_) onProfileEvents(packet.block); return true; - case Protocol::Server::TimezoneUpdate: - DateLUT::setDefaultTimezone(packet.server_timezone); - return true; - default: throw Exception( ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}", packet.type, connection->getDescription()); @@ -1188,10 +1184,6 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de columns_description = ColumnsDescription::parse(packet.multistring_message[1]); return receiveSampleBlock(out, columns_description, parsed_query); - case Protocol::Server::TimezoneUpdate: - DateLUT::setDefaultTimezone(packet.server_timezone); - break; - default: throw NetException(ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER, "Unexpected packet from server (expected Data, Exception or Log, got {})", @@ -1500,7 +1492,7 @@ void ClientBase::receiveLogsAndProfileEvents(ASTPtr parsed_query) { auto packet_type = connection->checkPacket(0); - while (packet_type && (*packet_type == Protocol::Server::Log || *packet_type == Protocol::Server::ProfileEvents)) + while (packet_type && (*packet_type == Protocol::Server::Log || *packet_type == Protocol::Server::ProfileEvents )) { receiveAndProcessPacket(parsed_query, false); packet_type = connection->checkPacket(0); @@ -1537,10 +1529,6 @@ bool ClientBase::receiveEndOfQuery() onProfileEvents(packet.block); break; - case Protocol::Server::TimezoneUpdate: - DateLUT::setDefaultTimezone(packet.server_timezone); - break; - default: throw NetException(ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER, "Unexpected packet from server (expected Exception, EndOfStream, Log, Progress or ProfileEvents. Got {})", @@ -1611,6 +1599,8 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin progress_indication.resetProgress(); profile_events.watch.restart(); + const std::string old_timezone = DateLUT::instance().getTimeZone(); + { /// Temporarily apply query settings to context. 
std::optional old_settings; @@ -1659,6 +1649,9 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin bool is_async_insert = global_context->getSettingsRef().async_insert && insert && insert->hasInlinedData(); + if (!global_context->getSettingsRef().timezone.toString().empty()) + DateLUT::setDefaultTimezone(global_context->getSettingsRef().timezone); + /// INSERT query for which data transfer is needed (not an INSERT SELECT or input()) is processed separately. if (insert && (!insert->select || input_function) && !insert->watch && !is_async_insert) { @@ -1693,6 +1686,10 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin query_parameters.insert_or_assign(name, value); global_context->addQueryParameters(set_query->query_parameters); + + if (!global_context->getSettingsRef().timezone.toString().empty()) + DateLUT::setDefaultTimezone(global_context->getSettingsRef().timezone); + } if (const auto * use_query = parsed_query->as()) { @@ -1703,6 +1700,8 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin connection->setDefaultDatabase(new_database); } } + else + DateLUT::setDefaultTimezone(old_timezone); /// Always print last block (if it was not printed already) if (profile_events.last_block) diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 87e9e20e8f7..eea007a8608 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -972,11 +972,6 @@ Packet Connection::receivePacket() res.block = receiveProfileEvents(); return res; - case Protocol::Server::TimezoneUpdate: - readStringBinary(server_timezone, *in); - res.server_timezone = server_timezone; - return res; - default: /// In unknown state, disconnect - to not leave unsynchronised connection. disconnect(); diff --git a/src/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp index 668833b2a84..cc260353339 100644 --- a/src/Client/MultiplexedConnections.cpp +++ b/src/Client/MultiplexedConnections.cpp @@ -258,7 +258,6 @@ Packet MultiplexedConnections::drain() switch (packet.type) { - case Protocol::Server::TimezoneUpdate: case Protocol::Server::MergeTreeAllRangesAnnounecement: case Protocol::Server::MergeTreeReadTaskRequest: case Protocol::Server::ReadTaskRequest: @@ -340,7 +339,6 @@ Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callbac switch (packet.type) { - case Protocol::Server::TimezoneUpdate: case Protocol::Server::MergeTreeAllRangesAnnounecement: case Protocol::Server::MergeTreeReadTaskRequest: case Protocol::Server::ReadTaskRequest: diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index 4a29bead540..7027f35d21a 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -158,7 +158,6 @@ void Suggest::fetch(IServerConnection & connection, const ConnectionTimeouts & t fillWordsFromBlock(packet.block); continue; - case Protocol::Server::TimezoneUpdate: case Protocol::Server::Progress: case Protocol::Server::ProfileInfo: case Protocol::Server::Totals: diff --git a/src/Core/Protocol.h b/src/Core/Protocol.h index 97a2831ffe8..86c0a851c60 100644 --- a/src/Core/Protocol.h +++ b/src/Core/Protocol.h @@ -83,8 +83,7 @@ namespace Protocol ProfileEvents = 14, /// Packet with profile events from server. 
MergeTreeAllRangesAnnounecement = 15, MergeTreeReadTaskRequest = 16, /// Request from a MergeTree replica to a coordinator - TimezoneUpdate = 17, /// Receive server's (session-wide) default timezone - MAX = TimezoneUpdate, + MAX = MergeTreeReadTaskRequest, }; @@ -112,7 +111,6 @@ namespace Protocol "ProfileEvents", "MergeTreeAllRangesAnnounecement", "MergeTreeReadTaskRequest", - "TimezoneUpdate", }; return packet <= MAX ? data[packet] diff --git a/src/Core/ProtocolDefines.h b/src/Core/ProtocolDefines.h index 5483489d5c4..3bbfb95f020 100644 --- a/src/Core/ProtocolDefines.h +++ b/src/Core/ProtocolDefines.h @@ -54,7 +54,7 @@ /// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION, /// later is just a number for server version (one number instead of commit SHA) /// for simplicity (sometimes it may be more convenient in some use cases). -#define DBMS_TCP_PROTOCOL_VERSION 54462 +#define DBMS_TCP_PROTOCOL_VERSION 54461 #define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449 @@ -72,5 +72,3 @@ #define DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRESS 54460 #define DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES 54461 - -#define DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES 54462 diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 9bb11f34916..617b084a149 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -484,9 +484,6 @@ void TCPHandler::runImpl() { std::lock_guard lock(task_callback_mutex); sendLogs(); - if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES - && client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) - sendTimezone(); sendEndOfStream(); } @@ -1039,13 +1036,6 @@ void TCPHandler::sendInsertProfileEvents() sendProfileEvents(); } -void TCPHandler::sendTimezone() -{ - writeVarUInt(Protocol::Server::TimezoneUpdate, *out); - writeStringBinary(DateLUT::instance().getTimeZone(), *out); - out->next(); -} - bool TCPHandler::receiveProxyHeader() { diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index b19f908bc27..f06b0b060b3 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -262,7 +262,6 @@ private: void sendProfileEvents(); void sendSelectProfileEvents(); void sendInsertProfileEvents(); - void sendTimezone(); /// Creates state.block_in/block_out for blocks read/write, depending on whether compression is enabled. 
void initBlockInput(); From c859478db3a3964c49457e49bab62bdf975bed7f Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 7 Mar 2023 02:36:02 +0100 Subject: [PATCH 017/722] upd --- src/Client/IServerConnection.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Client/IServerConnection.h b/src/Client/IServerConnection.h index 52382ff9d45..cd4db8f5258 100644 --- a/src/Client/IServerConnection.h +++ b/src/Client/IServerConnection.h @@ -38,8 +38,6 @@ struct Packet ParallelReadRequest request; ParallelReadResponse response; - std::string server_timezone; - Packet() : type(Protocol::Server::Hello) {} }; From 5e7a861e688dea04ecfe9c54d30c642f65a28569 Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 7 Mar 2023 02:45:47 +0100 Subject: [PATCH 018/722] fix --- src/Server/TCPHandler.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 617b084a149..a307b472a64 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -446,7 +446,6 @@ void TCPHandler::runImpl() sendSelectProfileEvents(); sendLogs(); - return false; }; @@ -1036,7 +1035,6 @@ void TCPHandler::sendInsertProfileEvents() sendProfileEvents(); } - bool TCPHandler::receiveProxyHeader() { if (in->eof()) From a7a3c9d1a675743e776fde32c96ccd9bbfc94e46 Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 7 Mar 2023 02:52:42 +0100 Subject: [PATCH 019/722] fix style --- src/Client/ClientBase.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 09c510f01f3..25442c89f99 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1492,7 +1492,7 @@ void ClientBase::receiveLogsAndProfileEvents(ASTPtr parsed_query) { auto packet_type = connection->checkPacket(0); - while (packet_type && (*packet_type == Protocol::Server::Log || *packet_type == Protocol::Server::ProfileEvents )) + while (packet_type && (*packet_type == Protocol::Server::Log || *packet_type == Protocol::Server::ProfileEvents)) { receiveAndProcessPacket(parsed_query, false); packet_type = connection->checkPacket(0); From e92501d5dd7a9f3a77ad38f8750432a2286e9f0b Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 7 Mar 2023 13:02:02 +0100 Subject: [PATCH 020/722] update tests + exception --- src/Client/ClientBase.cpp | 28 +++++++++++++++++-- ...rence => 02674_timezone_setting.reference} | 0 ...setting.sql => 02674_timezone_setting.sql} | 4 +-- 3 files changed, 27 insertions(+), 5 deletions(-) rename tests/queries/0_stateless/{02668_timezone_setting.reference => 02674_timezone_setting.reference} (100%) rename tests/queries/0_stateless/{02668_timezone_setting.sql => 02674_timezone_setting.sql} (73%) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 25442c89f99..13f28806066 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -107,6 +107,7 @@ namespace ErrorCodes extern const int UNRECOGNIZED_ARGUMENTS; extern const int LOGICAL_ERROR; extern const int CANNOT_OPEN_FILE; + extern const int CANNOT_PARSE_DATETIME; } } @@ -1599,6 +1600,9 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin progress_indication.resetProgress(); profile_events.watch.restart(); + /// A query may contain timezone setting. To handle this, old client-wide tz is saved here. + /// If timezone was set for a query, after its execution client tz will be back to old one. + /// If it was a settings query, new setting will be applied to client. 
const std::string old_timezone = DateLUT::instance().getTimeZone(); { @@ -1649,8 +1653,18 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin bool is_async_insert = global_context->getSettingsRef().async_insert && insert && insert->hasInlinedData(); - if (!global_context->getSettingsRef().timezone.toString().empty()) - DateLUT::setDefaultTimezone(global_context->getSettingsRef().timezone); + /// pre-load timezone from (query) settings -- new timezone may also be specified in query. + try + { + if (!global_context->getSettingsRef().timezone.toString().empty()) + DateLUT::setDefaultTimezone(global_context->getSettingsRef().timezone); + } + catch (Poco::Exception &) + { + throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, + "Invalid time zone {} in client settings. Use `SET timezone = \'New/TZ\'` to set a proper timezone.", + global_context->getSettingsRef().timezone.toString()); + } /// INSERT query for which data transfer is needed (not an INSERT SELECT or input()) is processed separately. if (insert && (!insert->select || input_function) && !insert->watch && !is_async_insert) @@ -1687,9 +1701,17 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin global_context->addQueryParameters(set_query->query_parameters); + try + { if (!global_context->getSettingsRef().timezone.toString().empty()) DateLUT::setDefaultTimezone(global_context->getSettingsRef().timezone); - + } + catch (Poco::Exception &) + { + throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, + "Invalid time zone {} in client settings. Use `SET timezone = \'New/TZ\'` to set a proper timezone.", + global_context->getSettingsRef().timezone.toString()); + } } if (const auto * use_query = parsed_query->as()) { diff --git a/tests/queries/0_stateless/02668_timezone_setting.reference b/tests/queries/0_stateless/02674_timezone_setting.reference similarity index 100% rename from tests/queries/0_stateless/02668_timezone_setting.reference rename to tests/queries/0_stateless/02674_timezone_setting.reference diff --git a/tests/queries/0_stateless/02668_timezone_setting.sql b/tests/queries/0_stateless/02674_timezone_setting.sql similarity index 73% rename from tests/queries/0_stateless/02668_timezone_setting.sql rename to tests/queries/0_stateless/02674_timezone_setting.sql index f331ab58307..51820fc2dca 100644 --- a/tests/queries/0_stateless/02668_timezone_setting.sql +++ b/tests/queries/0_stateless/02674_timezone_setting.sql @@ -5,5 +5,5 @@ SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zuric SET timezone = 'Europe/Zurich'; SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); -SET timezone = 'Абырвалг'; -select now(); -- { serverError POCO_EXCEPTION } \ No newline at end of file +SET timezone = 'Абырвалг'; -- { clientError CANNOT_PARSE_DATETIME } +select now(); -- { clientError CANNOT_PARSE_DATETIME } \ No newline at end of file From 1fd6e3f23b41dac6fde5b238e1a5da11a976b5ae Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 7 Mar 2023 16:02:30 +0100 Subject: [PATCH 021/722] Revert "fix style" This reverts commit a7a3c9d1a675743e776fde32c96ccd9bbfc94e46. 
--- src/Client/ClientBase.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 13f28806066..cfef1a5d3fe 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1493,7 +1493,7 @@ void ClientBase::receiveLogsAndProfileEvents(ASTPtr parsed_query) { auto packet_type = connection->checkPacket(0); - while (packet_type && (*packet_type == Protocol::Server::Log || *packet_type == Protocol::Server::ProfileEvents)) + while (packet_type && (*packet_type == Protocol::Server::Log || *packet_type == Protocol::Server::ProfileEvents )) { receiveAndProcessPacket(parsed_query, false); packet_type = connection->checkPacket(0); From 1ce697d8c06ce7f44e078f9b8809dcaa3e3ba8f8 Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 7 Mar 2023 16:05:23 +0100 Subject: [PATCH 022/722] Revert "revert protocol changes, found better way" This reverts commit 3a918ae66a984451e0db0f56ffa6232b897ad62f. --- src/Client/ClientBase.cpp | 47 +++++-------------- src/Client/Connection.cpp | 5 ++ src/Client/IServerConnection.h | 2 + src/Client/MultiplexedConnections.cpp | 2 + src/Client/Suggest.cpp | 1 + src/Core/Protocol.h | 4 +- src/Core/ProtocolDefines.h | 4 +- src/Server/TCPHandler.cpp | 12 +++++ src/Server/TCPHandler.h | 1 + ...rence => 02668_timezone_setting.reference} | 0 ...setting.sql => 02668_timezone_setting.sql} | 4 +- 11 files changed, 44 insertions(+), 38 deletions(-) rename tests/queries/0_stateless/{02674_timezone_setting.reference => 02668_timezone_setting.reference} (100%) rename tests/queries/0_stateless/{02674_timezone_setting.sql => 02668_timezone_setting.sql} (73%) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index cfef1a5d3fe..7ca6bbed6ba 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -107,7 +107,6 @@ namespace ErrorCodes extern const int UNRECOGNIZED_ARGUMENTS; extern const int LOGICAL_ERROR; extern const int CANNOT_OPEN_FILE; - extern const int CANNOT_PARSE_DATETIME; } } @@ -1021,6 +1020,10 @@ bool ClientBase::receiveAndProcessPacket(ASTPtr parsed_query, bool cancelled_) onProfileEvents(packet.block); return true; + case Protocol::Server::TimezoneUpdate: + DateLUT::setDefaultTimezone(packet.server_timezone); + return true; + default: throw Exception( ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}", packet.type, connection->getDescription()); @@ -1185,6 +1188,10 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de columns_description = ColumnsDescription::parse(packet.multistring_message[1]); return receiveSampleBlock(out, columns_description, parsed_query); + case Protocol::Server::TimezoneUpdate: + DateLUT::setDefaultTimezone(packet.server_timezone); + break; + default: throw NetException(ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER, "Unexpected packet from server (expected Data, Exception or Log, got {})", @@ -1493,7 +1500,7 @@ void ClientBase::receiveLogsAndProfileEvents(ASTPtr parsed_query) { auto packet_type = connection->checkPacket(0); - while (packet_type && (*packet_type == Protocol::Server::Log || *packet_type == Protocol::Server::ProfileEvents )) + while (packet_type && (*packet_type == Protocol::Server::Log || *packet_type == Protocol::Server::ProfileEvents)) { receiveAndProcessPacket(parsed_query, false); packet_type = connection->checkPacket(0); @@ -1530,6 +1537,10 @@ bool ClientBase::receiveEndOfQuery() onProfileEvents(packet.block); break; + case Protocol::Server::TimezoneUpdate: + 
DateLUT::setDefaultTimezone(packet.server_timezone); + break; + default: throw NetException(ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER, "Unexpected packet from server (expected Exception, EndOfStream, Log, Progress or ProfileEvents. Got {})", @@ -1600,11 +1611,6 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin progress_indication.resetProgress(); profile_events.watch.restart(); - /// A query may contain timezone setting. To handle this, old client-wide tz is saved here. - /// If timezone was set for a query, after its execution client tz will be back to old one. - /// If it was a settings query, new setting will be applied to client. - const std::string old_timezone = DateLUT::instance().getTimeZone(); - { /// Temporarily apply query settings to context. std::optional old_settings; @@ -1653,19 +1659,6 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin bool is_async_insert = global_context->getSettingsRef().async_insert && insert && insert->hasInlinedData(); - /// pre-load timezone from (query) settings -- new timezone may also be specified in query. - try - { - if (!global_context->getSettingsRef().timezone.toString().empty()) - DateLUT::setDefaultTimezone(global_context->getSettingsRef().timezone); - } - catch (Poco::Exception &) - { - throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, - "Invalid time zone {} in client settings. Use `SET timezone = \'New/TZ\'` to set a proper timezone.", - global_context->getSettingsRef().timezone.toString()); - } - /// INSERT query for which data transfer is needed (not an INSERT SELECT or input()) is processed separately. if (insert && (!insert->select || input_function) && !insert->watch && !is_async_insert) { @@ -1700,18 +1693,6 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin query_parameters.insert_or_assign(name, value); global_context->addQueryParameters(set_query->query_parameters); - - try - { - if (!global_context->getSettingsRef().timezone.toString().empty()) - DateLUT::setDefaultTimezone(global_context->getSettingsRef().timezone); - } - catch (Poco::Exception &) - { - throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, - "Invalid time zone {} in client settings. Use `SET timezone = \'New/TZ\'` to set a proper timezone.", - global_context->getSettingsRef().timezone.toString()); - } } if (const auto * use_query = parsed_query->as()) { @@ -1722,8 +1703,6 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin connection->setDefaultDatabase(new_database); } } - else - DateLUT::setDefaultTimezone(old_timezone); /// Always print last block (if it was not printed already) if (profile_events.last_block) diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index eea007a8608..87e9e20e8f7 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -972,6 +972,11 @@ Packet Connection::receivePacket() res.block = receiveProfileEvents(); return res; + case Protocol::Server::TimezoneUpdate: + readStringBinary(server_timezone, *in); + res.server_timezone = server_timezone; + return res; + default: /// In unknown state, disconnect - to not leave unsynchronised connection. 
disconnect(); diff --git a/src/Client/IServerConnection.h b/src/Client/IServerConnection.h index cd4db8f5258..52382ff9d45 100644 --- a/src/Client/IServerConnection.h +++ b/src/Client/IServerConnection.h @@ -38,6 +38,8 @@ struct Packet ParallelReadRequest request; ParallelReadResponse response; + std::string server_timezone; + Packet() : type(Protocol::Server::Hello) {} }; diff --git a/src/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp index cc260353339..668833b2a84 100644 --- a/src/Client/MultiplexedConnections.cpp +++ b/src/Client/MultiplexedConnections.cpp @@ -258,6 +258,7 @@ Packet MultiplexedConnections::drain() switch (packet.type) { + case Protocol::Server::TimezoneUpdate: case Protocol::Server::MergeTreeAllRangesAnnounecement: case Protocol::Server::MergeTreeReadTaskRequest: case Protocol::Server::ReadTaskRequest: @@ -339,6 +340,7 @@ Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callbac switch (packet.type) { + case Protocol::Server::TimezoneUpdate: case Protocol::Server::MergeTreeAllRangesAnnounecement: case Protocol::Server::MergeTreeReadTaskRequest: case Protocol::Server::ReadTaskRequest: diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index 7027f35d21a..4a29bead540 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -158,6 +158,7 @@ void Suggest::fetch(IServerConnection & connection, const ConnectionTimeouts & t fillWordsFromBlock(packet.block); continue; + case Protocol::Server::TimezoneUpdate: case Protocol::Server::Progress: case Protocol::Server::ProfileInfo: case Protocol::Server::Totals: diff --git a/src/Core/Protocol.h b/src/Core/Protocol.h index 86c0a851c60..97a2831ffe8 100644 --- a/src/Core/Protocol.h +++ b/src/Core/Protocol.h @@ -83,7 +83,8 @@ namespace Protocol ProfileEvents = 14, /// Packet with profile events from server. MergeTreeAllRangesAnnounecement = 15, MergeTreeReadTaskRequest = 16, /// Request from a MergeTree replica to a coordinator - MAX = MergeTreeReadTaskRequest, + TimezoneUpdate = 17, /// Receive server's (session-wide) default timezone + MAX = TimezoneUpdate, }; @@ -111,6 +112,7 @@ namespace Protocol "ProfileEvents", "MergeTreeAllRangesAnnounecement", "MergeTreeReadTaskRequest", + "TimezoneUpdate", }; return packet <= MAX ? data[packet] diff --git a/src/Core/ProtocolDefines.h b/src/Core/ProtocolDefines.h index 3bbfb95f020..5483489d5c4 100644 --- a/src/Core/ProtocolDefines.h +++ b/src/Core/ProtocolDefines.h @@ -54,7 +54,7 @@ /// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION, /// later is just a number for server version (one number instead of commit SHA) /// for simplicity (sometimes it may be more convenient in some use cases). 
-#define DBMS_TCP_PROTOCOL_VERSION 54461 +#define DBMS_TCP_PROTOCOL_VERSION 54462 #define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449 @@ -72,3 +72,5 @@ #define DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRESS 54460 #define DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES 54461 + +#define DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES 54462 diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index a307b472a64..9bb11f34916 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -446,6 +446,7 @@ void TCPHandler::runImpl() sendSelectProfileEvents(); sendLogs(); + return false; }; @@ -483,6 +484,9 @@ void TCPHandler::runImpl() { std::lock_guard lock(task_callback_mutex); sendLogs(); + if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES + && client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) + sendTimezone(); sendEndOfStream(); } @@ -1035,6 +1039,14 @@ void TCPHandler::sendInsertProfileEvents() sendProfileEvents(); } +void TCPHandler::sendTimezone() +{ + writeVarUInt(Protocol::Server::TimezoneUpdate, *out); + writeStringBinary(DateLUT::instance().getTimeZone(), *out); + out->next(); +} + + bool TCPHandler::receiveProxyHeader() { if (in->eof()) diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index f06b0b060b3..b19f908bc27 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -262,6 +262,7 @@ private: void sendProfileEvents(); void sendSelectProfileEvents(); void sendInsertProfileEvents(); + void sendTimezone(); /// Creates state.block_in/block_out for blocks read/write, depending on whether compression is enabled. void initBlockInput(); diff --git a/tests/queries/0_stateless/02674_timezone_setting.reference b/tests/queries/0_stateless/02668_timezone_setting.reference similarity index 100% rename from tests/queries/0_stateless/02674_timezone_setting.reference rename to tests/queries/0_stateless/02668_timezone_setting.reference diff --git a/tests/queries/0_stateless/02674_timezone_setting.sql b/tests/queries/0_stateless/02668_timezone_setting.sql similarity index 73% rename from tests/queries/0_stateless/02674_timezone_setting.sql rename to tests/queries/0_stateless/02668_timezone_setting.sql index 51820fc2dca..f331ab58307 100644 --- a/tests/queries/0_stateless/02674_timezone_setting.sql +++ b/tests/queries/0_stateless/02668_timezone_setting.sql @@ -5,5 +5,5 @@ SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zuric SET timezone = 'Europe/Zurich'; SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); -SET timezone = 'Абырвалг'; -- { clientError CANNOT_PARSE_DATETIME } -select now(); -- { clientError CANNOT_PARSE_DATETIME } \ No newline at end of file +SET timezone = 'Абырвалг'; +select now(); -- { serverError POCO_EXCEPTION } \ No newline at end of file From bbb31cf8913527eb21823216821ca536c2779563 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 15 Mar 2023 18:37:23 +0100 Subject: [PATCH 023/722] added validation on setting modification --- src/Client/ClientBase.cpp | 17 ++++++++++++----- src/Client/ClientBase.h | 1 + src/Core/Settings.h | 2 +- src/Core/SettingsFields.cpp | 11 +++++++++++ src/Core/SettingsFields.h | 38 +++++++++++++++++++++++++++++++++++++ src/Server/TCPHandler.cpp | 17 +++++++++++++---- 6 files changed, 76 insertions(+), 10 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 7ca6bbed6ba..5b086d675ba 100644 --- 
a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1021,7 +1021,7 @@ bool ClientBase::receiveAndProcessPacket(ASTPtr parsed_query, bool cancelled_) return true; case Protocol::Server::TimezoneUpdate: - DateLUT::setDefaultTimezone(packet.server_timezone); + onTimezoneUpdate(packet.server_timezone); return true; default: @@ -1046,6 +1046,11 @@ void ClientBase::onProgress(const Progress & value) progress_indication.writeProgress(*tty_buf); } +void ClientBase::onTimezoneUpdate(const String & tz) +{ + DateLUT::setDefaultTimezone(tz); +} + void ClientBase::onEndOfStream() { @@ -1189,12 +1194,12 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de return receiveSampleBlock(out, columns_description, parsed_query); case Protocol::Server::TimezoneUpdate: - DateLUT::setDefaultTimezone(packet.server_timezone); + onTimezoneUpdate(packet.server_timezone); break; default: throw NetException(ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER, - "Unexpected packet from server (expected Data, Exception or Log, got {})", + "Unexpected packet from server (expected Data, Exception, Log or TimezoneUpdate, got {})", String(Protocol::Server::toString(packet.type))); } } @@ -1500,7 +1505,9 @@ void ClientBase::receiveLogsAndProfileEvents(ASTPtr parsed_query) { auto packet_type = connection->checkPacket(0); - while (packet_type && (*packet_type == Protocol::Server::Log || *packet_type == Protocol::Server::ProfileEvents)) + while (packet_type && (*packet_type == Protocol::Server::Log || + *packet_type == Protocol::Server::ProfileEvents || + *packet_type == Protocol::Server::TimezoneUpdate)) { receiveAndProcessPacket(parsed_query, false); packet_type = connection->checkPacket(0); @@ -1538,7 +1545,7 @@ bool ClientBase::receiveEndOfQuery() break; case Protocol::Server::TimezoneUpdate: - DateLUT::setDefaultTimezone(packet.server_timezone); + onTimezoneUpdate(packet.server_timezone); break; default: diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 52e15a1a075..18d9a30cac0 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -139,6 +139,7 @@ private: void cancelQuery(); void onProgress(const Progress & value); + void onTimezoneUpdate(const String & tz); void onData(Block & block, ASTPtr parsed_query); void onLogData(Block & block); void onTotals(Block & block, ASTPtr parsed_query); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index e508818a26a..ced59219a5b 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -712,7 +712,7 @@ class IColumn; M(Float, insert_keeper_fault_injection_probability, 0.0f, "Approximate probability of failure for a keeper request during insert. Valid value is in interval [0.0f, 1.0f]", 0) \ M(UInt64, insert_keeper_fault_injection_seed, 0, "0 - random seed, otherwise the setting value", 0) \ M(Bool, force_aggregation_in_order, false, "Force use of aggregation in order on remote nodes during distributed aggregation. PLEASE, NEVER CHANGE THIS SETTING VALUE MANUALLY!", IMPORTANT) \ - M(String, timezone, "", "Use specified timezone for interpreting Date and DateTime instead of server's timezone.", 0) \ + M(Timezone, timezone, "", "Use specified timezone for interpreting Date and DateTime instead of server's timezone.", 0) \ // End of COMMON_SETTINGS // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS. 
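(Aside on the wire format used by this commit: the TimezoneUpdate packet is nothing more than the packet-type varint followed by a length-prefixed string with the time zone name. The sketch below shows the two ends of that exchange as they appear in the diffs, TCPHandler::sendTimezone() on the server and Connection::receivePacket() plus ClientBase::onTimezoneUpdate() on the client; the wrapper names sendTimezoneSketch/receiveTimezoneSketch are illustrative only and are not part of the patch. The SettingFieldTimezone type backing the new M(Timezone, timezone, ...) declaration follows in the next file.)

#include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Core/Protocol.h>
#include <Common/DateLUT.h>
#include <string>

namespace DB
{

/// Server side (sketch): announce the effective session time zone to the client,
/// mirroring TCPHandler::sendTimezone() from this series.
void sendTimezoneSketch(WriteBuffer & out, const std::string & tz)
{
    writeVarUInt(Protocol::Server::TimezoneUpdate, out); /// packet type 17, added by this series
    writeStringBinary(tz, out);                          /// time zone name, e.g. "Europe/Zurich"
    out.next();
}

/// Client side (sketch): the packet-type varint has already been consumed by
/// Connection::receivePacket(); read the name and switch the client-wide DateLUT,
/// as ClientBase::onTimezoneUpdate() does.
void receiveTimezoneSketch(ReadBuffer & in)
{
    std::string tz;
    readStringBinary(tz, in);
    DateLUT::setDefaultTimezone(tz);
}

}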
diff --git a/src/Core/SettingsFields.cpp b/src/Core/SettingsFields.cpp index 4164bf1e27e..44369c7c8a0 100644 --- a/src/Core/SettingsFields.cpp +++ b/src/Core/SettingsFields.cpp @@ -445,6 +445,17 @@ String SettingFieldEnumHelpers::readBinary(ReadBuffer & in) return str; } +void SettingFieldTimezone::writeBinary(WriteBuffer & out) const +{ + writeStringBinary(value, out); +} + +void SettingFieldTimezone::readBinary(ReadBuffer & in) +{ + String str; + readStringBinary(str, in); + *this = std::move(str); +} String SettingFieldCustom::toString() const { diff --git a/src/Core/SettingsFields.h b/src/Core/SettingsFields.h index c6fe46c9f6b..b580122d3db 100644 --- a/src/Core/SettingsFields.h +++ b/src/Core/SettingsFields.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -539,6 +540,43 @@ void SettingFieldMultiEnum::readBinary(ReadBuffer & in) return std::initializer_list> __VA_ARGS__ .size();\ } +/* Setting field for specifying user-defined timezone. It is basically a string, but it needs validation. + */ +struct SettingFieldTimezone +{ + String value; + bool changed = false; + + explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(std::string(str)); value = str; } + explicit SettingFieldTimezone(const String & str) { validateTimezone(str); value = str; } + explicit SettingFieldTimezone(String && str) { validateTimezone(std::string(str)); value = std::move(str); } + explicit SettingFieldTimezone(const char * str) { validateTimezone(str); value = str; } + explicit SettingFieldTimezone(const Field & f) { const String & str = f.safeGet(); validateTimezone(str); value = str; } + + SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(std::string(str)); value = str; changed = true; return *this; } + SettingFieldTimezone & operator =(const String & str) { *this = std::string_view{str}; return *this; } + SettingFieldTimezone & operator =(String && str) { validateTimezone(str); value = std::move(str); changed = true; return *this; } + SettingFieldTimezone & operator =(const char * str) { *this = std::string_view{str}; return *this; } + SettingFieldTimezone & operator =(const Field & f) { *this = f.safeGet(); return *this; } + + operator const String &() const { return value; } /// NOLINT + explicit operator Field() const { return value; } + + const String & toString() const { return value; } + void parseFromString(const String & str) { *this = str; } + + void writeBinary(WriteBuffer & out) const; + void readBinary(ReadBuffer & in); + +private: + cctz::time_zone validated_tz; + void validateTimezone(const std::string & str) + { + if (str != "" && !cctz::load_time_zone(str, &validated_tz)) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", str); + } +}; + /// Can keep a value of any type. Used for user-defined settings. 
struct SettingFieldCustom { diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 9bb11f34916..f43982c5133 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -442,6 +442,9 @@ void TCPHandler::runImpl() if (isQueryCancelled()) return true; + if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES + && client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) + sendTimezone(); sendProgress(); sendSelectProfileEvents(); sendLogs(); @@ -483,10 +486,10 @@ void TCPHandler::runImpl() { std::lock_guard lock(task_callback_mutex); - sendLogs(); if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES && client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) sendTimezone(); + sendLogs(); sendEndOfStream(); } @@ -1041,9 +1044,15 @@ void TCPHandler::sendInsertProfileEvents() void TCPHandler::sendTimezone() { - writeVarUInt(Protocol::Server::TimezoneUpdate, *out); - writeStringBinary(DateLUT::instance().getTimeZone(), *out); - out->next(); +// const String & tz = CurrentThread::get().getQueryContext()->getSettingsRef().timezone.toString(); + const String & tz = query_context->getSettingsRef().timezone.toString(); + if (!tz.empty()) + { + LOG_DEBUG(log, "Sent timezone: {}", tz); + writeVarUInt(Protocol::Server::TimezoneUpdate, *out); + writeStringBinary(tz, *out); + out->next(); + } } From e36addb96a7eaaba8f9a90383d3e77020a1a61e8 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Tue, 11 Apr 2023 13:03:03 +0200 Subject: [PATCH 024/722] Hackish way of setting up timezone on the client Warning: lots of debug logging --- programs/client/Client.cpp | 12 ++++- src/Client/ClientBase.cpp | 47 +++++++++++++++++++ src/Functions/timezoneOf.cpp | 13 +++++ src/Interpreters/Context.cpp | 9 ++++ src/Server/TCPHandler.cpp | 21 ++++++--- .../0_stateless/02668_timezone_setting.sql | 8 ++-- 6 files changed, 99 insertions(+), 11 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 660b8d7c00a..2aa75e60294 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -4,8 +4,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -307,7 +309,7 @@ int Client::main(const std::vector & /*args*/) try { UseSSL use_ssl; - MainThreadStatus::getInstance(); + auto & thread_status = MainThreadStatus::getInstance(); setupSignalHandler(); std::cout << std::fixed << std::setprecision(3); @@ -320,6 +322,14 @@ try processConfig(); initTtyBuffer(toProgressOption(config().getString("progress", "default"))); + { + // All that just to set DB::CurrentThread::get().getGlobalContext() + // which is required for client timezone (pushed as from server) to work. + auto thread_group = std::make_shared(); + thread_group->global_context = global_context; + thread_status.attachQuery(thread_group, false); + } + /// Includes delayed_interactive. 
if (is_interactive) { diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index ca32b9b97d7..e3e0364523a 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -65,6 +65,7 @@ #include #include +#include #include #include #include @@ -73,11 +74,44 @@ #include "config_version.h" #include "config.h" +#include + namespace fs = std::filesystem; using namespace std::literals; +namespace +{ +using namespace DB; +using ContetGetterFunc = std::function const; +const void* getContextPtrOrNull(ContetGetterFunc contextFunc) +{ + try + { + return contextFunc().get(); + } + catch(...) + { + } + return nullptr; +} + +void LogContextes(const std::string_view scope, const ContextPtr global_context) +{ + const auto * context = global_context.get(); + std::cerr << scope << " contextes" + << "\n\tglobal: " << reinterpret_cast(context) + << "\n\tsession: " << getContextPtrOrNull([&]() { return context ? context->getSessionContext() : nullptr; }) + << "\n\tquery: " << getContextPtrOrNull([&]() { return context ? context->getQueryContext() : nullptr; }) + << "\n\tcurrent T query: " << getContextPtrOrNull([&]() { return DB::CurrentThread::get().getQueryContext(); }) + << "\n\tcurrent T global: " << getContextPtrOrNull([&]() { return DB::CurrentThread::get().getGlobalContext(); }) +// << "\n\tbuffer: " << getContextPtrOrNull(context, &Context::getBufferContext) + << std::endl; +} + +} + namespace CurrentMetrics { extern const Metric MemoryTracking; @@ -438,7 +472,12 @@ void ClientBase::onData(Block & block, ASTPtr parsed_query) /// output_format, do not output it. /// Also do not output too much data if we're fuzzing. if (block.rows() == 0 || (query_fuzzer_runs != 0 && processed_rows >= 100)) + { + LogContextes("ClientBase::onData header", global_context); return; + } + + LogContextes("ClientBase::onData DATA block", global_context); /// If results are written INTO OUTFILE, we can avoid clearing progress to avoid flicker. if (need_render_progress && tty_buf && (!select_into_file || select_into_file_and_stdout)) @@ -1048,7 +1087,15 @@ void ClientBase::onProgress(const Progress & value) void ClientBase::onTimezoneUpdate(const String & tz) { + std::cerr << "ClientBase::onTimezoneUpdate received new TZ from server: " << tz << std::endl; DateLUT::setDefaultTimezone(tz); + + Settings settings; + settings.timezone = tz; + global_context->applySettingsChanges(settings.changes()); +// DB::CurrentThread::get().getQueryContext()->applySettingsChanges(settings.changes()); + + LogContextes("ClientBase::onTimezoneUpdate", global_context); } diff --git a/src/Functions/timezoneOf.cpp b/src/Functions/timezoneOf.cpp index 6454b1cd735..ce419b7b4cd 100644 --- a/src/Functions/timezoneOf.cpp +++ b/src/Functions/timezoneOf.cpp @@ -5,7 +5,11 @@ #include #include #include +#include "Poco/Logger.h" +#include +#include +#include namespace DB { @@ -52,6 +56,15 @@ public: { DataTypePtr type_no_nullable = removeNullable(arguments[0].type); + { + const auto query_context = DB::CurrentThread::get().getQueryContext(); + + LOG_DEBUG(&Poco::Logger::get("Function timezoneOf"), "query context: {}, timezone: {} ({})", + reinterpret_cast(query_context.get()), + query_context->getSettingsRef().timezone.toString(), + (query_context->getSettingsRef().timezone.changed ? 
"changed" : "UNCHANGED")); + } + return DataTypeString().createColumnConst(input_rows_count, dynamic_cast(*type_no_nullable).getTimeZone().getTimeZone()); } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index cf1d5203bf7..e27889702c5 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -118,6 +118,8 @@ #include #endif +#include + namespace fs = std::filesystem; namespace ProfileEvents @@ -1543,6 +1545,13 @@ void Context::applySettingChange(const SettingChange & change) void Context::applySettingsChanges(const SettingsChanges & changes) { auto lock = getLock(); + LOG_DEBUG(shared->log, "Context::applySettingsChanges {} applying settings changes: {}", reinterpret_cast(this), + fmt::join(std::ranges::transform_view(changes, + [](const SettingChange & change) + { + return change.name + ": " + change.value.dump(); + }), ", ")); + for (const SettingChange & change : changes) applySettingChange(change); applySettingsQuirks(settings); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index ef4bf81a5c1..4d5402d65d5 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -345,6 +345,7 @@ void TCPHandler::runImpl() /// Send block to the client - input storage structure. state.input_header = metadata_snapshot->getSampleBlock(); sendData(state.input_header); + sendTimezone(); }); query_context->setInputBlocksReaderCallback([this] (ContextPtr context) -> Block @@ -452,9 +453,7 @@ void TCPHandler::runImpl() if (isQueryCancelled()) return true; - if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES - && client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) - sendTimezone(); +// sendTimezone(); sendProgress(); sendSelectProfileEvents(); sendLogs(); @@ -496,9 +495,7 @@ void TCPHandler::runImpl() { std::lock_guard lock(task_callback_mutex); - if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES - && client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) - sendTimezone(); +// sendTimezone(); sendLogs(); sendEndOfStream(); } @@ -764,7 +761,7 @@ void TCPHandler::processInsertQuery() /// Send block to the client - table structure. sendData(executor.getHeader()); - + sendTimezone(); sendLogs(); while (readDataNext()) @@ -809,6 +806,7 @@ void TCPHandler::processOrdinaryQueryWithProcessors() { std::lock_guard lock(task_callback_mutex); sendData(header); + sendTimezone(); } } @@ -1061,7 +1059,16 @@ void TCPHandler::sendInsertProfileEvents() void TCPHandler::sendTimezone() { +// if (client_tcp_protocol_version <= DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES +// || client_tcp_protocol_version <= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) +// return; + // const String & tz = CurrentThread::get().getQueryContext()->getSettingsRef().timezone.toString(); + LOG_DEBUG(log, "TCPHandler::sendTimezone() query context: {}, timezone: {} ({})", + reinterpret_cast(query_context.get()), + query_context->getSettingsRef().timezone.toString(), + (query_context->getSettingsRef().timezone.changed ? 
"changed" : "UNCHANGED")); + const String & tz = query_context->getSettingsRef().timezone.toString(); if (!tz.empty()) { diff --git a/tests/queries/0_stateless/02668_timezone_setting.sql b/tests/queries/0_stateless/02668_timezone_setting.sql index f331ab58307..d85efaa8a39 100644 --- a/tests/queries/0_stateless/02668_timezone_setting.sql +++ b/tests/queries/0_stateless/02668_timezone_setting.sql @@ -1,9 +1,11 @@ +SET timezone = 'Абырвалг'; -- { serverError BAD_ARGUMENTS} + SET timezone = 'Asia/Novosibirsk'; SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich'); SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS timezone = 'Europe/Zurich'; -SET timezone = 'Europe/Zurich'; +SET timezone = 'Asia/Manila'; SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); -SET timezone = 'Абырвалг'; -select now(); -- { serverError POCO_EXCEPTION } \ No newline at end of file +SELECT timezone(), serverTimeZone(), timezoneOf(now()) SETTINGS timezone = 'Europe/Zurich'; +SELECT timezone(), serverTimeZone(), timezoneOf(now()) SETTINGS timezone = 'Pacific/Pitcairn'; From 5d18343fb8ac1b0cae8085a660b8c995b9e33ea2 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 12 Apr 2023 00:15:07 +0200 Subject: [PATCH 025/722] fixed delay --- src/Client/ClientBase.cpp | 36 +++++++++++++++++------------------- src/Server/TCPHandler.cpp | 12 ++++++------ 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index e3e0364523a..8da4ac200d9 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -65,7 +65,7 @@ #include #include -#include +//#include #include #include #include @@ -74,7 +74,7 @@ #include "config_version.h" #include "config.h" -#include +//#include namespace fs = std::filesystem; @@ -97,18 +97,18 @@ const void* getContextPtrOrNull(ContetGetterFunc contextFunc) return nullptr; } -void LogContextes(const std::string_view scope, const ContextPtr global_context) -{ - const auto * context = global_context.get(); - std::cerr << scope << " contextes" - << "\n\tglobal: " << reinterpret_cast(context) - << "\n\tsession: " << getContextPtrOrNull([&]() { return context ? context->getSessionContext() : nullptr; }) - << "\n\tquery: " << getContextPtrOrNull([&]() { return context ? context->getQueryContext() : nullptr; }) - << "\n\tcurrent T query: " << getContextPtrOrNull([&]() { return DB::CurrentThread::get().getQueryContext(); }) - << "\n\tcurrent T global: " << getContextPtrOrNull([&]() { return DB::CurrentThread::get().getGlobalContext(); }) -// << "\n\tbuffer: " << getContextPtrOrNull(context, &Context::getBufferContext) - << std::endl; -} +//void LogContextes(const std::string_view scope, const ContextPtr global_context) +//{ +// const auto * context = global_context.get(); +// std::cerr << scope << " contextes" +// << "\n\tglobal: " << reinterpret_cast(context) +// << "\n\tsession: " << getContextPtrOrNull([&]() { return context ? context->getSessionContext() : nullptr; }) +// << "\n\tquery: " << getContextPtrOrNull([&]() { return context ? 
context->getQueryContext() : nullptr; }) +// << "\n\tcurrent T query: " << getContextPtrOrNull([&]() { return DB::CurrentThread::get().getQueryContext(); }) +// << "\n\tcurrent T global: " << getContextPtrOrNull([&]() { return DB::CurrentThread::get().getGlobalContext(); }) +//// << "\n\tbuffer: " << getContextPtrOrNull(context, &Context::getBufferContext) +// << std::endl; +//} } @@ -473,11 +473,11 @@ void ClientBase::onData(Block & block, ASTPtr parsed_query) /// Also do not output too much data if we're fuzzing. if (block.rows() == 0 || (query_fuzzer_runs != 0 && processed_rows >= 100)) { - LogContextes("ClientBase::onData header", global_context); +// LogContextes("ClientBase::onData header", global_context); return; } - LogContextes("ClientBase::onData DATA block", global_context); +// LogContextes("ClientBase::onData DATA block", global_context); /// If results are written INTO OUTFILE, we can avoid clearing progress to avoid flicker. if (need_render_progress && tty_buf && (!select_into_file || select_into_file_and_stdout)) @@ -1088,14 +1088,12 @@ void ClientBase::onProgress(const Progress & value) void ClientBase::onTimezoneUpdate(const String & tz) { std::cerr << "ClientBase::onTimezoneUpdate received new TZ from server: " << tz << std::endl; - DateLUT::setDefaultTimezone(tz); Settings settings; settings.timezone = tz; global_context->applySettingsChanges(settings.changes()); -// DB::CurrentThread::get().getQueryContext()->applySettingsChanges(settings.changes()); - LogContextes("ClientBase::onTimezoneUpdate", global_context); +// LogContextes("ClientBase::onTimezoneUpdate", global_context); } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 4d5402d65d5..6ff7acf025a 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -453,7 +453,7 @@ void TCPHandler::runImpl() if (isQueryCancelled()) return true; -// sendTimezone(); + sendTimezone(); sendProgress(); sendSelectProfileEvents(); sendLogs(); @@ -495,7 +495,7 @@ void TCPHandler::runImpl() { std::lock_guard lock(task_callback_mutex); -// sendTimezone(); + sendTimezone(); sendLogs(); sendEndOfStream(); } @@ -1069,14 +1069,14 @@ void TCPHandler::sendTimezone() query_context->getSettingsRef().timezone.toString(), (query_context->getSettingsRef().timezone.changed ? 
"changed" : "UNCHANGED")); - const String & tz = query_context->getSettingsRef().timezone.toString(); - if (!tz.empty()) - { + const String & tz = CurrentThread::get().getQueryContext()->getSettingsRef().timezone.toString(); +// if (!tz.empty()) +// { LOG_DEBUG(log, "Sent timezone: {}", tz); writeVarUInt(Protocol::Server::TimezoneUpdate, *out); writeStringBinary(tz, *out); out->next(); - } +// } } From a9499eed794731a3fed2305e4d5f0e3607815816 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 12 Apr 2023 12:47:05 +0200 Subject: [PATCH 026/722] moved getting server TZ DateLUT to separate place, upd tests and fix --- programs/copier/ClusterCopierApp.cpp | 2 +- programs/keeper/Keeper.cpp | 4 +-- programs/obfuscator/Obfuscator.cpp | 2 +- programs/server/Server.cpp | 4 +-- src/Client/ClientBase.cpp | 26 +++++++++---------- src/Common/DateLUT.h | 7 +++++ src/Daemon/BaseDaemon.cpp | 2 +- src/Functions/serverConstants.cpp | 2 +- src/Loggers/OwnPatternFormatter.h | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 2 +- .../MergeTree/MergeFromLogEntryTask.cpp | 2 +- .../MergeTree/MergeTreeDataWriter.cpp | 6 ++--- .../MergeTree/MergeTreeMutationEntry.cpp | 2 +- src/Storages/MergeTree/MergeTreePartInfo.cpp | 4 +-- src/Storages/MergeTree/MergeTreePartition.cpp | 4 +-- .../MergeTree/ReplicatedMergeTreeLogEntry.cpp | 2 +- .../ReplicatedMergeTreeMutationEntry.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- src/Storages/WindowView/StorageWindowView.cpp | 2 +- .../02668_timezone_setting.reference | 3 --- .../0_stateless/02668_timezone_setting.sql | 11 -------- .../02681_timezone_setting.reference | 5 ++++ .../0_stateless/02681_timezone_setting.sql | 11 ++++++++ 23 files changed, 59 insertions(+), 50 deletions(-) delete mode 100644 tests/queries/0_stateless/02668_timezone_setting.reference delete mode 100644 tests/queries/0_stateless/02668_timezone_setting.sql create mode 100644 tests/queries/0_stateless/02681_timezone_setting.reference create mode 100644 tests/queries/0_stateless/02681_timezone_setting.sql diff --git a/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp index b2994b90e23..92657f81c2a 100644 --- a/programs/copier/ClusterCopierApp.cpp +++ b/programs/copier/ClusterCopierApp.cpp @@ -43,7 +43,7 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self) time_t timestamp = Poco::Timestamp().epochTime(); auto curr_pid = Poco::Process::id(); - process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid); + process_id = std::to_string(DateLUT::serverTimezoneInstance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid); host_id = escapeForFileName(getFQDNOrHostName()) + '#' + process_id; process_path = fs::weakly_canonical(fs::path(base_dir) / ("clickhouse-copier_" + process_id)); fs::create_directories(process_path); diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index ed3297ed7cb..58a87057363 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -399,8 +399,8 @@ try /// Initialize DateLUT early, to not interfere with running time of first query. 
LOG_DEBUG(log, "Initializing DateLUT."); - DateLUT::instance(); - LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::instance().getTimeZone()); + DateLUT::serverTimezoneInstance(); + LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::serverTimezoneInstance().getTimeZone()); /// Don't want to use DNS cache DNSResolver::instance().setDisableCacheFlag(); diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 274ad29a174..9b7f2c424d3 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -492,7 +492,7 @@ private: const DateLUTImpl & date_lut; public: - explicit DateTimeModel(UInt64 seed_) : seed(seed_), date_lut(DateLUT::instance()) {} + explicit DateTimeModel(UInt64 seed_) : seed(seed_), date_lut(DateLUT::serverTimezoneInstance()) {} void train(const IColumn &) override {} void finalize() override {} diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 711dfb3820a..23113686aa1 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1041,8 +1041,8 @@ try /// Initialize DateLUT early, to not interfere with running time of first query. LOG_DEBUG(log, "Initializing DateLUT."); - DateLUT::instance(); - LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::instance().getTimeZone()); + DateLUT::serverTimezoneInstance(); + LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::serverTimezoneInstance().getTimeZone()); /// Storage with temporary data for processing of heavy queries. if (!server_settings.tmp_policy.value.empty()) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 8da4ac200d9..7a91a382787 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -84,18 +84,18 @@ using namespace std::literals; namespace { using namespace DB; -using ContetGetterFunc = std::function const; -const void* getContextPtrOrNull(ContetGetterFunc contextFunc) -{ - try - { - return contextFunc().get(); - } - catch(...) - { - } - return nullptr; -} +//using ContetGetterFunc = std::function const; +//const void* getContextPtrOrNull(ContetGetterFunc contextFunc) +//{ +// try +// { +// return contextFunc().get(); +// } +// catch(...) +// { +// } +// return nullptr; +//} //void LogContextes(const std::string_view scope, const ContextPtr global_context) //{ @@ -1087,7 +1087,7 @@ void ClientBase::onProgress(const Progress & value) void ClientBase::onTimezoneUpdate(const String & tz) { - std::cerr << "ClientBase::onTimezoneUpdate received new TZ from server: " << tz << std::endl; +// std::cerr << "ClientBase::onTimezoneUpdate received new TZ from server: " << tz << std::endl; Settings settings; settings.timezone = tz; diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index f17fe772dbc..810810edb6c 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -17,6 +17,13 @@ class DateLUT : private boost::noncopyable { public: + /// Return singleton DateLUTImpl instance for server's (native) time zone. + static ALWAYS_INLINE const DateLUTImpl & serverTimezoneInstance() + { + const auto & date_lut = getInstance(); + return *date_lut.default_impl.load(std::memory_order_acquire); + } + /// Return singleton DateLUTImpl instance for timezone set by `timezone` setting for current session is used. /// If it is not set, server's timezone (the one which server has) is being used. 
static ALWAYS_INLINE const DateLUTImpl & instance() diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 18c4c0d97a0..c6b5be3ea87 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -1003,7 +1003,7 @@ void BaseDaemon::shouldSetupWatchdog(char * argv0_) void BaseDaemon::setupWatchdog() { /// Initialize in advance to avoid double initialization in forked processes. - DateLUT::instance(); + DateLUT::serverTimezoneInstance(); std::string original_process_name; if (argv0) diff --git a/src/Functions/serverConstants.cpp b/src/Functions/serverConstants.cpp index 57a6279bd7a..5d54815818d 100644 --- a/src/Functions/serverConstants.cpp +++ b/src/Functions/serverConstants.cpp @@ -75,7 +75,7 @@ namespace public: static constexpr auto name = "serverTimezone"; static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } - explicit FunctionServerTimezone(ContextPtr context) : FunctionConstantBase(String{DateLUT::instance("").getTimeZone()}, context->isDistributed()) {} + explicit FunctionServerTimezone(ContextPtr context) : FunctionConstantBase(String{DateLUT::serverTimezoneInstance().getTimeZone()}, context->isDistributed()) {} }; diff --git a/src/Loggers/OwnPatternFormatter.h b/src/Loggers/OwnPatternFormatter.h index 07d0409b0ae..8b0d11bcec1 100644 --- a/src/Loggers/OwnPatternFormatter.h +++ b/src/Loggers/OwnPatternFormatter.h @@ -31,6 +31,6 @@ public: virtual void formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) const; private: - const DateLUTImpl & server_timezone = DateLUT::instance(""); + const DateLUTImpl & server_timezone = DateLUT::serverTimezoneInstance(); bool color; }; diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 85420cabb8d..f50a7169d39 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1030,7 +1030,7 @@ void IMergeTreeDataPart::loadPartitionAndMinMaxIndex() DayNum max_date; MergeTreePartInfo::parseMinMaxDatesFromPartName(name, min_date, max_date); - const auto & date_lut = DateLUT::instance(); + const auto & date_lut = DateLUT::serverTimezoneInstance(); partition = MergeTreePartition(date_lut.toNumYYYYMM(min_date)); minmax_idx = std::make_shared(min_date, max_date); } diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index e017c9681e8..28e30b5f64f 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -57,7 +57,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() { LOG_INFO(log, "Will try to fetch part {} until '{}' because this part assigned to recompression merge. " "Source replica {} will try to merge this part first", entry.new_part_name, - DateLUT::instance().timeToString(entry.create_time + storage_settings_ptr->try_fetch_recompressed_part_timeout.totalSeconds()), entry.source_replica); + DateLUT::serverTimezoneInstance().timeToString(entry.create_time + storage_settings_ptr->try_fetch_recompressed_part_timeout.totalSeconds()), entry.source_replica); /// Waiting other replica to recompress part. No need to check it. 
return PrepareResult{ .prepared_successfully = false, diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 37cfe4d065e..48a1cb97c89 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -115,7 +115,7 @@ void updateTTL( if (const ColumnUInt16 * column_date = typeid_cast(ttl_column.get())) { - const auto & date_lut = DateLUT::instance(); + const auto & date_lut = DateLUT::serverTimezoneInstance(); for (const auto & val : column_date->getData()) ttl_info.update(date_lut.fromDayNum(DayNum(val))); } @@ -128,7 +128,7 @@ void updateTTL( { if (typeid_cast(&column_const->getDataColumn())) { - const auto & date_lut = DateLUT::instance(); + const auto & date_lut = DateLUT::serverTimezoneInstance(); ttl_info.update(date_lut.fromDayNum(DayNum(column_const->getValue()))); } else if (typeid_cast(&column_const->getDataColumn())) @@ -369,7 +369,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( DayNum min_date(minmax_idx->hyperrectangle[data.minmax_idx_date_column_pos].left.get()); DayNum max_date(minmax_idx->hyperrectangle[data.minmax_idx_date_column_pos].right.get()); - const auto & date_lut = DateLUT::instance(); + const auto & date_lut = DateLUT::serverTimezoneInstance(); auto min_month = date_lut.toNumYYYYMM(min_date); auto max_month = date_lut.toNumYYYYMM(max_date); diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp index 2e30a3f3986..2c0359b0f3f 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp @@ -127,7 +127,7 @@ MergeTreeMutationEntry::MergeTreeMutationEntry(DiskPtr disk_, const String & pat LocalDateTime create_time_dt; *buf >> "create time: " >> create_time_dt >> "\n"; - create_time = DateLUT::instance().makeDateTime( + create_time = DateLUT::serverTimezoneInstance().makeDateTime( create_time_dt.year(), create_time_dt.month(), create_time_dt.day(), create_time_dt.hour(), create_time_dt.minute(), create_time_dt.second()); diff --git a/src/Storages/MergeTree/MergeTreePartInfo.cpp b/src/Storages/MergeTree/MergeTreePartInfo.cpp index 84432a293d7..e1b52d8a7b7 100644 --- a/src/Storages/MergeTree/MergeTreePartInfo.cpp +++ b/src/Storages/MergeTree/MergeTreePartInfo.cpp @@ -148,7 +148,7 @@ void MergeTreePartInfo::parseMinMaxDatesFromPartName(const String & part_name, D throw Exception(ErrorCodes::BAD_DATA_PART_NAME, "Unexpected part name: {}", part_name); } - const auto & date_lut = DateLUT::instance(); + const auto & date_lut = DateLUT::serverTimezoneInstance(); min_date = date_lut.YYYYMMDDToDayNum(min_yyyymmdd); max_date = date_lut.YYYYMMDDToDayNum(max_yyyymmdd); @@ -219,7 +219,7 @@ String MergeTreePartInfo::getPartNameV1() const String MergeTreePartInfo::getPartNameV0(DayNum left_date, DayNum right_date) const { - const auto & date_lut = DateLUT::instance(); + const auto & date_lut = DateLUT::serverTimezoneInstance(); /// Directory name for the part has form: `YYYYMMDD_YYYYMMDD_N_N_L`. 
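(All of the MergeTree renames in this commit follow one rule: values that end up in on-disk names, such as partition IDs, min/max part dates and mutation create times, must not depend on the per-session timezone setting, so they switch from the session-aware DateLUT::instance() to the new DateLUT::serverTimezoneInstance(). A minimal sketch of the intended difference is below, assuming a hypothetical server running in UTC and a session that has executed SET timezone = 'Asia/Novosibirsk'; the helper name partitionIdSketch is illustrative and not part of the patch. The remaining MergeTree and WindowView files below receive the same treatment.)

#include <Common/DateLUT.h>
#include <ctime>

namespace DB
{

/// Sketch: the same time_t can format to different YYYYMMDD numbers
/// depending on which DateLUT is asked.
void partitionIdSketch(time_t event_time)
{
    /// May resolve to the query/session time zone (e.g. Asia/Novosibirsk) after this series.
    const auto & session_lut = DateLUT::instance();
    /// Always the server's own time zone, regardless of SET timezone.
    const auto & server_lut = DateLUT::serverTimezoneInstance();

    auto shown_to_user = session_lut.toNumYYYYMMDD(event_time);
    auto used_in_part_name = server_lut.toNumYYYYMMDD(event_time);

    /// Around midnight these two can differ by a day; part names and partition IDs
    /// must use the stable server-side value, hence the renames in this commit.
    (void)shown_to_user;
    (void)used_in_part_name;
}

}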
diff --git a/src/Storages/MergeTree/MergeTreePartition.cpp b/src/Storages/MergeTree/MergeTreePartition.cpp index 3b28012e7d6..b0fc34ac2f7 100644 --- a/src/Storages/MergeTree/MergeTreePartition.cpp +++ b/src/Storages/MergeTree/MergeTreePartition.cpp @@ -239,7 +239,7 @@ String MergeTreePartition::getID(const Block & partition_key_sample) const result += '-'; if (typeid_cast(partition_key_sample.getByPosition(i).type.get())) - result += toString(DateLUT::instance().toNumYYYYMMDD(DayNum(value[i].safeGet()))); + result += toString(DateLUT::serverTimezoneInstance().toNumYYYYMMDD(DayNum(value[i].safeGet()))); else if (typeid_cast(partition_key_sample.getByPosition(i).type.get())) result += toString(value[i].get().toUnderType()); else @@ -320,7 +320,7 @@ std::optional MergeTreePartition::tryParseValueFromID(const String & partit throw Exception( ErrorCodes::INVALID_PARTITION_VALUE, "Cannot parse partition_id: got unexpected Date: {}", date_yyyymmdd); - UInt32 date = DateLUT::instance().YYYYMMDDToDayNum(date_yyyymmdd); + UInt32 date = DateLUT::serverTimezoneInstance().YYYYMMDDToDayNum(date_yyyymmdd); res.emplace_back(date); break; } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp index 79b0beb0933..ac956433eab 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp @@ -199,7 +199,7 @@ void ReplicatedMergeTreeLogEntryData::readText(ReadBuffer & in, MergeTreeDataFor { LocalDateTime create_time_dt; in >> "create_time: " >> create_time_dt >> "\n"; - create_time = DateLUT::instance().makeDateTime( + create_time = DateLUT::serverTimezoneInstance().makeDateTime( create_time_dt.year(), create_time_dt.month(), create_time_dt.day(), create_time_dt.hour(), create_time_dt.minute(), create_time_dt.second()); } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp index 1efb3f6826b..17f3637e722 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp @@ -38,7 +38,7 @@ void ReplicatedMergeTreeMutationEntry::readText(ReadBuffer & in) LocalDateTime create_time_dt; in >> "create time: " >> create_time_dt >> "\n"; - create_time = DateLUT::instance().makeDateTime( + create_time = DateLUT::serverTimezoneInstance().makeDateTime( create_time_dt.year(), create_time_dt.month(), create_time_dt.day(), create_time_dt.hour(), create_time_dt.minute(), create_time_dt.second()); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index fe4a144deaa..356663496a6 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5318,7 +5318,7 @@ String getPartNamePossiblyFake(MergeTreeDataFormatVersion format_version, const if (format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING) { /// The date range is all month long. 
- const auto & lut = DateLUT::instance(); + const auto & lut = DateLUT::serverTimezoneInstance(); time_t start_time = lut.YYYYMMDDToDate(parse(part_info.partition_id + "01")); DayNum left_date = DayNum(lut.toDayNum(start_time).toUnderType()); DayNum right_date = DayNum(static_cast(left_date) + lut.daysInMonth(start_time) - 1); diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 3471e4ea6bf..8546fdd3c9f 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -1335,7 +1335,7 @@ ASTPtr StorageWindowView::innerQueryParser(const ASTSelectQuery & query) time_zone = &DateLUT::instance(window_view_timezone); } else - time_zone = &DateLUT::instance(); + time_zone = &DateLUT::serverTimezoneInstance(); return result; } diff --git a/tests/queries/0_stateless/02668_timezone_setting.reference b/tests/queries/0_stateless/02668_timezone_setting.reference deleted file mode 100644 index 8ed8024f652..00000000000 --- a/tests/queries/0_stateless/02668_timezone_setting.reference +++ /dev/null @@ -1,3 +0,0 @@ -1999-12-12 18:23:23.123 -1999-12-12 23:23:23.123 -1999-12-13 04:23:23.123 diff --git a/tests/queries/0_stateless/02668_timezone_setting.sql b/tests/queries/0_stateless/02668_timezone_setting.sql deleted file mode 100644 index d85efaa8a39..00000000000 --- a/tests/queries/0_stateless/02668_timezone_setting.sql +++ /dev/null @@ -1,11 +0,0 @@ -SET timezone = 'Абырвалг'; -- { serverError BAD_ARGUMENTS} - -SET timezone = 'Asia/Novosibirsk'; -SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich'); -SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS timezone = 'Europe/Zurich'; - -SET timezone = 'Asia/Manila'; -SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); - -SELECT timezone(), serverTimeZone(), timezoneOf(now()) SETTINGS timezone = 'Europe/Zurich'; -SELECT timezone(), serverTimeZone(), timezoneOf(now()) SETTINGS timezone = 'Pacific/Pitcairn'; diff --git a/tests/queries/0_stateless/02681_timezone_setting.reference b/tests/queries/0_stateless/02681_timezone_setting.reference new file mode 100644 index 00000000000..8850d77ab03 --- /dev/null +++ b/tests/queries/0_stateless/02681_timezone_setting.reference @@ -0,0 +1,5 @@ +2022-12-12 17:23:23.123 +2022-12-12 23:23:23.123 +2022-12-12 22:23:23.123 +Europe/Zurich Europe/Zurich +Pacific/Pitcairn Pacific/Pitcairn diff --git a/tests/queries/0_stateless/02681_timezone_setting.sql b/tests/queries/0_stateless/02681_timezone_setting.sql new file mode 100644 index 00000000000..73afb4c029b --- /dev/null +++ b/tests/queries/0_stateless/02681_timezone_setting.sql @@ -0,0 +1,11 @@ +SET timezone = 'Абырвалг'; -- { serverError BAD_ARGUMENTS} + +SET timezone = 'Asia/Novosibirsk'; +SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Europe/Zurich'); +SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS timezone = 'Europe/Zurich'; + +SET timezone = 'Asia/Manila'; +SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); + +SELECT timezone(), timezoneOf(now()) SETTINGS timezone = 'Europe/Zurich' FORMAT TSV; +SELECT timezone(), timezoneOf(now()) SETTINGS timezone = 'Pacific/Pitcairn' FORMAT TSV; From daef5d818a5cdc6e358efaa49e56d90ee530e6bf Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 12 Apr 2023 15:31:58 +0200 Subject: [PATCH 027/722] fix according to updates in 
ThreadStatus.h --- programs/client/Client.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index aa563198c82..528c504e555 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -325,9 +325,8 @@ try { // All that just to set DB::CurrentThread::get().getGlobalContext() // which is required for client timezone (pushed as from server) to work. - auto thread_group = std::make_shared(); - thread_group->global_context = global_context; - thread_status.attachQuery(thread_group, false); + auto thread_group = std::make_shared(); + thread_status.attachToGroup(thread_group, false); } /// Includes delayed_interactive. From 3f8956f854253a5b17c6fa4163372f7e0f6cf664 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 12 Apr 2023 17:45:11 +0200 Subject: [PATCH 028/722] remove additional logging --- src/Client/ClientBase.cpp | 30 ------------------------------ src/Functions/timezoneOf.cpp | 14 -------------- src/Interpreters/Context.cpp | 8 -------- src/Server/TCPHandler.cpp | 24 +++++++----------------- 4 files changed, 7 insertions(+), 69 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index d722d39e8f6..f4253ab90f6 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -66,7 +66,6 @@ #include #include -//#include #include #include #include @@ -75,9 +74,6 @@ #include "config_version.h" #include "config.h" -//#include - - namespace fs = std::filesystem; using namespace std::literals; @@ -85,32 +81,6 @@ using namespace std::literals; namespace { using namespace DB; -//using ContetGetterFunc = std::function const; -//const void* getContextPtrOrNull(ContetGetterFunc contextFunc) -//{ -// try -// { -// return contextFunc().get(); -// } -// catch(...) -// { -// } -// return nullptr; -//} - -//void LogContextes(const std::string_view scope, const ContextPtr global_context) -//{ -// const auto * context = global_context.get(); -// std::cerr << scope << " contextes" -// << "\n\tglobal: " << reinterpret_cast(context) -// << "\n\tsession: " << getContextPtrOrNull([&]() { return context ? context->getSessionContext() : nullptr; }) -// << "\n\tquery: " << getContextPtrOrNull([&]() { return context ? context->getQueryContext() : nullptr; }) -// << "\n\tcurrent T query: " << getContextPtrOrNull([&]() { return DB::CurrentThread::get().getQueryContext(); }) -// << "\n\tcurrent T global: " << getContextPtrOrNull([&]() { return DB::CurrentThread::get().getGlobalContext(); }) -//// << "\n\tbuffer: " << getContextPtrOrNull(context, &Context::getBufferContext) -// << std::endl; -//} - } namespace CurrentMetrics diff --git a/src/Functions/timezoneOf.cpp b/src/Functions/timezoneOf.cpp index ce419b7b4cd..7a5957a5dbc 100644 --- a/src/Functions/timezoneOf.cpp +++ b/src/Functions/timezoneOf.cpp @@ -5,11 +5,6 @@ #include #include #include -#include "Poco/Logger.h" - -#include -#include -#include namespace DB { @@ -56,15 +51,6 @@ public: { DataTypePtr type_no_nullable = removeNullable(arguments[0].type); - { - const auto query_context = DB::CurrentThread::get().getQueryContext(); - - LOG_DEBUG(&Poco::Logger::get("Function timezoneOf"), "query context: {}, timezone: {} ({})", - reinterpret_cast(query_context.get()), - query_context->getSettingsRef().timezone.toString(), - (query_context->getSettingsRef().timezone.changed ? 
"changed" : "UNCHANGED")); - } - return DataTypeString().createColumnConst(input_rows_count, dynamic_cast(*type_no_nullable).getTimeZone().getTimeZone()); } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 9e0b1dfd032..e888902ae29 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -119,8 +119,6 @@ #include #endif -#include - namespace fs = std::filesystem; namespace ProfileEvents @@ -1683,12 +1681,6 @@ void Context::applySettingChange(const SettingChange & change) void Context::applySettingsChanges(const SettingsChanges & changes) { auto lock = getLock(); - LOG_DEBUG(shared->log, "Context::applySettingsChanges {} applying settings changes: {}", reinterpret_cast(this), - fmt::join(std::ranges::transform_view(changes, - [](const SettingChange & change) - { - return change.name + ": " + change.value.dump(); - }), ", ")); for (const SettingChange & change : changes) applySettingChange(change); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index a48097a649f..9a1b64eaf89 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -767,7 +767,6 @@ void TCPHandler::processInsertQuery() /// Send block to the client - table structure. sendData(executor.getHeader()); - sendTimezone(); sendLogs(); while (readDataNext()) @@ -1070,24 +1069,15 @@ void TCPHandler::sendInsertProfileEvents() void TCPHandler::sendTimezone() { -// if (client_tcp_protocol_version <= DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES -// || client_tcp_protocol_version <= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) -// return; + if (client_tcp_protocol_version < DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES) + return; -// const String & tz = CurrentThread::get().getQueryContext()->getSettingsRef().timezone.toString(); - LOG_DEBUG(log, "TCPHandler::sendTimezone() query context: {}, timezone: {} ({})", - reinterpret_cast(query_context.get()), - query_context->getSettingsRef().timezone.toString(), - (query_context->getSettingsRef().timezone.changed ? 
"changed" : "UNCHANGED")); + const String & tz = query_context->getSettingsRef().timezone.toString(); - const String & tz = CurrentThread::get().getQueryContext()->getSettingsRef().timezone.toString(); -// if (!tz.empty()) -// { - LOG_DEBUG(log, "Sent timezone: {}", tz); - writeVarUInt(Protocol::Server::TimezoneUpdate, *out); - writeStringBinary(tz, *out); - out->next(); -// } + LOG_DEBUG(log, "TCPHandler::sendTimezone(): {}", tz); + writeVarUInt(Protocol::Server::TimezoneUpdate, *out); + writeStringBinary(tz, *out); + out->next(); } From 73675cd8d29ffd3c5e1b1a57a023ee1ac946ef8c Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 12 Apr 2023 19:17:48 +0200 Subject: [PATCH 029/722] tryfix fasttest --- src/Server/TCPHandler.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 9a1b64eaf89..152c7aba56e 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -458,7 +458,6 @@ void TCPHandler::runImpl() if (getQueryCancellationStatus() == CancellationStatus::FULLY_CANCELLED) return true; - sendTimezone(); sendProgress(); sendSelectProfileEvents(); sendLogs(); @@ -811,7 +810,6 @@ void TCPHandler::processOrdinaryQueryWithProcessors() { std::lock_guard lock(task_callback_mutex); sendData(header); - sendTimezone(); } } From 981a73cd867c435c74adf06cece8ec279fb8fde8 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 12 Apr 2023 21:20:12 +0200 Subject: [PATCH 030/722] upd remotequeryexecutor on receive timezone --- src/QueryPipeline/RemoteQueryExecutor.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index b7490a2ad9c..23c1412dc76 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -449,6 +449,9 @@ RemoteQueryExecutor::ReadResult RemoteQueryExecutor::processPacket(Packet packet throw Exception(ErrorCodes::SYSTEM_ERROR, "Could not push into profile queue"); break; + case Protocol::Server::TimezoneUpdate: + break; + default: got_unknown_packet_from_replica = true; throw Exception( @@ -546,6 +549,9 @@ void RemoteQueryExecutor::finish(std::unique_ptr * read_context) if (!profile_queue->emplace(std::move(packet.block))) throw Exception(ErrorCodes::SYSTEM_ERROR, "Could not push into profile queue"); break; + + case Protocol::Server::TimezoneUpdate: + break; default: got_unknown_packet_from_replica = true; From 0c616ac0a287ce017c18561013b87e576ac8e74b Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 12 Apr 2023 21:31:13 +0200 Subject: [PATCH 031/722] fix style --- src/QueryPipeline/RemoteQueryExecutor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index 23c1412dc76..56b5357c522 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -549,7 +549,7 @@ void RemoteQueryExecutor::finish(std::unique_ptr * read_context) if (!profile_queue->emplace(std::move(packet.block))) throw Exception(ErrorCodes::SYSTEM_ERROR, "Could not push into profile queue"); break; - + case Protocol::Server::TimezoneUpdate: break; From 1ec32d374d8872aedd2f13bfdbbb263d98feed17 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 12 Apr 2023 22:27:54 +0200 Subject: [PATCH 032/722] update Timezone packet processing --- src/Client/HedgedConnections.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Client/HedgedConnections.cpp 
b/src/Client/HedgedConnections.cpp index 13e4fe75b3d..d11954f3838 100644 --- a/src/Client/HedgedConnections.cpp +++ b/src/Client/HedgedConnections.cpp @@ -418,6 +418,7 @@ Packet HedgedConnections::receivePacketFromReplica(const ReplicaLocation & repli } replica_with_last_received_packet = replica_location; break; + case Protocol::Server::TimezoneUpdate: case Protocol::Server::PartUUIDs: case Protocol::Server::ProfileInfo: case Protocol::Server::Totals: From 16292eb5a18d1f410421217461a7b3e44b39dbec Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 12 Apr 2023 22:54:51 +0200 Subject: [PATCH 033/722] update timezone packet handling in remote inserter --- src/QueryPipeline/RemoteInserter.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/QueryPipeline/RemoteInserter.cpp b/src/QueryPipeline/RemoteInserter.cpp index b8a878b56c3..134c169e35f 100644 --- a/src/QueryPipeline/RemoteInserter.cpp +++ b/src/QueryPipeline/RemoteInserter.cpp @@ -130,7 +130,7 @@ void RemoteInserter::onFinish() break; else if (Protocol::Server::Exception == packet.type) packet.exception->rethrow(); - else if (Protocol::Server::Log == packet.type) + else if (Protocol::Server::Log == packet.type || Protocol::Server::TimezoneUpdate == packet.type) { // Do nothing } From bac5fbc3d2cf1b9606d66543244036797221a4b3 Mon Sep 17 00:00:00 2001 From: zvonand Date: Thu, 13 Apr 2023 13:26:09 +0200 Subject: [PATCH 034/722] fix error on connection drop after 1st query --- src/Client/ClientBase.cpp | 4 ---- src/Client/Connection.cpp | 4 ++++ src/Server/TCPHandler.cpp | 4 +++- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index f4253ab90f6..2e82144e64d 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1074,13 +1074,9 @@ void ClientBase::onProgress(const Progress & value) void ClientBase::onTimezoneUpdate(const String & tz) { -// std::cerr << "ClientBase::onTimezoneUpdate received new TZ from server: " << tz << std::endl; - Settings settings; settings.timezone = tz; global_context->applySettingsChanges(settings.changes()); - -// LogContextes("ClientBase::onTimezoneUpdate", global_context); } diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index a0025eafd64..08549265848 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -335,6 +335,10 @@ void Connection::receiveHello() nonce.emplace(read_nonce); } } + else if (packet_type == Protocol::Server::TimezoneUpdate) + { + // skip this packet at hello, will receive and process it later + } else if (packet_type == Protocol::Server::Exception) receiveException()->rethrow(); else diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 152c7aba56e..a875507d227 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -183,8 +183,11 @@ void TCPHandler::runImpl() /// User will be authenticated here. It will also set settings from user profile into connection_context. 
try { + LOG_DEBUG(log, "Before receiveHello"); receiveHello(); + LOG_DEBUG(log, "Before sendHello"); sendHello(); + LOG_DEBUG(log, "Before receiveAddendum"); if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) receiveAddendum(); @@ -499,7 +502,6 @@ void TCPHandler::runImpl() { std::lock_guard lock(task_callback_mutex); - sendTimezone(); sendLogs(); sendEndOfStream(); } From d5ea52e4b9b4005356f79e3eaadd4d6458fa116e Mon Sep 17 00:00:00 2001 From: zvonand Date: Fri, 14 Apr 2023 01:28:59 +0200 Subject: [PATCH 035/722] optimize --- src/Common/DateLUT.h | 11 +++++------ src/Core/SettingsFields.h | 6 +++--- src/DataTypes/TimezoneMixin.h | 2 +- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index 810810edb6c..59b280240ea 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -28,11 +28,11 @@ public: /// If it is not set, server's timezone (the one which server has) is being used. static ALWAYS_INLINE const DateLUTImpl & instance() { - std::string effective_time_zone; const auto & date_lut = getInstance(); if (DB::CurrentThread::isInitialized()) { + std::string effective_time_zone; const auto query_context = DB::CurrentThread::get().getQueryContext(); if (query_context) @@ -43,6 +43,8 @@ public: return date_lut.getImplementation(effective_time_zone); } + /// Timezone is passed in query_context, but on CH-Client we have no query context, + /// and each time we modify client's global context const auto global_context = DB::CurrentThread::get().getGlobalContext(); if (global_context) { @@ -56,15 +58,12 @@ public: return *date_lut.default_impl.load(std::memory_order_acquire); } - /// Return singleton DateLUTImpl instance for a given time zone. If timezone is an empty string, - /// server's timezone is used. The `timezone` setting is not considered here. static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone) { - const auto & date_lut = getInstance(); - if (time_zone.empty()) - return *date_lut.default_impl.load(std::memory_order_acquire); + return instance(); + const auto & date_lut = getInstance(); return date_lut.getImplementation(time_zone); } diff --git a/src/Core/SettingsFields.h b/src/Core/SettingsFields.h index 8e9ffe03008..0ee3ddd4862 100644 --- a/src/Core/SettingsFields.h +++ b/src/Core/SettingsFields.h @@ -576,10 +576,10 @@ struct SettingFieldTimezone private: cctz::time_zone validated_tz; - void validateTimezone(const std::string & str) + void validateTimezone(const std::string & tz_str) { - if (str != "" && !cctz::load_time_zone(str, &validated_tz)) - throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", str); + if (!tz_str.empty() && !cctz::load_time_zone(tz_str, &validated_tz)) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", tz_str); } }; diff --git a/src/DataTypes/TimezoneMixin.h b/src/DataTypes/TimezoneMixin.h index 5b7870c7b9a..03ecde5dd0a 100644 --- a/src/DataTypes/TimezoneMixin.h +++ b/src/DataTypes/TimezoneMixin.h @@ -15,7 +15,7 @@ public: explicit TimezoneMixin(const String & time_zone_name = "") : has_explicit_time_zone(!time_zone_name.empty()) - , time_zone(time_zone_name.empty() ? 
DateLUT::instance() : DateLUT::instance(time_zone_name)) + , time_zone(DateLUT::instance(time_zone_name)) , utc_time_zone(DateLUT::instance("UTC")) { } From 267bbcab007d02748af2b2b18c63de73c4fa327b Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Fri, 14 Apr 2023 00:09:57 +0300 Subject: [PATCH 036/722] Added ability to implicitly use file table function in clickhouse-local --- programs/local/LocalServer.cpp | 3 +- src/Databases/DatabaseFactory.cpp | 21 +- src/Databases/DatabaseFileSystem.cpp | 132 +++++++++ src/Databases/DatabaseFileSystem.h | 51 ++++ src/Databases/DatabasesOverlay.cpp | 267 ++++++++++++++++++ src/Databases/DatabasesOverlay.h | 68 +++++ ...cal_implicit_file_table_function.reference | 9 + ...ouse_local_implicit_file_table_function.sh | 43 +++ 8 files changed, 591 insertions(+), 3 deletions(-) create mode 100644 src/Databases/DatabaseFileSystem.cpp create mode 100644 src/Databases/DatabaseFileSystem.h create mode 100644 src/Databases/DatabasesOverlay.cpp create mode 100644 src/Databases/DatabasesOverlay.h create mode 100644 tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.reference create mode 100755 tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 5768e744f94..566d11791ca 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -647,7 +648,7 @@ void LocalServer::processConfig() * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons. */ std::string default_database = config().getString("default_database", "_local"); - DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, global_context)); + DatabaseCatalog::instance().attachDatabase(default_database, CreateClickHouseLocalDatabaseOverlay(default_database, global_context)); global_context->setCurrentDatabase(default_database); applyCmdOptions(global_context); diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 89a799349bf..b023bb06ad1 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -132,13 +133,13 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String static const std::unordered_set database_engines{"Ordinary", "Atomic", "Memory", "Dictionary", "Lazy", "Replicated", "MySQL", "MaterializeMySQL", "MaterializedMySQL", - "PostgreSQL", "MaterializedPostgreSQL", "SQLite"}; + "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "FileSystem"}; if (!database_engines.contains(engine_name)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine name `{}` does not exist", engine_name); static const std::unordered_set engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL", - "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite"}; + "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "FileSystem"}; static const std::unordered_set engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"}; bool engine_may_have_arguments = engines_with_arguments.contains(engine_name); @@ -432,6 +433,22 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String return std::make_shared(context, engine_define, 
create.attach, database_path); } #endif + else if (engine_name == "FileSystem") { + const ASTFunction * engine = engine_define->engine; + + // If init_path is empty, then the current path from Poco will be used + std::string init_path; + + if (engine->arguments && engine->arguments->children.size() > 0) { + if (engine->arguments->children.size() != 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "FileSystem database requires at most 1 argument: file_system_path"); + + const auto & arguments = engine->arguments->children; + init_path = safeGetLiteralValue(arguments[0], engine_name); + } + + return std::make_shared(database_name, init_path, context); + } throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", engine_name); } diff --git a/src/Databases/DatabaseFileSystem.cpp b/src/Databases/DatabaseFileSystem.cpp new file mode 100644 index 00000000000..9e2273970c3 --- /dev/null +++ b/src/Databases/DatabaseFileSystem.cpp @@ -0,0 +1,132 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +DatabaseFileSystem::DatabaseFileSystem(const String & name_, const String & path_, ContextPtr context_) + : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")")) +{ + if (path.empty()) + path = Poco::Path::current(); +} + +std::string DatabaseFileSystem::getTablePath(const std::string& table_name) const +{ + return Poco::Path(path, table_name).toString(); +} + +void DatabaseFileSystem::addTable(const std::string& table_name, StoragePtr table_storage) const +{ + std::lock_guard lock(mutex); + loaded_tables.emplace(table_name, table_storage); +} + +bool DatabaseFileSystem::isTableExist(const String & name, ContextPtr) const +{ + { + std::lock_guard lock(mutex); + if (loaded_tables.find(name) != loaded_tables.end()) + return true; + } + + Poco::File table_file(getTablePath(name)); + return table_file.exists() && table_file.isFile(); +} + +StoragePtr DatabaseFileSystem::tryGetTable(const String & name, ContextPtr context_) const +{ + // Check if the table exists in the loaded tables map + { + std::lock_guard lock(mutex); + auto it = loaded_tables.find(name); + if (it != loaded_tables.end()) + return it->second; + } + + auto table_path = getTablePath(name); + + // If the table doesn't exist in the tables map, check if the corresponding file exists + Poco::File table_file(table_path); + if (!table_file.exists()) + return nullptr; + + // If the file exists, create a new table using TableFunctionFile and return it. 
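+        // The storage produced below is cached via addTable(), so subsequent lookups of the
+        // same file name are answered from loaded_tables without rebuilding the table function.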
+ auto args = makeASTFunction("file", std::make_shared(table_path)); + + auto table_function = TableFunctionFactory::instance().get(args, context_); + if (!table_function) + return nullptr; + + auto table_storage = table_function->execute(args, context_, name); + if (table_storage) + addTable(name, table_storage); + + return table_storage; +} + +ASTPtr DatabaseFileSystem::getCreateDatabaseQuery() const +{ + auto settings = getContext()->getSettingsRef(); + ParserCreateQuery parser; + + String query = "CREATE DATABASE " + backQuoteIfNeed(getDatabaseName()) + " ENGINE = FileSystem(" + backQuoteIfNeed(path) + ")"; + ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth); + + if (const auto database_comment = getDatabaseComment(); !database_comment.empty()) + { + auto & ast_create_query = ast->as(); + ast_create_query.set(ast_create_query.comment, std::make_shared(database_comment)); + } + + return ast; +} + +void DatabaseFileSystem::shutdown() +{ + Tables tables_snapshot; + { + std::lock_guard lock(mutex); + tables_snapshot = loaded_tables; + } + + for (const auto & kv : tables_snapshot) + { + auto table_id = kv.second->getStorageID(); + kv.second->flushAndShutdown(); + } + + std::lock_guard lock(mutex); + loaded_tables.clear(); +} + +/** + * Returns an empty vector because the database is read-only and no tables can be backed up. + */ +std::vector> DatabaseFileSystem::getTablesForBackup(const FilterByNameFunction&, const ContextPtr&) const { + return {}; +} + +/** + * + * Returns an empty iterator because the database does not have its own tables + * But only caches them for quick access. + */ +DatabaseTablesIteratorPtr DatabaseFileSystem::getTablesIterator(ContextPtr, const FilterByNameFunction&) const { + return std::make_unique(Tables{}, getDatabaseName()); +} + +} // DB diff --git a/src/Databases/DatabaseFileSystem.h b/src/Databases/DatabaseFileSystem.h new file mode 100644 index 00000000000..474a7e78335 --- /dev/null +++ b/src/Databases/DatabaseFileSystem.h @@ -0,0 +1,51 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace DB +{ + +class Context; + +/** + * DatabaseFileSystem allows to interact with files stored on the file system + * Uses TableFunctionFile to implicitly load file when a user requests the table, and provides read-only access to the data in the file + * Tables are cached inside the database for quick access + * + * Used in clickhouse-local to access local files + */ +class DatabaseFileSystem : public IDatabase, protected WithContext +{ +public: + DatabaseFileSystem(const String & name, const String & path, ContextPtr context); + + String getEngineName() const override { return "FileSystem"; } + + bool isTableExist(const String & name, ContextPtr context) const override; + + StoragePtr tryGetTable(const String & name, ContextPtr context) const override; + + bool empty() const override { return true; } + + ASTPtr getCreateDatabaseQuery() const override; + + void shutdown() override; + + std::vector> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override; + DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override; + +protected: + std::string getTablePath(const std::string & table_name) const; + void addTable(const std::string & table_name, StoragePtr table_storage) const; + +private: + String path; + mutable Tables loaded_tables TSA_GUARDED_BY(mutex); + Poco::Logger * log; +}; + +} // DB diff --git 
a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp new file mode 100644 index 00000000000..9c3d802df73 --- /dev/null +++ b/src/Databases/DatabasesOverlay.cpp @@ -0,0 +1,267 @@ +#include + +#include +#include +#include + +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; + extern const int LOGICAL_ERROR; + extern const int CANNOT_GET_CREATE_TABLE_QUERY; +} + +DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_) + : IDatabase(name_), WithContext(context_->getGlobalContext()), log(&Poco::Logger::get("DatabaseOverlay(" + name_ + ")")) +{ +} + +DatabasesOverlay & DatabasesOverlay::registerNextDatabase(DatabasePtr database) +{ + databases.push_back(std::move(database)); + return *this; +} + +bool DatabasesOverlay::isTableExist(const String & table_name, ContextPtr context_) const +{ + for (const auto & db : databases) + { + if (db->isTableExist(table_name, context_)) + return true; + } + return false; +} + +StoragePtr DatabasesOverlay::tryGetTable(const String & table_name, ContextPtr context_) const +{ + StoragePtr result = nullptr; + for (const auto & db : databases) + { + result = db->tryGetTable(table_name, context_); + if (result) + break; + } + return result; +} + +void DatabasesOverlay::createTable(ContextPtr context_, const String & table_name, const StoragePtr & table, const ASTPtr & query) +{ + for (auto & db : databases) + { + try + { + db->createTable(context_, table_name, table, query); + return; + } + catch (...) + { + continue; + } + } + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no databases for CREATE TABLE {} query in Database{}", table_name, getEngineName()); +} + +void DatabasesOverlay::dropTable(ContextPtr context_, const String & table_name, bool sync) +{ + for (auto & db : databases) + { + try + { + db->dropTable(context_, table_name, sync); + return; + } + catch (...) + { + continue; + } + } + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no databases for DROP TABLE {} query in Database{}", table_name, getEngineName()); +} + +void DatabasesOverlay::attachTable( + ContextPtr context_, const String & table_name, const StoragePtr & table, const String & relative_table_path) +{ + for (auto & db : databases) + { + try + { + db->attachTable(context_, table_name, table, relative_table_path); + return; + } + catch (...) + { + continue; + } + } + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no databases for ATTACH TABLE query in Database{}", getEngineName()); +} + +StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & table_name) +{ + StoragePtr result = nullptr; + for (auto & db : databases) + { + try + { + result = db->detachTable(context_, table_name); + if (result) + return result; + } + catch (...) 
+ { + continue; + } + } + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no databases for DETACH TABLE {} query in Database{}", table_name, getEngineName()); +} + +ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const +{ + ASTPtr result = nullptr; + for (const auto & db : databases) + { + result = db->tryGetCreateTableQuery(name, context_); + if (result) + break; + } + if (!result && throw_on_error) + throw Exception(ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY, "There is no metadata of table {} in Database{}", name, getEngineName()); + return result; +} + +/* + * DatabaseOverlay cannot be constructed by "CREATE DATABASE" query, as it is not a traditional ClickHouse database + * To use DatabaseOverlay, it must be constructed programmatically in code + */ +ASTPtr DatabasesOverlay::getCreateDatabaseQuery() const +{ + return std::make_shared(); +} + +String DatabasesOverlay::getTableDataPath(const String & table_name) const +{ + String result; + for (const auto & db : databases) + { + result = db->getTableDataPath(table_name); + if (!result.empty()) + break; + } + return result; +} + +String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const +{ + String result; + for (const auto & db : databases) + { + result = db->getTableDataPath(query); + if (!result.empty()) + break; + } + return result; +} + +UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const +{ + UUID result = UUIDHelpers::Nil; + for (const auto & db : databases) + { + result = db->tryGetTableUUID(table_name); + if (result != UUIDHelpers::Nil) + break; + } + return result; +} + +void DatabasesOverlay::drop(ContextPtr context_) +{ + for (auto & db : databases) + db->drop(context_); +} + +void DatabasesOverlay::alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata) +{ + for (auto & db : databases) + { + try + { + db->alterTable(local_context, table_id, metadata); + return; + } + catch (...) + { + continue; + } + } + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no databases for alterTable in Database{}", getEngineName()); +} + +std::vector> +DatabasesOverlay::getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const +{ + std::vector> result; + for (const auto & db : databases) + { + auto dbBackup = db->getTablesForBackup(filter, local_context); + result.insert(result.end(), std::make_move_iterator(dbBackup.begin()), std::make_move_iterator(dbBackup.end())); + } + return result; +} + +void DatabasesOverlay::createTableRestoredFromBackup( + const ASTPtr & create_table_query, + ContextMutablePtr local_context, + std::shared_ptr /*restore_coordination*/, + UInt64 /*timeout_ms*/) +{ + /// Creates a tables by executing a "CREATE TABLE" query. 
+ InterpreterCreateQuery interpreter{create_table_query, local_context}; + interpreter.setInternal(true); + interpreter.execute(); +} + +bool DatabasesOverlay::empty() const +{ + for (const auto & db : databases) + { + if (!db->empty()) + return false; + } + return true; +} + +void DatabasesOverlay::shutdown() +{ + for (auto & db : databases) + db->shutdown(); +} + +DatabaseTablesIteratorPtr DatabasesOverlay::getTablesIterator(ContextPtr context_, const FilterByNameFunction & filter_by_table_name) const +{ + Tables tables; + for (const auto & db : databases) + { + for (auto table_it = db->getTablesIterator(context_, filter_by_table_name); table_it->isValid(); table_it->next()) + tables.insert({table_it->name(), table_it->table()}); + } + return std::make_unique(std::move(tables), getDatabaseName()); +} + +DatabasePtr CreateClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_) +{ + auto databaseCombiner = std::make_shared(name_, context_); + databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); + databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); + return databaseCombiner; +} + +} diff --git a/src/Databases/DatabasesOverlay.h b/src/Databases/DatabasesOverlay.h new file mode 100644 index 00000000000..77f0085161b --- /dev/null +++ b/src/Databases/DatabasesOverlay.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include + +namespace DB +{ + +/** + * Implements the IDatabase interface and combines multiple other databases + * Searches for tables in each database in order until found, and delegates operations to the appropriate database + * Useful for combining databases + * + * Used in clickhouse-local to combine DatabaseFileSystem and DatabaseMemory + */ +class DatabasesOverlay : public IDatabase, protected WithContext +{ +public: + DatabasesOverlay(const String & name_, ContextPtr context_); + + /// Not thread-safe. 
Use only as factory to initialize database + DatabasesOverlay & registerNextDatabase(DatabasePtr database); + + String getEngineName() const override { return "Overlay"; } + +public: + bool isTableExist(const String & table_name, ContextPtr context) const override; + + StoragePtr tryGetTable(const String & table_name, ContextPtr context) const override; + + void createTable(ContextPtr context, const String & table_name, const StoragePtr & table, const ASTPtr & query) override; + + void dropTable(ContextPtr context, const String & table_name, bool sync) override; + + void attachTable(ContextPtr context, const String & table_name, const StoragePtr & table, const String & relative_table_path) override; + + StoragePtr detachTable(ContextPtr context, const String & table_name) override; + + ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override; + ASTPtr getCreateDatabaseQuery() const override; + + String getTableDataPath(const String & table_name) const override; + String getTableDataPath(const ASTCreateQuery & query) const override; + + UUID tryGetTableUUID(const String & table_name) const override; + + void drop(ContextPtr context) override; + + void alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata) override; + + std::vector> getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const override; + + void createTableRestoredFromBackup(const ASTPtr & create_table_query, ContextMutablePtr local_context, std::shared_ptr restore_coordination, UInt64 timeout_ms) override; + + DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name) const override; + + bool empty() const override; + + void shutdown() override; + +protected: + std::vector databases; + Poco::Logger * log; +}; + +DatabasePtr CreateClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_); + +} diff --git a/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.reference b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.reference new file mode 100644 index 00000000000..0fcd843e737 --- /dev/null +++ b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.reference @@ -0,0 +1,9 @@ +Test 1: check explicit and implicit call of the file table function +explicit: +4 +implicit: +4 +Test 2: check FileSystem database +4 +Test 3: check show database with FileSystem +test02707 diff --git a/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh new file mode 100755 index 00000000000..4d8d7b1395a --- /dev/null +++ b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +dir=02707_clickhouse_local_tmp +[[ -d $dir ]] && rm -r $dir +mkdir $dir +# Create temporary csv file for tests +echo '"id","str","int","text"' > $dir/tmp.csv +echo '1,"abc",123,"abacaba"' >> $dir/tmp.csv +echo '2,"def",456,"bacabaa"' >> $dir/tmp.csv +echo '3,"story",78912,"acabaab"' >> $dir/tmp.csv +echo '4,"history",21321321,"cabaaba"' >> $dir/tmp.csv + +################# +echo "Test 1: check explicit and implicit call of the file table function" + +echo "explicit:" +$CLICKHOUSE_LOCAL -q 'SELECT COUNT(*) FROM file("02707_clickhouse_local_tmp/tmp.csv")' +echo "implicit:" +$CLICKHOUSE_LOCAL -q 'SELECT COUNT(*) FROM "02707_clickhouse_local_tmp/tmp.csv"' + +################# +echo "Test 2: check FileSystem database" +$CLICKHOUSE_LOCAL --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test; +CREATE DATABASE test ENGINE = FileSystem('02707_clickhouse_local_tmp'); +SELECT COUNT(*) FROM test.\`tmp.csv\`; +DROP DATABASE test; +""" + +################# +echo "Test 3: check show database with FileSystem" +$CLICKHOUSE_LOCAL --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test02707; +CREATE DATABASE test02707 ENGINE = FileSystem('02707_clickhouse_local_tmp'); +SHOW DATABASES; +DROP DATABASE test02707; +""" | grep "test02707" + +rm -r $dir \ No newline at end of file From 22be85d9764d6ebe3511313c9dadcbdf070c53ad Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 18 Apr 2023 02:42:30 +0200 Subject: [PATCH 037/722] renamed setting --- docs/en/operations/settings/settings.md | 8 +++---- .../functions/date-time-functions.md | 2 +- docs/ru/operations/settings/settings.md | 8 +++---- .../functions/date-time-functions.md | 2 +- src/Client/ClientBase.cpp | 5 +--- src/Common/DateLUT.cpp | 2 +- src/Core/Settings.h | 2 +- src/Core/SettingsFields.cpp | 15 ++++++++++++ src/Core/SettingsFields.h | 23 +++++++++++-------- src/Interpreters/Context.cpp | 1 - src/Server/TCPHandler.cpp | 6 +---- .../0_stateless/02681_timezone_setting.sql | 12 +++++----- 12 files changed, 48 insertions(+), 38 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 7caf3d4333f..dd81b07b9c0 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4008,9 +4008,9 @@ Default value: `0`. Use this setting only for backward compatibility if your use cases depend on old syntax. ::: -## timezone {#timezone} +## session_timezone {#session_timezone} -If specified, sets a implicit timezone (instead of server-default). All DateTime/DateTime64 values (and/or functions results) that have no explicit timezone specified are treated as having this timezone instead of default. +If specified, sets an implicit timezone (instead of server-default). All DateTime/DateTime64 values (and/or functions results) that have no explicit timezone specified are treated as having this timezone instead of default. 
Examples: ```clickhouse @@ -4020,13 +4020,13 @@ Europe/Berlin Europe/Berlin ``` ```clickhouse -SELECT timeZone(), serverTimezone() SETTINGS timezone = 'Asia/Novosibirsk' FORMAT TSV +SELECT timeZone(), serverTimezone() SETTINGS session_timezone = 'Asia/Novosibirsk' FORMAT TSV Asia/Novosibirsk Europe/Berlin ``` ```clickhouse -SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS timezone = 'America/Denver' FORMAT TSV +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS session_timezone = 'America/Denver' FORMAT TSV 1999-12-13 07:23:23.123 ``` diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index f96041996d4..c1b8d201745 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -26,7 +26,7 @@ SELECT ## timeZone -Returns the default timezone of the server for current session. This can be modified using `SET timezone = 'New/Value'` +Returns the default timezone of the server for current session. This can be modified using `SET session_timezone = 'New/Value'` If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value. **Syntax** diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 1687e37dba2..fd4d1e11df7 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -4075,9 +4075,9 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_ca Значение по умолчанию: `false` -## timezone {#timezone} +## session_timezone {#session_timezone} -Задаёт значение часового пояса (timezone) по умолчанию для текущей сессии вместо часового пояса сервера. То есть, все значения DateTime/DateTime64, для которых явно не задан параметр timezone, будут интерпретированы как относящиеся к указанной зоне. +Задаёт значение часового пояса (session_timezone) по умолчанию для текущей сессии вместо часового пояса сервера. То есть, все значения DateTime/DateTime64, для которых явно не задан параметр timezone, будут интерпретированы как относящиеся к указанной зоне. Примеры: ```clickhouse @@ -4087,13 +4087,13 @@ Europe/Berlin Europe/Berlin ``` ```clickhouse -SELECT timeZone(), serverTimezone() SETTINGS timezone = 'Asia/Novosibirsk' FORMAT TSV +SELECT timeZone(), serverTimezone() SETTINGS session_timezone = 'Asia/Novosibirsk' FORMAT TSV Asia/Novosibirsk Europe/Berlin ``` ```clickhouse -SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS timezone = 'America/Denver' FORMAT TSV +SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS session_timezone = 'America/Denver' FORMAT TSV 1999-12-13 07:23:23.123 ``` diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 132b54c1040..3e378c08308 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -26,7 +26,7 @@ SELECT ## timeZone {#timezone} -Возвращает часовой пояс сервера, считающийся умолчанием для текущей сессии. +Возвращает часовой пояс сервера, считающийся умолчанием для текущей сессии. 
Можно изменить значение с помощью `SET session_timezone = 'New/Timezone''` Если функция вызывается в контексте распределенной таблицы, то она генерирует обычный столбец со значениями, актуальными для каждого шарда. Иначе возвращается константа. **Синтаксис** diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 2e82144e64d..5126777fa1e 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -451,12 +451,9 @@ void ClientBase::onData(Block & block, ASTPtr parsed_query) /// Also do not output too much data if we're fuzzing. if (block.rows() == 0 || (query_fuzzer_runs != 0 && processed_rows >= 100)) { -// LogContextes("ClientBase::onData header", global_context); return; } -// LogContextes("ClientBase::onData DATA block", global_context); - /// If results are written INTO OUTFILE, we can avoid clearing progress to avoid flicker. if (need_render_progress && tty_buf && (!select_into_file || select_into_file_and_stdout)) progress_indication.clearProgressOutput(*tty_buf); @@ -1075,7 +1072,7 @@ void ClientBase::onProgress(const Progress & value) void ClientBase::onTimezoneUpdate(const String & tz) { Settings settings; - settings.timezone = tz; + settings.session_timezone = tz; global_context->applySettingsChanges(settings.changes()); } diff --git a/src/Common/DateLUT.cpp b/src/Common/DateLUT.cpp index 16bd69a20f8..f7e7df016cb 100644 --- a/src/Common/DateLUT.cpp +++ b/src/Common/DateLUT.cpp @@ -167,5 +167,5 @@ DateLUT & DateLUT::getInstance() std::string DateLUT::extractTimezoneFromContext(const DB::ContextPtr query_context) { - return query_context->getSettingsRef().timezone.value; + return query_context->getSettingsRef().session_timezone.value; } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index b3e9f3fb220..837958aa5b0 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -734,7 +734,7 @@ class IColumn; M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \ M(Bool, function_json_value_return_type_allow_nullable, false, "Allow function to return nullable type.", 0) \ M(Bool, function_json_value_return_type_allow_complex, false, "Allow function to return complex type, such as: struct, array, map.", 0) \ - M(Timezone, timezone, "", "Use specified timezone for interpreting Date and DateTime instead of server's timezone.", 0) \ + M(Timezone, session_timezone, "", "Use specified timezone for interpreting Date and DateTime instead of server's timezone in current session.", 0) \ // End of COMMON_SETTINGS // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS. 
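Beyond the documentation, the rename threads through the client (ClientBase::onTimezoneUpdate now writes session_timezone), DateLUT::extractTimezoneFromContext and the Settings.h declaration above. A minimal sketch of the resulting behaviour, assuming a query context that already carries the setting (illustration only, not part of the diff):

    // with SET session_timezone = 'Asia/Novosibirsk' applied to the query context,
    // a bare instance() call now resolves to that zone:
    const DateLUTImpl & lut = DateLUT::instance();
    // lut.getTimeZone() == "Asia/Novosibirsk" under the assumption above
    // columns with an explicit zone still pass it directly and bypass the setting:
    const DateLUTImpl & utc = DateLUT::instance("UTC");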
diff --git a/src/Core/SettingsFields.cpp b/src/Core/SettingsFields.cpp index f4169aa0c64..65720056c8a 100644 --- a/src/Core/SettingsFields.cpp +++ b/src/Core/SettingsFields.cpp @@ -13,6 +13,7 @@ #include +extern const char * auto_time_zones[]; namespace DB { @@ -26,6 +27,14 @@ namespace ErrorCodes namespace { + bool checkIsExitingTimeZone(const std::string_view timezone) + { + for (auto * it = auto_time_zones; *it; ++it) + if (timezone == *it) + return true; + return false; + } + template T stringToNumber(const String & str) { @@ -463,6 +472,12 @@ void SettingFieldTimezone::readBinary(ReadBuffer & in) *this = std::move(str); } +void SettingFieldTimezone::validateTimezone(std::string_view str) +{ + if (str != "" && !checkIsExitingTimeZone(str)) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", str); +} + String SettingFieldCustom::toString() const { return value.dump(); diff --git a/src/Core/SettingsFields.h b/src/Core/SettingsFields.h index 0ee3ddd4862..e3b18a606a1 100644 --- a/src/Core/SettingsFields.h +++ b/src/Core/SettingsFields.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -553,13 +552,16 @@ struct SettingFieldTimezone String value; bool changed = false; - explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(std::string(str)); value = str; } + explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(str); value = str; } +// explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(std::string(str)); value = str; } explicit SettingFieldTimezone(const String & str) { validateTimezone(str); value = str; } - explicit SettingFieldTimezone(String && str) { validateTimezone(std::string(str)); value = std::move(str); } +// explicit SettingFieldTimezone(String && str) { validateTimezone(std::string(str)); value = std::move(str); } + explicit SettingFieldTimezone(String && str) { validateTimezone(str); value = std::move(str); } explicit SettingFieldTimezone(const char * str) { validateTimezone(str); value = str; } explicit SettingFieldTimezone(const Field & f) { const String & str = f.safeGet(); validateTimezone(str); value = str; } - SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(std::string(str)); value = str; changed = true; return *this; } +// SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(std::string(str)); value = str; changed = true; return *this; } + SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(str); value = str; changed = true; return *this; } SettingFieldTimezone & operator =(const String & str) { *this = std::string_view{str}; return *this; } SettingFieldTimezone & operator =(String && str) { validateTimezone(str); value = std::move(str); changed = true; return *this; } SettingFieldTimezone & operator =(const char * str) { *this = std::string_view{str}; return *this; } @@ -575,12 +577,13 @@ struct SettingFieldTimezone void readBinary(ReadBuffer & in); private: - cctz::time_zone validated_tz; - void validateTimezone(const std::string & tz_str) - { - if (!tz_str.empty() && !cctz::load_time_zone(tz_str, &validated_tz)) - throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", tz_str); - } + static void validateTimezone(std::string_view str); +// cctz::time_zone validated_tz; +// void validateTimezone(const std::string & tz_str) +// { +// if (!tz_str.empty() && !cctz::load_time_zone(tz_str, &validated_tz)) +// throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, 
"Invalid time zone: {}", tz_str); +// } }; /// Can keep a value of any type. Used for user-defined settings. diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 82ecd87faa0..400eb570131 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1680,7 +1680,6 @@ void Context::applySettingChange(const SettingChange & change) void Context::applySettingsChanges(const SettingsChanges & changes) { auto lock = getLock(); - for (const SettingChange & change : changes) applySettingChange(change); applySettingsQuirks(settings); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index a875507d227..e44609529ba 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -183,11 +183,8 @@ void TCPHandler::runImpl() /// User will be authenticated here. It will also set settings from user profile into connection_context. try { - LOG_DEBUG(log, "Before receiveHello"); receiveHello(); - LOG_DEBUG(log, "Before sendHello"); sendHello(); - LOG_DEBUG(log, "Before receiveAddendum"); if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) receiveAddendum(); @@ -465,7 +462,6 @@ void TCPHandler::runImpl() sendSelectProfileEvents(); sendLogs(); - return false; }; @@ -1072,7 +1068,7 @@ void TCPHandler::sendTimezone() if (client_tcp_protocol_version < DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES) return; - const String & tz = query_context->getSettingsRef().timezone.toString(); + const String & tz = query_context->getSettingsRef().session_timezone.toString(); LOG_DEBUG(log, "TCPHandler::sendTimezone(): {}", tz); writeVarUInt(Protocol::Server::TimezoneUpdate, *out); diff --git a/tests/queries/0_stateless/02681_timezone_setting.sql b/tests/queries/0_stateless/02681_timezone_setting.sql index 73afb4c029b..f66e8d2b646 100644 --- a/tests/queries/0_stateless/02681_timezone_setting.sql +++ b/tests/queries/0_stateless/02681_timezone_setting.sql @@ -1,11 +1,11 @@ -SET timezone = 'Абырвалг'; -- { serverError BAD_ARGUMENTS} +SET session_timezone = 'Абырвалг'; -- { serverError BAD_ARGUMENTS} -SET timezone = 'Asia/Novosibirsk'; +SET session_timezone = 'Asia/Novosibirsk'; SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Europe/Zurich'); -SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS timezone = 'Europe/Zurich'; +SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS session_timezone = 'Europe/Zurich'; -SET timezone = 'Asia/Manila'; +SET session_timezone = 'Asia/Manila'; SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); -SELECT timezone(), timezoneOf(now()) SETTINGS timezone = 'Europe/Zurich' FORMAT TSV; -SELECT timezone(), timezoneOf(now()) SETTINGS timezone = 'Pacific/Pitcairn' FORMAT TSV; +SELECT timezone(), timezoneOf(now()) SETTINGS session_timezone = 'Europe/Zurich' FORMAT TSV; +SELECT timezone(), timezoneOf(now()) SETTINGS session_timezone = 'Pacific/Pitcairn' FORMAT TSV; From 0550b0640ce0020d4e4f0015447631c4b742ab13 Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 18 Apr 2023 03:35:29 +0200 Subject: [PATCH 038/722] fix linking issue --- src/Core/SettingsFields.cpp | 15 --------------- src/Core/SettingsFields.h | 23 ++++++++++------------- 2 files changed, 10 insertions(+), 28 deletions(-) diff --git a/src/Core/SettingsFields.cpp b/src/Core/SettingsFields.cpp index 65720056c8a..f4169aa0c64 100644 --- a/src/Core/SettingsFields.cpp +++ b/src/Core/SettingsFields.cpp @@ -13,7 +13,6 @@ 
#include -extern const char * auto_time_zones[]; namespace DB { @@ -27,14 +26,6 @@ namespace ErrorCodes namespace { - bool checkIsExitingTimeZone(const std::string_view timezone) - { - for (auto * it = auto_time_zones; *it; ++it) - if (timezone == *it) - return true; - return false; - } - template T stringToNumber(const String & str) { @@ -472,12 +463,6 @@ void SettingFieldTimezone::readBinary(ReadBuffer & in) *this = std::move(str); } -void SettingFieldTimezone::validateTimezone(std::string_view str) -{ - if (str != "" && !checkIsExitingTimeZone(str)) - throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", str); -} - String SettingFieldCustom::toString() const { return value.dump(); diff --git a/src/Core/SettingsFields.h b/src/Core/SettingsFields.h index e3b18a606a1..0ee3ddd4862 100644 --- a/src/Core/SettingsFields.h +++ b/src/Core/SettingsFields.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -552,16 +553,13 @@ struct SettingFieldTimezone String value; bool changed = false; - explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(str); value = str; } -// explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(std::string(str)); value = str; } + explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(std::string(str)); value = str; } explicit SettingFieldTimezone(const String & str) { validateTimezone(str); value = str; } -// explicit SettingFieldTimezone(String && str) { validateTimezone(std::string(str)); value = std::move(str); } - explicit SettingFieldTimezone(String && str) { validateTimezone(str); value = std::move(str); } + explicit SettingFieldTimezone(String && str) { validateTimezone(std::string(str)); value = std::move(str); } explicit SettingFieldTimezone(const char * str) { validateTimezone(str); value = str; } explicit SettingFieldTimezone(const Field & f) { const String & str = f.safeGet(); validateTimezone(str); value = str; } -// SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(std::string(str)); value = str; changed = true; return *this; } - SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(str); value = str; changed = true; return *this; } + SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(std::string(str)); value = str; changed = true; return *this; } SettingFieldTimezone & operator =(const String & str) { *this = std::string_view{str}; return *this; } SettingFieldTimezone & operator =(String && str) { validateTimezone(str); value = std::move(str); changed = true; return *this; } SettingFieldTimezone & operator =(const char * str) { *this = std::string_view{str}; return *this; } @@ -577,13 +575,12 @@ struct SettingFieldTimezone void readBinary(ReadBuffer & in); private: - static void validateTimezone(std::string_view str); -// cctz::time_zone validated_tz; -// void validateTimezone(const std::string & tz_str) -// { -// if (!tz_str.empty() && !cctz::load_time_zone(tz_str, &validated_tz)) -// throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", tz_str); -// } + cctz::time_zone validated_tz; + void validateTimezone(const std::string & tz_str) + { + if (!tz_str.empty() && !cctz::load_time_zone(tz_str, &validated_tz)) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", tz_str); + } }; /// Can keep a value of any type. Used for user-defined settings. 
From 96553bc3d8e70d06e03191f4b848ed07c91e5c6a Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Sun, 16 Apr 2023 23:25:57 +0300 Subject: [PATCH 039/722] Fix style and tests --- src/Databases/DatabaseFactory.cpp | 6 ++- src/Databases/DatabaseFileSystem.cpp | 45 +++++++++++-------- src/Databases/DatabasesOverlay.cpp | 1 - ...ouse_local_implicit_file_table_function.sh | 14 +++--- 4 files changed, 38 insertions(+), 28 deletions(-) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index b023bb06ad1..9c13881fc7b 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -433,13 +433,15 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String return std::make_shared(context, engine_define, create.attach, database_path); } #endif - else if (engine_name == "FileSystem") { + else if (engine_name == "FileSystem") + { const ASTFunction * engine = engine_define->engine; // If init_path is empty, then the current path from Poco will be used std::string init_path; - if (engine->arguments && engine->arguments->children.size() > 0) { + if (engine->arguments && !engine->arguments->children.empty()) + { if (engine->arguments->children.size() != 1) throw Exception(ErrorCodes::BAD_ARGUMENTS, "FileSystem database requires at most 1 argument: file_system_path"); diff --git a/src/Databases/DatabaseFileSystem.cpp b/src/Databases/DatabaseFileSystem.cpp index 9e2273970c3..8b92ad8080a 100644 --- a/src/Databases/DatabaseFileSystem.cpp +++ b/src/Databases/DatabaseFileSystem.cpp @@ -59,23 +59,30 @@ StoragePtr DatabaseFileSystem::tryGetTable(const String & name, ContextPtr conte auto table_path = getTablePath(name); - // If the table doesn't exist in the tables map, check if the corresponding file exists - Poco::File table_file(table_path); - if (!table_file.exists()) + try + { + // If the table doesn't exist in the tables map, check if the corresponding file exists + Poco::File table_file(table_path); + if (!table_file.exists()) + return nullptr; + + // If the file exists, create a new table using TableFunctionFile and return it. + auto args = makeASTFunction("file", std::make_shared(table_path)); + + auto table_function = TableFunctionFactory::instance().get(args, context_); + if (!table_function) + return nullptr; + + auto table_storage = table_function->execute(args, context_, name); + if (table_storage) + addTable(name, table_storage); + + return table_storage; + } + catch (...) + { return nullptr; - - // If the file exists, create a new table using TableFunctionFile and return it. - auto args = makeASTFunction("file", std::make_shared(table_path)); - - auto table_function = TableFunctionFactory::instance().get(args, context_); - if (!table_function) - return nullptr; - - auto table_storage = table_function->execute(args, context_, name); - if (table_storage) - addTable(name, table_storage); - - return table_storage; + } } ASTPtr DatabaseFileSystem::getCreateDatabaseQuery() const @@ -116,7 +123,8 @@ void DatabaseFileSystem::shutdown() /** * Returns an empty vector because the database is read-only and no tables can be backed up. 
*/ -std::vector> DatabaseFileSystem::getTablesForBackup(const FilterByNameFunction&, const ContextPtr&) const { +std::vector> DatabaseFileSystem::getTablesForBackup(const FilterByNameFunction&, const ContextPtr&) const +{ return {}; } @@ -125,7 +133,8 @@ std::vector> DatabaseFileSystem::getTablesForBacku * Returns an empty iterator because the database does not have its own tables * But only caches them for quick access. */ -DatabaseTablesIteratorPtr DatabaseFileSystem::getTablesIterator(ContextPtr, const FilterByNameFunction&) const { +DatabaseTablesIteratorPtr DatabaseFileSystem::getTablesIterator(ContextPtr, const FilterByNameFunction&) const +{ return std::make_unique(Tables{}, getDatabaseName()); } diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 9c3d802df73..da26f9282a0 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -14,7 +14,6 @@ namespace DB namespace ErrorCodes { - extern const int NOT_IMPLEMENTED; extern const int LOGICAL_ERROR; extern const int CANNOT_GET_CREATE_TABLE_QUERY; } diff --git a/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh index 4d8d7b1395a..eea1e47ba7f 100755 --- a/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh +++ b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh @@ -4,8 +4,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -dir=02707_clickhouse_local_tmp -[[ -d $dir ]] && rm -r $dir +dir=${CLICKHOUSE_TEST_UNIQUE_NAME} +[[ -d $dir ]] && rm -rd $dir mkdir $dir # Create temporary csv file for tests echo '"id","str","int","text"' > $dir/tmp.csv @@ -18,15 +18,15 @@ echo '4,"history",21321321,"cabaaba"' >> $dir/tmp.csv echo "Test 1: check explicit and implicit call of the file table function" echo "explicit:" -$CLICKHOUSE_LOCAL -q 'SELECT COUNT(*) FROM file("02707_clickhouse_local_tmp/tmp.csv")' +$CLICKHOUSE_LOCAL -q "SELECT COUNT(*) FROM file('${dir}/tmp.csv')" echo "implicit:" -$CLICKHOUSE_LOCAL -q 'SELECT COUNT(*) FROM "02707_clickhouse_local_tmp/tmp.csv"' +$CLICKHOUSE_LOCAL -q "SELECT COUNT(*) FROM \"${dir}/tmp.csv\"" ################# echo "Test 2: check FileSystem database" $CLICKHOUSE_LOCAL --multiline --multiquery -q """ DROP DATABASE IF EXISTS test; -CREATE DATABASE test ENGINE = FileSystem('02707_clickhouse_local_tmp'); +CREATE DATABASE test ENGINE = FileSystem('${dir}'); SELECT COUNT(*) FROM test.\`tmp.csv\`; DROP DATABASE test; """ @@ -35,9 +35,9 @@ DROP DATABASE test; echo "Test 3: check show database with FileSystem" $CLICKHOUSE_LOCAL --multiline --multiquery -q """ DROP DATABASE IF EXISTS test02707; -CREATE DATABASE test02707 ENGINE = FileSystem('02707_clickhouse_local_tmp'); +CREATE DATABASE test02707 ENGINE = FileSystem('${dir}'); SHOW DATABASES; DROP DATABASE test02707; """ | grep "test02707" -rm -r $dir \ No newline at end of file +rm -rd $dir From 1e8c0a2db9671f0862975499f16b923a49c3a2ec Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 19 Apr 2023 00:06:15 +0200 Subject: [PATCH 040/722] Lighter timezone validation Reused external variable from src/Storages/System/StorageSystemTimeZones.generated.cpp Required changes to CMakeLists of some standalone modules to link properly --- programs/library-bridge/CMakeLists.txt | 8 +++++++- programs/odbc-bridge/CMakeLists.txt | 6 +++++- src/Core/SettingsFields.cpp | 15 
+++++++++++++++ src/Core/SettingsFields.h | 24 ++++++++++++++---------- utils/check-marks/CMakeLists.txt | 6 +++++- utils/keeper-data-dumper/CMakeLists.txt | 8 +++++++- utils/wal-dump/CMakeLists.txt | 6 +++++- 7 files changed, 58 insertions(+), 15 deletions(-) diff --git a/programs/library-bridge/CMakeLists.txt b/programs/library-bridge/CMakeLists.txt index 1cacc391ca5..97af7c3b22e 100644 --- a/programs/library-bridge/CMakeLists.txt +++ b/programs/library-bridge/CMakeLists.txt @@ -13,11 +13,17 @@ set (CLICKHOUSE_LIBRARY_BRIDGE_SOURCES library-bridge.cpp ) +set(CLICKHOUSE_LIBRARY_BRIDGE_EXTERNAL_SOURCES + ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp +) + if (OS_LINUX) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic") endif () -clickhouse_add_executable(clickhouse-library-bridge ${CLICKHOUSE_LIBRARY_BRIDGE_SOURCES}) +clickhouse_add_executable(clickhouse-library-bridge + ${CLICKHOUSE_LIBRARY_BRIDGE_SOURCES} + ${CLICKHOUSE_LIBRARY_BRIDGE_EXTERNAL_SOURCES}) target_link_libraries(clickhouse-library-bridge PRIVATE daemon diff --git a/programs/odbc-bridge/CMakeLists.txt b/programs/odbc-bridge/CMakeLists.txt index 118610e4dcd..bf1b42df026 100644 --- a/programs/odbc-bridge/CMakeLists.txt +++ b/programs/odbc-bridge/CMakeLists.txt @@ -15,13 +15,17 @@ set (CLICKHOUSE_ODBC_BRIDGE_SOURCES validateODBCConnectionString.cpp ) +set(ODBC_BRIDGE_EXTERNAL_SOURCES + ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp +) + if (OS_LINUX) # clickhouse-odbc-bridge is always a separate binary. # Reason: it must not export symbols from SSL, mariadb-client, etc. to not break ABI compatibility with ODBC drivers. set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic") endif () -clickhouse_add_executable(clickhouse-odbc-bridge ${CLICKHOUSE_ODBC_BRIDGE_SOURCES}) +clickhouse_add_executable(clickhouse-odbc-bridge ${CLICKHOUSE_ODBC_BRIDGE_SOURCES} ${ODBC_BRIDGE_EXTERNAL_SOURCES}) target_link_libraries(clickhouse-odbc-bridge PRIVATE daemon diff --git a/src/Core/SettingsFields.cpp b/src/Core/SettingsFields.cpp index f4169aa0c64..65720056c8a 100644 --- a/src/Core/SettingsFields.cpp +++ b/src/Core/SettingsFields.cpp @@ -13,6 +13,7 @@ #include +extern const char * auto_time_zones[]; namespace DB { @@ -26,6 +27,14 @@ namespace ErrorCodes namespace { + bool checkIsExitingTimeZone(const std::string_view timezone) + { + for (auto * it = auto_time_zones; *it; ++it) + if (timezone == *it) + return true; + return false; + } + template T stringToNumber(const String & str) { @@ -463,6 +472,12 @@ void SettingFieldTimezone::readBinary(ReadBuffer & in) *this = std::move(str); } +void SettingFieldTimezone::validateTimezone(std::string_view str) +{ + if (str != "" && !checkIsExitingTimeZone(str)) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", str); +} + String SettingFieldCustom::toString() const { return value.dump(); diff --git a/src/Core/SettingsFields.h b/src/Core/SettingsFields.h index 0ee3ddd4862..e78fef9f455 100644 --- a/src/Core/SettingsFields.h +++ b/src/Core/SettingsFields.h @@ -2,7 +2,7 @@ #include #include -#include +//#include #include #include #include @@ -553,13 +553,16 @@ struct SettingFieldTimezone String value; bool changed = false; - explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(std::string(str)); value = str; } +// explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(std::string(str)); value = str; } + 
explicit SettingFieldTimezone(std::string_view str = {}) { validateTimezone(str); value = str; } explicit SettingFieldTimezone(const String & str) { validateTimezone(str); value = str; } - explicit SettingFieldTimezone(String && str) { validateTimezone(std::string(str)); value = std::move(str); } +// explicit SettingFieldTimezone(String && str) { validateTimezone(std::string(str)); value = std::move(str); } + explicit SettingFieldTimezone(String && str) { validateTimezone(str); value = std::move(str); } explicit SettingFieldTimezone(const char * str) { validateTimezone(str); value = str; } explicit SettingFieldTimezone(const Field & f) { const String & str = f.safeGet(); validateTimezone(str); value = str; } - SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(std::string(str)); value = str; changed = true; return *this; } +// SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(std::string(str)); value = str; changed = true; return *this; } + SettingFieldTimezone & operator =(std::string_view str) { validateTimezone(str); value = str; changed = true; return *this; } SettingFieldTimezone & operator =(const String & str) { *this = std::string_view{str}; return *this; } SettingFieldTimezone & operator =(String && str) { validateTimezone(str); value = std::move(str); changed = true; return *this; } SettingFieldTimezone & operator =(const char * str) { *this = std::string_view{str}; return *this; } @@ -575,12 +578,13 @@ struct SettingFieldTimezone void readBinary(ReadBuffer & in); private: - cctz::time_zone validated_tz; - void validateTimezone(const std::string & tz_str) - { - if (!tz_str.empty() && !cctz::load_time_zone(tz_str, &validated_tz)) - throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", tz_str); - } +// cctz::time_zone validated_tz; +// void validateTimezone(const std::string & str) +// { +// if (!str.empty() && !cctz::load_time_zone(str, &validated_tz)) +// throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", str); +// } + static void validateTimezone(std::string_view str); }; /// Can keep a value of any type. Used for user-defined settings. 
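The SettingsFields hunks in this commit drop the per-call cctz::load_time_zone check in favour of a lookup in the pregenerated `auto_time_zones` list that already backs `system.time_zones`, so validation no longer has to load a full timezone description each time. A minimal standalone sketch of that lookup follows — `auto_time_zones_demo`, its three entries and `isKnownTimeZone` are illustrative stand-ins only (the real array is generated into StorageSystemTimeZones.generated.cpp, and the helper is spelled `checkIsExitingTimeZone` at this point in the series and renamed in a later commit):

```cpp
#include <string_view>

/// Illustrative stand-in: the real auto_time_zones[] is generated into
/// StorageSystemTimeZones.generated.cpp from the bundled tzdata and is null-terminated.
static const char * auto_time_zones_demo[] = {"UTC", "Europe/Zurich", "Asia/Novosibirsk", nullptr};

/// Same linear scan as the SettingsFields.cpp hunk: walk until the nullptr sentinel
/// and compare the candidate name against each known timezone.
static bool isKnownTimeZone(std::string_view timezone)
{
    for (auto * it = auto_time_zones_demo; *it; ++it)
        if (timezone == *it)
            return true;
    return false;
}
```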
diff --git a/utils/check-marks/CMakeLists.txt b/utils/check-marks/CMakeLists.txt index 05546a2989b..456fb3d7112 100644 --- a/utils/check-marks/CMakeLists.txt +++ b/utils/check-marks/CMakeLists.txt @@ -1,2 +1,6 @@ -clickhouse_add_executable (check-marks main.cpp) +set(CHECK_MARKS_EXTERNAL_SOURCES + ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp +) + +clickhouse_add_executable (check-marks ${CHECK_MARKS_EXTERNAL_SOURCES} main.cpp) target_link_libraries(check-marks PRIVATE dbms boost::program_options) diff --git a/utils/keeper-data-dumper/CMakeLists.txt b/utils/keeper-data-dumper/CMakeLists.txt index 1f55e50e68e..a6858a29e8b 100644 --- a/utils/keeper-data-dumper/CMakeLists.txt +++ b/utils/keeper-data-dumper/CMakeLists.txt @@ -1,2 +1,8 @@ -clickhouse_add_executable(keeper-data-dumper main.cpp) +set(KEEPER_DATA_DUMPER_EXTERNAL_SOURCES + ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp +) + +clickhouse_add_executable(keeper-data-dumper + ${KEEPER_DATA_DUMPER_EXTERNAL_SOURCES} + main.cpp) target_link_libraries(keeper-data-dumper PRIVATE dbms) diff --git a/utils/wal-dump/CMakeLists.txt b/utils/wal-dump/CMakeLists.txt index 3d59e95b4ca..754799a6faf 100644 --- a/utils/wal-dump/CMakeLists.txt +++ b/utils/wal-dump/CMakeLists.txt @@ -1,2 +1,6 @@ -clickhouse_add_executable (wal-dump main.cpp) +set(WAL_DUMP_EXTERNAL_SOURCES + ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp +) + +clickhouse_add_executable (wal-dump ${WAL_DUMP_EXTERNAL_SOURCES} main.cpp) target_link_libraries(wal-dump PRIVATE dbms boost::program_options) From 24be7203d931b57a35241fec1abe31a9099ba096 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 19 Apr 2023 00:39:08 +0200 Subject: [PATCH 041/722] add errorcode reference --- src/Core/SettingsFields.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsFields.cpp b/src/Core/SettingsFields.cpp index 65720056c8a..e952688a968 100644 --- a/src/Core/SettingsFields.cpp +++ b/src/Core/SettingsFields.cpp @@ -22,6 +22,7 @@ namespace ErrorCodes extern const int SIZE_OF_FIXED_STRING_DOESNT_MATCH; extern const int CANNOT_PARSE_BOOL; extern const int CANNOT_PARSE_NUMBER; + extern const int BAD_ARGUMENTS; } From 542c09cb518988cf54261edbab691c459efa9a88 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 19 Apr 2023 01:35:49 +0200 Subject: [PATCH 042/722] fix keeper standalone linking --- programs/keeper/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/programs/keeper/CMakeLists.txt b/programs/keeper/CMakeLists.txt index 761335fb707..ff2de3f581c 100644 --- a/programs/keeper/CMakeLists.txt +++ b/programs/keeper/CMakeLists.txt @@ -107,6 +107,8 @@ if (BUILD_STANDALONE_KEEPER) ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Daemon/GraphiteWriter.cpp ${CMAKE_CURRENT_BINARY_DIR}/../../src/Daemon/GitHash.generated.cpp + ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp + Keeper.cpp clickhouse-keeper.cpp ) From ce7dc8b123502aee8af1578d87133f8283c66a5b Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 19 Apr 2023 13:42:07 +0200 Subject: [PATCH 043/722] tidy --- src/Core/SettingsFields.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/SettingsFields.cpp b/src/Core/SettingsFields.cpp index e952688a968..6af38586ed8 100644 --- a/src/Core/SettingsFields.cpp +++ b/src/Core/SettingsFields.cpp @@ -475,7 +475,7 @@ void SettingFieldTimezone::readBinary(ReadBuffer & in) void 
SettingFieldTimezone::validateTimezone(std::string_view str) { - if (str != "" && !checkIsExitingTimeZone(str)) + if (!str.empty() && !checkIsExitingTimeZone(str)) throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", str); } From daae5025e8586156a016687672100a8ec0db6016 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 19 Apr 2023 14:45:51 +0200 Subject: [PATCH 044/722] small updates due to review --- docs/en/operations/settings/settings.md | 1 + docs/en/sql-reference/functions/date-time-functions.md | 2 +- docs/ru/operations/settings/settings.md | 1 + programs/client/Client.cpp | 2 +- src/Client/ClientBase.cpp | 5 ----- 5 files changed, 4 insertions(+), 7 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index dd81b07b9c0..2010f763c84 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4011,6 +4011,7 @@ Use this setting only for backward compatibility if your use cases depend on old ## session_timezone {#session_timezone} If specified, sets an implicit timezone (instead of server-default). All DateTime/DateTime64 values (and/or functions results) that have no explicit timezone specified are treated as having this timezone instead of default. +Setting this to `''` (empty string) effectively resets implicit timezone to server timezone. Examples: ```clickhouse diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index c1b8d201745..265ce676ef7 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -51,7 +51,7 @@ If it is executed in the context of a distributed table, then it generates a nor **Syntax** ``` sql -timeZone() +serverTimeZone() ``` Alias: `ServerTimezone`, `servertimezone`. diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index fd4d1e11df7..8180f5435b8 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -4078,6 +4078,7 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_ca ## session_timezone {#session_timezone} Задаёт значение часового пояса (session_timezone) по умолчанию для текущей сессии вместо часового пояса сервера. То есть, все значения DateTime/DateTime64, для которых явно не задан параметр timezone, будут интерпретированы как относящиеся к указанной зоне. +При значении настройки `''` (пустая строка), будет совпадать с часовым поясом сервера. Примеры: ```clickhouse diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 528c504e555..b760efc21d1 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -324,7 +324,7 @@ try { // All that just to set DB::CurrentThread::get().getGlobalContext() - // which is required for client timezone (pushed as from server) to work. + // which is required for client timezone (pushed from server) to work. 
auto thread_group = std::make_shared(); thread_status.attachToGroup(thread_group, false); } diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 5126777fa1e..6df86db886b 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -78,11 +78,6 @@ namespace fs = std::filesystem; using namespace std::literals; -namespace -{ -using namespace DB; -} - namespace CurrentMetrics { extern const Metric MemoryTracking; From 61f55930ceee99ce23cdab794ce77945f9a6ee1c Mon Sep 17 00:00:00 2001 From: Andrey Zvonov <32552679+zvonand@users.noreply.github.com> Date: Wed, 19 Apr 2023 15:46:17 +0300 Subject: [PATCH 045/722] Update docs/en/operations/settings/settings.md Co-authored-by: Vasily Nemkov --- docs/en/operations/settings/settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 2010f763c84..366e7de8d28 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4034,7 +4034,7 @@ SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zuric Possible values: -- Any valid timezone in `Region/Place` notation, e.g. `Europe/Berlin` +- Any timezone name from `system.time_zones`, e.g. `Europe/Berlin`, `UTC` or `Zulu` Default value: `''`. From b281ceacbb4cc8f66a56b9bc12d5ab521098ce08 Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 19 Apr 2023 14:47:57 +0200 Subject: [PATCH 046/722] Update docs/ru/operations/settings/settings.md --- docs/ru/operations/settings/settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 8180f5435b8..f0f497b6254 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -4101,6 +4101,6 @@ SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zuric Возможные значения: -- Строка вида `Регион/Город`, например `Europe/Zurich` +- Любая зона из `system.time_zones`, например `Europe/Berlin`, `UTC` или `Zulu` Значение по умолчанию: `''`. 
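With the review fixes applied, every constructor and assignment operator of `SettingFieldTimezone` funnels through a single `validateTimezone` helper before the value is stored, so an invalid name can never end up inside the setting — which is why `SET session_timezone = 'Абырвалг'` in the test above is expected to fail with `BAD_ARGUMENTS`. A simplified, self-contained sketch of that funnel pattern; `TimezoneFieldSketch` and the hard-coded `known_time_zones` set are hypothetical stand-ins, not ClickHouse code:

```cpp
#include <set>
#include <stdexcept>
#include <string>
#include <string_view>

/// Hypothetical stand-in for the generated timezone list the patch links in.
static const std::set<std::string_view> known_time_zones = {"UTC", "Europe/Zurich", "Asia/Novosibirsk"};

/// Simplified sketch of the SettingFieldTimezone idea: every construction and
/// assignment path calls validate() before the value is stored.
struct TimezoneFieldSketch
{
    std::string value;
    bool changed = false;

    explicit TimezoneFieldSketch(std::string_view str = {}) { validate(str); value = str; }

    TimezoneFieldSketch & operator=(std::string_view str)
    {
        validate(str);
        value = str;
        changed = true;
        return *this;
    }

private:
    static void validate(std::string_view str)
    {
        /// An empty string is allowed and means "fall back to the server timezone".
        if (!str.empty() && known_time_zones.find(str) == known_time_zones.end())
            throw std::invalid_argument("Invalid time zone: " + std::string(str));
    }
};
```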
From b06d7355d597abdf6692b5c232fb12449d57aa5b Mon Sep 17 00:00:00 2001 From: Andrey Zvonov <32552679+zvonand@users.noreply.github.com> Date: Wed, 19 Apr 2023 17:39:40 +0300 Subject: [PATCH 047/722] Update src/Core/SettingsFields.h Co-authored-by: Vasily Nemkov --- src/Core/SettingsFields.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Core/SettingsFields.h b/src/Core/SettingsFields.h index e78fef9f455..8bd7370c980 100644 --- a/src/Core/SettingsFields.h +++ b/src/Core/SettingsFields.h @@ -2,7 +2,6 @@ #include #include -//#include #include #include #include From b81ce64fa23cf2d05edd488eeb1adbf981784a54 Mon Sep 17 00:00:00 2001 From: Andrey Zvonov <32552679+zvonand@users.noreply.github.com> Date: Wed, 19 Apr 2023 17:39:52 +0300 Subject: [PATCH 048/722] Update src/Client/ClientBase.cpp Co-authored-by: Vasily Nemkov --- src/Client/ClientBase.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 6df86db886b..bd83246871b 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1531,9 +1531,9 @@ void ClientBase::receiveLogsAndProfileEvents(ASTPtr parsed_query) { auto packet_type = connection->checkPacket(0); - while (packet_type && (*packet_type == Protocol::Server::Log || - *packet_type == Protocol::Server::ProfileEvents || - *packet_type == Protocol::Server::TimezoneUpdate)) + while (packet_type && (*packet_type == Protocol::Server::Log + || *packet_type == Protocol::Server::ProfileEvents + || *packet_type == Protocol::Server::TimezoneUpdate)) { receiveAndProcessPacket(parsed_query, false); packet_type = connection->checkPacket(0); From 21d5846cabd0717184f44d98b8480fefc683e807 Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Tue, 18 Apr 2023 18:12:11 +0300 Subject: [PATCH 049/722] Fix test --- .../02707_clickhouse_local_implicit_file_table_function.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh index eea1e47ba7f..24de0ad579c 100755 --- a/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh +++ b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh @@ -7,6 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) dir=${CLICKHOUSE_TEST_UNIQUE_NAME} [[ -d $dir ]] && rm -rd $dir mkdir $dir + # Create temporary csv file for tests echo '"id","str","int","text"' > $dir/tmp.csv echo '1,"abc",123,"abacaba"' >> $dir/tmp.csv @@ -40,4 +41,5 @@ SHOW DATABASES; DROP DATABASE test02707; """ | grep "test02707" +# Remove temporary dir with files rm -rd $dir From bf55f43e1933fdcbbc2ec85e5b0823c6a7e3eb5e Mon Sep 17 00:00:00 2001 From: zvonand Date: Thu, 20 Apr 2023 02:53:42 +0200 Subject: [PATCH 050/722] update cmakelists --- src/CMakeLists.txt | 5 +++++ src/Core/SettingsFields.cpp | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 76e5ef83e41..5ac3f6e1654 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -191,6 +191,11 @@ list (APPEND dbms_headers Dictionaries/DictionaryStructure.h Dictionaries/getDictionaryConfigurationFromAST.h) +# Required for validation of Timezone in session_timezone setting. 
+# This way we don't need to create timezone via cctz each time, but check against pregenerated char** +list (APPEND dbms_sources + Storages/System/StorageSystemTimeZones.generated.cpp) + if (NOT ENABLE_SSL) list (REMOVE_ITEM clickhouse_common_io_sources Common/OpenSSLHelpers.cpp) list (REMOVE_ITEM clickhouse_common_io_headers Common/OpenSSLHelpers.h) diff --git a/src/Core/SettingsFields.cpp b/src/Core/SettingsFields.cpp index 6af38586ed8..c0556519563 100644 --- a/src/Core/SettingsFields.cpp +++ b/src/Core/SettingsFields.cpp @@ -28,7 +28,7 @@ namespace ErrorCodes namespace { - bool checkIsExitingTimeZone(const std::string_view timezone) + bool checkIsExistingTimeZone(const std::string_view timezone) { for (auto * it = auto_time_zones; *it; ++it) if (timezone == *it) @@ -475,7 +475,7 @@ void SettingFieldTimezone::readBinary(ReadBuffer & in) void SettingFieldTimezone::validateTimezone(std::string_view str) { - if (!str.empty() && !checkIsExitingTimeZone(str)) + if (!str.empty() && !checkIsExistingTimeZone(str)) throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid time zone: {}", str); } From f4af76ab8baee97c06cf1e53346da6107c7ccbbd Mon Sep 17 00:00:00 2001 From: zvonand Date: Thu, 20 Apr 2023 11:58:51 +0200 Subject: [PATCH 051/722] cleanup cmakelists --- programs/library-bridge/CMakeLists.txt | 7 +------ programs/odbc-bridge/CMakeLists.txt | 6 +----- utils/check-marks/CMakeLists.txt | 6 +----- utils/keeper-data-dumper/CMakeLists.txt | 8 +------- utils/wal-dump/CMakeLists.txt | 6 +----- 5 files changed, 5 insertions(+), 28 deletions(-) diff --git a/programs/library-bridge/CMakeLists.txt b/programs/library-bridge/CMakeLists.txt index 97af7c3b22e..79497d5fb2e 100644 --- a/programs/library-bridge/CMakeLists.txt +++ b/programs/library-bridge/CMakeLists.txt @@ -13,17 +13,12 @@ set (CLICKHOUSE_LIBRARY_BRIDGE_SOURCES library-bridge.cpp ) -set(CLICKHOUSE_LIBRARY_BRIDGE_EXTERNAL_SOURCES - ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp -) - if (OS_LINUX) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic") endif () clickhouse_add_executable(clickhouse-library-bridge - ${CLICKHOUSE_LIBRARY_BRIDGE_SOURCES} - ${CLICKHOUSE_LIBRARY_BRIDGE_EXTERNAL_SOURCES}) + ${CLICKHOUSE_LIBRARY_BRIDGE_SOURCES}) target_link_libraries(clickhouse-library-bridge PRIVATE daemon diff --git a/programs/odbc-bridge/CMakeLists.txt b/programs/odbc-bridge/CMakeLists.txt index bf1b42df026..118610e4dcd 100644 --- a/programs/odbc-bridge/CMakeLists.txt +++ b/programs/odbc-bridge/CMakeLists.txt @@ -15,17 +15,13 @@ set (CLICKHOUSE_ODBC_BRIDGE_SOURCES validateODBCConnectionString.cpp ) -set(ODBC_BRIDGE_EXTERNAL_SOURCES - ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp -) - if (OS_LINUX) # clickhouse-odbc-bridge is always a separate binary. # Reason: it must not export symbols from SSL, mariadb-client, etc. to not break ABI compatibility with ODBC drivers. 
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic") endif () -clickhouse_add_executable(clickhouse-odbc-bridge ${CLICKHOUSE_ODBC_BRIDGE_SOURCES} ${ODBC_BRIDGE_EXTERNAL_SOURCES}) +clickhouse_add_executable(clickhouse-odbc-bridge ${CLICKHOUSE_ODBC_BRIDGE_SOURCES}) target_link_libraries(clickhouse-odbc-bridge PRIVATE daemon diff --git a/utils/check-marks/CMakeLists.txt b/utils/check-marks/CMakeLists.txt index 456fb3d7112..05546a2989b 100644 --- a/utils/check-marks/CMakeLists.txt +++ b/utils/check-marks/CMakeLists.txt @@ -1,6 +1,2 @@ -set(CHECK_MARKS_EXTERNAL_SOURCES - ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp -) - -clickhouse_add_executable (check-marks ${CHECK_MARKS_EXTERNAL_SOURCES} main.cpp) +clickhouse_add_executable (check-marks main.cpp) target_link_libraries(check-marks PRIVATE dbms boost::program_options) diff --git a/utils/keeper-data-dumper/CMakeLists.txt b/utils/keeper-data-dumper/CMakeLists.txt index a6858a29e8b..1f55e50e68e 100644 --- a/utils/keeper-data-dumper/CMakeLists.txt +++ b/utils/keeper-data-dumper/CMakeLists.txt @@ -1,8 +1,2 @@ -set(KEEPER_DATA_DUMPER_EXTERNAL_SOURCES - ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp -) - -clickhouse_add_executable(keeper-data-dumper - ${KEEPER_DATA_DUMPER_EXTERNAL_SOURCES} - main.cpp) +clickhouse_add_executable(keeper-data-dumper main.cpp) target_link_libraries(keeper-data-dumper PRIVATE dbms) diff --git a/utils/wal-dump/CMakeLists.txt b/utils/wal-dump/CMakeLists.txt index 754799a6faf..3d59e95b4ca 100644 --- a/utils/wal-dump/CMakeLists.txt +++ b/utils/wal-dump/CMakeLists.txt @@ -1,6 +1,2 @@ -set(WAL_DUMP_EXTERNAL_SOURCES - ${CMAKE_CURRENT_BINARY_DIR}/../../src/Storages/System/StorageSystemTimeZones.generated.cpp -) - -clickhouse_add_executable (wal-dump ${WAL_DUMP_EXTERNAL_SOURCES} main.cpp) +clickhouse_add_executable (wal-dump main.cpp) target_link_libraries(wal-dump PRIVATE dbms boost::program_options) From e37745811cd6000348655c7c42cdc25436a3090e Mon Sep 17 00:00:00 2001 From: zvonand Date: Thu, 20 Apr 2023 12:04:12 +0200 Subject: [PATCH 052/722] style --- programs/library-bridge/CMakeLists.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/programs/library-bridge/CMakeLists.txt b/programs/library-bridge/CMakeLists.txt index 79497d5fb2e..1cacc391ca5 100644 --- a/programs/library-bridge/CMakeLists.txt +++ b/programs/library-bridge/CMakeLists.txt @@ -17,8 +17,7 @@ if (OS_LINUX) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic") endif () -clickhouse_add_executable(clickhouse-library-bridge - ${CLICKHOUSE_LIBRARY_BRIDGE_SOURCES}) +clickhouse_add_executable(clickhouse-library-bridge ${CLICKHOUSE_LIBRARY_BRIDGE_SOURCES}) target_link_libraries(clickhouse-library-bridge PRIVATE daemon From 2d2483d695f39fd8488e3667d77faaaa4177bd92 Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Mon, 24 Apr 2023 21:50:40 +0300 Subject: [PATCH 053/722] Rename DatabaseFileSystem to DatabaseFilesystem --- src/Databases/DatabaseFactory.cpp | 16 +++++++------- ...eFileSystem.cpp => DatabaseFilesystem.cpp} | 22 +++++++++---------- ...abaseFileSystem.h => DatabaseFilesystem.h} | 8 +++---- src/Databases/DatabasesOverlay.cpp | 4 ++-- ...cal_implicit_file_table_function.reference | 4 ++-- ...ouse_local_implicit_file_table_function.sh | 8 +++---- 6 files changed, 31 insertions(+), 31 deletions(-) rename src/Databases/{DatabaseFileSystem.cpp => DatabaseFilesystem.cpp} (82%) rename 
src/Databases/{DatabaseFileSystem.h => DatabaseFilesystem.h} (83%) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 3356689d892..9950ab5bf45 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -3,11 +3,11 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -15,10 +15,10 @@ #include #include #include -#include -#include #include +#include #include +#include #include "config.h" @@ -133,13 +133,13 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String static const std::unordered_set database_engines{"Ordinary", "Atomic", "Memory", "Dictionary", "Lazy", "Replicated", "MySQL", "MaterializeMySQL", "MaterializedMySQL", - "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "FileSystem"}; + "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem"}; if (!database_engines.contains(engine_name)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine name `{}` does not exist", engine_name); static const std::unordered_set engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL", - "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "FileSystem"}; + "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem"}; static const std::unordered_set engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"}; bool engine_may_have_arguments = engines_with_arguments.contains(engine_name); @@ -433,7 +433,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String return std::make_shared(context, engine_define, create.attach, database_path); } #endif - else if (engine_name == "FileSystem") + else if (engine_name == "Filesystem") { const ASTFunction * engine = engine_define->engine; @@ -443,13 +443,13 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String if (engine->arguments && !engine->arguments->children.empty()) { if (engine->arguments->children.size() != 1) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "FileSystem database requires at most 1 argument: file_system_path"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires at most 1 argument: filesystem_path"); const auto & arguments = engine->arguments->children; init_path = safeGetLiteralValue(arguments[0], engine_name); } - return std::make_shared(database_name, init_path, context); + return std::make_shared(database_name, init_path, context); } throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", engine_name); diff --git a/src/Databases/DatabaseFileSystem.cpp b/src/Databases/DatabaseFilesystem.cpp similarity index 82% rename from src/Databases/DatabaseFileSystem.cpp rename to src/Databases/DatabaseFilesystem.cpp index 8b92ad8080a..177b4717716 100644 --- a/src/Databases/DatabaseFileSystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include @@ -17,25 +17,25 @@ namespace DB { -DatabaseFileSystem::DatabaseFileSystem(const String & name_, const String & path_, ContextPtr context_) +DatabaseFilesystem::DatabaseFilesystem(const String & name_, const String & path_, ContextPtr context_) : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")")) { if (path.empty()) path = Poco::Path::current(); } -std::string DatabaseFileSystem::getTablePath(const std::string& 
table_name) const +std::string DatabaseFilesystem::getTablePath(const std::string& table_name) const { return Poco::Path(path, table_name).toString(); } -void DatabaseFileSystem::addTable(const std::string& table_name, StoragePtr table_storage) const +void DatabaseFilesystem::addTable(const std::string& table_name, StoragePtr table_storage) const { std::lock_guard lock(mutex); loaded_tables.emplace(table_name, table_storage); } -bool DatabaseFileSystem::isTableExist(const String & name, ContextPtr) const +bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr) const { { std::lock_guard lock(mutex); @@ -47,7 +47,7 @@ bool DatabaseFileSystem::isTableExist(const String & name, ContextPtr) const return table_file.exists() && table_file.isFile(); } -StoragePtr DatabaseFileSystem::tryGetTable(const String & name, ContextPtr context_) const +StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr context_) const { // Check if the table exists in the loaded tables map { @@ -85,12 +85,12 @@ StoragePtr DatabaseFileSystem::tryGetTable(const String & name, ContextPtr conte } } -ASTPtr DatabaseFileSystem::getCreateDatabaseQuery() const +ASTPtr DatabaseFilesystem::getCreateDatabaseQuery() const { auto settings = getContext()->getSettingsRef(); ParserCreateQuery parser; - String query = "CREATE DATABASE " + backQuoteIfNeed(getDatabaseName()) + " ENGINE = FileSystem(" + backQuoteIfNeed(path) + ")"; + const String query = fmt::format("CREATE DATABASE {} ENGINE = Filesystem({})", backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(path)); ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth); if (const auto database_comment = getDatabaseComment(); !database_comment.empty()) @@ -102,7 +102,7 @@ ASTPtr DatabaseFileSystem::getCreateDatabaseQuery() const return ast; } -void DatabaseFileSystem::shutdown() +void DatabaseFilesystem::shutdown() { Tables tables_snapshot; { @@ -123,7 +123,7 @@ void DatabaseFileSystem::shutdown() /** * Returns an empty vector because the database is read-only and no tables can be backed up. */ -std::vector> DatabaseFileSystem::getTablesForBackup(const FilterByNameFunction&, const ContextPtr&) const +std::vector> DatabaseFilesystem::getTablesForBackup(const FilterByNameFunction&, const ContextPtr&) const { return {}; } @@ -133,7 +133,7 @@ std::vector> DatabaseFileSystem::getTablesForBacku * Returns an empty iterator because the database does not have its own tables * But only caches them for quick access. 
*/ -DatabaseTablesIteratorPtr DatabaseFileSystem::getTablesIterator(ContextPtr, const FilterByNameFunction&) const +DatabaseTablesIteratorPtr DatabaseFilesystem::getTablesIterator(ContextPtr, const FilterByNameFunction&) const { return std::make_unique(Tables{}, getDatabaseName()); } diff --git a/src/Databases/DatabaseFileSystem.h b/src/Databases/DatabaseFilesystem.h similarity index 83% rename from src/Databases/DatabaseFileSystem.h rename to src/Databases/DatabaseFilesystem.h index 474a7e78335..d5fdd528aa5 100644 --- a/src/Databases/DatabaseFileSystem.h +++ b/src/Databases/DatabaseFilesystem.h @@ -12,18 +12,18 @@ namespace DB class Context; /** - * DatabaseFileSystem allows to interact with files stored on the file system + * DatabaseFilesystem allows to interact with files stored on the file system * Uses TableFunctionFile to implicitly load file when a user requests the table, and provides read-only access to the data in the file * Tables are cached inside the database for quick access * * Used in clickhouse-local to access local files */ -class DatabaseFileSystem : public IDatabase, protected WithContext +class DatabaseFilesystem : public IDatabase, protected WithContext { public: - DatabaseFileSystem(const String & name, const String & path, ContextPtr context); + DatabaseFilesystem(const String & name, const String & path, ContextPtr context); - String getEngineName() const override { return "FileSystem"; } + String getEngineName() const override { return "Filesystem"; } bool isTableExist(const String & name, ContextPtr context) const override; diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index da26f9282a0..3563fa715a6 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -4,8 +4,8 @@ #include #include +#include #include -#include #include @@ -258,7 +258,7 @@ DatabaseTablesIteratorPtr DatabasesOverlay::getTablesIterator(ContextPtr context DatabasePtr CreateClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_) { auto databaseCombiner = std::make_shared(name_, context_); - databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); + databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); return databaseCombiner; } diff --git a/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.reference b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.reference index 0fcd843e737..ccc02ad4f34 100644 --- a/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.reference +++ b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.reference @@ -3,7 +3,7 @@ explicit: 4 implicit: 4 -Test 2: check FileSystem database +Test 2: check Filesystem database 4 -Test 3: check show database with FileSystem +Test 3: check show database with Filesystem test02707 diff --git a/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh index 24de0ad579c..7c9095b3d8b 100755 --- a/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh +++ b/tests/queries/0_stateless/02707_clickhouse_local_implicit_file_table_function.sh @@ -24,19 +24,19 @@ echo "implicit:" $CLICKHOUSE_LOCAL -q "SELECT COUNT(*) FROM \"${dir}/tmp.csv\"" ################# -echo "Test 2: check FileSystem database" 
+echo "Test 2: check Filesystem database" $CLICKHOUSE_LOCAL --multiline --multiquery -q """ DROP DATABASE IF EXISTS test; -CREATE DATABASE test ENGINE = FileSystem('${dir}'); +CREATE DATABASE test ENGINE = Filesystem('${dir}'); SELECT COUNT(*) FROM test.\`tmp.csv\`; DROP DATABASE test; """ ################# -echo "Test 3: check show database with FileSystem" +echo "Test 3: check show database with Filesystem" $CLICKHOUSE_LOCAL --multiline --multiquery -q """ DROP DATABASE IF EXISTS test02707; -CREATE DATABASE test02707 ENGINE = FileSystem('${dir}'); +CREATE DATABASE test02707 ENGINE = Filesystem('${dir}'); SHOW DATABASES; DROP DATABASE test02707; """ | grep "test02707" From 79ca39d920fbc52e92f6bbc9496bde2cc1afec42 Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Mon, 24 Apr 2023 22:26:16 +0300 Subject: [PATCH 054/722] Fixed exception messages --- src/Databases/DatabasesOverlay.cpp | 42 +++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 3563fa715a6..c3af6d9305e 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -65,7 +65,12 @@ void DatabasesOverlay::createTable(ContextPtr context_, const String & table_nam continue; } } - throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no databases for CREATE TABLE {} query in Database{}", table_name, getEngineName()); + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "There is no databases for CREATE TABLE `{}` query in database `{}` (engine {})", + table_name, + getDatabaseName(), + getEngineName()); } void DatabasesOverlay::dropTable(ContextPtr context_, const String & table_name, bool sync) @@ -82,7 +87,12 @@ void DatabasesOverlay::dropTable(ContextPtr context_, const String & table_name, continue; } } - throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no databases for DROP TABLE {} query in Database{}", table_name, getEngineName()); + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "There is no databases for DROP TABLE `{}` query in database `{}` (engine {})", + table_name, + getDatabaseName(), + getEngineName()); } void DatabasesOverlay::attachTable( @@ -100,7 +110,12 @@ void DatabasesOverlay::attachTable( continue; } } - throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no databases for ATTACH TABLE query in Database{}", getEngineName()); + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "There is no databases for ATTACH TABLE `{}` query in database `{}` (engine {})", + table_name, + getDatabaseName(), + getEngineName()); } StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & table_name) @@ -119,7 +134,12 @@ StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & tab continue; } } - throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no databases for DETACH TABLE {} query in Database{}", table_name, getEngineName()); + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "There is no databases for DETACH TABLE `{}` query in database `{}` (engine {})", + table_name, + getDatabaseName(), + getEngineName()); } ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const @@ -132,7 +152,12 @@ ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr break; } if (!result && throw_on_error) - throw Exception(ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY, "There is no metadata of table {} in Database{}", name, getEngineName()); + throw Exception( + 
ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY, + "There is no metadata of table `{}` in database `{}` (engine {})", + name, + getDatabaseName(), + getEngineName()); return result; } @@ -201,7 +226,12 @@ void DatabasesOverlay::alterTable(ContextPtr local_context, const StorageID & ta continue; } } - throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no databases for alterTable in Database{}", getEngineName()); + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "There is no databases for ALTER TABLE `{}` query in database `{}` (engine {})", + table_id.table_name, + getDatabaseName(), + getEngineName()); } std::vector> From c9f8dd8bfd3d4123a0a7111f19d8863b19729d9a Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Mon, 24 Apr 2023 22:53:32 +0300 Subject: [PATCH 055/722] Replaced Poco::File with std::filesystem --- src/Databases/DatabaseFactory.cpp | 2 +- src/Databases/DatabaseFilesystem.cpp | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 9950ab5bf45..8a50c31efc8 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -437,7 +437,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String { const ASTFunction * engine = engine_define->engine; - // If init_path is empty, then the current path from Poco will be used + /// If init_path is empty, then the current path will be used std::string init_path; if (engine->arguments && !engine->arguments->children.empty()) diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp index 177b4717716..1decb273ae1 100644 --- a/src/Databases/DatabaseFilesystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -8,11 +8,12 @@ #include #include #include -#include -#include #include #include +#include + +namespace fs = std::filesystem; namespace DB { @@ -21,12 +22,12 @@ DatabaseFilesystem::DatabaseFilesystem(const String & name_, const String & path : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")")) { if (path.empty()) - path = Poco::Path::current(); + path = fs::current_path(); } std::string DatabaseFilesystem::getTablePath(const std::string& table_name) const { - return Poco::Path(path, table_name).toString(); + return fs::path(path) / table_name; } void DatabaseFilesystem::addTable(const std::string& table_name, StoragePtr table_storage) const @@ -43,8 +44,8 @@ bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr) const return true; } - Poco::File table_file(getTablePath(name)); - return table_file.exists() && table_file.isFile(); + fs::path table_file_path(getTablePath(name)); + return fs::exists(table_file_path) && fs::is_regular_file(table_file_path); } StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr context_) const @@ -62,8 +63,7 @@ StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr conte try { // If the table doesn't exist in the tables map, check if the corresponding file exists - Poco::File table_file(table_path); - if (!table_file.exists()) + if (!fs::exists(table_path) || !fs::is_regular_file(table_path)) return nullptr; // If the file exists, create a new table using TableFunctionFile and return it. 
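The DatabaseFilesystem changes above implement a lazy lookup: a requested table name is checked against an in-memory cache, then against the file system under the configured root, and only then materialized through the `file` table function and cached for later queries. A compact standalone sketch of that flow — `FilesystemDatabaseSketch`, `FileTable` and `FileTablePtr` are hypothetical stand-ins for the real IDatabase/StoragePtr machinery:

```cpp
#include <filesystem>
#include <map>
#include <memory>
#include <mutex>
#include <string>

namespace fs = std::filesystem;

/// Hypothetical stand-in for ClickHouse's StoragePtr: here a "table" is just its file.
struct FileTable { fs::path file; };
using FileTablePtr = std::shared_ptr<FileTable>;

/// Simplified sketch of DatabaseFilesystem::tryGetTable: cache hit, then existence
/// check on disk, then materialization and caching of the new table object.
class FilesystemDatabaseSketch
{
public:
    explicit FilesystemDatabaseSketch(fs::path root) : path(root.lexically_normal()) {}

    FileTablePtr tryGetTable(const std::string & name)
    {
        {
            std::lock_guard lock(mutex);
            if (auto it = loaded_tables.find(name); it != loaded_tables.end())
                return it->second;
        }

        fs::path table_path = path / name;
        if (!fs::exists(table_path) || !fs::is_regular_file(table_path))
            return nullptr;

        /// In ClickHouse this step goes through the `file` table function
        /// (TableFunctionFactory + makeASTFunction); here we only wrap the path.
        auto table = std::make_shared<FileTable>(FileTable{table_path});

        std::lock_guard lock(mutex);
        loaded_tables.emplace(name, table);
        return table;
    }

private:
    fs::path path;
    std::mutex mutex;
    std::map<std::string, FileTablePtr> loaded_tables;
};
```

The following commit builds on this by adding an `isReadOnly()` flag and a throwing `getTable`, with `tryGetTable` reduced to a thin wrapper around it.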
From 26812f36fb73ca8a3f1c16a0db54dd4327f7dc6c Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Wed, 26 Apr 2023 01:13:29 +0300 Subject: [PATCH 056/722] Added read-only database setting; Fixed error messages for filesystem database; added tests --- src/Databases/DatabaseFilesystem.cpp | 46 ++++++++------- src/Databases/DatabaseFilesystem.h | 8 +++ src/Databases/DatabasesOverlay.cpp | 13 ++--- src/Databases/IDatabase.h | 4 +- src/Interpreters/DatabaseCatalog.cpp | 14 ++++- .../02722_database_filesystem.reference | 12 ++++ .../0_stateless/02722_database_filesystem.sh | 58 +++++++++++++++++++ 7 files changed, 124 insertions(+), 31 deletions(-) create mode 100644 tests/queries/0_stateless/02722_database_filesystem.reference create mode 100755 tests/queries/0_stateless/02722_database_filesystem.sh diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp index 1decb273ae1..106885e7c3e 100644 --- a/src/Databases/DatabaseFilesystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -21,8 +21,7 @@ namespace DB DatabaseFilesystem::DatabaseFilesystem(const String & name_, const String & path_, ContextPtr context_) : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")")) { - if (path.empty()) - path = fs::current_path(); + path = fs::path(path).lexically_normal().string(); } std::string DatabaseFilesystem::getTablePath(const std::string& table_name) const @@ -48,7 +47,7 @@ bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr) const return fs::exists(table_file_path) && fs::is_regular_file(table_file_path); } -StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr context_) const +StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr context_) const { // Check if the table exists in the loaded tables map { @@ -60,24 +59,31 @@ StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr conte auto table_path = getTablePath(name); + // If the file exists, create a new table using TableFunctionFile and return it. + auto args = makeASTFunction("file", std::make_shared(table_path)); + + auto table_function = TableFunctionFactory::instance().get(args, context_); + if (!table_function) + return nullptr; + + auto table_storage = table_function->execute(args, context_, name); + if (table_storage) + addTable(name, table_storage); + + return table_storage; +} + +StoragePtr DatabaseFilesystem::getTable(const String & name, ContextPtr context_) const +{ + if (auto storage = getTableImpl(name, context_)) + return storage; + throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name)); +} + +StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr context_) const { try { - // If the table doesn't exist in the tables map, check if the corresponding file exists - if (!fs::exists(table_path) || !fs::is_regular_file(table_path)) - return nullptr; - - // If the file exists, create a new table using TableFunctionFile and return it. - auto args = makeASTFunction("file", std::make_shared(table_path)); - - auto table_function = TableFunctionFactory::instance().get(args, context_); - if (!table_function) - return nullptr; - - auto table_storage = table_function->execute(args, context_, name); - if (table_storage) - addTable(name, table_storage); - - return table_storage; + return getTable(name, context_); } catch (...) 
{ @@ -90,7 +96,7 @@ ASTPtr DatabaseFilesystem::getCreateDatabaseQuery() const auto settings = getContext()->getSettingsRef(); ParserCreateQuery parser; - const String query = fmt::format("CREATE DATABASE {} ENGINE = Filesystem({})", backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(path)); + const String query = fmt::format("CREATE DATABASE {} ENGINE = Filesystem('{}')", backQuoteIfNeed(getDatabaseName()), path); ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth); if (const auto database_comment = getDatabaseComment(); !database_comment.empty()) diff --git a/src/Databases/DatabaseFilesystem.h b/src/Databases/DatabaseFilesystem.h index d5fdd528aa5..697511ac5b3 100644 --- a/src/Databases/DatabaseFilesystem.h +++ b/src/Databases/DatabaseFilesystem.h @@ -27,10 +27,14 @@ public: bool isTableExist(const String & name, ContextPtr context) const override; + StoragePtr getTable(const String & name, ContextPtr context) const override; + StoragePtr tryGetTable(const String & name, ContextPtr context) const override; bool empty() const override { return true; } + bool isReadOnly() const override { return true; } + ASTPtr getCreateDatabaseQuery() const override; void shutdown() override; @@ -39,9 +43,13 @@ public: DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override; protected: + StoragePtr getTableImpl(const String & name, ContextPtr context) const; + std::string getTablePath(const std::string & table_name) const; + void addTable(const std::string & table_name, StoragePtr table_storage) const; + private: String path; mutable Tables loaded_tables TSA_GUARDED_BY(mutex); diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index c3af6d9305e..5a6a4fe5cc6 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -55,15 +55,11 @@ void DatabasesOverlay::createTable(ContextPtr context_, const String & table_nam { for (auto & db : databases) { - try + if (!db->isReadOnly()) { db->createTable(context_, table_name, table, query); return; } - catch (...) - { - continue; - } } throw Exception( ErrorCodes::LOGICAL_ERROR, @@ -218,8 +214,11 @@ void DatabasesOverlay::alterTable(ContextPtr local_context, const StorageID & ta { try { - db->alterTable(local_context, table_id, metadata); - return; + if (!db->isReadOnly()) + { + db->alterTable(local_context, table_id, metadata); + return; + } } catch (...) { diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index 53a2f372814..6508e2ce060 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -170,7 +170,7 @@ public: /// Get the table for work. Return nullptr if there is no table. virtual StoragePtr tryGetTable(const String & name, ContextPtr context) const = 0; - StoragePtr getTable(const String & name, ContextPtr context) const; + virtual StoragePtr getTable(const String & name, ContextPtr context) const; virtual UUID tryGetTableUUID(const String & /*table_name*/) const { return UUIDHelpers::Nil; } @@ -183,6 +183,8 @@ public: /// Is the database empty. virtual bool empty() const = 0; + virtual bool isReadOnly() const { return false; } + /// Add the table to the database. Record its presence in the metadata. 
virtual void createTable( ContextPtr /*context*/, diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 8d3fa91a7fe..f9e74fadcbd 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -338,9 +338,17 @@ DatabaseAndTable DatabaseCatalog::getTableImpl( database = it->second; } - auto table = database->tryGetTable(table_id.table_name, context_); - if (!table && exception) - exception->emplace(Exception(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist", table_id.getNameForLogs())); + StoragePtr table = nullptr; + try + { + table = database->getTable(table_id.table_name, context_); + } + catch (const Exception & e) + { + if (exception) + exception->emplace(*e.clone()); + } + if (!table) database = nullptr; diff --git a/tests/queries/0_stateless/02722_database_filesystem.reference b/tests/queries/0_stateless/02722_database_filesystem.reference new file mode 100644 index 00000000000..a583f1e2e3c --- /dev/null +++ b/tests/queries/0_stateless/02722_database_filesystem.reference @@ -0,0 +1,12 @@ +Test 1: create filesystem database and check implicit calls +0 +test1 +4 +4 +4 +Test 2: check DatabaseFilesystem access rights on server +OK +OK +OK +OK +OK diff --git a/tests/queries/0_stateless/02722_database_filesystem.sh b/tests/queries/0_stateless/02722_database_filesystem.sh new file mode 100755 index 00000000000..0adeface438 --- /dev/null +++ b/tests/queries/0_stateless/02722_database_filesystem.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +# Tags: no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# see 01658_read_file_to_stringcolumn.sh +CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') + +# Prepare data +mkdir -p ${CLICKHOUSE_USER_FILES_PATH}/tmp/ +echo '"id","str","int","text"' > ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv +echo '1,"abc",123,"abacaba"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv +echo '2,"def",456,"bacabaa"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv +echo '3,"story",78912,"acabaab"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv +echo '4,"history",21321321,"cabaaba"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv + +tmp_dir=${CLICKHOUSE_TEST_UNIQUE_NAME} +[[ -d $tmp_dir ]] && rm -rd $tmp_dir +mkdir $tmp_dir +cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${tmp_dir}/tmp.csv +cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp/tmp.csv + +################# +echo "Test 1: create filesystem database and check implicit calls" +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test1; +CREATE DATABASE test1 ENGINE = Filesystem; +""" +echo $? 
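+# Note: relative table names are resolved by the underlying file() table function under the
+# server's user_files directory, so the CSV files prepared above are addressable as tables
+# (e.g. `tmp.csv`, `tmp/tmp.csv`); the `echo $?` above is expected to print 0.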
+${CLICKHOUSE_CLIENT} --query "SHOW DATABASES" | grep "test1" +${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`tmp.csv\`;" +${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`tmp/tmp.csv\`;" +${CLICKHOUSE_LOCAL} -q "SELECT COUNT(*) FROM \"${tmp_dir}/tmp.csv\"" + +################# +echo "Test 2: check DatabaseFilesystem access rights on server" +# Allows list files only inside user_files +${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`../tmp.csv\`;" 2>&1| grep -F "Code: 291" > /dev/null && echo "OK" +${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`/tmp/tmp.csv\`;" 2>&1| grep -F "Code: 291" > /dev/null && echo "OK" + +${CLICKHOUSE_CLIENT} --multiline --multiquery --query """ +USE test1; +SELECT COUNT(*) FROM \"../${tmp_dir}/tmp.csv\"; +""" 2>&1| grep -F "Code: 291" > /dev/null && echo "OK" +${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`../../../../../../tmp.csv\`;" 2>&1| grep -F "Code: 291" > /dev/null && echo "OK" +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test2; +CREATE DATABASE test2 ENGINE = Filesystem('/tmp'); +SELECT COUNT(*) FROM test2.\`tmp.csv\`; +""" 2>&1| grep -F "Code: 291" > /dev/null && echo "OK" + +# Clean +${CLICKHOUSE_CLIENT} --query "DROP DATABASE test1;" +${CLICKHOUSE_CLIENT} --query "DROP DATABASE test2;" +rm -rd $tmp_dir +rm -rd $CLICKHOUSE_USER_FILES_PATH From 4606e660683992b630f9db952beda9b261f82d76 Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Wed, 26 Apr 2023 11:06:01 +0300 Subject: [PATCH 057/722] Fix style --- src/Databases/DatabaseFilesystem.cpp | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp index 106885e7c3e..16aed185669 100644 --- a/src/Databases/DatabaseFilesystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -18,18 +18,23 @@ namespace fs = std::filesystem; namespace DB { +namespace ErrorCodes +{ + extern const int UNKNOWN_TABLE; +} + DatabaseFilesystem::DatabaseFilesystem(const String & name_, const String & path_, ContextPtr context_) : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")")) { path = fs::path(path).lexically_normal().string(); } -std::string DatabaseFilesystem::getTablePath(const std::string& table_name) const +std::string DatabaseFilesystem::getTablePath(const std::string & table_name) const { return fs::path(path) / table_name; } -void DatabaseFilesystem::addTable(const std::string& table_name, StoragePtr table_storage) const +void DatabaseFilesystem::addTable(const std::string & table_name, StoragePtr table_storage) const { std::lock_guard lock(mutex); loaded_tables.emplace(table_name, table_storage); @@ -80,7 +85,8 @@ StoragePtr DatabaseFilesystem::getTable(const String & name, ContextPtr context_ throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name)); } -StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr context_) const { +StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr context_) const +{ try { return getTable(name, context_); @@ -127,9 +133,9 @@ void DatabaseFilesystem::shutdown() } /** - * Returns an empty vector because the database is read-only and no tables can be backed up. 
+ * Returns an empty vector because the database is read-only and no tables can be backed up */ -std::vector> DatabaseFilesystem::getTablesForBackup(const FilterByNameFunction&, const ContextPtr&) const +std::vector> DatabaseFilesystem::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const { return {}; } @@ -137,9 +143,9 @@ std::vector> DatabaseFilesystem::getTablesForBacku /** * * Returns an empty iterator because the database does not have its own tables - * But only caches them for quick access. + * But only caches them for quick access */ -DatabaseTablesIteratorPtr DatabaseFilesystem::getTablesIterator(ContextPtr, const FilterByNameFunction&) const +DatabaseTablesIteratorPtr DatabaseFilesystem::getTablesIterator(ContextPtr, const FilterByNameFunction &) const { return std::make_unique(Tables{}, getDatabaseName()); } From ca1501aeb4e9c7a1db131f4c24255bd24bd99059 Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Wed, 26 Apr 2023 13:05:56 +0300 Subject: [PATCH 058/722] retrigger checks From 1f90e9bde8ab740ae5fda958ca93f9c4abab6008 Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Wed, 26 Apr 2023 14:37:41 +0300 Subject: [PATCH 059/722] retrigger checks From e20f92ce0f6ef4d06813932025cdea10a361631c Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Thu, 27 Apr 2023 21:26:36 +0300 Subject: [PATCH 060/722] Fixed exceptions handling; Fixed style; --- programs/local/LocalServer.cpp | 10 +++++- src/Databases/DatabaseFactory.cpp | 2 +- src/Databases/DatabaseFilesystem.cpp | 48 +++++++++++++++++++++++---- src/Databases/DatabasesOverlay.cpp | 49 ++++++---------------------- src/Databases/DatabasesOverlay.h | 2 -- 5 files changed, 62 insertions(+), 49 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 566d11791ca..4939997b323 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -149,6 +150,13 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str return system_database; } +static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_) +{ + auto databaseCombiner = std::make_shared(name_, context_); + databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); + databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); + return databaseCombiner; +} /// If path is specified and not empty, will try to setup server environment and load existing metadata void LocalServer::tryInitPath() @@ -648,7 +656,7 @@ void LocalServer::processConfig() * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons. 
*/ std::string default_database = config().getString("default_database", "_local"); - DatabaseCatalog::instance().attachDatabase(default_database, CreateClickHouseLocalDatabaseOverlay(default_database, global_context)); + DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context)); global_context->setCurrentDatabase(default_database); applyCmdOptions(global_context); diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 8a50c31efc8..1be0d5dd7b2 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -443,7 +443,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String if (engine->arguments && !engine->arguments->children.empty()) { if (engine->arguments->children.size() != 1) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires at most 1 argument: filesystem_path"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires exactly 1 argument: filesystem_path"); const auto & arguments = engine->arguments->children; init_path = safeGetLiteralValue(arguments[0], engine_name); diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp index 16aed185669..8275bdf6151 100644 --- a/src/Databases/DatabaseFilesystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -20,24 +20,42 @@ namespace DB namespace ErrorCodes { + extern const int LOGICAL_ERROR; extern const int UNKNOWN_TABLE; + extern const int DATABASE_ACCESS_DENIED; + extern const int BAD_ARGUMENTS; } DatabaseFilesystem::DatabaseFilesystem(const String & name_, const String & path_, ContextPtr context_) : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")")) { - path = fs::path(path).lexically_normal().string(); + fs::path user_files_path; + if (context_->getApplicationType() != Context::ApplicationType::LOCAL) + user_files_path = fs::canonical(fs::path(getContext()->getUserFilesPath())); + + if (fs::path(path).is_relative()) + path = user_files_path / path; + + path = fs::absolute(path).lexically_normal().string(); } std::string DatabaseFilesystem::getTablePath(const std::string & table_name) const { - return fs::path(path) / table_name; + fs::path table_path = fs::path(path) / table_name; + return table_path.lexically_normal().string(); } void DatabaseFilesystem::addTable(const std::string & table_name, StoragePtr table_storage) const { std::lock_guard lock(mutex); - loaded_tables.emplace(table_name, table_storage); + auto [_, inserted] = loaded_tables.emplace(table_name, table_storage); + if (!inserted) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Table with name `{}` already exists in database `{}` (engine {})", + table_name, + getDatabaseName(), + getEngineName()); } bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr) const @@ -62,8 +80,20 @@ StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr cont return it->second; } + // If run in Local mode, no need for path checking. 
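+    // (clickhouse-local may legitimately read arbitrary local paths, while a server must stay
+    // confined to its configured user_files directory; paths escaping it are rejected below
+    // with DATABASE_ACCESS_DENIED.)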
+ bool need_check_path = context_->getApplicationType() != Context::ApplicationType::LOCAL; + std::string user_files_path = fs::canonical(fs::path(context_->getUserFilesPath())).string(); + auto table_path = getTablePath(name); + // Check access for file before checking its existence + if (need_check_path && table_path.find(user_files_path) != 0) + throw Exception(ErrorCodes::DATABASE_ACCESS_DENIED, "File is not inside {}", user_files_path); + + // If the table doesn't exist in the tables map, check if the corresponding file exists + if (!fs::exists(table_path) || !fs::is_regular_file(table_path)) + return nullptr; + // If the file exists, create a new table using TableFunctionFile and return it. auto args = makeASTFunction("file", std::make_shared(table_path)); @@ -89,11 +119,17 @@ StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr conte { try { - return getTable(name, context_); + return getTableImpl(name, context_); } - catch (...) + catch (const Exception & e) { - return nullptr; + // Ignore exceptions thrown by TableFunctionFile and which indicate that there is no table + if (e.code() == ErrorCodes::BAD_ARGUMENTS) + return nullptr; + if (e.code() == ErrorCodes::DATABASE_ACCESS_DENIED) + return nullptr; + + throw; } } diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 5a6a4fe5cc6..b44a9798072 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -1,11 +1,9 @@ #include +#include #include #include -#include - -#include -#include +#include #include @@ -73,15 +71,11 @@ void DatabasesOverlay::dropTable(ContextPtr context_, const String & table_name, { for (auto & db : databases) { - try + if (db->isTableExist(table_name, context_)) { db->dropTable(context_, table_name, sync); return; } - catch (...) - { - continue; - } } throw Exception( ErrorCodes::LOGICAL_ERROR, @@ -119,16 +113,8 @@ StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & tab StoragePtr result = nullptr; for (auto & db : databases) { - try - { - result = db->detachTable(context_, table_name); - if (result) - return result; - } - catch (...) - { - continue; - } + if (db->isTableExist(table_name, context_)) + return db->detachTable(context_, table_name); } throw Exception( ErrorCodes::LOGICAL_ERROR, @@ -212,17 +198,10 @@ void DatabasesOverlay::alterTable(ContextPtr local_context, const StorageID & ta { for (auto & db : databases) { - try + if (!db->isReadOnly() && db->isTableExist(table_id.table_name, local_context)) { - if (!db->isReadOnly()) - { - db->alterTable(local_context, table_id, metadata); - return; - } - } - catch (...) 
- { - continue; + db->alterTable(local_context, table_id, metadata); + return; } } throw Exception( @@ -239,8 +218,8 @@ DatabasesOverlay::getTablesForBackup(const FilterByNameFunction & filter, const std::vector> result; for (const auto & db : databases) { - auto dbBackup = db->getTablesForBackup(filter, local_context); - result.insert(result.end(), std::make_move_iterator(dbBackup.begin()), std::make_move_iterator(dbBackup.end())); + auto db_backup = db->getTablesForBackup(filter, local_context); + result.insert(result.end(), std::make_move_iterator(db_backup.begin()), std::make_move_iterator(db_backup.end())); } return result; } @@ -284,12 +263,4 @@ DatabaseTablesIteratorPtr DatabasesOverlay::getTablesIterator(ContextPtr context return std::make_unique(std::move(tables), getDatabaseName()); } -DatabasePtr CreateClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_) -{ - auto databaseCombiner = std::make_shared(name_, context_); - databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); - databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); - return databaseCombiner; -} - } diff --git a/src/Databases/DatabasesOverlay.h b/src/Databases/DatabasesOverlay.h index 77f0085161b..0f31bbd6a47 100644 --- a/src/Databases/DatabasesOverlay.h +++ b/src/Databases/DatabasesOverlay.h @@ -63,6 +63,4 @@ protected: Poco::Logger * log; }; -DatabasePtr CreateClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_); - } From 57d852a60e804da746ce5e4cde2d56222afe677e Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Sun, 30 Apr 2023 14:46:11 +0300 Subject: [PATCH 061/722] Fixed table existence checking --- src/Databases/DatabaseFactory.cpp | 2 +- src/Databases/DatabaseFilesystem.cpp | 60 ++++++++++++++----- src/Databases/DatabaseFilesystem.h | 2 + .../02722_database_filesystem.reference | 5 +- .../0_stateless/02722_database_filesystem.sh | 24 ++++++-- 5 files changed, 70 insertions(+), 23 deletions(-) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 1be0d5dd7b2..8a50c31efc8 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -443,7 +443,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String if (engine->arguments && !engine->arguments->children.empty()) { if (engine->arguments->children.size() != 1) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires exactly 1 argument: filesystem_path"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires at most 1 argument: filesystem_path"); const auto & arguments = engine->arguments->children; init_path = safeGetLiteralValue(arguments[0], engine_name); diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp index 8275bdf6151..7f22b8a16a0 100644 --- a/src/Databases/DatabaseFilesystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -24,19 +25,27 @@ namespace ErrorCodes extern const int UNKNOWN_TABLE; extern const int DATABASE_ACCESS_DENIED; extern const int BAD_ARGUMENTS; + extern const int FILE_DOESNT_EXIST; } DatabaseFilesystem::DatabaseFilesystem(const String & name_, const String & path_, ContextPtr context_) : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")")) { fs::path user_files_path; - if (context_->getApplicationType() != Context::ApplicationType::LOCAL) + 
const auto & application_type = context_->getApplicationType(); + + if (application_type != Context::ApplicationType::LOCAL) user_files_path = fs::canonical(fs::path(getContext()->getUserFilesPath())); if (fs::path(path).is_relative()) path = user_files_path / path; + else if (application_type != Context::ApplicationType::LOCAL && !pathStartsWith(fs::path(path), user_files_path)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path must be inside user-files path ({})", user_files_path.string()); path = fs::absolute(path).lexically_normal().string(); + + if (!fs::exists(path)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path does not exist ({})", path); } std::string DatabaseFilesystem::getTablePath(const std::string & table_name) const @@ -58,7 +67,32 @@ void DatabaseFilesystem::addTable(const std::string & table_name, StoragePtr tab getEngineName()); } -bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr) const +bool DatabaseFilesystem::checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const { + // If run in Local mode, no need for path checking. + bool need_check_path = context_->getApplicationType() != Context::ApplicationType::LOCAL; + std::string user_files_path = fs::canonical(fs::path(context_->getUserFilesPath())).string(); + + // Check access for file before checking its existence + if (need_check_path && !fileOrSymlinkPathStartsWith(table_path, user_files_path)) + { + if (throw_on_error) + throw Exception(ErrorCodes::DATABASE_ACCESS_DENIED, "File is not inside {}", user_files_path); + else + return false; + } + + // Check if the corresponding file exists + if (!fs::exists(table_path) || !fs::is_regular_file(table_path)) { + if (throw_on_error) + throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "File does not exist ({})", table_path); + else + return false; + } + + return true; +} + +bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr context_) const { { std::lock_guard lock(mutex); @@ -67,7 +101,8 @@ bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr) const } fs::path table_file_path(getTablePath(name)); - return fs::exists(table_file_path) && fs::is_regular_file(table_file_path); + + return checkTableFilePath(table_file_path, context_, false); } StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr context_) const @@ -80,19 +115,9 @@ StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr cont return it->second; } - // If run in Local mode, no need for path checking. - bool need_check_path = context_->getApplicationType() != Context::ApplicationType::LOCAL; - std::string user_files_path = fs::canonical(fs::path(context_->getUserFilesPath())).string(); - auto table_path = getTablePath(name); - // Check access for file before checking its existence - if (need_check_path && table_path.find(user_files_path) != 0) - throw Exception(ErrorCodes::DATABASE_ACCESS_DENIED, "File is not inside {}", user_files_path); - - // If the table doesn't exist in the tables map, check if the corresponding file exists - if (!fs::exists(table_path) || !fs::is_regular_file(table_path)) - return nullptr; + checkTableFilePath(table_path, context_, true); // If the file exists, create a new table using TableFunctionFile and return it. 
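    // (The storage is built lazily: a file('<path>') table function AST is constructed, run
    // through TableFunctionFactory, and the resulting StoragePtr is cached in loaded_tables.)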
auto args = makeASTFunction("file", std::make_shared(table_path)); @@ -101,6 +126,7 @@ StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr cont if (!table_function) return nullptr; + // TableFunctionFile throws exceptions, if table cannot be created auto table_storage = table_function->execute(args, context_, name); if (table_storage) addTable(name, table_storage); @@ -110,6 +136,7 @@ StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr cont StoragePtr DatabaseFilesystem::getTable(const String & name, ContextPtr context_) const { + // rethrow all exceptions from TableFunctionFile to show correct error to user if (auto storage = getTableImpl(name, context_)) return storage; throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name)); @@ -123,11 +150,14 @@ StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr conte } catch (const Exception & e) { - // Ignore exceptions thrown by TableFunctionFile and which indicate that there is no table + // Ignore exceptions thrown by TableFunctionFile, which indicate that there is no table + // see tests/02722_database_filesystem.sh for more details if (e.code() == ErrorCodes::BAD_ARGUMENTS) return nullptr; if (e.code() == ErrorCodes::DATABASE_ACCESS_DENIED) return nullptr; + if (e.code() == ErrorCodes::FILE_DOESNT_EXIST) + return nullptr; throw; } diff --git a/src/Databases/DatabaseFilesystem.h b/src/Databases/DatabaseFilesystem.h index 697511ac5b3..3d2ad695cc6 100644 --- a/src/Databases/DatabaseFilesystem.h +++ b/src/Databases/DatabaseFilesystem.h @@ -49,6 +49,8 @@ protected: void addTable(const std::string & table_name, StoragePtr table_storage) const; + bool checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const; + private: String path; diff --git a/tests/queries/0_stateless/02722_database_filesystem.reference b/tests/queries/0_stateless/02722_database_filesystem.reference index a583f1e2e3c..c65dda7933a 100644 --- a/tests/queries/0_stateless/02722_database_filesystem.reference +++ b/tests/queries/0_stateless/02722_database_filesystem.reference @@ -4,7 +4,10 @@ test1 4 4 4 -Test 2: check DatabaseFilesystem access rights on server +Test 2: check DatabaseFilesystem access rights and errors handling on server +OK +OK +OK OK OK OK diff --git a/tests/queries/0_stateless/02722_database_filesystem.sh b/tests/queries/0_stateless/02722_database_filesystem.sh index 0adeface438..80f97af693e 100755 --- a/tests/queries/0_stateless/02722_database_filesystem.sh +++ b/tests/queries/0_stateless/02722_database_filesystem.sh @@ -21,6 +21,7 @@ tmp_dir=${CLICKHOUSE_TEST_UNIQUE_NAME} mkdir $tmp_dir cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${tmp_dir}/tmp.csv cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp/tmp.csv +cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp.myext ################# echo "Test 1: create filesystem database and check implicit calls" @@ -35,24 +36,35 @@ ${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`tmp/tmp.csv\`;" ${CLICKHOUSE_LOCAL} -q "SELECT COUNT(*) FROM \"${tmp_dir}/tmp.csv\"" ################# -echo "Test 2: check DatabaseFilesystem access rights on server" -# Allows list files only inside user_files +echo "Test 2: check DatabaseFilesystem access rights and errors handling on server" +# DATABASE_ACCESS_DENIED: Allows list files only inside user_files ${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM 
test1.\`../tmp.csv\`;" 2>&1| grep -F "Code: 291" > /dev/null && echo "OK" ${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`/tmp/tmp.csv\`;" 2>&1| grep -F "Code: 291" > /dev/null && echo "OK" - ${CLICKHOUSE_CLIENT} --multiline --multiquery --query """ USE test1; SELECT COUNT(*) FROM \"../${tmp_dir}/tmp.csv\"; """ 2>&1| grep -F "Code: 291" > /dev/null && echo "OK" ${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`../../../../../../tmp.csv\`;" 2>&1| grep -F "Code: 291" > /dev/null && echo "OK" + +# BAD_ARGUMENTS: path should be inside user_files ${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ DROP DATABASE IF EXISTS test2; CREATE DATABASE test2 ENGINE = Filesystem('/tmp'); -SELECT COUNT(*) FROM test2.\`tmp.csv\`; -""" 2>&1| grep -F "Code: 291" > /dev/null && echo "OK" +""" 2>&1| grep -F "Code: 36" > /dev/null && echo "OK" + +# BAD_ARGUMENTS: .../user_files/relative_unknown_dir does not exists +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test2; +CREATE DATABASE test2 ENGINE = Filesystem('relative_unknown_dir'); +""" 2>&1| grep -F "Code: 36" > /dev/null && echo "OK" + +# FILE_DOESNT_EXIST: unknown file +${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`tmp2.csv\`;" 2>&1| grep -F "Code: 107" > /dev/null && echo "OK" + +# BAD_ARGUMENTS: Cannot determine the file format by it's extension +${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`tmp.myext\`;" 2>&1| grep -F "Code: 36" > /dev/null && echo "OK" # Clean ${CLICKHOUSE_CLIENT} --query "DROP DATABASE test1;" -${CLICKHOUSE_CLIENT} --query "DROP DATABASE test2;" rm -rd $tmp_dir rm -rd $CLICKHOUSE_USER_FILES_PATH From 6831eb20013aadeec451ac8fb94d894abbfccef9 Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Sun, 30 Apr 2023 14:51:04 +0300 Subject: [PATCH 062/722] fix style --- src/Databases/DatabaseFilesystem.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp index 7f22b8a16a0..8de609f0ca2 100644 --- a/src/Databases/DatabaseFilesystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -1,6 +1,5 @@ #include -#include #include #include #include @@ -11,6 +10,7 @@ #include #include #include +#include #include @@ -67,7 +67,8 @@ void DatabaseFilesystem::addTable(const std::string & table_name, StoragePtr tab getEngineName()); } -bool DatabaseFilesystem::checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const { +bool DatabaseFilesystem::checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const +{ // If run in Local mode, no need for path checking. 
bool need_check_path = context_->getApplicationType() != Context::ApplicationType::LOCAL; std::string user_files_path = fs::canonical(fs::path(context_->getUserFilesPath())).string(); @@ -82,7 +83,8 @@ bool DatabaseFilesystem::checkTableFilePath(const std::string & table_path, Cont } // Check if the corresponding file exists - if (!fs::exists(table_path) || !fs::is_regular_file(table_path)) { + if (!fs::exists(table_path) || !fs::is_regular_file(table_path)) + { if (throw_on_error) throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "File does not exist ({})", table_path); else From 1846b76982828ed3223b25e2e5d6f5c8cee937eb Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Sun, 30 Apr 2023 23:13:42 +0300 Subject: [PATCH 063/722] Added DatabaseS3 with test --- programs/local/LocalServer.cpp | 9 + src/Databases/DatabaseFactory.cpp | 29 ++- src/Databases/DatabaseS3.cpp | 199 ++++++++++++++++++ src/Databases/DatabaseS3.h | 63 ++++++ .../0_stateless/02724_database_s3.reference | 18 ++ .../queries/0_stateless/02724_database_s3.sh | 51 +++++ 6 files changed, 367 insertions(+), 2 deletions(-) create mode 100644 src/Databases/DatabaseS3.cpp create mode 100644 src/Databases/DatabaseS3.h create mode 100644 tests/queries/0_stateless/02724_database_s3.reference create mode 100755 tests/queries/0_stateless/02724_database_s3.sh diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 4939997b323..215a92e1944 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -51,6 +51,8 @@ #include #include +#include "config.h" + #if defined(FUZZING_MODE) #include #endif @@ -59,6 +61,10 @@ # include #endif +#if USE_AWS_S3 +#include +#endif + namespace fs = std::filesystem; @@ -155,6 +161,9 @@ static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, Co auto databaseCombiner = std::make_shared(name_, context_); databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); +#if USE_AWS_S3 + databaseCombiner->registerNextDatabase(std::make_shared(name_, "", "", context_)); +#endif return databaseCombiner; } diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 8a50c31efc8..b21435527a5 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -49,6 +49,10 @@ #include #endif +#if USE_AWS_S3 +#include +#endif + namespace fs = std::filesystem; namespace DB @@ -133,13 +137,13 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String static const std::unordered_set database_engines{"Ordinary", "Atomic", "Memory", "Dictionary", "Lazy", "Replicated", "MySQL", "MaterializeMySQL", "MaterializedMySQL", - "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem"}; + "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3"}; if (!database_engines.contains(engine_name)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine name `{}` does not exist", engine_name); static const std::unordered_set engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL", - "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem"}; + "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3"}; static const std::unordered_set engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"}; bool engine_may_have_arguments = engines_with_arguments.contains(engine_name); @@ -451,6 
+455,27 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String return std::make_shared(database_name, init_path, context); } +#if USE_AWS_S3 + else if (engine_name == "S3") + { + const ASTFunction * engine = engine_define->engine; + + std::string key_id; + std::string secret_key; + + if (engine->arguments && !engine->arguments->children.empty()) + { + if (engine->arguments->children.size() != 2) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "S3 database requires 0 or 2 argument: [access_key_id, secret_access_key]"); + + const auto & arguments = engine->arguments->children; + key_id = safeGetLiteralValue(arguments[0], engine_name); + secret_key = safeGetLiteralValue(arguments[1], engine_name); + } + + return std::make_shared(database_name, key_id, secret_key, context); + } +#endif throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", engine_name); } diff --git a/src/Databases/DatabaseS3.cpp b/src/Databases/DatabaseS3.cpp new file mode 100644 index 00000000000..d4412ba7973 --- /dev/null +++ b/src/Databases/DatabaseS3.cpp @@ -0,0 +1,199 @@ +#include "config.h" + +#if USE_AWS_S3 + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int UNKNOWN_TABLE; + extern const int BAD_ARGUMENTS; + extern const int FILE_DOESNT_EXIST; + extern const int UNACCEPTABLE_URL; + extern const int S3_ERROR; +} + +DatabaseS3::DatabaseS3(const String & name_, const String & key_id, const String & secret_key, ContextPtr context_) + : IDatabase(name_) + , WithContext(context_->getGlobalContext()) + , access_key_id(key_id) + , secret_access_key(secret_key) + , log(&Poco::Logger::get("DatabaseS3(" + name_ + ")")) +{ +} + +void DatabaseS3::addTable(const std::string & table_name, StoragePtr table_storage) const +{ + std::lock_guard lock(mutex); + auto [_, inserted] = loaded_tables.emplace(table_name, table_storage); + if (!inserted) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Table with name `{}` already exists in database `{}` (engine {})", + table_name, + getDatabaseName(), + getEngineName()); +} + +bool DatabaseS3::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const +{ + try + { + S3::URI uri(url); + context_->getGlobalContext()->getRemoteHostFilter().checkURL(uri.uri); + } + catch (...) 
+ { + if (throw_on_error) + throw; + return false; + } + return true; +} + +bool DatabaseS3::isTableExist(const String & name, ContextPtr context_) const +{ + std::lock_guard lock(mutex); + if (loaded_tables.find(name) != loaded_tables.end()) + return true; + + return checkUrl(name, context_, false); +} + +StoragePtr DatabaseS3::getTableImpl(const String & url, ContextPtr context_) const +{ + // Check if the table exists in the loaded tables map + { + std::lock_guard lock(mutex); + auto it = loaded_tables.find(url); + if (it != loaded_tables.end()) + return it->second; + } + + checkUrl(url, context_, true); + + // call TableFunctionS3 + auto args = makeASTFunction( + "s3", + std::make_shared(url), + std::make_shared(access_key_id), + std::make_shared(secret_access_key)); + + auto table_function = TableFunctionFactory::instance().get(args, context_); + if (!table_function) + return nullptr; + + // TableFunctionS3 throws exceptions, if table cannot be created + auto table_storage = table_function->execute(args, context_, url); + if (table_storage) + addTable(url, table_storage); + + return table_storage; +} + +StoragePtr DatabaseS3::getTable(const String & name, ContextPtr context_) const +{ + // rethrow all exceptions from TableFunctionS3 to show correct error to user + if (auto storage = getTableImpl(name, context_)) + return storage; + throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name)); +} + +StoragePtr DatabaseS3::tryGetTable(const String & name, ContextPtr context_) const +{ + try + { + return getTableImpl(name, context_); + } + catch (const Exception & e) + { + // Ignore exceptions thrown by TableFunctionS3, which indicate that there is no table + if (e.code() == ErrorCodes::BAD_ARGUMENTS) + return nullptr; + if (e.code() == ErrorCodes::S3_ERROR) + return nullptr; + if (e.code() == ErrorCodes::FILE_DOESNT_EXIST) + return nullptr; + if (e.code() == ErrorCodes::UNACCEPTABLE_URL) + return nullptr; + throw; + } + catch (const Poco::URISyntaxException &) + { + return nullptr; + } +} + +ASTPtr DatabaseS3::getCreateDatabaseQuery() const +{ + auto settings = getContext()->getSettingsRef(); + ParserCreateQuery parser; + + const String query = fmt::format("CREATE DATABASE {} ENGINE = S3('{}', '{}')", + backQuoteIfNeed(getDatabaseName()), + access_key_id, + secret_access_key); + ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth); + + if (const auto database_comment = getDatabaseComment(); !database_comment.empty()) + { + auto & ast_create_query = ast->as(); + ast_create_query.set(ast_create_query.comment, std::make_shared(database_comment)); + } + + return ast; +} + +void DatabaseS3::shutdown() +{ + Tables tables_snapshot; + { + std::lock_guard lock(mutex); + tables_snapshot = loaded_tables; + } + + for (const auto & kv : tables_snapshot) + { + auto table_id = kv.second->getStorageID(); + kv.second->flushAndShutdown(); + } + + std::lock_guard lock(mutex); + loaded_tables.clear(); +} + +/** + * Returns an empty vector because the database is read-only and no tables can be backed up + */ +std::vector> DatabaseS3::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const +{ + return {}; +} + +/** + * + * Returns an empty iterator because the database does not have its own tables + * But only caches them for quick access + */ +DatabaseTablesIteratorPtr DatabaseS3::getTablesIterator(ContextPtr, const FilterByNameFunction &) const +{ + return 
std::make_unique(Tables{}, getDatabaseName()); +} + +} // DB + +#endif diff --git a/src/Databases/DatabaseS3.h b/src/Databases/DatabaseS3.h new file mode 100644 index 00000000000..d5269e57f5a --- /dev/null +++ b/src/Databases/DatabaseS3.h @@ -0,0 +1,63 @@ +#pragma once + +#include "config.h" + +#if USE_AWS_S3 + +#include +#include +#include +#include +#include + +namespace DB +{ + +class Context; + +/** + * DatabaseS3 provides access to data stored in S3 + * Uses TableFunctionS3 to implicitly load file when a user requests the table, and provides read-only access to the data in the file + * Tables are cached inside the database for quick access + */ +class DatabaseS3 : public IDatabase, protected WithContext +{ +public: + DatabaseS3(const String & name, const String & key_id, const String & secret_key, ContextPtr context); + + String getEngineName() const override { return "S3"; } + + bool isTableExist(const String & name, ContextPtr context) const override; + + StoragePtr getTable(const String & name, ContextPtr context) const override; + + StoragePtr tryGetTable(const String & name, ContextPtr context) const override; + + bool empty() const override { return true; } + + bool isReadOnly() const override { return true; } + + ASTPtr getCreateDatabaseQuery() const override; + + void shutdown() override; + + std::vector> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override; + DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override; + +protected: + StoragePtr getTableImpl(const String & url, ContextPtr context) const; + + void addTable(const std::string & table_name, StoragePtr table_storage) const; + + bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const; + +private: + const String access_key_id; + const String secret_access_key; + mutable Tables loaded_tables TSA_GUARDED_BY(mutex); + Poco::Logger * log; +}; + +} // DB + +#endif diff --git a/tests/queries/0_stateless/02724_database_s3.reference b/tests/queries/0_stateless/02724_database_s3.reference new file mode 100644 index 00000000000..8a985913ff9 --- /dev/null +++ b/tests/queries/0_stateless/02724_database_s3.reference @@ -0,0 +1,18 @@ +Test 1: select from s3 +1 2 3 +4 5 6 +7 8 9 +0 0 0 +test1 +10 11 12 +13 14 15 +16 17 18 +0 0 0 +20 21 22 +23 24 25 +26 27 28 +0 0 0 +Test 2: check exceptions +OK +OK +OK diff --git a/tests/queries/0_stateless/02724_database_s3.sh b/tests/queries/0_stateless/02724_database_s3.sh new file mode 100755 index 00000000000..4f9df402040 --- /dev/null +++ b/tests/queries/0_stateless/02724_database_s3.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# Tags: no-fasttest +# Tag no-fasttest: Depends on AWS + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +################# +echo "Test 1: select from s3" +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test1; +CREATE DATABASE test1 ENGINE = S3; +USE test1; +SELECT * FROM \"http://localhost:11111/test/a.tsv\" +""" +${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test1 +${CLICKHOUSE_CLIENT} -q "DROP DATABASE test1;" + +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test2; +CREATE DATABASE test2 ENGINE = S3('test', 'testtest'); +USE test2; +SELECT * FROM \"http://localhost:11111/test/b.tsv\" +""" +${CLICKHOUSE_CLIENT} -q "DROP DATABASE test2;" + +${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"http://localhost:11111/test/c.tsv\"" + +################# +echo "Test 2: check exceptions" +${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"http://localhost:11111/test/c.myext\"" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK" + +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test3; +CREATE DATABASE test3 ENGINE = S3; +USE test3; +SELECT * FROM \"http://localhost:11111/test/a.myext\" +""" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK" + +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +USE test3; +SELECT * FROM \"abacaba\" +""" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK" + +# Cleanup +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test1; +DROP DATABASE IF EXISTS test2; +DROP DATABASE IF EXISTS test3; +""" From 3d1affbddb3de6c464f05459c1e9e5f34b6ff957 Mon Sep 17 00:00:00 2001 From: Aleksei Golub Date: Mon, 1 May 2023 12:17:10 +0300 Subject: [PATCH 064/722] retrigger checks From c1c69553741af4789170590f8a669d17f2dffbeb Mon Sep 17 00:00:00 2001 From: Alexander Gololobov <440544+davenger@users.noreply.github.com> Date: Wed, 26 Apr 2023 14:06:22 +0200 Subject: [PATCH 065/722] Deprecate delete-on-destroy.txt, do not create it any more --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 17 +---------------- src/Storages/MergeTree/IMergeTreeDataPart.h | 7 +++++-- src/Storages/MergeTree/MergeTreeData.cpp | 7 +++---- 3 files changed, 9 insertions(+), 22 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 148cbf93948..d7f2f3ca7c7 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -953,24 +953,9 @@ void IMergeTreeDataPart::writeVersionMetadata(const VersionMetadata & version_, } } -void IMergeTreeDataPart::writeDeleteOnDestroyMarker() -{ - static constexpr auto marker_path = "delete-on-destroy.txt"; - - try - { - getDataPartStorage().createFile(marker_path); - } - catch (Poco::Exception & e) - { - LOG_ERROR(storage.log, "{} (while creating DeleteOnDestroy marker: {})", - e.what(), (fs::path(getDataPartStorage().getFullPath()) / marker_path).string()); - } -} - void IMergeTreeDataPart::removeDeleteOnDestroyMarker() { - getDataPartStorage().removeFileIfExists("delete-on-destroy.txt"); + getDataPartStorage().removeFileIfExists(DELETE_ON_DESTROY_MARKER_FILE_NAME_DEPRECATED); } void IMergeTreeDataPart::removeVersionMetadata() diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index a36634d2cf9..f7bcaa263d6 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -381,7 +381,8 @@ public: /// default will be stored in this file. 
static inline constexpr auto DEFAULT_COMPRESSION_CODEC_FILE_NAME = "default_compression_codec.txt"; - static inline constexpr auto DELETE_ON_DESTROY_MARKER_FILE_NAME = "delete-on-destroy.txt"; + /// "delete-on-destroy.txt" is deprecated. It is no longer being created, only is removed. + static inline constexpr auto DELETE_ON_DESTROY_MARKER_FILE_NAME_DEPRECATED = "delete-on-destroy.txt"; static inline constexpr auto UUID_FILE_NAME = "uuid.txt"; @@ -456,8 +457,10 @@ public: void writeChecksums(const MergeTreeDataPartChecksums & checksums_, const WriteSettings & settings); - void writeDeleteOnDestroyMarker(); + /// "delete-on-destroy.txt" is deprecated. It is no longer being created, only is removed. + /// TODO: remove this method after some time. void removeDeleteOnDestroyMarker(); + /// It may look like a stupid joke. but these two methods are absolutely unrelated. /// This one is about removing file with metadata about part version (for transactions) void removeVersionMetadata(); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 83f5c0d359c..2def6fb08d3 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1214,7 +1214,7 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( .build(); String part_path = fs::path(relative_data_path) / part_name; - String marker_path = fs::path(part_path) / IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME; + String marker_path = fs::path(part_path) / IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME_DEPRECATED; if (part_disk_ptr->exists(marker_path)) { @@ -4410,7 +4410,6 @@ void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy) /// All other locks are taken in StorageReplicatedMergeTree lockSharedData(*part_copy); - asMutableDeletingPart(original_active_part)->writeDeleteOnDestroyMarker(); return; } } @@ -7174,7 +7173,7 @@ std::pair MergeTreeData::cloneAn for (auto it = src_part->getDataPartStorage().iterate(); it->isValid(); it->next()) { if (!files_to_copy_instead_of_hardlinks.contains(it->name()) - && it->name() != IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME + && it->name() != IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME_DEPRECATED && it->name() != IMergeTreeDataPart::TXN_VERSION_METADATA_FILE_NAME) { hardlinked_files->hardlinks_from_source_part.insert(it->name()); @@ -7189,7 +7188,7 @@ std::pair MergeTreeData::cloneAn { auto file_name_with_projection_prefix = fs::path(projection_storage.getPartDirectory()) / it->name(); if (!files_to_copy_instead_of_hardlinks.contains(file_name_with_projection_prefix) - && it->name() != IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME + && it->name() != IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME_DEPRECATED && it->name() != IMergeTreeDataPart::TXN_VERSION_METADATA_FILE_NAME) { hardlinked_files->hardlinks_from_source_part.insert(file_name_with_projection_prefix); From 64d232f1aa584b3eba5abf9fe02bfa9b0535701c Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Mon, 1 May 2023 18:00:26 +0000 Subject: [PATCH 066/722] Fix memory leak --- src/Interpreters/DatabaseCatalog.cpp | 2 +- tests/queries/0_stateless/02724_database_s3.reference | 6 +++--- tests/queries/0_stateless/02724_database_s3.sh | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index f9e74fadcbd..129323cd6b3 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ 
b/src/Interpreters/DatabaseCatalog.cpp @@ -346,7 +346,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl( catch (const Exception & e) { if (exception) - exception->emplace(*e.clone()); + exception->emplace(e); } if (!table) diff --git a/tests/queries/0_stateless/02724_database_s3.reference b/tests/queries/0_stateless/02724_database_s3.reference index 8a985913ff9..b3800a27305 100644 --- a/tests/queries/0_stateless/02724_database_s3.reference +++ b/tests/queries/0_stateless/02724_database_s3.reference @@ -8,9 +8,9 @@ test1 13 14 15 16 17 18 0 0 0 -20 21 22 -23 24 25 -26 27 28 +10 11 12 +13 14 15 +16 17 18 0 0 0 Test 2: check exceptions OK diff --git a/tests/queries/0_stateless/02724_database_s3.sh b/tests/queries/0_stateless/02724_database_s3.sh index 4f9df402040..9b539407884 100755 --- a/tests/queries/0_stateless/02724_database_s3.sh +++ b/tests/queries/0_stateless/02724_database_s3.sh @@ -25,7 +25,7 @@ SELECT * FROM \"http://localhost:11111/test/b.tsv\" """ ${CLICKHOUSE_CLIENT} -q "DROP DATABASE test2;" -${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"http://localhost:11111/test/c.tsv\"" +${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"http://localhost:11111/test/b.tsv\"" ################# echo "Test 2: check exceptions" From 95522ad7a6486bdbe5861c4f65c3a0ffe9610372 Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Mon, 1 May 2023 21:46:17 +0000 Subject: [PATCH 067/722] Added DatabaseHDFS --- programs/local/LocalServer.cpp | 7 + src/Databases/DatabaseFactory.cpp | 31 ++- src/Databases/DatabaseHDFS.cpp | 228 ++++++++++++++++++ src/Databases/DatabaseHDFS.h | 65 +++++ .../0_stateless/02725_database_hdfs.reference | 16 ++ .../0_stateless/02725_database_hdfs.sh | 66 +++++ 6 files changed, 411 insertions(+), 2 deletions(-) create mode 100644 src/Databases/DatabaseHDFS.cpp create mode 100644 src/Databases/DatabaseHDFS.h create mode 100644 tests/queries/0_stateless/02725_database_hdfs.reference create mode 100755 tests/queries/0_stateless/02725_database_hdfs.sh diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 215a92e1944..0cf94892171 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -65,6 +65,10 @@ #include #endif +#if USE_HDFS +#include +#endif + namespace fs = std::filesystem; @@ -163,6 +167,9 @@ static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, Co databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); #if USE_AWS_S3 databaseCombiner->registerNextDatabase(std::make_shared(name_, "", "", context_)); +#endif +#if USE_HDFS + databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); #endif return databaseCombiner; } diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index b21435527a5..5c4256c8a9f 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -53,6 +53,10 @@ #include #endif +#if USE_HDFS +#include +#endif + namespace fs = std::filesystem; namespace DB @@ -137,13 +141,13 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String static const std::unordered_set database_engines{"Ordinary", "Atomic", "Memory", "Dictionary", "Lazy", "Replicated", "MySQL", "MaterializeMySQL", "MaterializedMySQL", - "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3"}; + "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"}; if (!database_engines.contains(engine_name)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine name `{}` does not exist", 
engine_name); static const std::unordered_set engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL", - "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3"}; + "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"}; static const std::unordered_set engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"}; bool engine_may_have_arguments = engines_with_arguments.contains(engine_name); @@ -437,6 +441,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String return std::make_shared(context, engine_define, create.attach, database_path); } #endif + else if (engine_name == "Filesystem") { const ASTFunction * engine = engine_define->engine; @@ -455,6 +460,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String return std::make_shared(database_name, init_path, context); } + #if USE_AWS_S3 else if (engine_name == "S3") { @@ -477,6 +483,27 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String } #endif +#if USE_HDFS + else if (engine_name == "HDFS") + { + const ASTFunction * engine = engine_define->engine; + + /// If source_url is empty, then table name must contain full url + std::string source_url; + + if (engine->arguments && !engine->arguments->children.empty()) + { + if (engine->arguments->children.size() != 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "HDFS database requires at most 1 argument: source_url"); + + const auto & arguments = engine->arguments->children; + source_url = safeGetLiteralValue(arguments[0], engine_name); + } + + return std::make_shared(database_name, source_url, context); + } +#endif + throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", engine_name); } diff --git a/src/Databases/DatabaseHDFS.cpp b/src/Databases/DatabaseHDFS.cpp new file mode 100644 index 00000000000..39c3f955bf5 --- /dev/null +++ b/src/Databases/DatabaseHDFS.cpp @@ -0,0 +1,228 @@ +#include "config.h" + +#if USE_HDFS + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +namespace fs = std::filesystem; + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int UNKNOWN_TABLE; + extern const int BAD_ARGUMENTS; + extern const int FILE_DOESNT_EXIST; + extern const int UNACCEPTABLE_URL; + extern const int ACCESS_DENIED; + extern const int DATABASE_ACCESS_DENIED; + extern const int HDFS_ERROR; + extern const int CANNOT_EXTRACT_TABLE_STRUCTURE; +} + +static constexpr std::string_view HDFS_HOST_REGEXP = "^hdfs://[^/]*"; + + +DatabaseHDFS::DatabaseHDFS(const String & name_, const String & source_url, ContextPtr context_) + : IDatabase(name_) + , WithContext(context_->getGlobalContext()) + , source(source_url) + , log(&Poco::Logger::get("DatabaseHDFS(" + name_ + ")")) +{ + if (!source.empty()) + { + if (!re2::RE2::FullMatch(source, std::string(HDFS_HOST_REGEXP))) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs host: {}. 
It should have structure 'hdfs://:'", source); + context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(source)); + } +} + +void DatabaseHDFS::addTable(const std::string & table_name, StoragePtr table_storage) const +{ + std::lock_guard lock(mutex); + auto [_, inserted] = loaded_tables.emplace(table_name, table_storage); + if (!inserted) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Table with name `{}` already exists in database `{}` (engine {})", + table_name, + getDatabaseName(), + getEngineName()); +} + +std::string DatabaseHDFS::getTablePath(const std::string & table_name) const +{ + if (table_name.starts_with("hdfs://")) + return table_name; + if (source.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs url: {}. It should have structure 'hdfs://:/path'", table_name); + return (fs::path(source) / table_name).string(); +} + +bool DatabaseHDFS::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const +{ + try + { + checkHDFSURL(url); + context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(url)); + } + catch (...) + { + if (throw_on_error) + throw; + return false; + } + + return true; +} + +bool DatabaseHDFS::isTableExist(const String & name, ContextPtr context_) const +{ + std::lock_guard lock(mutex); + if (loaded_tables.find(name) != loaded_tables.end()) + return true; + + return checkUrl(name, context_, false); +} + +StoragePtr DatabaseHDFS::getTableImpl(const String & name, ContextPtr context_) const +{ + // Check if the table exists in the loaded tables map + { + std::lock_guard lock(mutex); + auto it = loaded_tables.find(name); + if (it != loaded_tables.end()) + return it->second; + } + + auto url = getTablePath(name); + + checkUrl(url, context_, true); + + // call TableFunctionHDFS + auto args = makeASTFunction("hdfs", std::make_shared(url)); + + auto table_function = TableFunctionFactory::instance().get(args, context_); + if (!table_function) + return nullptr; + + // TableFunctionHDFS throws exceptions, if table cannot be created + auto table_storage = table_function->execute(args, context_, name); + if (table_storage) + addTable(name, table_storage); + + return table_storage; +} + +StoragePtr DatabaseHDFS::getTable(const String & name, ContextPtr context_) const +{ + // rethrow all exceptions from TableFunctionHDFS to show correct error to user + if (auto storage = getTableImpl(name, context_)) + return storage; + throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name)); +} + +StoragePtr DatabaseHDFS::tryGetTable(const String & name, ContextPtr context_) const +{ + try + { + return getTableImpl(name, context_); + } + catch (const Exception & e) + { + // Ignore exceptions thrown by TableFunctionHDFS, which indicate that there is no table + if (e.code() == ErrorCodes::BAD_ARGUMENTS) + return nullptr; + if (e.code() == ErrorCodes::ACCESS_DENIED) + return nullptr; + if (e.code() == ErrorCodes::DATABASE_ACCESS_DENIED) + return nullptr; + if (e.code() == ErrorCodes::FILE_DOESNT_EXIST) + return nullptr; + if (e.code() == ErrorCodes::UNACCEPTABLE_URL) + return nullptr; + if (e.code() == ErrorCodes::HDFS_ERROR) + return nullptr; + if (e.code() == ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE) + return nullptr; + throw; + } + catch (const Poco::URISyntaxException &) + { + return nullptr; + } +} + +ASTPtr DatabaseHDFS::getCreateDatabaseQuery() const +{ + auto settings = getContext()->getSettingsRef(); + ParserCreateQuery parser; + + const 
String query = fmt::format("CREATE DATABASE {} ENGINE = HDFS('{}')", backQuoteIfNeed(getDatabaseName()), source); + ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth); + + if (const auto database_comment = getDatabaseComment(); !database_comment.empty()) + { + auto & ast_create_query = ast->as(); + ast_create_query.set(ast_create_query.comment, std::make_shared(database_comment)); + } + + return ast; +} + +void DatabaseHDFS::shutdown() +{ + Tables tables_snapshot; + { + std::lock_guard lock(mutex); + tables_snapshot = loaded_tables; + } + + for (const auto & kv : tables_snapshot) + { + auto table_id = kv.second->getStorageID(); + kv.second->flushAndShutdown(); + } + + std::lock_guard lock(mutex); + loaded_tables.clear(); +} + +/** + * Returns an empty vector because the database is read-only and no tables can be backed up + */ +std::vector> DatabaseHDFS::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const +{ + return {}; +} + +/** + * + * Returns an empty iterator because the database does not have its own tables + * But only caches them for quick access + */ +DatabaseTablesIteratorPtr DatabaseHDFS::getTablesIterator(ContextPtr, const FilterByNameFunction &) const +{ + return std::make_unique(Tables{}, getDatabaseName()); +} + +} // DB + +#endif diff --git a/src/Databases/DatabaseHDFS.h b/src/Databases/DatabaseHDFS.h new file mode 100644 index 00000000000..4e2b8578fcd --- /dev/null +++ b/src/Databases/DatabaseHDFS.h @@ -0,0 +1,65 @@ +#pragma once + +#include "config.h" + +#if USE_HDFS + +#include +#include +#include +#include +#include + +namespace DB +{ + +class Context; + +/** + * DatabaseHDFS allows to interact with files stored on the file system + * Uses TableFunctionHDFS to implicitly load file when a user requests the table, and provides read-only access to the data in the file + * Tables are cached inside the database for quick access + */ +class DatabaseHDFS : public IDatabase, protected WithContext +{ +public: + DatabaseHDFS(const String & name, const String & source_url, ContextPtr context); + + String getEngineName() const override { return "S3"; } + + bool isTableExist(const String & name, ContextPtr context) const override; + + StoragePtr getTable(const String & name, ContextPtr context) const override; + + StoragePtr tryGetTable(const String & name, ContextPtr context) const override; + + bool empty() const override { return true; } + + bool isReadOnly() const override { return true; } + + ASTPtr getCreateDatabaseQuery() const override; + + void shutdown() override; + + std::vector> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override; + DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override; + +protected: + StoragePtr getTableImpl(const String & url, ContextPtr context) const; + + void addTable(const std::string & table_name, StoragePtr table_storage) const; + + bool checkUrl(const std::string & name, ContextPtr context_, bool throw_on_error) const; + + std::string getTablePath(const std::string & table_name) const; + +private: + const String source; + + mutable Tables loaded_tables TSA_GUARDED_BY(mutex); + Poco::Logger * log; +}; + +} // DB + +#endif diff --git a/tests/queries/0_stateless/02725_database_hdfs.reference b/tests/queries/0_stateless/02725_database_hdfs.reference new file mode 100644 index 00000000000..2a2e6c20aaa --- /dev/null +++ b/tests/queries/0_stateless/02725_database_hdfs.reference @@ -0,0 +1,16 @@ 
+Test 1: select from hdfs database +1 2 3 +test1 +1 2 3 +test2 +4 5 6 +Test 2: check exceptions +OK0 +OK1 +OK2 +OK3 +OK4 +OK5 +OK6 +OK7 +OK8 diff --git a/tests/queries/0_stateless/02725_database_hdfs.sh b/tests/queries/0_stateless/02725_database_hdfs.sh new file mode 100755 index 00000000000..ea16dd4024c --- /dev/null +++ b/tests/queries/0_stateless/02725_database_hdfs.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, use-hdfs + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# Prepare data +${CLICKHOUSE_CLIENT} -q "insert into table function hdfs('hdfs://localhost:12222/test_02725_1.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 1, 2, 3 settings hdfs_truncate_on_insert=1;" +${CLICKHOUSE_CLIENT} -q "insert into table function hdfs('hdfs://localhost:12222/test_02725_2.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 4, 5, 6 settings hdfs_truncate_on_insert=1;" + +################# +echo "Test 1: select from hdfs database" + +# Database without specific host +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test1; +CREATE DATABASE test1 ENGINE = HDFS; +USE test1; +SELECT * FROM \"hdfs://localhost:12222/test_02725_1.tsv\" +""" +${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test1 + +# Database with host +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test2; +CREATE DATABASE test2 ENGINE = HDFS('hdfs://localhost:12222'); +USE test2; +SELECT * FROM \"test_02725_1.tsv\" +""" +${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test2 + +# Check implicit call in clickhouse-local +${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"hdfs://localhost:12222/test_02725_2.tsv\"" + +################# +echo "Test 2: check exceptions" +${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"hdfs://localhost:12222/file.myext\"" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK0" +${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"hdfs://localhost:12222/test_02725_3.tsv\"" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK1" +${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"hdfs://localhost:12222\"" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK2" + +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test3; +CREATE DATABASE test3 ENGINE = HDFS('abacaba'); +""" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK3" + +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test4; +CREATE DATABASE test4 ENGINE = HDFS; +USE test4; +SELECT * FROM \"abacaba/file.tsv\" +""" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK4" + +${CLICKHOUSE_CLIENT} -q "SELECT * FROM test4.\`http://localhost:11111/test/a.tsv\`" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK5" +${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222/file.myext\`" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK6" +${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222/test_02725_3.tsv\`" 2>&1| grep -F "CANNOT_EXTRACT_TABLE_STRUCTURE" > /dev/null && echo "OK7" +${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222\`" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK8" + + +# Cleanup +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test1; +DROP DATABASE IF EXISTS test2; +DROP DATABASE IF EXISTS test3; +DROP DATABASE IF EXISTS test4; +""" \ No newline at end of file From 82bb1e8bf2a3183179938629cc8f6aab3d876e87 Mon 
Sep 17 00:00:00 2001 From: alekseygolub Date: Tue, 2 May 2023 18:51:35 +0000 Subject: [PATCH 068/722] Fix build and try fix tests --- src/Databases/DatabaseHDFS.h | 4 ++-- tests/queries/0_stateless/02724_database_s3.sh | 2 +- tests/queries/0_stateless/02725_database_hdfs.sh | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Databases/DatabaseHDFS.h b/src/Databases/DatabaseHDFS.h index 4e2b8578fcd..9a506c5c8ac 100644 --- a/src/Databases/DatabaseHDFS.h +++ b/src/Databases/DatabaseHDFS.h @@ -45,11 +45,11 @@ public: DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override; protected: - StoragePtr getTableImpl(const String & url, ContextPtr context) const; + StoragePtr getTableImpl(const String & name, ContextPtr context) const; void addTable(const std::string & table_name, StoragePtr table_storage) const; - bool checkUrl(const std::string & name, ContextPtr context_, bool throw_on_error) const; + bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const; std::string getTablePath(const std::string & table_name) const; diff --git a/tests/queries/0_stateless/02724_database_s3.sh b/tests/queries/0_stateless/02724_database_s3.sh index 9b539407884..af858d140d7 100755 --- a/tests/queries/0_stateless/02724_database_s3.sh +++ b/tests/queries/0_stateless/02724_database_s3.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-parallel # Tag no-fasttest: Depends on AWS CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) diff --git a/tests/queries/0_stateless/02725_database_hdfs.sh b/tests/queries/0_stateless/02725_database_hdfs.sh index ea16dd4024c..8d4e982504a 100755 --- a/tests/queries/0_stateless/02725_database_hdfs.sh +++ b/tests/queries/0_stateless/02725_database_hdfs.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, use-hdfs +# Tags: no-fasttest, use-hdfs, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 58cb6c7837872ae4eb46eed84d5aa0d75607d661 Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Tue, 2 May 2023 19:57:36 +0000 Subject: [PATCH 069/722] S3, HDFS only for explicit creation --- programs/local/LocalServer.cpp | 14 -------------- .../0_stateless/02724_database_s3.reference | 5 ----- tests/queries/0_stateless/02724_database_s3.sh | 4 ---- .../0_stateless/02725_database_hdfs.reference | 4 ---- .../queries/0_stateless/02725_database_hdfs.sh | 18 ++++++------------ 5 files changed, 6 insertions(+), 39 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 0cf94892171..b413483686a 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -61,14 +61,6 @@ # include #endif -#if USE_AWS_S3 -#include -#endif - -#if USE_HDFS -#include -#endif - namespace fs = std::filesystem; @@ -165,12 +157,6 @@ static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, Co auto databaseCombiner = std::make_shared(name_, context_); databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); -#if USE_AWS_S3 - databaseCombiner->registerNextDatabase(std::make_shared(name_, "", "", context_)); -#endif -#if USE_HDFS - databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); -#endif return databaseCombiner; } diff --git a/tests/queries/0_stateless/02724_database_s3.reference b/tests/queries/0_stateless/02724_database_s3.reference 
index b3800a27305..72ba0e240b1 100644 --- a/tests/queries/0_stateless/02724_database_s3.reference +++ b/tests/queries/0_stateless/02724_database_s3.reference @@ -8,11 +8,6 @@ test1 13 14 15 16 17 18 0 0 0 -10 11 12 -13 14 15 -16 17 18 -0 0 0 Test 2: check exceptions OK OK -OK diff --git a/tests/queries/0_stateless/02724_database_s3.sh b/tests/queries/0_stateless/02724_database_s3.sh index af858d140d7..2758580a355 100755 --- a/tests/queries/0_stateless/02724_database_s3.sh +++ b/tests/queries/0_stateless/02724_database_s3.sh @@ -25,12 +25,8 @@ SELECT * FROM \"http://localhost:11111/test/b.tsv\" """ ${CLICKHOUSE_CLIENT} -q "DROP DATABASE test2;" -${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"http://localhost:11111/test/b.tsv\"" - ################# echo "Test 2: check exceptions" -${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"http://localhost:11111/test/c.myext\"" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK" - ${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ DROP DATABASE IF EXISTS test3; CREATE DATABASE test3 ENGINE = S3; diff --git a/tests/queries/0_stateless/02725_database_hdfs.reference b/tests/queries/0_stateless/02725_database_hdfs.reference index 2a2e6c20aaa..ef8adae2bbc 100644 --- a/tests/queries/0_stateless/02725_database_hdfs.reference +++ b/tests/queries/0_stateless/02725_database_hdfs.reference @@ -3,7 +3,6 @@ Test 1: select from hdfs database test1 1 2 3 test2 -4 5 6 Test 2: check exceptions OK0 OK1 @@ -11,6 +10,3 @@ OK2 OK3 OK4 OK5 -OK6 -OK7 -OK8 diff --git a/tests/queries/0_stateless/02725_database_hdfs.sh b/tests/queries/0_stateless/02725_database_hdfs.sh index 8d4e982504a..a78f3e6bbdc 100755 --- a/tests/queries/0_stateless/02725_database_hdfs.sh +++ b/tests/queries/0_stateless/02725_database_hdfs.sh @@ -30,31 +30,25 @@ SELECT * FROM \"test_02725_1.tsv\" """ ${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test2 -# Check implicit call in clickhouse-local -${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"hdfs://localhost:12222/test_02725_2.tsv\"" - ################# echo "Test 2: check exceptions" -${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"hdfs://localhost:12222/file.myext\"" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK0" -${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"hdfs://localhost:12222/test_02725_3.tsv\"" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK1" -${CLICKHOUSE_LOCAL} --query "SELECT * FROM \"hdfs://localhost:12222\"" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK2" ${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ DROP DATABASE IF EXISTS test3; CREATE DATABASE test3 ENGINE = HDFS('abacaba'); -""" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK3" +""" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK0" ${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ DROP DATABASE IF EXISTS test4; CREATE DATABASE test4 ENGINE = HDFS; USE test4; SELECT * FROM \"abacaba/file.tsv\" -""" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK4" +""" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK1" -${CLICKHOUSE_CLIENT} -q "SELECT * FROM test4.\`http://localhost:11111/test/a.tsv\`" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK5" -${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222/file.myext\`" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK6" -${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222/test_02725_3.tsv\`" 2>&1| grep -F "CANNOT_EXTRACT_TABLE_STRUCTURE" > /dev/null && echo "OK7" -${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222\`" 2>&1| grep 
-F "BAD_ARGUMENTS" > /dev/null && echo "OK8" +${CLICKHOUSE_CLIENT} -q "SELECT * FROM test4.\`http://localhost:11111/test/a.tsv\`" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK2" +${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222/file.myext\`" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK3" +${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222/test_02725_3.tsv\`" 2>&1| grep -F "CANNOT_EXTRACT_TABLE_STRUCTURE" > /dev/null && echo "OK4" +${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222\`" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK5" # Cleanup From 963d6be120da9c00c583cec2a051f9386de47b0e Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Thu, 4 May 2023 16:44:08 +0000 Subject: [PATCH 070/722] Added configurations for DatabaseS3 --- src/Databases/DatabaseFactory.cpp | 13 +- src/Databases/DatabaseS3.cpp | 159 +++++++++++++++--- src/Databases/DatabaseS3.h | 20 ++- tests/config/config.d/named_collection.xml | 5 + .../0_stateless/02724_database_s3.reference | 12 ++ .../queries/0_stateless/02724_database_s3.sh | 28 ++- 6 files changed, 202 insertions(+), 35 deletions(-) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 5c4256c8a9f..41ca1de6a0e 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -466,20 +466,15 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String { const ASTFunction * engine = engine_define->engine; - std::string key_id; - std::string secret_key; + DatabaseS3::Configuration config; if (engine->arguments && !engine->arguments->children.empty()) { - if (engine->arguments->children.size() != 2) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "S3 database requires 0 or 2 argument: [access_key_id, secret_access_key]"); - - const auto & arguments = engine->arguments->children; - key_id = safeGetLiteralValue(arguments[0], engine_name); - secret_key = safeGetLiteralValue(arguments[1], engine_name); + ASTs & engine_args = engine->arguments->children; + config = DatabaseS3::parseArguments(engine_args, context); } - return std::make_shared(database_name, key_id, secret_key, context); + return std::make_shared(database_name, config, context); } #endif diff --git a/src/Databases/DatabaseS3.cpp b/src/Databases/DatabaseS3.cpp index d4412ba7973..f4aafc5d03a 100644 --- a/src/Databases/DatabaseS3.cpp +++ b/src/Databases/DatabaseS3.cpp @@ -4,19 +4,36 @@ #include -#include #include +#include +#include #include #include #include -#include #include +#include +#include #include +#include #include +#include +#include + +#include "DatabaseS3.h" + +namespace fs = std::filesystem; + namespace DB { +static const std::unordered_set optional_configuration_keys = { + "url", + "access_key_id", + "secret_access_key", + "no_sign_request" +}; + namespace ErrorCodes { extern const int LOGICAL_ERROR; @@ -25,13 +42,14 @@ namespace ErrorCodes extern const int FILE_DOESNT_EXIST; extern const int UNACCEPTABLE_URL; extern const int S3_ERROR; + + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } -DatabaseS3::DatabaseS3(const String & name_, const String & key_id, const String & secret_key, ContextPtr context_) +DatabaseS3::DatabaseS3(const String & name_, const Configuration& config_, ContextPtr context_) : IDatabase(name_) , WithContext(context_->getGlobalContext()) - , access_key_id(key_id) - , secret_access_key(secret_key) + , config(config_) , log(&Poco::Logger::get("DatabaseS3(" + name_ + ")")) { } @@ -49,6 +67,20 @@ 
void DatabaseS3::addTable(const std::string & table_name, StoragePtr table_stora getEngineName()); } +std::string DatabaseS3::getFullUrl(const std::string & name) const +{ + try + { + S3::URI uri(name); + } + catch (...) + { + return (fs::path(config.url_prefix) / name).string(); + } + + return name; +} + bool DatabaseS3::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const { try @@ -71,36 +103,49 @@ bool DatabaseS3::isTableExist(const String & name, ContextPtr context_) const if (loaded_tables.find(name) != loaded_tables.end()) return true; - return checkUrl(name, context_, false); + return checkUrl(getFullUrl(name), context_, false); } -StoragePtr DatabaseS3::getTableImpl(const String & url, ContextPtr context_) const +StoragePtr DatabaseS3::getTableImpl(const String & name, ContextPtr context_) const { // Check if the table exists in the loaded tables map { std::lock_guard lock(mutex); - auto it = loaded_tables.find(url); + auto it = loaded_tables.find(name); if (it != loaded_tables.end()) return it->second; } + auto url = getFullUrl(name); + checkUrl(url, context_, true); // call TableFunctionS3 - auto args = makeASTFunction( - "s3", - std::make_shared(url), - std::make_shared(access_key_id), - std::make_shared(secret_access_key)); + auto function = std::make_shared(); - auto table_function = TableFunctionFactory::instance().get(args, context_); + function->name = "s3"; + function->arguments = std::make_shared(); + function->children.push_back(function->arguments); + + function->arguments->children.push_back(std::make_shared(url)); + if (config.no_sign_request) + { + function->arguments->children.push_back(std::make_shared("NOSIGN")); + } + else if (config.access_key_id.has_value() && config.secret_access_key.has_value()) + { + function->arguments->children.push_back(std::make_shared(config.access_key_id.value())); + function->arguments->children.push_back(std::make_shared(config.secret_access_key.value())); + } + + auto table_function = TableFunctionFactory::instance().get(function, context_); if (!table_function) return nullptr; // TableFunctionS3 throws exceptions, if table cannot be created - auto table_storage = table_function->execute(args, context_, url); + auto table_storage = table_function->execute(function, context_, name); if (table_storage) - addTable(url, table_storage); + addTable(name, table_storage); return table_storage; } @@ -143,10 +188,14 @@ ASTPtr DatabaseS3::getCreateDatabaseQuery() const auto settings = getContext()->getSettingsRef(); ParserCreateQuery parser; - const String query = fmt::format("CREATE DATABASE {} ENGINE = S3('{}', '{}')", - backQuoteIfNeed(getDatabaseName()), - access_key_id, - secret_access_key); + std::string creation_args; + creation_args += fmt::format("'{}'", config.url_prefix); + if (config.no_sign_request) + creation_args += ", 'NOSIGN'"; + else if (config.access_key_id.has_value() && config.secret_access_key.has_value()) + creation_args += fmt::format(", '{}', '{}'", config.access_key_id.value(), config.secret_access_key.value()); + + const String query = fmt::format("CREATE DATABASE {} ENGINE = S3({})", backQuoteIfNeed(getDatabaseName()), creation_args); ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth); if (const auto database_comment = getDatabaseComment(); !database_comment.empty()) @@ -176,6 +225,76 @@ void DatabaseS3::shutdown() loaded_tables.clear(); } +DatabaseS3::Configuration DatabaseS3::parseArguments(ASTs engine_args, ContextPtr context_) 
+{ + Configuration result; + + if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context_)) + { + auto & collection = *named_collection; + + validateNamedCollection(collection, {}, optional_configuration_keys); + + result.url_prefix = collection.getOrDefault("url", ""); + result.no_sign_request = collection.getOrDefault("no_sign_request", false); + + auto key_id = collection.getOrDefault("access_key_id", ""); + auto secret_key = collection.getOrDefault("secret_access_key", ""); + + if (!key_id.empty()) + result.access_key_id = key_id; + + if (!secret_key.empty()) + result.secret_access_key = secret_key; + } + else + { + auto supported_signature = + " - S3()\n" + " - S3('url')\n" + " - S3('url', 'NOSIGN')\n" + " - S3('url', 'access_key_id', 'secret_access_key')\n"; + const auto error_message = + fmt::format("Engine DatabaseS3 must have the following arguments signature\n{}", supported_signature); + + for (auto & arg : engine_args) + arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context_); + + if (engine_args.size() > 3) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, error_message.c_str()); + + if (engine_args.empty()) + return result; + + result.url_prefix = checkAndGetLiteralArgument(engine_args[0], "url"); + + // url, NOSIGN + if (engine_args.size() == 2) + { + auto second_arg = checkAndGetLiteralArgument(engine_args[1], "NOSIGN"); + if (boost::iequals(second_arg, "NOSIGN")) + result.no_sign_request = true; + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str()); + } + + // url, access_key_id, secret_access_key + if (engine_args.size() == 3) + { + auto key_id = checkAndGetLiteralArgument(engine_args[1], "access_key_id"); + auto secret_key = checkAndGetLiteralArgument(engine_args[2], "secret_access_key"); + + if (key_id.empty() || secret_key.empty() || boost::iequals(key_id, "NOSIGN")) + throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str()); + + result.access_key_id = key_id; + result.secret_access_key = secret_key; + } + } + + return result; +} + /** * Returns an empty vector because the database is read-only and no tables can be backed up */ diff --git a/src/Databases/DatabaseS3.h b/src/Databases/DatabaseS3.h index d5269e57f5a..65f80dca2ba 100644 --- a/src/Databases/DatabaseS3.h +++ b/src/Databases/DatabaseS3.h @@ -23,7 +23,17 @@ class Context; class DatabaseS3 : public IDatabase, protected WithContext { public: - DatabaseS3(const String & name, const String & key_id, const String & secret_key, ContextPtr context); + struct Configuration + { + std::string url_prefix; + + bool no_sign_request = false; + + std::optional access_key_id; + std::optional secret_access_key; + }; + + DatabaseS3(const String & name, const Configuration& config, ContextPtr context); String getEngineName() const override { return "S3"; } @@ -44,6 +54,8 @@ public: std::vector> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override; DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override; + static Configuration parseArguments(ASTs engine_args, ContextPtr context); + protected: StoragePtr getTableImpl(const String & url, ContextPtr context) const; @@ -51,9 +63,11 @@ protected: bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const; + std::string getFullUrl(const std::string & name) const; + private: - const String access_key_id; - const String secret_access_key; + const Configuration config; + mutable Tables loaded_tables 
TSA_GUARDED_BY(mutex); Poco::Logger * log; }; diff --git a/tests/config/config.d/named_collection.xml b/tests/config/config.d/named_collection.xml index 2e49c0c596f..5b716a7b8da 100644 --- a/tests/config/config.d/named_collection.xml +++ b/tests/config/config.d/named_collection.xml @@ -32,5 +32,10 @@ testtest auto + + http://localhost:11111/test/ + test + testtest + diff --git a/tests/queries/0_stateless/02724_database_s3.reference b/tests/queries/0_stateless/02724_database_s3.reference index 72ba0e240b1..811e38b7f2b 100644 --- a/tests/queries/0_stateless/02724_database_s3.reference +++ b/tests/queries/0_stateless/02724_database_s3.reference @@ -8,6 +8,18 @@ test1 13 14 15 16 17 18 0 0 0 +10 11 12 +13 14 15 +16 17 18 +0 0 0 +1 2 3 +4 5 6 +7 8 9 +0 0 0 +10 11 12 +13 14 15 +16 17 18 +0 0 0 Test 2: check exceptions OK OK diff --git a/tests/queries/0_stateless/02724_database_s3.sh b/tests/queries/0_stateless/02724_database_s3.sh index 2758580a355..ac1b97beecf 100755 --- a/tests/queries/0_stateless/02724_database_s3.sh +++ b/tests/queries/0_stateless/02724_database_s3.sh @@ -15,15 +15,35 @@ USE test1; SELECT * FROM \"http://localhost:11111/test/a.tsv\" """ ${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test1 -${CLICKHOUSE_CLIENT} -q "DROP DATABASE test1;" +# check credentials with absolute path ${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ DROP DATABASE IF EXISTS test2; -CREATE DATABASE test2 ENGINE = S3('test', 'testtest'); +CREATE DATABASE test2 ENGINE = S3('', 'test', 'testtest'); USE test2; SELECT * FROM \"http://localhost:11111/test/b.tsv\" """ -${CLICKHOUSE_CLIENT} -q "DROP DATABASE test2;" + +# check credentials with relative path +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test4; +CREATE DATABASE test4 ENGINE = S3('http://localhost:11111/test', 'test', 'testtest'); +USE test4; +SELECT * FROM \"b.tsv\" +""" + +# check that database url_prefix is ignored if pass full url as table name +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +USE test4; +SELECT * FROM \"http://localhost:11111/test/a.tsv\" +""" + +# Check named collection loading +${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +DROP DATABASE IF EXISTS test5; +CREATE DATABASE test5 ENGINE = S3(s3_conn_db); +SELECT * FROM test5.\`b.tsv\` +""" ################# echo "Test 2: check exceptions" @@ -44,4 +64,6 @@ ${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ DROP DATABASE IF EXISTS test1; DROP DATABASE IF EXISTS test2; DROP DATABASE IF EXISTS test3; +DROP DATABASE IF EXISTS test4; +DROP DATABASE IF EXISTS test5; """ From f083372c0cd5618b07873e886078ac86aecf54cd Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Thu, 4 May 2023 16:46:51 +0000 Subject: [PATCH 071/722] remove extra include --- src/Databases/DatabaseS3.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Databases/DatabaseS3.cpp b/src/Databases/DatabaseS3.cpp index f4aafc5d03a..5529f582572 100644 --- a/src/Databases/DatabaseS3.cpp +++ b/src/Databases/DatabaseS3.cpp @@ -20,8 +20,6 @@ #include #include -#include "DatabaseS3.h" - namespace fs = std::filesystem; namespace DB From 814a3f04cd421c991e6976fa314f13e96e96069f Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Thu, 4 May 2023 17:12:35 +0000 Subject: [PATCH 072/722] fix style --- src/Databases/DatabaseS3.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseS3.cpp b/src/Databases/DatabaseS3.cpp index 5529f582572..bc318ecd9bf 100644 --- a/src/Databases/DatabaseS3.cpp +++ b/src/Databases/DatabaseS3.cpp @@ -253,7 +253,7 
@@ DatabaseS3::Configuration DatabaseS3::parseArguments(ASTs engine_args, ContextPt " - S3('url', 'NOSIGN')\n" " - S3('url', 'access_key_id', 'secret_access_key')\n"; const auto error_message = - fmt::format("Engine DatabaseS3 must have the following arguments signature\n{}", supported_signature); + fmt::format("Engine DatabaseS3 must have the following arguments signature\n{}", supported_signature); for (auto & arg : engine_args) arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context_); From 367583b96ea53a5203163a90e49581b2bdf225de Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Thu, 4 May 2023 17:38:41 +0000 Subject: [PATCH 073/722] retriger checks From a218f010e85c0bc995df17f5416bee4bcd8e6148 Mon Sep 17 00:00:00 2001 From: zvonand Date: Fri, 5 May 2023 14:12:14 +0200 Subject: [PATCH 074/722] First portion of speedup Do not generate DateLUT for each serialized and deserialized date --- src/Common/LocalDate.h | 8 ++++---- .../Serializations/SerializationDate.cpp | 11 +++++++---- src/DataTypes/Serializations/SerializationDate.h | 5 ++++- src/Functions/FunctionsConversion.h | 12 ++++++------ src/IO/ReadHelpers.h | 16 ++++++++-------- src/IO/WriteHelpers.h | 8 ++++---- 6 files changed, 33 insertions(+), 27 deletions(-) diff --git a/src/Common/LocalDate.h b/src/Common/LocalDate.h index dc36f92bebf..4a383129ae4 100644 --- a/src/Common/LocalDate.h +++ b/src/Common/LocalDate.h @@ -61,17 +61,17 @@ public: init(time); } - LocalDate(DayNum day_num) /// NOLINT + LocalDate(DayNum day_num, const DateLUTImpl & time_zone = DateLUT::instance()) /// NOLINT { - const auto & values = DateLUT::instance().getValues(day_num); + const auto & values = time_zone.getValues(day_num); m_year = values.year; m_month = values.month; m_day = values.day_of_month; } - explicit LocalDate(ExtendedDayNum day_num) + explicit LocalDate(ExtendedDayNum day_num, const DateLUTImpl & time_zone = DateLUT::instance()) { - const auto & values = DateLUT::instance().getValues(day_num); + const auto & values = time_zone.getValues(day_num); m_year = values.year; m_month = values.month; m_day = values.day_of_month; diff --git a/src/DataTypes/Serializations/SerializationDate.cpp b/src/DataTypes/Serializations/SerializationDate.cpp index 678817017e0..bc2057d549e 100644 --- a/src/DataTypes/Serializations/SerializationDate.cpp +++ b/src/DataTypes/Serializations/SerializationDate.cpp @@ -13,7 +13,7 @@ namespace DB void SerializationDate::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeDateText(DayNum(assert_cast(column).getData()[row_num]), ostr); + writeDateText(DayNum(assert_cast(column).getData()[row_num]), ostr, time_zone); } void SerializationDate::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -26,7 +26,7 @@ void SerializationDate::deserializeWholeText(IColumn & column, ReadBuffer & istr void SerializationDate::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings &) const { DayNum x; - readDateText(x, istr); + readDateText(x, istr, time_zone); assert_cast(column).getData().push_back(x); } @@ -46,7 +46,7 @@ void SerializationDate::deserializeTextQuoted(IColumn & column, ReadBuffer & ist { DayNum x; assertChar('\'', istr); - readDateText(x, istr); + readDateText(x, istr, time_zone); assertChar('\'', istr); assert_cast(column).getData().push_back(x); /// It's important to do this at the end - for exception safety. 
} @@ -62,7 +62,7 @@ void SerializationDate::deserializeTextJSON(IColumn & column, ReadBuffer & istr, { DayNum x; assertChar('"', istr); - readDateText(x, istr); + readDateText(x, istr, time_zone); assertChar('"', istr); assert_cast(column).getData().push_back(x); } @@ -80,5 +80,8 @@ void SerializationDate::deserializeTextCSV(IColumn & column, ReadBuffer & istr, readCSV(value, istr); assert_cast(column).getData().push_back(value); } +SerializationDate::SerializationDate(const TimezoneMixin & time_zone_) : TimezoneMixin(time_zone_) +{ +} } diff --git a/src/DataTypes/Serializations/SerializationDate.h b/src/DataTypes/Serializations/SerializationDate.h index 099d7444c3d..c4e57470673 100644 --- a/src/DataTypes/Serializations/SerializationDate.h +++ b/src/DataTypes/Serializations/SerializationDate.h @@ -1,13 +1,16 @@ #pragma once #include +#include namespace DB { -class SerializationDate final : public SerializationNumber +class SerializationDate final : public SerializationNumber, public TimezoneMixin { public: + explicit SerializationDate(const TimezoneMixin & time_zone_ = TimezoneMixin()); + void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override; void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 28002d34acc..645504df829 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -718,9 +718,9 @@ template <> struct FormatImpl { template - static ReturnType execute(const DataTypeDate::FieldType x, WriteBuffer & wb, const DataTypeDate *, const DateLUTImpl *) + static ReturnType execute(const DataTypeDate::FieldType x, WriteBuffer & wb, const DataTypeDate *, const DateLUTImpl * time_zone) { - writeDateText(DayNum(x), wb); + writeDateText(DayNum(x), wb, *time_zone); return ReturnType(true); } }; @@ -729,9 +729,9 @@ template <> struct FormatImpl { template - static ReturnType execute(const DataTypeDate32::FieldType x, WriteBuffer & wb, const DataTypeDate32 *, const DateLUTImpl *) + static ReturnType execute(const DataTypeDate32::FieldType x, WriteBuffer & wb, const DataTypeDate32 *, const DateLUTImpl * time_zone) { - writeDateText(ExtendedDayNum(x), wb); + writeDateText(ExtendedDayNum(x), wb, *time_zone); return ReturnType(true); } }; @@ -830,8 +830,8 @@ struct ConvertImpl(*col_with_type_and_name.type); const DateLUTImpl * time_zone = nullptr; - /// For argument of DateTime type, second argument with time zone could be specified. - if constexpr (std::is_same_v || std::is_same_v) + /// For argument of Date or DateTime type, second argument with time zone could be specified. 
+ if constexpr (std::is_same_v || std::is_same_v || std::is_same_v) { auto non_null_args = createBlockWithNestedColumns(arguments); time_zone = &extractTimeZoneFromFunctionArguments(non_null_args, 1, 0); diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 0b0a0640cb1..0127809c832 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -723,7 +723,7 @@ inline void convertToDayNum(DayNum & date, ExtendedDayNum & from) } template -inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf) +inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) { static constexpr bool throw_exception = std::is_same_v; @@ -734,13 +734,13 @@ inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf) else if (!readDateTextImpl(local_date, buf)) return false; - ExtendedDayNum ret = DateLUT::instance().makeDayNum(local_date.year(), local_date.month(), local_date.day()); + ExtendedDayNum ret = date_lut.makeDayNum(local_date.year(), local_date.month(), local_date.day()); convertToDayNum(date,ret); return ReturnType(true); } template -inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf) +inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) { static constexpr bool throw_exception = std::is_same_v; @@ -752,7 +752,7 @@ inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf) return false; /// When the parameter is out of rule or out of range, Date32 uses 1925-01-01 as the default value (-DateLUT::instance().getDayNumOffsetEpoch(), -16436) and Date uses 1970-01-01. - date = DateLUT::instance().makeDayNum(local_date.year(), local_date.month(), local_date.day(), -static_cast(DateLUT::instance().getDayNumOffsetEpoch())); + date = date_lut.makeDayNum(local_date.year(), local_date.month(), local_date.day(), -static_cast(date_lut.getDayNumOffsetEpoch())); return ReturnType(true); } @@ -762,14 +762,14 @@ inline void readDateText(LocalDate & date, ReadBuffer & buf) readDateTextImpl(date, buf); } -inline void readDateText(DayNum & date, ReadBuffer & buf) +inline void readDateText(DayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) { - readDateTextImpl(date, buf); + readDateTextImpl(date, buf, date_lut); } -inline void readDateText(ExtendedDayNum & date, ReadBuffer & buf) +inline void readDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) { - readDateTextImpl(date, buf); + readDateTextImpl(date, buf, date_lut); } inline bool tryReadDateText(LocalDate & date, ReadBuffer & buf) diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index e08e451e0a7..9ee11d3cc9f 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -694,15 +694,15 @@ inline void writeDateText(const LocalDate & date, WriteBuffer & buf) } template -inline void writeDateText(DayNum date, WriteBuffer & buf) +inline void writeDateText(DayNum date, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { - writeDateText(LocalDate(date), buf); + writeDateText(LocalDate(date, time_zone), buf); } template -inline void writeDateText(ExtendedDayNum date, WriteBuffer & buf) +inline void writeDateText(ExtendedDayNum date, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { - writeDateText(LocalDate(date), buf); + writeDateText(LocalDate(date, time_zone), buf); } /// In the format YYYY-MM-DD HH:MM:SS From 
2b08801ae9bc1ca456247282ebfe060a9df0bce4 Mon Sep 17 00:00:00 2001 From: zvonand Date: Fri, 5 May 2023 15:50:19 +0200 Subject: [PATCH 075/722] add timezone param --- src/IO/WriteHelpers.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index 9ee11d3cc9f..8a7cd72f79a 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -883,7 +883,7 @@ inline void writeText(is_enum auto x, WriteBuffer & buf) { writeText(magic_enum: inline void writeText(std::string_view x, WriteBuffer & buf) { writeString(x.data(), x.size(), buf); } -inline void writeText(const DayNum & x, WriteBuffer & buf) { writeDateText(LocalDate(x), buf); } +inline void writeText(const DayNum & x, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { writeDateText(LocalDate(x, time_zone), buf); } inline void writeText(const LocalDate & x, WriteBuffer & buf) { writeDateText(x, buf); } inline void writeText(const LocalDateTime & x, WriteBuffer & buf) { writeDateTimeText(x, buf); } inline void writeText(const UUID & x, WriteBuffer & buf) { writeUUIDText(x, buf); } From e1151f150f23e0bbcb52ae0a1a3ef01a0ecb97da Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Fri, 5 May 2023 18:37:25 +0000 Subject: [PATCH 076/722] Fix clang build errors --- src/Databases/DatabaseS3.cpp | 2 +- src/Databases/DatabaseS3.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Databases/DatabaseS3.cpp b/src/Databases/DatabaseS3.cpp index bc318ecd9bf..96616426475 100644 --- a/src/Databases/DatabaseS3.cpp +++ b/src/Databases/DatabaseS3.cpp @@ -247,7 +247,7 @@ DatabaseS3::Configuration DatabaseS3::parseArguments(ASTs engine_args, ContextPt } else { - auto supported_signature = + const std::string supported_signature = " - S3()\n" " - S3('url')\n" " - S3('url', 'NOSIGN')\n" diff --git a/src/Databases/DatabaseS3.h b/src/Databases/DatabaseS3.h index 65f80dca2ba..4e6910566df 100644 --- a/src/Databases/DatabaseS3.h +++ b/src/Databases/DatabaseS3.h @@ -57,7 +57,7 @@ public: static Configuration parseArguments(ASTs engine_args, ContextPtr context); protected: - StoragePtr getTableImpl(const String & url, ContextPtr context) const; + StoragePtr getTableImpl(const String & name, ContextPtr context) const; void addTable(const std::string & table_name, StoragePtr table_storage) const; From 18d1a4356d2ba1e7502d0ba207e6ac8f53fc3e02 Mon Sep 17 00:00:00 2001 From: tpanetti Date: Fri, 5 May 2023 12:19:35 -0700 Subject: [PATCH 077/722] Change SHOW COLUMNS query to display MySQL types in MySQL Compatibility mode This updates the SHOW COLUMN SQL query to display MySQL types when this query is issued by a client connected via MySQL Compatibility port --- .../InterpreterShowColumnsQuery.cpp | 78 ++++++- .../InterpreterShowColumnsQuery.h | 1 + ...show_columns_mysql_compatibility.reference | 213 ++++++++++++++++++ .../02726_show_columns_mysql_compatibility.sh | 115 ++++++++++ 4 files changed, 405 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference create mode 100755 tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh diff --git a/src/Interpreters/InterpreterShowColumnsQuery.cpp b/src/Interpreters/InterpreterShowColumnsQuery.cpp index 4474be21d8b..17ccafdd1ce 100644 --- a/src/Interpreters/InterpreterShowColumnsQuery.cpp +++ b/src/Interpreters/InterpreterShowColumnsQuery.cpp @@ -26,11 +26,17 @@ String InterpreterShowColumnsQuery::getRewrittenQuery() WriteBufferFromOwnString 
rewritten_query; - rewritten_query << "SELECT name AS field, type AS type, startsWith(type, 'Nullable') AS null, trim(concatWithSeparator(' ', if(is_in_primary_key, 'PRI', ''), if (is_in_sorting_key, 'SOR', ''))) AS key, if(default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, '' AS extra "; - // TODO Interpret query.extended. It is supposed to show internal/virtual columns. Need to fetch virtual column names, see // IStorage::getVirtuals(). We can't easily do that via SQL. + // If connected via MySQL Compatibility mode, convert ClickHouse types to MySQL + if (getContext()->getClientInfo().interface == DB::ClientInfo::Interface::MYSQL) + { + rewritten_query << getMySQLQuery(); + } + else { + rewritten_query << "SELECT name AS field, type AS type, startsWith(type, 'Nullable') AS null, trim(concatWithSeparator(' ', if(is_in_primary_key, 'PRI', ''), if (is_in_sorting_key, 'SOR', ''))) AS key, if(default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, '' AS extra "; + } if (query.full) { /// "Full" mode is mostly for MySQL compat @@ -93,6 +99,74 @@ String InterpreterShowColumnsQuery::getRewrittenQuery() } +String InterpreterShowColumnsQuery::getMySQLQuery() +{ + WriteBufferFromOwnString mysql_specific_query; + + mysql_specific_query << "SELECT name AS field, " + << "CASE " + << " WHEN startsWith(type, 'Nullable') THEN " + << " CASE " + << " WHEN substring(type, 10, length(type) - 10) IN ('UInt8', 'Int8') THEN 'tinyint' " + << " WHEN substring(type, 10, length(type) - 10) IN ('UInt16', 'Int16') THEN 'smallint' " + << " WHEN substring(type, 10, length(type) - 10) IN ('UInt32', 'Int32') THEN 'int' " + << " WHEN substring(type, 10, length(type) - 10) IN ('UInt64', 'Int64', 'UInt128', 'Int128', 'UInt256', 'Int256') THEN 'bigint' " + << " WHEN substring(type, 10, length(type) - 10) = 'Float32' THEN 'float' " + << " WHEN substring(type, 10, length(type) - 10) = 'Float64' THEN 'double' " + << " WHEN substring(type, 10, length(type) - 10) LIKE 'Decimal%' THEN 'decimal' " + << " WHEN substring(type, 10, length(type) - 10) = 'Boolean' THEN 'tinyint' " + << " WHEN substring(type, 10, length(type) - 10) = 'String' THEN 'text' " + << " WHEN substring(type, 10, length(type) - 10) LIKE 'FixedString%' THEN 'text' " + << " WHEN substring(type, 10, length(type) - 10) LIKE 'Date%' THEN 'date' " + << " WHEN substring(type, 10, length(type) - 10) LIKE 'DateTime%' THEN 'datetime' " + << " WHEN substring(type, 10, length(type) - 10) = 'JSON' THEN 'json' " + << " WHEN substring(type, 10, length(type) - 10) = 'UUID' THEN 'binary' " + << " WHEN substring(type, 10, length(type) - 10) LIKE 'Enum%' THEN 'enum' " + << " WHEN substring(type, 10, length(type) - 10) LIKE 'LowCardinality%' THEN 'text' " + << " WHEN substring(type, 10, length(type) - 10) LIKE 'Array%' THEN 'json' " + << " WHEN substring(type, 10, length(type) - 10) LIKE 'Map%' THEN 'json' " + << " WHEN substring(type, 10, length(type) - 10) IN ('SimpleAggregateFunction', 'AggregateFunction') THEN 'text' " + << " WHEN substring(type, 10, length(type) - 10) = 'Nested' THEN 'json' " + << " WHEN substring(type, 10, length(type) - 10) LIKE 'Tuple%' THEN 'json' " + << " WHEN substring(type, 10, length(type) - 10) LIKE 'IPv%' THEN 'text' " + << " WHEN substring(type, 10, length(type) - 10) IN ('Expression', 'Set', 'Nothing', 'Interval') THEN 'text' " + << " ELSE substring(type, 10, length(type) - 10) " + << " END " + << " ELSE " + << " CASE " + << " WHEN type IN ('UInt8', 'Int8') THEN 'tinyint' " 
+ << " WHEN type IN ('UInt16', 'Int16') THEN 'smallint' " + << " WHEN type IN ('UInt32', 'Int32') THEN 'int' " + << " WHEN type IN ('UInt64', 'Int64', 'UInt128', 'Int128', 'UInt256', 'Int256') THEN 'bigint' " + << " WHEN type = 'Float32' THEN 'float' " + << " WHEN type = 'Float64' THEN 'double' " + << " WHEN type LIKE 'Decimal%' THEN 'decimal' " + << " WHEN type = 'Boolean' THEN 'tinyint' " + << " WHEN type = 'String' THEN 'text' " + << " WHEN type LIKE 'FixedString%' THEN 'text' " + << " WHEN type LIKE 'Date%' THEN 'date' " + << " WHEN type LIKE 'DateTime%' THEN 'datetime' " + << " WHEN type = 'JSON' THEN 'json' " + << " WHEN type = 'UUID' THEN 'binary' " + << " WHEN type LIKE 'Enum%' THEN 'enum' " + << " WHEN type LIKE 'LowCardinality%' THEN 'text' " + << " WHEN type LIKE 'Array%' THEN 'json' " + << " WHEN type LIKE 'Map%' THEN 'json' " + << " WHEN type IN ('SimpleAggregateFunction', 'AggregateFunction') THEN 'text' " + << " WHEN type = 'Nested' THEN 'json' " + << " WHEN type LIKE 'Tuple%' THEN 'json' " + << " WHEN type LIKE 'IPv%' THEN 'text' " + << " WHEN type IN ('Expression', 'Set', 'Nothing', 'Interval') THEN 'text' " + << " ELSE type " + << " END " + << "END AS type, " + << "startsWith(type, 'Nullable') AS null, " + << "trim(concatWithSeparator(' ', if(is_in_primary_key, 'PRI', ''), if (is_in_sorting_key, 'SOR', ''))) AS key, " + << "if(default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, " + << "'' AS extra "; + + return mysql_specific_query.str(); +} BlockIO InterpreterShowColumnsQuery::execute() { diff --git a/src/Interpreters/InterpreterShowColumnsQuery.h b/src/Interpreters/InterpreterShowColumnsQuery.h index ee6dcabd97b..b843a163978 100644 --- a/src/Interpreters/InterpreterShowColumnsQuery.h +++ b/src/Interpreters/InterpreterShowColumnsQuery.h @@ -26,6 +26,7 @@ private: ASTPtr query_ptr; String getRewrittenQuery(); + String getMySQLQuery(); }; diff --git a/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference b/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference new file mode 100644 index 00000000000..c9ad94a34c4 --- /dev/null +++ b/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference @@ -0,0 +1,213 @@ +Drop tables if they exist +Create tab table +Create pseudo-random database name +Create tab duplicate table +Run MySQL test +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_int json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uint64 bigint 0 PRI SOR NULL +uuid_value binary 0 NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_int json 0 NULL 
+nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uint64 bigint 0 PRI SOR NULL +uuid_value binary 0 NULL +field type null key default extra collation comment privileges +array_value json 0 NULL NULL +boolean_value tinyint 0 NULL NULL +date32_value date 0 NULL NULL +date_value date 0 NULL NULL +datetime64_value date 0 NULL NULL +datetime_value date 0 NULL NULL +decimal_value decimal 0 NULL NULL +enum_value enum 0 NULL NULL +fixed_string_value text 0 NULL NULL +float32 float 0 NULL NULL +float64 double 0 NULL NULL +int32 int 0 NULL NULL +ipv4_value text 0 NULL NULL +ipv6_value text 0 NULL NULL +json_value text 0 NULL NULL +low_cardinality text 0 NULL NULL +map_value json 0 NULL NULL +nested.nested_int json 0 NULL NULL +nested.nested_string json 0 NULL NULL +nullable_value int 0 NULL NULL +string_value text 0 NULL NULL +tuple_value json 0 NULL NULL +uint64 bigint 0 PRI SOR NULL NULL +uuid_value binary 0 NULL NULL +field type null key default extra +int32 int 0 NULL +nested.nested_int json 0 NULL +uint64 bigint 0 PRI SOR NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uuid_value binary 0 NULL +field type null key default extra +int32 int 0 NULL +nested.nested_int json 0 NULL +uint64 bigint 0 PRI SOR NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uuid_value binary 0 NULL +field type null key default extra +int32 int 0 NULL +nested.nested_int json 0 NULL +uint64 bigint 0 PRI SOR NULL +field type null key default extra +array_value json 0 NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_int json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uint64 bigint 0 PRI SOR NULL +uuid_value binary 0 NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL 
+float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_int json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uint64 bigint 0 PRI SOR NULL +uuid_value binary 0 NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_int json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uint64 bigint 0 PRI SOR NULL +uuid_value binary 0 NULL diff --git a/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh new file mode 100755 index 00000000000..5324496edd3 --- /dev/null +++ b/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +# This script tests the MySQL compatibility of the SHOW COLUMNS command in ClickHouse +USER="default" +PASSWORD="" +HOST="127.0.0.1" +PORT=9004 + +# First run the clickhouse test to create the ClickHouse Tables + +echo "Drop tables if they exist" +${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS tab" +${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde" +${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde.tab" + +echo "Create tab table " +${CLICKHOUSE_LOCAL} --query " + CREATE TABLE tab + ( + uint64 UInt64, + int32 Nullable(Int32), + float32 Float32, + float64 Float64, + decimal_value Decimal(10, 2), + boolean_value UInt8, -- Use 0 for false, 1 for true + string_value String, + fixed_string_value FixedString(10), + date_value Date, + date32_value Date32, + datetime_value DateTime, + datetime64_value DateTime64(3), + json_value String, -- Store JSON as a string + uuid_value UUID, + enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), + low_cardinality LowCardinality(String), + array_value Array(Int32), + map_value Map(String, Int32), + tuple_value Tuple(Int32, String), + nullable_value Nullable(Int32), + ipv4_value IPv4, + ipv6_value IPv6, + nested Nested + ( + nested_int Int32, + nested_string String + ) + ) ENGINE = MergeTree + ORDER BY uint64; + " + + +echo "Create pseudo-random database name" +${CLICKHOUSE_LOCAL} --query "CREATE DATABASE database_123456789abcde;" + +echo "Create tab duplicate table" +${CLICKHOUSE_LOCAL} --query " + CREATE TABLE database_123456789abcde.tab + ( + uint64 UInt64, + int32 Nullable(Int32), + float32 Float32, + float64 Float64, + decimal_value Decimal(10, 2), + boolean_value UInt8, -- Use 0 for false, 1 for true + string_value String, + fixed_string_value FixedString(10), + date_value Date, + date32_value Date32, + datetime_value DateTime, + datetime64_value DateTime64(3), + json_value String, -- Store JSON as a string + uuid_value UUID, + enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), + low_cardinality LowCardinality(String), + array_value Array(Int32), + map_value Map(String, Int32), + tuple_value Tuple(Int32, String), + nullable_value Nullable(Int32), + 
ipv4_value IPv4, + ipv6_value IPv6, + nested Nested + ( + nested_int Int32, + nested_string String + ) + ) ENGINE = MergeTree + ORDER BY uint64; + " + +# Write sql to temp file +TEMP_FILE=$(mktemp) + +cat < $TEMP_FILE +SHOW COLUMNS FROM tab; +SHOW EXTENDED COLUMNS FROM tab; +SHOW FULL COLUMNS FROM tab; +SHOW COLUMNS FROM tab LIKE '%int%'; +SHOW COLUMNS FROM tab NOT LIKE '%int%'; +SHOW COLUMNS FROM tab ILIKE '%INT%'; +SHOW COLUMNS FROM tab NOT ILIKE '%INT%'; +SHOW COLUMNS FROM tab WHERE field LIKE '%int%'; +SHOW COLUMNS FROM tab LIMIT 1; +SHOW COLUMNS FROM tab; +SHOW COLUMNS FROM tab FROM database_123456789abcde; +SHOW COLUMNS FROM database_123456789abcde.tab; +DROP DATABASE database_123456789abcde; +DROP TABLE tab; +EOT + +# Now run the MySQL test script on the ClickHouse DB +echo "Run MySQL test" +mysql --user="$USER" --password="$PASSWORD" --host="$HOST" --port="$PORT" < $TEMP_FILE + +# Clean up the temp file +rm $TEMP_FILE From 3fedd683ef97e61ebcc17b2f8b38feb297fbc26c Mon Sep 17 00:00:00 2001 From: zvonand Date: Mon, 8 May 2023 22:28:31 +0200 Subject: [PATCH 078/722] speedup vol. II --- .../Serializations/SerializationDate.cpp | 2 +- src/Functions/FunctionsConversion.h | 16 ++++---- src/IO/ReadHelpers.h | 40 +++++++++++++++---- 3 files changed, 42 insertions(+), 16 deletions(-) diff --git a/src/DataTypes/Serializations/SerializationDate.cpp b/src/DataTypes/Serializations/SerializationDate.cpp index bc2057d549e..8b4956f7826 100644 --- a/src/DataTypes/Serializations/SerializationDate.cpp +++ b/src/DataTypes/Serializations/SerializationDate.cpp @@ -77,7 +77,7 @@ void SerializationDate::serializeTextCSV(const IColumn & column, size_t row_num, void SerializationDate::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings &) const { DayNum value; - readCSV(value, istr); + readCSV(value, istr, time_zone); assert_cast(column).getData().push_back(value); } SerializationDate::SerializationDate(const TimezoneMixin & time_zone_) : TimezoneMixin(time_zone_) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 0f2d49f2557..6af5c44eb5e 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -981,18 +981,18 @@ void parseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTI } template <> -inline void parseImpl(DataTypeDate::FieldType & x, ReadBuffer & rb, const DateLUTImpl *) +inline void parseImpl(DataTypeDate::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone) { DayNum tmp(0); - readDateText(tmp, rb); + readDateText(tmp, rb, *time_zone); x = tmp; } template <> -inline void parseImpl(DataTypeDate32::FieldType & x, ReadBuffer & rb, const DateLUTImpl *) +inline void parseImpl(DataTypeDate32::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone) { ExtendedDayNum tmp(0); - readDateText(tmp, rb); + readDateText(tmp, rb, *time_zone); x = tmp; } @@ -1040,20 +1040,20 @@ bool tryParseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateL } template <> -inline bool tryParseImpl(DataTypeDate::FieldType & x, ReadBuffer & rb, const DateLUTImpl *) +inline bool tryParseImpl(DataTypeDate::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone) { DayNum tmp(0); - if (!tryReadDateText(tmp, rb)) + if (!tryReadDateText(tmp, rb, *time_zone)) return false; x = tmp; return true; } template <> -inline bool tryParseImpl(DataTypeDate32::FieldType & x, ReadBuffer & rb, const DateLUTImpl *) +inline bool tryParseImpl(DataTypeDate32::FieldType & x, ReadBuffer & rb, const 
DateLUTImpl * time_zone) { ExtendedDayNum tmp(0); - if (!tryReadDateText(tmp, rb)) + if (!tryReadDateText(tmp, rb, *time_zone)) return false; x = tmp; return true; diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 16c28b89667..f9e21418a41 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -760,14 +760,14 @@ inline bool tryReadDateText(LocalDate & date, ReadBuffer & buf) return readDateTextImpl(date, buf); } -inline bool tryReadDateText(DayNum & date, ReadBuffer & buf) +inline bool tryReadDateText(DayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { - return readDateTextImpl(date, buf); + return readDateTextImpl(date, buf, time_zone); } -inline bool tryReadDateText(ExtendedDayNum & date, ReadBuffer & buf) +inline bool tryReadDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { - return readDateTextImpl(date, buf); + return readDateTextImpl(date, buf, time_zone); } template @@ -1160,7 +1160,7 @@ inline void readText(is_floating_point auto & x, ReadBuffer & buf) { readFloatTe inline void readText(String & x, ReadBuffer & buf) { readEscapedString(x, buf); } inline void readText(LocalDate & x, ReadBuffer & buf) { readDateText(x, buf); } -inline void readText(DayNum & x, ReadBuffer & buf) { readDateText(x, buf); } +inline void readText(DayNum & x, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { readDateText(x, buf, time_zone); } inline void readText(LocalDateTime & x, ReadBuffer & buf) { readDateTimeText(x, buf); } inline void readText(UUID & x, ReadBuffer & buf) { readUUIDText(x, buf); } inline void readText(IPv4 & x, ReadBuffer & buf) { readIPv4Text(x, buf); } @@ -1172,6 +1172,10 @@ template requires is_arithmetic_v inline void readQuoted(T & x, ReadBuffer & buf) { readText(x, buf); } +template +requires is_arithmetic_v +inline void readQuoted(T & x, ReadBuffer & buf, const DateLUTImpl & time_zone) { readText(x, buf, time_zone); } + inline void readQuoted(String & x, ReadBuffer & buf) { readQuotedString(x, buf); } inline void readQuoted(LocalDate & x, ReadBuffer & buf) @@ -1214,6 +1218,10 @@ template requires is_arithmetic_v inline void readDoubleQuoted(T & x, ReadBuffer & buf) { readText(x, buf); } +template + requires is_arithmetic_v +inline void readDoubleQuoted(T & x, ReadBuffer & buf, const DateLUTImpl & time_zone) { readText(x, buf, time_zone); } + inline void readDoubleQuoted(String & x, ReadBuffer & buf) { readDoubleQuotedString(x, buf); } inline void readDoubleQuoted(LocalDate & x, ReadBuffer & buf) @@ -1230,7 +1238,7 @@ inline void readDoubleQuoted(LocalDateTime & x, ReadBuffer & buf) assertChar('"', buf); } -/// CSV, for numbers, dates: quotes are optional, no special escaping rules. +/// CSV for numbers: quotes are optional, no special escaping rules. 
template inline void readCSVSimple(T & x, ReadBuffer & buf) { @@ -1248,6 +1256,24 @@ inline void readCSVSimple(T & x, ReadBuffer & buf) assertChar(maybe_quote, buf); } +// standalone overload for dates: to avoid instantiating DateLUTs while parsing other types +template +inline void readCSVSimple(T & x, ReadBuffer & buf, const DateLUTImpl & time_zone) +{ + if (buf.eof()) [[unlikely]] + throwReadAfterEOF(); + + char maybe_quote = *buf.position(); + + if (maybe_quote == '\'' || maybe_quote == '\"') + ++buf.position(); + + readText(x, buf, time_zone); + + if (maybe_quote == '\'' || maybe_quote == '\"') + assertChar(maybe_quote, buf); +} + template requires is_arithmetic_v inline void readCSV(T & x, ReadBuffer & buf) @@ -1257,7 +1283,7 @@ inline void readCSV(T & x, ReadBuffer & buf) inline void readCSV(String & x, ReadBuffer & buf, const FormatSettings::CSV & settings) { readCSVString(x, buf, settings); } inline void readCSV(LocalDate & x, ReadBuffer & buf) { readCSVSimple(x, buf); } -inline void readCSV(DayNum & x, ReadBuffer & buf) { readCSVSimple(x, buf); } +inline void readCSV(DayNum & x, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { readCSVSimple(x, buf, time_zone); } inline void readCSV(LocalDateTime & x, ReadBuffer & buf) { readCSVSimple(x, buf); } inline void readCSV(UUID & x, ReadBuffer & buf) { readCSVSimple(x, buf); } inline void readCSV(IPv4 & x, ReadBuffer & buf) { readCSVSimple(x, buf); } From 1751ccc7aca3830d21a06ec4f09bd28bf9254f79 Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 9 May 2023 14:18:04 +0200 Subject: [PATCH 079/722] fix stateless --- src/Functions/FunctionsConversion.h | 15 ++++++++++++--- src/IO/ReadHelpers.h | 12 ++++++++---- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 6af5c44eb5e..e0e188f68c2 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -439,7 +439,7 @@ struct ToDate32Transform32Or64Signed static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { - static const Int32 daynum_min_offset = -static_cast(DateLUT::instance().getDayNumOffsetEpoch()); + static const Int32 daynum_min_offset = -static_cast(time_zone.getDayNumOffsetEpoch()); if (from < daynum_min_offset) return daynum_min_offset; return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) @@ -830,8 +830,11 @@ struct ConvertImpl(*col_with_type_and_name.type); const DateLUTImpl * time_zone = nullptr; + + if constexpr (std::is_same_v) + time_zone = &DateLUT::instance(); /// For argument of Date or DateTime type, second argument with time zone could be specified. - if constexpr (std::is_same_v || std::is_same_v || std::is_same_v) + if constexpr (std::is_same_v || std::is_same_v) { auto non_null_args = createBlockWithNestedColumns(arguments); time_zone = &extractTimeZoneFromFunctionArguments(non_null_args, 1, 0); @@ -1193,7 +1196,7 @@ struct ConvertThroughParsing const DateLUTImpl * local_time_zone [[maybe_unused]] = nullptr; const DateLUTImpl * utc_time_zone [[maybe_unused]] = nullptr; - /// For conversion to DateTime type, second argument with time zone could be specified. + /// For conversion to Date or DateTime type, second argument with time zone could be specified. 
if constexpr (std::is_same_v || to_datetime64) { const auto result_type = removeNullable(res_type); @@ -1206,6 +1209,12 @@ struct ConvertThroughParsing if constexpr (parsing_mode == ConvertFromStringParsingMode::BestEffort || parsing_mode == ConvertFromStringParsingMode::BestEffortUS) utc_time_zone = &DateLUT::instance("UTC"); } + else if constexpr (std::is_same_v || std::is_same_v) + { + // Timezone is more or less dummy when parsing Date/Date32 from string. + local_time_zone = &DateLUT::instance(); + utc_time_zone = &DateLUT::instance("UTC"); + } const IColumn * col_from = arguments[0].column.get(); const ColumnString * col_from_string = checkAndGetColumn(col_from); diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index f9e21418a41..ea565d11914 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -718,7 +718,7 @@ inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLU return false; ExtendedDayNum ret = date_lut.makeDayNum(local_date.year(), local_date.month(), local_date.day()); - convertToDayNum(date,ret); + convertToDayNum(date, ret); return ReturnType(true); } @@ -1159,8 +1159,11 @@ inline bool tryReadText(IPv6 & x, ReadBuffer & buf) { return tryReadIPv6Text(x, inline void readText(is_floating_point auto & x, ReadBuffer & buf) { readFloatText(x, buf); } inline void readText(String & x, ReadBuffer & buf) { readEscapedString(x, buf); } + +inline void readText(DayNum & x, ReadBuffer & buf) { readDateText(x, buf); } +inline void readText(DayNum & x, ReadBuffer & buf, const DateLUTImpl & time_zone) { readDateText(x, buf, time_zone); } + inline void readText(LocalDate & x, ReadBuffer & buf) { readDateText(x, buf); } -inline void readText(DayNum & x, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { readDateText(x, buf, time_zone); } inline void readText(LocalDateTime & x, ReadBuffer & buf) { readDateTimeText(x, buf); } inline void readText(UUID & x, ReadBuffer & buf) { readUUIDText(x, buf); } inline void readText(IPv4 & x, ReadBuffer & buf) { readIPv4Text(x, buf); } @@ -1219,7 +1222,7 @@ requires is_arithmetic_v inline void readDoubleQuoted(T & x, ReadBuffer & buf) { readText(x, buf); } template - requires is_arithmetic_v +requires is_arithmetic_v inline void readDoubleQuoted(T & x, ReadBuffer & buf, const DateLUTImpl & time_zone) { readText(x, buf, time_zone); } inline void readDoubleQuoted(String & x, ReadBuffer & buf) { readDoubleQuotedString(x, buf); } @@ -1283,7 +1286,8 @@ inline void readCSV(T & x, ReadBuffer & buf) inline void readCSV(String & x, ReadBuffer & buf, const FormatSettings::CSV & settings) { readCSVString(x, buf, settings); } inline void readCSV(LocalDate & x, ReadBuffer & buf) { readCSVSimple(x, buf); } -inline void readCSV(DayNum & x, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { readCSVSimple(x, buf, time_zone); } +inline void readCSV(DayNum & x, ReadBuffer & buf) { readCSVSimple(x, buf); } +inline void readCSV(DayNum & x, ReadBuffer & buf, const DateLUTImpl & time_zone) { readCSVSimple(x, buf, time_zone); } inline void readCSV(LocalDateTime & x, ReadBuffer & buf) { readCSVSimple(x, buf); } inline void readCSV(UUID & x, ReadBuffer & buf) { readCSVSimple(x, buf); } inline void readCSV(IPv4 & x, ReadBuffer & buf) { readCSVSimple(x, buf); } From ddbad79c5e67518acebbacaad5be0cad3967ac67 Mon Sep 17 00:00:00 2001 From: tpanetti Date: Fri, 5 May 2023 12:19:35 -0700 Subject: [PATCH 080/722] Change SHOW COLUMNS query to display MySQL types in MySQL Compatibility mode This updates the 
SHOW COLUMN SQL query to display MySQL types when this query is issued by a client connected via MySQL Compatibility port --- .../InterpreterShowColumnsQuery.cpp | 76 +++++++ .../InterpreterShowColumnsQuery.h | 1 + ...show_columns_mysql_compatibility.reference | 213 ++++++++++++++++++ .../02726_show_columns_mysql_compatibility.sh | 115 ++++++++++ 4 files changed, 405 insertions(+) create mode 100644 tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference create mode 100755 tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh diff --git a/src/Interpreters/InterpreterShowColumnsQuery.cpp b/src/Interpreters/InterpreterShowColumnsQuery.cpp index c86d3c753c4..c545c621abb 100644 --- a/src/Interpreters/InterpreterShowColumnsQuery.cpp +++ b/src/Interpreters/InterpreterShowColumnsQuery.cpp @@ -45,6 +45,14 @@ SELECT // TODO Interpret query.extended. It is supposed to show internal/virtual columns. Need to fetch virtual column names, see // IStorage::getVirtuals(). We can't easily do that via SQL. + // If connected via MySQL Compatibility mode, convert ClickHouse types to MySQL + if (getContext()->getClientInfo().interface == DB::ClientInfo::Interface::MYSQL) + { + rewritten_query += getMySQLQuery(); + } + else { + rewritten_query += "SELECT name AS field, type AS type, startsWith(type, 'Nullable') AS null, trim(concatWithSeparator(' ', if(is_in_primary_key, 'PRI', ''), if (is_in_sorting_key, 'SOR', ''))) AS key, if(default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, '' AS extra "; + } if (query.full) { /// "Full" mode is mostly for MySQL compat @@ -88,6 +96,74 @@ WHERE return rewritten_query; } +String InterpreterShowColumnsQuery::getMySQLQuery() +{ + String mysql_specific_query; + + mysql_specific_query = R"(SELECT name AS field, + CASE + WHEN startsWith(type, 'Nullable') THEN + CASE + WHEN substring(type, 10, length(type) - 10) IN ('UInt8', 'Int8') THEN 'tinyint' + WHEN substring(type, 10, length(type) - 10) IN ('UInt16', 'Int16') THEN 'smallint' + WHEN substring(type, 10, length(type) - 10) IN ('UInt32', 'Int32') THEN 'int' + WHEN substring(type, 10, length(type) - 10) IN ('UInt64', 'Int64', 'UInt128', 'Int128', 'UInt256', 'Int256') THEN 'bigint' + WHEN substring(type, 10, length(type) - 10) = 'Float32' THEN 'float' + WHEN substring(type, 10, length(type) - 10) = 'Float64' THEN 'double' + WHEN substring(type, 10, length(type) - 10) LIKE 'Decimal%' THEN 'decimal' + WHEN substring(type, 10, length(type) - 10) = 'Boolean' THEN 'tinyint' + WHEN substring(type, 10, length(type) - 10) = 'String' THEN 'text' + WHEN substring(type, 10, length(type) - 10) LIKE 'FixedString%' THEN 'text' + WHEN substring(type, 10, length(type) - 10) LIKE 'Date%' THEN 'date' + WHEN substring(type, 10, length(type) - 10) LIKE 'DateTime%' THEN 'datetime' + WHEN substring(type, 10, length(type) - 10) = 'JSON' THEN 'json' + WHEN substring(type, 10, length(type) - 10) = 'UUID' THEN 'binary' + WHEN substring(type, 10, length(type) - 10) LIKE 'Enum%' THEN 'enum' + WHEN substring(type, 10, length(type) - 10) LIKE 'LowCardinality%' THEN 'text' + WHEN substring(type, 10, length(type) - 10) LIKE 'Array%' THEN 'json' + WHEN substring(type, 10, length(type) - 10) LIKE 'Map%' THEN 'json' + WHEN substring(type, 10, length(type) - 10) IN ('SimpleAggregateFunction', 'AggregateFunction') THEN 'text' + WHEN substring(type, 10, length(type) - 10) = 'Nested' THEN 'json' + WHEN substring(type, 10, length(type) - 10) LIKE 'Tuple%' THEN 'json' + WHEN substring(type, 10, 
length(type) - 10) LIKE 'IPv%' THEN 'text' + WHEN substring(type, 10, length(type) - 10) IN ('Expression', 'Set', 'Nothing', 'Interval') THEN 'text' + ELSE substring(type, 10, length(type) - 10) + END + ELSE + CASE + WHEN type IN ('UInt8', 'Int8') THEN 'tinyint' + WHEN type IN ('UInt16', 'Int16') THEN 'smallint' + WHEN type IN ('UInt32', 'Int32') THEN 'int' + WHEN type IN ('UInt64', 'Int64', 'UInt128', 'Int128', 'UInt256', 'Int256') THEN 'bigint' + WHEN type = 'Float32' THEN 'float' + WHEN type = 'Float64' THEN 'double' + WHEN type LIKE 'Decimal%' THEN 'decimal' + WHEN type = 'Boolean' THEN 'tinyint' + WHEN type = 'String' THEN 'text' + WHEN type LIKE 'FixedString%' THEN 'text' + WHEN type LIKE 'Date%' THEN 'date' + WHEN type LIKE 'DateTime%' THEN 'datetime' + WHEN type = 'JSON' THEN 'json' + WHEN type = 'UUID' THEN 'binary' + WHEN type LIKE 'Enum%' THEN 'enum' + WHEN type LIKE 'LowCardinality%' THEN 'text' + WHEN type LIKE 'Array%' THEN 'json' + WHEN type LIKE 'Map%' THEN 'json' + WHEN type IN ('SimpleAggregateFunction', 'AggregateFunction') THEN 'text' + WHEN type = 'Nested' THEN 'json' + WHEN type LIKE 'Tuple%' THEN 'json' + WHEN type LIKE 'IPv%' THEN 'text' + WHEN type IN ('Expression', 'Set', 'Nothing', 'Interval') THEN 'text' + ELSE type + END + END AS type, + startsWith(type, 'Nullable') AS null, + trim(concatWithSeparator(' ', if(is_in_primary_key, 'PRI', ''), if (is_in_sorting_key, 'SOR', ''))) AS key, + if(default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, + '' AS extra )"; + + return mysql_specific_query.str(); +} BlockIO InterpreterShowColumnsQuery::execute() { diff --git a/src/Interpreters/InterpreterShowColumnsQuery.h b/src/Interpreters/InterpreterShowColumnsQuery.h index ee6dcabd97b..b843a163978 100644 --- a/src/Interpreters/InterpreterShowColumnsQuery.h +++ b/src/Interpreters/InterpreterShowColumnsQuery.h @@ -26,6 +26,7 @@ private: ASTPtr query_ptr; String getRewrittenQuery(); + String getMySQLQuery(); }; diff --git a/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference b/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference new file mode 100644 index 00000000000..c9ad94a34c4 --- /dev/null +++ b/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference @@ -0,0 +1,213 @@ +Drop tables if they exist +Create tab table +Create pseudo-random database name +Create tab duplicate table +Run MySQL test +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_int json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uint64 bigint 0 PRI SOR NULL +uuid_value binary 0 NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL 
+low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_int json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uint64 bigint 0 PRI SOR NULL +uuid_value binary 0 NULL +field type null key default extra collation comment privileges +array_value json 0 NULL NULL +boolean_value tinyint 0 NULL NULL +date32_value date 0 NULL NULL +date_value date 0 NULL NULL +datetime64_value date 0 NULL NULL +datetime_value date 0 NULL NULL +decimal_value decimal 0 NULL NULL +enum_value enum 0 NULL NULL +fixed_string_value text 0 NULL NULL +float32 float 0 NULL NULL +float64 double 0 NULL NULL +int32 int 0 NULL NULL +ipv4_value text 0 NULL NULL +ipv6_value text 0 NULL NULL +json_value text 0 NULL NULL +low_cardinality text 0 NULL NULL +map_value json 0 NULL NULL +nested.nested_int json 0 NULL NULL +nested.nested_string json 0 NULL NULL +nullable_value int 0 NULL NULL +string_value text 0 NULL NULL +tuple_value json 0 NULL NULL +uint64 bigint 0 PRI SOR NULL NULL +uuid_value binary 0 NULL NULL +field type null key default extra +int32 int 0 NULL +nested.nested_int json 0 NULL +uint64 bigint 0 PRI SOR NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uuid_value binary 0 NULL +field type null key default extra +int32 int 0 NULL +nested.nested_int json 0 NULL +uint64 bigint 0 PRI SOR NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uuid_value binary 0 NULL +field type null key default extra +int32 int 0 NULL +nested.nested_int json 0 NULL +uint64 bigint 0 PRI SOR NULL +field type null key default extra +array_value json 0 NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_int json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uint64 bigint 0 PRI SOR NULL +uuid_value binary 0 NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL 
+enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_int json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uint64 bigint 0 PRI SOR NULL +uuid_value binary 0 NULL +field type null key default extra +array_value json 0 NULL +boolean_value tinyint 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value date 0 NULL +datetime_value date 0 NULL +decimal_value decimal 0 NULL +enum_value enum 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value text 0 NULL +low_cardinality text 0 NULL +map_value json 0 NULL +nested.nested_int json 0 NULL +nested.nested_string json 0 NULL +nullable_value int 0 NULL +string_value text 0 NULL +tuple_value json 0 NULL +uint64 bigint 0 PRI SOR NULL +uuid_value binary 0 NULL diff --git a/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh new file mode 100755 index 00000000000..5324496edd3 --- /dev/null +++ b/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +# This script tests the MySQL compatibility of the SHOW COLUMNS command in ClickHouse +USER="default" +PASSWORD="" +HOST="127.0.0.1" +PORT=9004 + +# First run the clickhouse test to create the ClickHouse Tables + +echo "Drop tables if they exist" +${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS tab" +${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde" +${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde.tab" + +echo "Create tab table " +${CLICKHOUSE_LOCAL} --query " + CREATE TABLE tab + ( + uint64 UInt64, + int32 Nullable(Int32), + float32 Float32, + float64 Float64, + decimal_value Decimal(10, 2), + boolean_value UInt8, -- Use 0 for false, 1 for true + string_value String, + fixed_string_value FixedString(10), + date_value Date, + date32_value Date32, + datetime_value DateTime, + datetime64_value DateTime64(3), + json_value String, -- Store JSON as a string + uuid_value UUID, + enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), + low_cardinality LowCardinality(String), + array_value Array(Int32), + map_value Map(String, Int32), + tuple_value Tuple(Int32, String), + nullable_value Nullable(Int32), + ipv4_value IPv4, + ipv6_value IPv6, + nested Nested + ( + nested_int Int32, + nested_string String + ) + ) ENGINE = MergeTree + ORDER BY uint64; + " + + +echo "Create pseudo-random database name" +${CLICKHOUSE_LOCAL} --query "CREATE DATABASE database_123456789abcde;" + +echo "Create tab duplicate table" +${CLICKHOUSE_LOCAL} --query " + CREATE TABLE database_123456789abcde.tab + ( + uint64 UInt64, + int32 Nullable(Int32), + float32 Float32, + float64 Float64, + decimal_value Decimal(10, 2), + boolean_value UInt8, -- Use 0 for false, 1 for true + string_value String, + fixed_string_value FixedString(10), + date_value Date, + date32_value Date32, + datetime_value DateTime, + datetime64_value DateTime64(3), + json_value String, -- Store JSON as a string + uuid_value UUID, + enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), + low_cardinality LowCardinality(String), + array_value Array(Int32), + map_value Map(String, Int32), + 
tuple_value Tuple(Int32, String), + nullable_value Nullable(Int32), + ipv4_value IPv4, + ipv6_value IPv6, + nested Nested + ( + nested_int Int32, + nested_string String + ) + ) ENGINE = MergeTree + ORDER BY uint64; + " + +# Write sql to temp file +TEMP_FILE=$(mktemp) + +cat < $TEMP_FILE +SHOW COLUMNS FROM tab; +SHOW EXTENDED COLUMNS FROM tab; +SHOW FULL COLUMNS FROM tab; +SHOW COLUMNS FROM tab LIKE '%int%'; +SHOW COLUMNS FROM tab NOT LIKE '%int%'; +SHOW COLUMNS FROM tab ILIKE '%INT%'; +SHOW COLUMNS FROM tab NOT ILIKE '%INT%'; +SHOW COLUMNS FROM tab WHERE field LIKE '%int%'; +SHOW COLUMNS FROM tab LIMIT 1; +SHOW COLUMNS FROM tab; +SHOW COLUMNS FROM tab FROM database_123456789abcde; +SHOW COLUMNS FROM database_123456789abcde.tab; +DROP DATABASE database_123456789abcde; +DROP TABLE tab; +EOT + +# Now run the MySQL test script on the ClickHouse DB +echo "Run MySQL test" +mysql --user="$USER" --password="$PASSWORD" --host="$HOST" --port="$PORT" < $TEMP_FILE + +# Clean up the temp file +rm $TEMP_FILE From 297188ce583a94f9942f7fd141a85dbdcfdcd587 Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 9 May 2023 22:37:25 +0200 Subject: [PATCH 081/722] fix Date32 --- src/DataTypes/Serializations/SerializationDate32.cpp | 11 +++++++---- src/DataTypes/Serializations/SerializationDate32.h | 5 ++++- src/Functions/FunctionsConversion.h | 7 +++++-- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/src/DataTypes/Serializations/SerializationDate32.cpp b/src/DataTypes/Serializations/SerializationDate32.cpp index ef92202f89d..8dcaee8d266 100644 --- a/src/DataTypes/Serializations/SerializationDate32.cpp +++ b/src/DataTypes/Serializations/SerializationDate32.cpp @@ -11,7 +11,7 @@ namespace DB void SerializationDate32::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeDateText(ExtendedDayNum(assert_cast(column).getData()[row_num]), ostr); + writeDateText(ExtendedDayNum(assert_cast(column).getData()[row_num]), ostr, time_zone); } void SerializationDate32::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -24,7 +24,7 @@ void SerializationDate32::deserializeWholeText(IColumn & column, ReadBuffer & is void SerializationDate32::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings &) const { ExtendedDayNum x; - readDateText(x, istr); + readDateText(x, istr, time_zone); assert_cast(column).getData().push_back(x); } @@ -44,7 +44,7 @@ void SerializationDate32::deserializeTextQuoted(IColumn & column, ReadBuffer & i { ExtendedDayNum x; assertChar('\'', istr); - readDateText(x, istr); + readDateText(x, istr, time_zone); assertChar('\'', istr); assert_cast(column).getData().push_back(x); /// It's important to do this at the end - for exception safety. 
} @@ -60,7 +60,7 @@ void SerializationDate32::deserializeTextJSON(IColumn & column, ReadBuffer & ist { ExtendedDayNum x; assertChar('"', istr); - readDateText(x, istr); + readDateText(x, istr, time_zone); assertChar('"', istr); assert_cast(column).getData().push_back(x); } @@ -78,4 +78,7 @@ void SerializationDate32::deserializeTextCSV(IColumn & column, ReadBuffer & istr readCSV(value, istr); assert_cast(column).getData().push_back(value.getExtenedDayNum()); } +SerializationDate32::SerializationDate32(const TimezoneMixin & time_zone_) : TimezoneMixin(time_zone_) +{ +} } diff --git a/src/DataTypes/Serializations/SerializationDate32.h b/src/DataTypes/Serializations/SerializationDate32.h index 484b4f4a958..e8e8f1a74d6 100644 --- a/src/DataTypes/Serializations/SerializationDate32.h +++ b/src/DataTypes/Serializations/SerializationDate32.h @@ -1,12 +1,15 @@ #pragma once #include +#include namespace DB { -class SerializationDate32 final : public SerializationNumber +class SerializationDate32 final : public SerializationNumber, public TimezoneMixin { public: + explicit SerializationDate32(const TimezoneMixin & time_zone_ = TimezoneMixin()); + void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override; void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index e0e188f68c2..2f751e72222 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -731,7 +731,10 @@ struct FormatImpl template static ReturnType execute(const DataTypeDate32::FieldType x, WriteBuffer & wb, const DataTypeDate32 *, const DateLUTImpl * time_zone) { + std::cerr << "BEFORE: " << std::endl; + std::cerr << time_zone->getTimeZone() << std::endl; writeDateText(ExtendedDayNum(x), wb, *time_zone); + std::cerr << "AFTER" << std::endl; return ReturnType(true); } }; @@ -831,7 +834,7 @@ struct ConvertImpl) + if constexpr (std::is_same_v || std::is_same_v) time_zone = &DateLUT::instance(); /// For argument of Date or DateTime type, second argument with time zone could be specified. if constexpr (std::is_same_v || std::is_same_v) @@ -1765,7 +1768,7 @@ public: || std::is_same_v // toDate(value[, timezone : String]) || std::is_same_v // TODO: shall we allow timestamp argument for toDate? DateTime knows nothing about timezones and this argument is ignored below. 
- // toDate(value[, timezone : String]) + // toDate32(value[, timezone : String]) || std::is_same_v // toDateTime(value[, timezone: String]) || std::is_same_v From 8d0644e79301a9a0ccf67b66d44c43e4766d8aa7 Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 9 May 2023 23:02:03 +0200 Subject: [PATCH 082/722] cleanup --- src/Functions/FunctionsConversion.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 2f751e72222..b10d9f4a31a 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -731,10 +731,7 @@ struct FormatImpl template static ReturnType execute(const DataTypeDate32::FieldType x, WriteBuffer & wb, const DataTypeDate32 *, const DateLUTImpl * time_zone) { - std::cerr << "BEFORE: " << std::endl; - std::cerr << time_zone->getTimeZone() << std::endl; writeDateText(ExtendedDayNum(x), wb, *time_zone); - std::cerr << "AFTER" << std::endl; return ReturnType(true); } }; From ac7c54a4d3636c8616799fa8272f6da1ce6683fc Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 10 May 2023 18:39:38 +0000 Subject: [PATCH 083/722] Refactor CapnProto format to improve input/output performance --- src/Core/Settings.h | 2 +- src/Core/SettingsEnums.cpp | 8 +- src/Core/SettingsEnums.h | 2 +- src/Formats/CapnProtoSchema.cpp | 298 ++++ .../{CapnProtoUtils.h => CapnProtoSchema.h} | 13 +- src/Formats/CapnProtoSerializer.cpp | 1218 +++++++++++++++++ src/Formats/CapnProtoSerializer.h | 25 + src/Formats/CapnProtoUtils.cpp | 734 ---------- src/Formats/FormatSettings.h | 6 +- .../Formats/Impl/CapnProtoRowInputFormat.cpp | 253 +--- .../Formats/Impl/CapnProtoRowInputFormat.h | 9 +- .../Formats/Impl/CapnProtoRowOutputFormat.cpp | 266 +--- .../Formats/Impl/CapnProtoRowOutputFormat.h | 17 +- .../Formats/Impl/ProtobufListInputFormat.cpp | 9 +- .../Formats/Impl/ProtobufRowInputFormat.cpp | 9 +- .../queries/0_stateless/02030_capnp_format.sh | 4 +- ...p_case_insensitive_names_matcing.reference | 1 + ...35_capnp_case_insensitive_names_matcing.sh | 10 + ...ing_and_writing_structure_fields.reference | 3 + ...36_reading_and_writing_structure_fields.sh | 24 + ...2735_case_insensitive_names_matching.capnp | 13 + .../02736_nested_structures.capnp | 21 + 22 files changed, 1686 insertions(+), 1259 deletions(-) create mode 100644 src/Formats/CapnProtoSchema.cpp rename src/Formats/{CapnProtoUtils.h => CapnProtoSchema.h} (59%) create mode 100644 src/Formats/CapnProtoSerializer.cpp create mode 100644 src/Formats/CapnProtoSerializer.h delete mode 100644 src/Formats/CapnProtoUtils.cpp create mode 100644 tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.reference create mode 100755 tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.sh create mode 100644 tests/queries/0_stateless/02736_reading_and_writing_structure_fields.reference create mode 100755 tests/queries/0_stateless/02736_reading_and_writing_structure_fields.sh create mode 100644 tests/queries/0_stateless/format_schemas/02735_case_insensitive_names_matching.capnp create mode 100644 tests/queries/0_stateless/format_schemas/02736_nested_structures.capnp diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 1bea2c26392..269fa832f45 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -958,7 +958,7 @@ class IColumn; M(Bool, output_format_orc_string_as_string, false, "Use ORC String type instead of Binary for String columns", 0) \ M(ORCCompression, output_format_orc_compression_method, "lz4", "Compression method for ORC output 
format. Supported codecs: lz4, snappy, zlib, zstd, none (uncompressed)", 0) \ \ - M(EnumComparingMode, format_capn_proto_enum_comparising_mode, FormatSettings::EnumComparingMode::BY_VALUES, "How to map ClickHouse Enum and CapnProto Enum", 0) \ + M(CapnProtoEnumComparingMode, format_capn_proto_enum_comparising_mode, FormatSettings::CapnProtoEnumComparingMode::BY_VALUES, "How to map ClickHouse Enum and CapnProto Enum", 0) \ \ M(String, input_format_mysql_dump_table_name, "", "Name of the table in MySQL dump from which to read data", 0) \ M(Bool, input_format_mysql_dump_map_column_names, true, "Match columns from table in MySQL dump and columns from ClickHouse table by names", 0) \ diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp index e0f16ea00db..a291a23c140 100644 --- a/src/Core/SettingsEnums.cpp +++ b/src/Core/SettingsEnums.cpp @@ -144,10 +144,10 @@ IMPLEMENT_SETTING_ENUM(TransactionsWaitCSNMode, ErrorCodes::BAD_ARGUMENTS, {"wait", TransactionsWaitCSNMode::WAIT}, {"wait_unknown", TransactionsWaitCSNMode::WAIT_UNKNOWN}}) -IMPLEMENT_SETTING_ENUM(EnumComparingMode, ErrorCodes::BAD_ARGUMENTS, - {{"by_names", FormatSettings::EnumComparingMode::BY_NAMES}, - {"by_values", FormatSettings::EnumComparingMode::BY_VALUES}, - {"by_names_case_insensitive", FormatSettings::EnumComparingMode::BY_NAMES_CASE_INSENSITIVE}}) +IMPLEMENT_SETTING_ENUM(CapnProtoEnumComparingMode, ErrorCodes::BAD_ARGUMENTS, + {{"by_names", FormatSettings::CapnProtoEnumComparingMode::BY_NAMES}, + {"by_values", FormatSettings::CapnProtoEnumComparingMode::BY_VALUES}, + {"by_names_case_insensitive", FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE}}) IMPLEMENT_SETTING_ENUM(EscapingRule, ErrorCodes::BAD_ARGUMENTS, {{"None", FormatSettings::EscapingRule::None}, diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h index 3ae7bfaa673..1c5be910ef7 100644 --- a/src/Core/SettingsEnums.h +++ b/src/Core/SettingsEnums.h @@ -188,7 +188,7 @@ enum class TransactionsWaitCSNMode DECLARE_SETTING_ENUM(TransactionsWaitCSNMode) -DECLARE_SETTING_ENUM_WITH_RENAME(EnumComparingMode, FormatSettings::EnumComparingMode) +DECLARE_SETTING_ENUM_WITH_RENAME(CapnProtoEnumComparingMode, FormatSettings::CapnProtoEnumComparingMode) DECLARE_SETTING_ENUM_WITH_RENAME(EscapingRule, FormatSettings::EscapingRule) diff --git a/src/Formats/CapnProtoSchema.cpp b/src/Formats/CapnProtoSchema.cpp new file mode 100644 index 00000000000..22518d5061a --- /dev/null +++ b/src/Formats/CapnProtoSchema.cpp @@ -0,0 +1,298 @@ +#include + +#if USE_CAPNP + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int CANNOT_PARSE_CAPN_PROTO_SCHEMA; + extern const int BAD_TYPE_OF_FIELD; + extern const int FILE_DOESNT_EXIST; + extern const int UNKNOWN_EXCEPTION; + extern const int CAPN_PROTO_BAD_TYPE; + extern const int BAD_ARGUMENTS; +} + +capnp::StructSchema CapnProtoSchemaParser::getMessageSchema(const FormatSchemaInfo & schema_info) +{ + capnp::ParsedSchema schema; + try + { + int fd; + KJ_SYSCALL(fd = open(schema_info.schemaDirectory().data(), O_RDONLY)); // NOLINT(bugprone-suspicious-semicolon) + auto schema_dir = kj::newDiskDirectory(kj::OsFileHandle(fd)); + schema = impl.parseFromDirectory(*schema_dir, kj::Path::parse(schema_info.schemaPath()), {}); + } + catch (const kj::Exception & e) + { + /// That's not good to determine the type of error by its description, but + /// this is the only way to do it here, 
because kj doesn't specify the type of error.
+        auto description = std::string_view(e.getDescription().cStr());
+        if (description.find("No such file or directory") != String::npos || description.find("no such directory") != String::npos)
+            throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot open CapnProto schema, file {} doesn't exist", schema_info.absoluteSchemaPath());
+
+        if (description.find("Parse error") != String::npos)
+            throw Exception(ErrorCodes::CANNOT_PARSE_CAPN_PROTO_SCHEMA, "Cannot parse CapnProto schema {}:{}", schema_info.schemaPath(), e.getLine());
+
+        throw Exception(ErrorCodes::UNKNOWN_EXCEPTION,
+                        "Unknown exception while parsing CapnProto schema: {}, schema dir and file: {}, {}",
+                        description, schema_info.schemaDirectory(), schema_info.schemaPath());
+    }
+
+    auto message_maybe = schema.findNested(schema_info.messageName());
+    auto * message_schema = kj::_::readMaybe(message_maybe);
+    if (!message_schema)
+        throw Exception(ErrorCodes::CANNOT_PARSE_CAPN_PROTO_SCHEMA,
+                        "CapnProto schema doesn't contain message with name {}", schema_info.messageName());
+    return message_schema->asStruct();
+}
+
+bool checkIfStructContainsUnnamedUnion(const capnp::StructSchema & struct_schema)
+{
+    return struct_schema.getFields().size() != struct_schema.getNonUnionFields().size();
+}
+
+bool checkIfStructIsNamedUnion(const capnp::StructSchema & struct_schema)
+{
+    return struct_schema.getFields().size() == struct_schema.getUnionFields().size();
+}
+
+/// Get full name of type for better exception messages.
+String getCapnProtoFullTypeName(const capnp::Type & type)
+{
+    static const std::map<capnp::schema::Type::Which, String> capnp_simple_type_names =
+    {
+        {capnp::schema::Type::Which::BOOL, "Bool"},
+        {capnp::schema::Type::Which::VOID, "Void"},
+        {capnp::schema::Type::Which::INT8, "Int8"},
+        {capnp::schema::Type::Which::INT16, "Int16"},
+        {capnp::schema::Type::Which::INT32, "Int32"},
+        {capnp::schema::Type::Which::INT64, "Int64"},
+        {capnp::schema::Type::Which::UINT8, "UInt8"},
+        {capnp::schema::Type::Which::UINT16, "UInt16"},
+        {capnp::schema::Type::Which::UINT32, "UInt32"},
+        {capnp::schema::Type::Which::UINT64, "UInt64"},
+        {capnp::schema::Type::Which::FLOAT32, "Float32"},
+        {capnp::schema::Type::Which::FLOAT64, "Float64"},
+        {capnp::schema::Type::Which::TEXT, "Text"},
+        {capnp::schema::Type::Which::DATA, "Data"},
+        {capnp::schema::Type::Which::INTERFACE, "Interface"},
+        {capnp::schema::Type::Which::ANY_POINTER, "AnyPointer"},
+    };
+
+    switch (type.which())
+    {
+        case capnp::schema::Type::Which::STRUCT:
+        {
+            auto struct_schema = type.asStruct();
+
+            auto non_union_fields = struct_schema.getNonUnionFields();
+            std::vector<String> non_union_field_names;
+            for (auto nested_field : non_union_fields)
+                non_union_field_names.push_back(String(nested_field.getProto().getName()) + " " + getCapnProtoFullTypeName(nested_field.getType()));
+
+            auto union_fields = struct_schema.getUnionFields();
+            std::vector<String> union_field_names;
+            for (auto nested_field : union_fields)
+                union_field_names.push_back(String(nested_field.getProto().getName()) + " " + getCapnProtoFullTypeName(nested_field.getType()));
+
+            String union_name = "Union(" + boost::algorithm::join(union_field_names, ", ") + ")";
+            /// Check if the struct is a named union.
+            if (non_union_field_names.empty())
+                return union_name;
+
+            String type_name = "Struct(" + boost::algorithm::join(non_union_field_names, ", ");
+            /// Check if the struct contains unnamed union.
+ if (!union_field_names.empty()) + type_name += ", " + union_name; + type_name += ")"; + return type_name; + } + case capnp::schema::Type::Which::LIST: + return "List(" + getCapnProtoFullTypeName(type.asList().getElementType()) + ")"; + case capnp::schema::Type::Which::ENUM: + { + auto enum_schema = type.asEnum(); + String enum_name = "Enum("; + auto enumerants = enum_schema.getEnumerants(); + for (unsigned i = 0; i != enumerants.size(); ++i) + { + enum_name += String(enumerants[i].getProto().getName()) + " = " + std::to_string(enumerants[i].getOrdinal()); + if (i + 1 != enumerants.size()) + enum_name += ", "; + } + enum_name += ")"; + return enum_name; + } + default: + auto it = capnp_simple_type_names.find(type.which()); + if (it == capnp_simple_type_names.end()) + throw Exception(ErrorCodes::BAD_TYPE_OF_FIELD, "Unknown CapnProto type"); + return it->second; + } +} + +namespace +{ + + template + static DataTypePtr getEnumDataTypeFromEnumerants(const capnp::EnumSchema::EnumerantList & enumerants) + { + std::vector> values; + for (auto enumerant : enumerants) + values.emplace_back(enumerant.getProto().getName(), ValueType(enumerant.getOrdinal())); + return std::make_shared>(std::move(values)); + } + + static DataTypePtr getEnumDataTypeFromEnumSchema(const capnp::EnumSchema & enum_schema) + { + auto enumerants = enum_schema.getEnumerants(); + if (enumerants.size() < 128) + return getEnumDataTypeFromEnumerants(enumerants); + if (enumerants.size() < 32768) + return getEnumDataTypeFromEnumerants(enumerants); + + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "ClickHouse supports only 8 and 16-bit Enums"); + } + + static DataTypePtr getDataTypeFromCapnProtoType(const capnp::Type & capnp_type, bool skip_unsupported_fields) + { + switch (capnp_type.which()) + { + case capnp::schema::Type::INT8: + return std::make_shared(); + case capnp::schema::Type::INT16: + return std::make_shared(); + case capnp::schema::Type::INT32: + return std::make_shared(); + case capnp::schema::Type::INT64: + return std::make_shared(); + case capnp::schema::Type::BOOL: [[fallthrough]]; + case capnp::schema::Type::UINT8: + return std::make_shared(); + case capnp::schema::Type::UINT16: + return std::make_shared(); + case capnp::schema::Type::UINT32: + return std::make_shared(); + case capnp::schema::Type::UINT64: + return std::make_shared(); + case capnp::schema::Type::FLOAT32: + return std::make_shared(); + case capnp::schema::Type::FLOAT64: + return std::make_shared(); + case capnp::schema::Type::DATA: [[fallthrough]]; + case capnp::schema::Type::TEXT: + return std::make_shared(); + case capnp::schema::Type::ENUM: + return getEnumDataTypeFromEnumSchema(capnp_type.asEnum()); + case capnp::schema::Type::LIST: + { + auto list_schema = capnp_type.asList(); + auto nested_type = getDataTypeFromCapnProtoType(list_schema.getElementType(), skip_unsupported_fields); + if (!nested_type) + return nullptr; + return std::make_shared(nested_type); + } + case capnp::schema::Type::STRUCT: + { + auto struct_schema = capnp_type.asStruct(); + + + if (struct_schema.getFields().size() == 0) + { + if (skip_unsupported_fields) + return nullptr; + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Empty messages are not supported"); + } + + /// Check if it can be Nullable. 
+ if (checkIfStructIsNamedUnion(struct_schema)) + { + auto fields = struct_schema.getUnionFields(); + if (fields.size() != 2 || (!fields[0].getType().isVoid() && !fields[1].getType().isVoid())) + { + if (skip_unsupported_fields) + return nullptr; + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unions are not supported"); + } + auto value_type = fields[0].getType().isVoid() ? fields[1].getType() : fields[0].getType(); + if (value_type.isStruct() || value_type.isList()) + { + if (skip_unsupported_fields) + return nullptr; + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Tuples and Lists cannot be inside Nullable"); + } + + auto nested_type = getDataTypeFromCapnProtoType(value_type, skip_unsupported_fields); + if (!nested_type) + return nullptr; + return std::make_shared(nested_type); + } + + if (checkIfStructContainsUnnamedUnion(struct_schema)) + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unnamed union is not supported"); + + /// Treat Struct as Tuple. + DataTypes nested_types; + Names nested_names; + for (auto field : struct_schema.getNonUnionFields()) + { + auto nested_type = getDataTypeFromCapnProtoType(field.getType(), skip_unsupported_fields); + if (!nested_type) + continue; + nested_names.push_back(field.getProto().getName()); + nested_types.push_back(nested_type); + } + if (nested_types.empty()) + return nullptr; + return std::make_shared(std::move(nested_types), std::move(nested_names)); + } + default: + { + if (skip_unsupported_fields) + return nullptr; + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unsupported CapnProtoType: {}", getCapnProtoFullTypeName(capnp_type)); + } + } +} + +} + +NamesAndTypesList capnProtoSchemaToCHSchema(const capnp::StructSchema & schema, bool skip_unsupported_fields) +{ + if (checkIfStructContainsUnnamedUnion(schema)) + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unnamed union is not supported"); + + NamesAndTypesList names_and_types; + for (auto field : schema.getNonUnionFields()) + { + auto name = field.getProto().getName(); + auto type = getDataTypeFromCapnProtoType(field.getType(), skip_unsupported_fields); + if (type) + names_and_types.emplace_back(name, type); + } + if (names_and_types.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Cannot convert CapnProto schema to ClickHouse table schema, all fields have unsupported types"); + + return names_and_types; +} + +} + +#endif diff --git a/src/Formats/CapnProtoUtils.h b/src/Formats/CapnProtoSchema.h similarity index 59% rename from src/Formats/CapnProtoUtils.h rename to src/Formats/CapnProtoSchema.h index 2d8cdb418d7..225f6f56207 100644 --- a/src/Formats/CapnProtoUtils.h +++ b/src/Formats/CapnProtoSchema.h @@ -30,17 +30,14 @@ public: capnp::StructSchema getMessageSchema(const FormatSchemaInfo & schema_info); }; -std::pair splitCapnProtoFieldName(const String & name); +bool checkIfStructContainsUnnamedUnion(const capnp::StructSchema & struct_schema); +bool checkIfStructIsNamedUnion(const capnp::StructSchema & struct_schema); -bool compareEnumNames(const String & first, const String & second, FormatSettings::EnumComparingMode mode); - -std::pair getStructBuilderAndFieldByColumnName(capnp::DynamicStruct::Builder struct_builder, const String & name); - -capnp::DynamicValue::Reader getReaderByColumnName(const capnp::DynamicStruct::Reader & struct_reader, const String & name); - -void checkCapnProtoSchemaStructure(const capnp::StructSchema & schema, const Block & header, FormatSettings::EnumComparingMode mode); +/// Get full name of type for better exception messages. 
+String getCapnProtoFullTypeName(const capnp::Type & type); NamesAndTypesList capnProtoSchemaToCHSchema(const capnp::StructSchema & schema, bool skip_unsupported_fields); + } #endif diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp new file mode 100644 index 00000000000..e0c8ae2a79a --- /dev/null +++ b/src/Formats/CapnProtoSerializer.cpp @@ -0,0 +1,1218 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int THERE_IS_NO_COLUMN; + extern const int BAD_TYPE_OF_FIELD; + extern const int CAPN_PROTO_BAD_CAST; + extern const int INCORRECT_DATA; + extern const int ILLEGAL_COLUMN; +} + +namespace +{ + std::pair splitFieldName(const String & name) + { + const auto * begin = name.data(); + const auto * end = name.data() + name.size(); + const auto * it = find_first_symbols<'_', '.'>(begin, end); + String first = String(begin, it); + String second = it == end ? "" : String(it + 1, end); + return {first, second}; + } + + std::optional findFieldByName(const capnp::StructSchema & struct_schema, const String & name) + { + const auto & fields = struct_schema.getFields(); + for (auto field : fields) + { + auto field_name = String(field.getProto().getName()); + if (boost::to_lower_copy(name) == boost::to_lower_copy(field_name)) + return field; + } + return std::nullopt; + } + + [[noreturn]] void throwCannotConvert(const DataTypePtr & type, const String & name, const capnp::Type & capnp_type) + { + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\" with type {} to CapnProto type {}", + name, + type->getName(), + getCapnProtoFullTypeName(capnp_type)); + } + + struct FieldBuilder + { + virtual ~FieldBuilder() = default; + }; + + struct ListBuilder : public FieldBuilder + { + explicit ListBuilder(capnp::DynamicValue::Builder builder) : impl(builder.as()) + { + } + + capnp::DynamicList::Builder impl; + std::vector> nested_builders; + }; + + struct StructBuilder : public FieldBuilder + { + explicit StructBuilder(capnp::DynamicValue::Builder builder, size_t fields_size) : impl(builder.as()), field_builders(fields_size) + { + } + + explicit StructBuilder(capnp::DynamicStruct::Builder struct_builder, size_t fields_size) : impl(std::move(struct_builder)), field_builders(fields_size) + { + } + + capnp::DynamicStruct::Builder impl; + std::vector> field_builders; + }; + + std::unique_ptr initStructFieldBuilderIfNeeded(const ColumnPtr & column, size_t row_num, capnp::DynamicStruct::Builder & struct_builder, const capnp::StructSchema::Field & field, const capnp::Type & capnp_type, size_t nested_fields_size) + { + switch (capnp_type.which()) + { + case capnp::schema::Type::LIST: + { + const auto * array_column = assert_cast(column.get()); + size_t size = array_column->getOffsets()[row_num] - array_column->getOffsets()[row_num - 1]; + return std::make_unique(struct_builder.init(field, static_cast(size))); + } + case capnp::schema::Type::STRUCT: + { + return std::make_unique(struct_builder.init(field), nested_fields_size); + } + default: + return nullptr; + } + } + + class ICapnProtoSerializer + { + public: + virtual std::optional writeRow(const ColumnPtr & column, FieldBuilder * builder, size_t row_num) = 0; + virtual void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) = 0; + + virtual ~ICapnProtoSerializer() = 
default; + }; + + template + class CapnProtoIntegerSerializer : public ICapnProtoSerializer + { + public: + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::INT) + return capnp::DynamicValue::Reader(column->getInt(row_num)); + if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::UINT) + return capnp::DynamicValue::Reader(column->getUInt(row_num)); + return capnp::DynamicValue::Reader(column->getBool(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + NumericType value; + if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::INT) + value = static_cast(reader.as()); + else if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::UINT) + value = static_cast(reader.as()); + else if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::BOOL) + value = static_cast(reader.as()); + + if constexpr (is_bool_data_type) + assert_cast(column).insertValue(static_cast(value)); + else + assert_cast &>(column).insertValue(value); + } + }; + + template + static std::unique_ptr createIntegerSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + switch (capnp_type.which()) + { + case capnp::schema::Type::INT8: [[fallthrough]]; + case capnp::schema::Type::INT16: [[fallthrough]]; + case capnp::schema::Type::INT32: [[fallthrough]]; + case capnp::schema::Type::INT64: + return std::make_unique>(); + case capnp::schema::Type::UINT8: [[fallthrough]]; + case capnp::schema::Type::UINT16: [[fallthrough]]; + case capnp::schema::Type::UINT32: [[fallthrough]]; + case capnp::schema::Type::UINT64: + return std::make_unique>(); + case capnp::schema::Type::BOOL: + return std::make_unique>(); + default: + throwCannotConvert(data_type, column_name, capnp_type); + } + } + + template + class CapnProtoBigIntegerSerializer : public ICapnProtoSerializer + { + public: + CapnProtoBigIntegerSerializer(const DataTypePtr & data_type_, const String & column_name, const capnp::Type & capnp_type) : data_type(data_type_) + { + if (!capnp_type.isData()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + auto data = column->getDataAt(row_num); + return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto value = reader.as(); + if (value.size() != sizeof(NumericType)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", data_type->getName(), value.size()); + + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + + private: + DataTypePtr data_type; + }; + + template + class CapnProtoFloatSerializer : public ICapnProtoSerializer + { + public: + CapnProtoFloatSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isFloat32() && !capnp_type.isFloat64()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getFloat64(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast &>(column).insertValue(reader.as()); + } + }; + + template 
+    class CapnProtoEnumSerializer : public ICapnProtoSerializer
+    {
+    public:
+        CapnProtoEnumSerializer(
+            const DataTypePtr & data_type_,
+            const String & column_name,
+            const capnp::Type & capnp_type,
+            const FormatSettings::CapnProtoEnumComparingMode enum_comparing_mode_) : data_type(data_type_), enum_comparing_mode(enum_comparing_mode_)
+        {
+            if (!capnp_type.isEnum())
+                throwCannotConvert(data_type, column_name, capnp_type);
+
+            bool to_lower = enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE;
+            const auto * enum_type = assert_cast<const DataTypeEnum<EnumType> *>(data_type.get());
+            const auto & enum_values = dynamic_cast<const EnumValues<EnumType> &>(*enum_type);
+
+            enum_schema = capnp_type.asEnum();
+            auto enumerants = enum_schema.getEnumerants();
+            constexpr auto max_value = std::is_same_v<EnumType, Int8> ? INT8_MAX : INT16_MAX;
+            if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES)
+            {
+                /// In CapnProto, Enum fields are numbered sequentially starting from zero.
+                if (enumerants.size() > max_value)
+                    throw Exception(
+                        ErrorCodes::CAPN_PROTO_BAD_CAST,
+                        "Enum from CapnProto schema contains values that are out of range for ClickHouse enum type {}",
+                        data_type->getName());
+
+                auto values = enum_values.getSetOfAllValues();
+                std::unordered_set<EnumType> capn_enum_values;
+                for (auto enumerant : enumerants)
+                    capn_enum_values.insert(EnumType(enumerant.getOrdinal()));
+                if (values != capn_enum_values)
+                    throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "The set of values in Enum from CapnProto schema is different from the set of values in ClickHouse Enum");
+            }
+            else
+            {
+                auto names = enum_values.getSetOfAllNames(to_lower);
+                std::unordered_set<String> capn_enum_names;
+
+                for (auto enumerant : enumerants)
+                {
+                    String name = enumerant.getProto().getName();
+                    capn_enum_names.insert(to_lower ?
                        boost::algorithm::to_lower_copy(name) : name);
+                }
+
+                if (names != capn_enum_names)
+                    throw Exception(
+                        ErrorCodes::CAPN_PROTO_BAD_CAST,
+                        "The set of names in Enum from CapnProto schema is different from the set of names in ClickHouse Enum");
+            }
+        }
+
+        std::optional<capnp::DynamicValue::Reader> writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override
+        {
+            const auto * enum_data_type = assert_cast<const DataTypeEnum<EnumType> *>(data_type.get());
+            EnumType enum_value = assert_cast<const ColumnVector<EnumType> &>(*column).getElement(row_num);
+            if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES)
+                return capnp::DynamicValue::Reader(capnp::DynamicEnum(enum_schema, enum_value));
+
+            auto enum_name = enum_data_type->getNameForValue(enum_value);
+            for (const auto enumerant : enum_schema.getEnumerants())
+            {
+                if (compareEnumNames(String(enum_name), enumerant.getProto().getName(), enum_comparing_mode))
+                    return capnp::DynamicValue::Reader(capnp::DynamicEnum(enumerant));
+            }
+
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot convert ClickHouse Enum value to CapnProto Enum");
+        }
+
+        void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+        {
+            auto enum_value = reader.as<capnp::DynamicEnum>();
+            auto enumerant = *kj::_::readMaybe(enum_value.getEnumerant());
+            auto enum_type = assert_cast<const DataTypeEnum<EnumType> *>(data_type.get());
+            DataTypePtr nested_type = std::make_shared<DataTypeNumber<EnumType>>();
+            switch (enum_comparing_mode)
+            {
+                case FormatSettings::CapnProtoEnumComparingMode::BY_VALUES:
+                {
+                    assert_cast<ColumnVector<EnumType> &>(column).insertValue(static_cast<EnumType>(enumerant.getOrdinal()));
+                    return;
+                }
+                case FormatSettings::CapnProtoEnumComparingMode::BY_NAMES:
+                {
+                    auto value = enum_type->getValue(String(enumerant.getProto().getName()));
+                    assert_cast<ColumnVector<EnumType> &>(column).insertValue(value);
+                    return;
+                }
+                case FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE:
+                {
+                    /// Find the same enum name case-insensitively.
+ String enum_name = enumerant.getProto().getName(); + for (auto & name : enum_type->getAllRegisteredNames()) + { + if (compareEnumNames(name, enum_name, enum_comparing_mode)) + { + assert_cast &>(column).insertValue(enum_type->getValue(name)); + break; + } + } + return; + } + } + } + + private: + bool compareEnumNames(const String & first, const String & second, const FormatSettings::CapnProtoEnumComparingMode mode) + { + if (mode == FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE) + return boost::algorithm::to_lower_copy(first) == boost::algorithm::to_lower_copy(second); + return first == second; + } + + DataTypePtr data_type; + capnp::EnumSchema enum_schema; + const FormatSettings::CapnProtoEnumComparingMode enum_comparing_mode; + }; + + class CapnProtoDateSerializer : public ICapnProtoSerializer + { + public: + CapnProtoDateSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isUInt16()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getUInt(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast(column).insertValue(reader.as()); + } + }; + + class CapnProtoDate32Serializer : public ICapnProtoSerializer + { + public: + CapnProtoDate32Serializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isInt32()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getInt(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast(column).insertValue(reader.as()); + } + }; + + class CapnProtoDateTimeSerializer : public ICapnProtoSerializer + { + public: + CapnProtoDateTimeSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isUInt32()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getInt(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast(column).insertValue(reader.as()); + } + }; + + class CapnProtoDateTime64Serializer : public ICapnProtoSerializer + { + public: + CapnProtoDateTime64Serializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isInt64()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getInt(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast(column).insertValue(reader.as()); + } + }; + + template + class CapnProtoDecimalSerializer : public ICapnProtoSerializer + { + public: + CapnProtoDecimalSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + auto which = WhichDataType(data_type); + if ((!capnp_type.isInt32() && which.isDecimal32()) || (!capnp_type.isInt64() && 
which.isDecimal64())) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getInt(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast &>(column).insertValue(reader.as()); + } + }; + + template + class CapnProtoBigDecimalSerializer : public ICapnProtoSerializer + { + public: + CapnProtoBigDecimalSerializer(const DataTypePtr & data_type_, const String & column_name, const capnp::Type & capnp_type) : data_type(data_type_) + { + if (!capnp_type.isData()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + auto data = column->getDataAt(row_num); + return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto value = reader.as(); + if (value.size() != sizeof(DecimalType)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", data_type->getName(), value.size()); + + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + + private: + DataTypePtr data_type; + }; + + template + class CapnProtoStringSerializer : public ICapnProtoSerializer + { + public: + CapnProtoStringSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type_) : capnp_type(capnp_type_) + { + if (!capnp_type.isData() && !capnp_type.isText()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + auto data = column->getDataAt(row_num); + + if constexpr (is_binary) + return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); + + /// For type TEXT data must be null-terminated, but in String column we always have 0 byte at the end of each value. + return capnp::DynamicValue::Reader(capnp::Text::Reader(data.data, data.size)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + if constexpr (is_binary) + { + auto value = reader.as(); + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + else + { + auto value = reader.as(); + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + } + + private: + capnp::Type capnp_type; + }; + + template + class CapnProtoFixedStringSerializer : public ICapnProtoSerializer + { + public: + CapnProtoFixedStringSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type_) : capnp_type(capnp_type_) + { + if (!capnp_type.isData() && !capnp_type.isText()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + auto data = column->getDataAt(row_num); + if constexpr (is_binary) + return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); + + if (data.data[data.size - 1] == 0) + return capnp::DynamicValue::Reader(capnp::Text::Reader(reinterpret_cast(data.data), data.size)); + + /// In TEXT type data should be null-terminated, but ClickHouse FixedString data could not be. 
+ /// To make the data null-terminated we have to copy it to a temporary String object and use it in capnp::Text::Reader.
+ /// Note that capnp::Text::Reader only keeps a pointer to the data and its size, so we must
+ /// guarantee that the temporary String object outlives the capnp::Text::Reader.
+ tmp_string = data.toString();
+ return capnp::DynamicValue::Reader(capnp::Text::Reader(reinterpret_cast(tmp_string.data()), tmp_string.size()));
+ }
+
+ void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+ {
+ auto & fixed_string_column = assert_cast(column);
+ if constexpr (is_binary)
+ {
+ auto value = reader.as();
+ if (value.size() > fixed_string_column.getN())
+ throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN());
+
+ fixed_string_column.insertData(reinterpret_cast(value.begin()), value.size());
+ }
+ else
+ {
+ auto value = reader.as();
+ if (value.size() > fixed_string_column.getN())
+ throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN());
+
+ fixed_string_column.insertData(reinterpret_cast(value.begin()), value.size());
+ }
+ }
+
+ private:
+ String tmp_string;
+ capnp::Type capnp_type;
+ };
+
+ class CapnProtoIPv4Serializer : public ICapnProtoSerializer
+ {
+ public:
+ CapnProtoIPv4Serializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type)
+ {
+ if (!capnp_type.isUInt32())
+ throwCannotConvert(data_type, column_name, capnp_type);
+ }
+
+ std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override
+ {
+ return capnp::DynamicValue::Reader(assert_cast(*column).getElement(row_num).toUnderType());
+ }
+
+ void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+ {
+ assert_cast(column).insertValue(IPv4(reader.as()));
+ }
+ };
+
+ class CapnProtoIPv6Serializer : public ICapnProtoSerializer
+ {
+ public:
+ CapnProtoIPv6Serializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type)
+ {
+ if (!capnp_type.isData())
+ throwCannotConvert(data_type, column_name, capnp_type);
+ }
+
+ std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override
+ {
+ auto data = column->getDataAt(row_num);
+ return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size));
+ }
+
+ void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+ {
+ auto value = reader.as();
+ if (value.size() != sizeof(IPv6))
+ throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of IPv6 value: {}", value.size());
+
+ column.insertData(reinterpret_cast(value.begin()), value.size());
+ }
+ };
+
+ class CapnProtoUUIDSerializer : public ICapnProtoSerializer
+ {
+ public:
+ CapnProtoUUIDSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type)
+ {
+ if (!capnp_type.isData())
+ throwCannotConvert(data_type, column_name, capnp_type);
+ }
+
+ std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override
+ {
+ auto data = column->getDataAt(row_num);
+ return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size));
+ }
+
+ void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+ {
+ auto value = reader.as();
+ if (value.size() != sizeof(UUID))
+ throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of UUID value: {}", value.size());
+
+ column.insertData(reinterpret_cast(value.begin()), value.size());
+ }
+ };
+
+ std::unique_ptr createSerializer(const DataTypePtr & type, const String & name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings);
+
+ class CapnProtoLowCardinalitySerializer : public ICapnProtoSerializer
+ {
+ public:
+ CapnProtoLowCardinalitySerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings)
+ {
+ nested_serializer = createSerializer(assert_cast(*data_type).getDictionaryType(), column_name, capnp_type, settings);
+ }
+
+ std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) override
+ {
+ const auto & low_cardinality_column = assert_cast(*column);
+ size_t index = low_cardinality_column.getIndexAt(row_num);
+ const auto & dict_column = low_cardinality_column.getDictionary().getNestedColumn();
+ return nested_serializer->writeRow(dict_column, field_builder, index);
+ }
+
+ void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+ {
+ auto & low_cardinality_column = assert_cast(column);
+ auto tmp_column = low_cardinality_column.getDictionary().getNestedColumn()->cloneEmpty();
+ nested_serializer->readRow(*tmp_column, reader);
+ low_cardinality_column.insertFromFullColumn(*tmp_column, 0);
+ }
+
+ private:
+ std::unique_ptr nested_serializer;
+ };
+
+ class CapnProtoNullableSerializer : public ICapnProtoSerializer
+ {
+ public:
+ CapnProtoNullableSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings)
+ {
+ if (!capnp_type.isStruct())
+ throw Exception(
+ ErrorCodes::CAPN_PROTO_BAD_CAST,
+ "Cannot convert column \"{}\": Nullable can be represented only as a named union of type Void and nested type, got CapnProto type {}",
+ column_name,
+ getCapnProtoFullTypeName(capnp_type));
+
+ /// Check that struct is a named union of type VOID and one arbitrary type.
+ auto struct_schema = capnp_type.asStruct();
+ if (!checkIfStructIsNamedUnion(struct_schema))
+ throw Exception(
+ ErrorCodes::CAPN_PROTO_BAD_CAST,
+ "Cannot convert column \"{}\": Nullable can be represented only as a named union of type Void and nested type. "
+ "Given CapnProto struct is not a named union: {}",
+ column_name,
+ getCapnProtoFullTypeName(capnp_type));
+
+ auto union_fields = struct_schema.getUnionFields();
+ if (union_fields.size() != 2)
+ throw Exception(
+ ErrorCodes::CAPN_PROTO_BAD_CAST,
+ "Cannot convert column \"{}\": Nullable can be represented only as a named union of type Void and nested type. "
+ "Given CapnProto union has more than 2 fields: {}",
+ column_name,
+ getCapnProtoFullTypeName(capnp_type));
+
+ auto first = union_fields[0];
+ auto second = union_fields[1];
+ auto nested_type = assert_cast(data_type.get())->getNestedType();
+ if (first.getType().isVoid())
+ {
+ null_field = first;
+ nested_field = second;
+ nested_capnp_type = second.getType();
+ if (nested_capnp_type.isStruct())
+ nested_fields_size = nested_capnp_type.asStruct().getFields().size();
+ nested_serializer = createSerializer(nested_type, column_name, nested_capnp_type, settings);
+ }
+ else if (second.getType().isVoid())
+ {
+ null_field = second;
+ nested_field = first;
+ nested_capnp_type = first.getType();
+ if (nested_capnp_type.isStruct())
+ nested_fields_size = nested_capnp_type.asStruct().getFields().size();
+ nested_serializer = createSerializer(nested_type, column_name, nested_capnp_type, settings);
+ }
+ else
+ throw Exception(
+ ErrorCodes::CAPN_PROTO_BAD_CAST,
+ "Cannot convert column \"{}\": Nullable can be represented only as a named union of type Void and nested type. "
+ "Given CapnProto union doesn't have a field with type Void: {}",
+ column_name,
+ getCapnProtoFullTypeName(capnp_type));
+ }
+
+ std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) override
+ {
+ assert(field_builder);
+ auto & struct_builder = assert_cast(*field_builder);
+ const auto & nullable_column = assert_cast(*column);
+ if (nullable_column.isNullAt(row_num))
+ {
+ struct_builder.impl.set(null_field, capnp::Void());
+ }
+ else
+ {
+ struct_builder.impl.clear(nested_field);
+ const auto & nested_column = nullable_column.getNestedColumnPtr();
+ auto nested_field_builder = initStructFieldBuilderIfNeeded(nested_column, row_num, struct_builder.impl, nested_field, nested_capnp_type, nested_fields_size);
+ auto value = nested_serializer->writeRow(nested_column, nested_field_builder.get(), row_num);
+ if (value)
+ struct_builder.impl.set(nested_field, *value);
+ }
+
+ return std::nullopt;
+ }
+
+ void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+ {
+ auto struct_reader = reader.as();
+ auto & nullable_column = assert_cast(column);
+ auto field = *kj::_::readMaybe(struct_reader.which());
+ if (field.getType().isVoid())
+ nullable_column.insertDefault();
+ else
+ {
+ auto & nested_column = nullable_column.getNestedColumn();
+ auto nested_reader = struct_reader.get(field);
+ nested_serializer->readRow(nested_column, nested_reader);
+ nullable_column.getNullMapData().push_back(0);
+ }
+ }
+
+ private:
+ std::unique_ptr nested_serializer;
+ capnp::StructSchema::Field null_field;
+ capnp::StructSchema::Field nested_field;
+ size_t nested_fields_size = 0;
+ capnp::Type nested_capnp_type;
+ };
+
+ class CapnProtoArraySerializer : public ICapnProtoSerializer
+ {
+ public:
+ CapnProtoArraySerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings)
+ {
+ if (!capnp_type.isList())
+ throwCannotConvert(data_type, column_name, capnp_type);
+
+ auto nested_type = assert_cast(data_type.get())->getNestedType();
+ element_type = capnp_type.asList().getElementType();
+ if (element_type.isStruct())
+ element_struct_fields = element_type.asStruct().getFields().size();
+ nested_serializer = createSerializer(nested_type, column_name, capnp_type.asList().getElementType(), settings);
+ }
+
+ std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num)
override + { + assert(field_builder); + auto & list_builder = assert_cast(*field_builder); + const auto * array_column = assert_cast(column.get()); + const auto & nested_column = array_column->getDataPtr(); + const auto & offsets = array_column->getOffsets(); + auto offset = offsets[row_num - 1]; + size_t size = offsets[row_num] - offset; + bool need_nested_builders = list_builder.nested_builders.empty(); + for (unsigned i = 0; i != static_cast(size); ++i) + { + if (need_nested_builders) + { + /// For nested lists we need to initialize nested list builder. + if (element_type.isList()) + { + const auto & nested_offset = checkAndGetColumn(*nested_column)->getOffsets(); + size_t nested_array_size = nested_offset[offset + i] - nested_offset[offset + i - 1]; + list_builder.nested_builders.emplace_back(std::make_unique(list_builder.impl.init(i, static_cast(nested_array_size)))); + } + else if (element_type.isStruct()) + { + list_builder.nested_builders.emplace_back(std::make_unique(list_builder.impl[i], element_struct_fields)); + } + else + { + list_builder.nested_builders.emplace_back(); + } + } + + auto value = nested_serializer->writeRow(nested_column, list_builder.nested_builders[i].get(), offset + i); + if (value) + list_builder.impl.set(i, *value); + } + + return std::nullopt; + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto list_reader = reader.as(); + auto & column_array = assert_cast(column); + auto & offsets = column_array.getOffsets(); + offsets.push_back(offsets.back() + list_reader.size()); + + auto & nested_column = column_array.getData(); + for (const auto & nested_reader : list_reader) + nested_serializer->readRow(nested_column, nested_reader); + } + + private: + std::unique_ptr nested_serializer; + capnp::Type element_type; + size_t element_struct_fields; + }; + + class CapnProtoMapSerializer : public ICapnProtoSerializer + { + public: + CapnProtoMapSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) + { + /// We output/input Map type as follow CapnProto schema + /// + /// struct Map { + /// struct Entry { + /// key @0: Key; + /// value @1: Value; + /// } + /// entries @0 :List(Entry); + /// } + + if (!capnp_type.isStruct()) + throwCannotConvert(data_type, column_name, capnp_type); + + auto struct_schema = capnp_type.asStruct(); + + if (checkIfStructContainsUnnamedUnion(struct_schema)) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\" with type {} to CapnProto Struct with unnamed union {}", + column_name, + data_type->getName(), + getCapnProtoFullTypeName(capnp_type)); + + if (struct_schema.getFields().size() != 1) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\": Map type can be represented as a Struct with one list field, got struct: {}", + column_name, + getCapnProtoFullTypeName(capnp_type)); + + const auto & field_type = struct_schema.getFields()[0].getType(); + if (!field_type.isList()) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\": Map type can be represented as a Struct with one list field, got field: {}", + column_name, + getCapnProtoFullTypeName(field_type)); + + auto list_element_type = field_type.asList().getElementType(); + if (!list_element_type.isStruct()) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\": Field of struct that 
represents Map should be a list of structs, got list of {}", + column_name, + getCapnProtoFullTypeName(list_element_type)); + + auto key_value_struct = list_element_type.asStruct(); + if (checkIfStructContainsUnnamedUnion(key_value_struct)) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\": struct that represents Map entries is unnamed union: {}", + column_name, + getCapnProtoFullTypeName(list_element_type)); + + if (key_value_struct.getFields().size() != 2) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\": struct that represents Map entries should contain only 2 fields, got struct {}", + column_name, + getCapnProtoFullTypeName(list_element_type)); + + const auto & map_type = assert_cast(*data_type); + DataTypes types = {map_type.getKeyType(), map_type.getValueType()}; + Names names = {"key", "value"}; + auto entries_type = std::make_shared(std::make_shared(types, names)); + entries_field = struct_schema.getFields()[0]; + entries_capnp_type = entries_field.getType(); + nested_serializer = createSerializer(entries_type, column_name, field_type, settings); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) override + { + assert(field_builder); + auto & struct_builder = assert_cast(*field_builder); + const auto & entries_column = assert_cast(column.get())->getNestedColumnPtr(); + auto entries_builder = initStructFieldBuilderIfNeeded(entries_column, row_num, struct_builder.impl, entries_field, entries_capnp_type, 0); + nested_serializer->writeRow(entries_column, entries_builder.get(), row_num); + return std::nullopt; + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto struct_reader = reader.as(); + auto & entries_column = assert_cast(column).getNestedColumn(); + nested_serializer->readRow(entries_column, struct_reader.get(entries_field)); + } + + private: + std::unique_ptr nested_serializer; + capnp::StructSchema::Field entries_field; + capnp::Type entries_capnp_type; + }; + + class CapnProtoStructureSerializer : public ICapnProtoSerializer + { + public: + CapnProtoStructureSerializer(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) + { + if (checkIfStructIsNamedUnion(schema) || checkIfStructContainsUnnamedUnion(schema)) + throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Root CapnProto Struct cannot be named union/struct with unnamed union"); + + initialize(data_types, names, schema, settings); + } + + CapnProtoStructureSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) + { + if (!capnp_type.isStruct()) + throwCannotConvert(data_type, column_name, capnp_type); + + auto struct_schema = capnp_type.asStruct(); + + if (checkIfStructIsNamedUnion(struct_schema) || checkIfStructContainsUnnamedUnion(struct_schema)) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\" with type {} to CapnProto named union/struct with unnamed union {}", + column_name, + data_type->getName(), + getCapnProtoFullTypeName(capnp_type)); + + const auto * tuple_data_type = assert_cast(data_type.get()); + auto nested_types = tuple_data_type->getElements(); + Names nested_names; + bool have_explicit_names = tuple_data_type->haveExplicitNames(); + auto structure_fields = struct_schema.getFields(); + if 
(!have_explicit_names) + { + if (nested_types.size() != structure_fields.size()) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\" with type {} to CapnProto type {}: Tuple and Struct have different sizes {} != {}", + column_name, + data_type->getName(), + getCapnProtoFullTypeName(capnp_type), + nested_types.size(), + structure_fields.size()); + nested_names.reserve(structure_fields.size()); + for (auto field : structure_fields) + nested_names.push_back(field.getProto().getName()); + } + else + { + nested_names = tuple_data_type->getElementNames(); + } + + try + { + initialize(nested_types, nested_names, struct_schema, settings); + } + catch (Exception & e) + { + e.addMessage("(while converting column {})", column_name); + throw e; + } + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder * builder, size_t row_num) override + { + assert(builder); + auto & struct_builder = assert_cast(*builder); + if (auto tuple_column = typeid_cast(column.get())) + writeRow(tuple_column->getColumnsCopy(), struct_builder, row_num); + else + writeRow(Columns{column}, struct_builder, row_num); + return std::nullopt; + } + + void writeRow(const Columns & columns, StructBuilder & struct_builder, size_t row_num) + { + for (size_t i = 0; i != columns.size(); ++i) + { + const auto & field = fields[i]; + size_t field_index = field.getIndex(); + if (likely(!struct_builder.field_builders[field_index])) + struct_builder.field_builders[field_index] = initStructFieldBuilderIfNeeded( + columns[i], row_num, struct_builder.impl, field, fields_types[i], nested_field_sizes[i]); + + auto value = field_serializers[i]->writeRow(columns[i], struct_builder.field_builders[field_index].get(), row_num); + if (value) + struct_builder.impl.set(field, *value); + } + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto struct_reader = reader.as(); + if (auto * tuple_column = typeid_cast(&column)) + { + for (size_t i = 0; i != tuple_column->tupleSize(); ++i) + field_serializers[i]->readRow(tuple_column->getColumn(i), struct_reader.get(fields[i])); + } + else + field_serializers[0]->readRow(column, struct_reader.get(fields[0])); + } + + void readRow(MutableColumns & columns, const capnp::DynamicStruct::Reader & reader) + { + for (size_t i = 0; i != columns.size(); ++i) + field_serializers[i]->readRow(*columns[i], reader.get(fields[i])); + } + + private: + void initialize(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) + { + field_serializers.reserve(data_types.size()); + fields.reserve(data_types.size()); + fields_types.reserve(data_types.size()); + nested_field_sizes.reserve(data_types.size()); + for (size_t i = 0; i != data_types.size(); ++i) + { + auto [field_name, _] = splitFieldName(names[i]); + auto field = findFieldByName(schema, field_name); + if (!field) + throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Capnproto schema doesn't contain field with name {}", field_name); + + fields.push_back(*field); + auto capnp_type = field->getType(); + fields_types.push_back(capnp_type); + nested_field_sizes.push_back(capnp_type.isStruct() ? 
capnp_type.asStruct().getFields().size() : 0); + field_serializers.push_back(createSerializer(data_types[i], names[i], capnp_type, settings)); + } + } + + std::vector> field_serializers; + std::vector fields; + std::vector nested_field_sizes; + std::vector fields_types; + }; + + std::unique_ptr createSerializer(const DataTypePtr & type, const String & name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) + { + auto [field_name, nested_name] = splitFieldName(name); + if (!nested_name.empty() && !capnp_type.isList()) + { + if (!capnp_type.isStruct()) + throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Field {} is not a struct", field_name); + + return std::make_unique(DataTypes{type}, Names{nested_name}, capnp_type.asStruct(), settings); + } + + switch (type->getTypeId()) + { + case TypeIndex::Int8: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::UInt8: + if (isBool(type)) + return createIntegerSerializer(type, name, capnp_type); + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::Int16: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::UInt16: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::Int32: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::UInt32: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::Int64: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::UInt64: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::Int128: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::UInt128: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Int256: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::UInt256: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Float32: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Float64: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Date: + return std::make_unique(type, name, capnp_type); + case TypeIndex::Date32: + return std::make_unique(type, name, capnp_type); + case TypeIndex::DateTime: + return std::make_unique(type, name, capnp_type); + case TypeIndex::DateTime64: + return std::make_unique(type, name, capnp_type); + case TypeIndex::Decimal32: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Decimal64: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Decimal128: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Decimal256: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::IPv4: + return std::make_unique(type, name, capnp_type); + case TypeIndex::IPv6: + return std::make_unique(type, name, capnp_type); + case TypeIndex::UUID: + return std::make_unique(type, name, capnp_type); + case TypeIndex::Enum8: + return std::make_unique>(type, name, capnp_type, settings.enum_comparing_mode); + case TypeIndex::Enum16: + return std::make_unique>(type, name, capnp_type, settings.enum_comparing_mode); + case TypeIndex::String: + if (capnp_type.isData()) + return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); + case TypeIndex::FixedString: + if (capnp_type.isData()) + return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); + case TypeIndex::LowCardinality: + return std::make_unique(type, name, capnp_type, settings); + case TypeIndex::Nullable: + 
return std::make_unique(type, name, capnp_type, settings); + case TypeIndex::Array: + return std::make_unique(type, name, capnp_type, settings); + case TypeIndex::Map: + return std::make_unique(type, name, capnp_type, settings); + case TypeIndex::Tuple: + return std::make_unique(type, name, capnp_type, settings); + default: + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Type {} is not supported in CapnProto format", type->getName()); + } + } +} + +class CapnProtoSerializer::Impl +{ +public: + Impl(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) + : struct_serializer(std::make_unique(data_types, names, schema, settings)) + , fields_size(schema.getFields().size()) + { + } + + void writeRow(const Columns & columns, capnp::DynamicStruct::Builder builder, size_t row_num) + { + StructBuilder struct_builder(std::move(builder), fields_size); + struct_serializer->writeRow(columns, struct_builder, row_num); + } + + void readRow(MutableColumns & columns, capnp::DynamicStruct::Reader & reader) + { + struct_serializer->readRow(columns, reader); + } + +private: + std::unique_ptr struct_serializer; + size_t fields_size; +}; + +CapnProtoSerializer::CapnProtoSerializer(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) + : serializer_impl(std::make_unique(data_types, names, schema, settings)) +{ +} + +void CapnProtoSerializer::writeRow(const Columns & columns, capnp::DynamicStruct::Builder builder, size_t row_num) +{ + serializer_impl->writeRow(columns, std::move(builder), row_num); +} + +void CapnProtoSerializer::readRow(MutableColumns & columns, capnp::DynamicStruct::Reader & reader) +{ + serializer_impl->readRow(columns, reader); +} + +CapnProtoSerializer::~CapnProtoSerializer() = default; + +} diff --git a/src/Formats/CapnProtoSerializer.h b/src/Formats/CapnProtoSerializer.h new file mode 100644 index 00000000000..efae797875b --- /dev/null +++ b/src/Formats/CapnProtoSerializer.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class CapnProtoSerializer +{ +public: + CapnProtoSerializer(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings); + + void writeRow(const Columns & columns, capnp::DynamicStruct::Builder builder, size_t row_num); + + void readRow(MutableColumns & columns, capnp::DynamicStruct::Reader & reader); + + ~CapnProtoSerializer(); + +private: + class Impl; + std::unique_ptr serializer_impl; +}; + +} diff --git a/src/Formats/CapnProtoUtils.cpp b/src/Formats/CapnProtoUtils.cpp deleted file mode 100644 index d6c032408bb..00000000000 --- a/src/Formats/CapnProtoUtils.cpp +++ /dev/null @@ -1,734 +0,0 @@ -#include - -#if USE_CAPNP - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int CANNOT_PARSE_CAPN_PROTO_SCHEMA; - extern const int THERE_IS_NO_COLUMN; - extern const int BAD_TYPE_OF_FIELD; - extern const int CAPN_PROTO_BAD_CAST; - extern const int FILE_DOESNT_EXIST; - extern const int UNKNOWN_EXCEPTION; - extern const int INCORRECT_DATA; - extern const int CAPN_PROTO_BAD_TYPE; - extern const int BAD_ARGUMENTS; -} - -std::pair splitCapnProtoFieldName(const String & name) -{ - const auto * begin = name.data(); - const auto * end = name.data() + name.size(); - const auto * it = 
find_first_symbols<'_', '.'>(begin, end); - String first = String(begin, it); - String second = it == end ? "" : String(it + 1, end); - return {first, second}; -} - -capnp::StructSchema CapnProtoSchemaParser::getMessageSchema(const FormatSchemaInfo & schema_info) -{ - capnp::ParsedSchema schema; - try - { - int fd; - KJ_SYSCALL(fd = open(schema_info.schemaDirectory().data(), O_RDONLY)); // NOLINT(bugprone-suspicious-semicolon) - auto schema_dir = kj::newDiskDirectory(kj::OsFileHandle(fd)); - schema = impl.parseFromDirectory(*schema_dir, kj::Path::parse(schema_info.schemaPath()), {}); - } - catch (const kj::Exception & e) - { - /// That's not good to determine the type of error by its description, but - /// this is the only way to do it here, because kj doesn't specify the type of error. - auto description = std::string_view(e.getDescription().cStr()); - if (description.find("No such file or directory") != String::npos || description.find("no such directory") != String::npos) - throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot open CapnProto schema, file {} doesn't exists", schema_info.absoluteSchemaPath()); - - if (description.find("Parse error") != String::npos) - throw Exception(ErrorCodes::CANNOT_PARSE_CAPN_PROTO_SCHEMA, "Cannot parse CapnProto schema {}:{}", schema_info.schemaPath(), e.getLine()); - - throw Exception(ErrorCodes::UNKNOWN_EXCEPTION, - "Unknown exception while parsing CapnProto schema: {}, schema dir and file: {}, {}", - description, schema_info.schemaDirectory(), schema_info.schemaPath()); - } - - auto message_maybe = schema.findNested(schema_info.messageName()); - auto * message_schema = kj::_::readMaybe(message_maybe); - if (!message_schema) - throw Exception(ErrorCodes::CANNOT_PARSE_CAPN_PROTO_SCHEMA, - "CapnProto schema doesn't contain message with name {}", schema_info.messageName()); - return message_schema->asStruct(); -} - -bool compareEnumNames(const String & first, const String & second, FormatSettings::EnumComparingMode mode) -{ - if (mode == FormatSettings::EnumComparingMode::BY_NAMES_CASE_INSENSITIVE) - return boost::algorithm::to_lower_copy(first) == boost::algorithm::to_lower_copy(second); - return first == second; -} - -static const std::map capnp_simple_type_names = -{ - {capnp::schema::Type::Which::BOOL, "Bool"}, - {capnp::schema::Type::Which::VOID, "Void"}, - {capnp::schema::Type::Which::INT8, "Int8"}, - {capnp::schema::Type::Which::INT16, "Int16"}, - {capnp::schema::Type::Which::INT32, "Int32"}, - {capnp::schema::Type::Which::INT64, "Int64"}, - {capnp::schema::Type::Which::UINT8, "UInt8"}, - {capnp::schema::Type::Which::UINT16, "UInt16"}, - {capnp::schema::Type::Which::UINT32, "UInt32"}, - {capnp::schema::Type::Which::UINT64, "UInt64"}, - {capnp::schema::Type::Which::FLOAT32, "Float32"}, - {capnp::schema::Type::Which::FLOAT64, "Float64"}, - {capnp::schema::Type::Which::TEXT, "Text"}, - {capnp::schema::Type::Which::DATA, "Data"}, - {capnp::schema::Type::Which::INTERFACE, "Interface"}, - {capnp::schema::Type::Which::ANY_POINTER, "AnyPointer"}, -}; - -static bool checkIfStructContainsUnnamedUnion(const capnp::StructSchema & struct_schema) -{ - return struct_schema.getFields().size() != struct_schema.getNonUnionFields().size(); -} - -static bool checkIfStructIsNamedUnion(const capnp::StructSchema & struct_schema) -{ - return struct_schema.getFields().size() == struct_schema.getUnionFields().size(); -} - -/// Get full name of type for better exception messages. 
-static String getCapnProtoFullTypeName(const capnp::Type & type) -{ - switch (type.which()) - { - case capnp::schema::Type::Which::STRUCT: - { - auto struct_schema = type.asStruct(); - - auto non_union_fields = struct_schema.getNonUnionFields(); - std::vector non_union_field_names; - for (auto nested_field : non_union_fields) - non_union_field_names.push_back(String(nested_field.getProto().getName()) + " " + getCapnProtoFullTypeName(nested_field.getType())); - - auto union_fields = struct_schema.getUnionFields(); - std::vector union_field_names; - for (auto nested_field : union_fields) - union_field_names.push_back(String(nested_field.getProto().getName()) + " " + getCapnProtoFullTypeName(nested_field.getType())); - - String union_name = "Union(" + boost::algorithm::join(union_field_names, ", ") + ")"; - /// Check if the struct is a named union. - if (non_union_field_names.empty()) - return union_name; - - String type_name = "Struct(" + boost::algorithm::join(non_union_field_names, ", "); - /// Check if the struct contains unnamed union. - if (!union_field_names.empty()) - type_name += ", " + union_name; - type_name += ")"; - return type_name; - } - case capnp::schema::Type::Which::LIST: - return "List(" + getCapnProtoFullTypeName(type.asList().getElementType()) + ")"; - case capnp::schema::Type::Which::ENUM: - { - auto enum_schema = type.asEnum(); - String enum_name = "Enum("; - auto enumerants = enum_schema.getEnumerants(); - for (unsigned i = 0; i != enumerants.size(); ++i) - { - enum_name += String(enumerants[i].getProto().getName()) + " = " + std::to_string(enumerants[i].getOrdinal()); - if (i + 1 != enumerants.size()) - enum_name += ", "; - } - enum_name += ")"; - return enum_name; - } - default: - auto it = capnp_simple_type_names.find(type.which()); - if (it == capnp_simple_type_names.end()) - throw Exception(ErrorCodes::BAD_TYPE_OF_FIELD, "Unknown CapnProto type"); - return it->second; - } -} - -template -static bool checkEnums(const capnp::Type & capnp_type, const DataTypePtr column_type, FormatSettings::EnumComparingMode mode, UInt64 max_value, String & error_message) -{ - if (!capnp_type.isEnum()) - return false; - - auto enum_schema = capnp_type.asEnum(); - bool to_lower = mode == FormatSettings::EnumComparingMode::BY_NAMES_CASE_INSENSITIVE; - const auto * enum_type = assert_cast *>(column_type.get()); - const auto & enum_values = dynamic_cast &>(*enum_type); - - auto enumerants = enum_schema.getEnumerants(); - if (mode == FormatSettings::EnumComparingMode::BY_VALUES) - { - /// In CapnProto Enum fields are numbered sequentially starting from zero. - if (enumerants.size() > max_value) - { - error_message += "Enum from CapnProto schema contains values that is out of range for Clickhouse Enum"; - return false; - } - - auto values = enum_values.getSetOfAllValues(); - std::unordered_set capn_enum_values; - for (auto enumerant : enumerants) - capn_enum_values.insert(Type(enumerant.getOrdinal())); - auto result = values == capn_enum_values; - if (!result) - error_message += "The set of values in Enum from CapnProto schema is different from the set of values in ClickHouse Enum"; - return result; - } - - auto names = enum_values.getSetOfAllNames(to_lower); - std::unordered_set capn_enum_names; - - for (auto enumerant : enumerants) - { - String name = enumerant.getProto().getName(); - capn_enum_names.insert(to_lower ? 
boost::algorithm::to_lower_copy(name) : name); - } - - auto result = names == capn_enum_names; - if (!result) - error_message += "The set of names in Enum from CapnProto schema is different from the set of names in ClickHouse Enum"; - return result; -} - -static bool checkCapnProtoType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message, const String & column_name); - -static bool checkNullableType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message, const String & column_name) -{ - if (!capnp_type.isStruct()) - return false; - - /// Check that struct is a named union of type VOID and one arbitrary type. - auto struct_schema = capnp_type.asStruct(); - if (!checkIfStructIsNamedUnion(struct_schema)) - return false; - - auto union_fields = struct_schema.getUnionFields(); - if (union_fields.size() != 2) - return false; - - auto first = union_fields[0]; - auto second = union_fields[1]; - - auto nested_type = assert_cast(data_type.get())->getNestedType(); - if (first.getType().isVoid()) - return checkCapnProtoType(second.getType(), nested_type, mode, error_message, column_name); - if (second.getType().isVoid()) - return checkCapnProtoType(first.getType(), nested_type, mode, error_message, column_name); - return false; -} - -static bool checkTupleType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message) -{ - if (!capnp_type.isStruct()) - return false; - auto struct_schema = capnp_type.asStruct(); - - if (checkIfStructIsNamedUnion(struct_schema)) - return false; - - if (checkIfStructContainsUnnamedUnion(struct_schema)) - { - error_message += "CapnProto struct contains unnamed union"; - return false; - } - - const auto * tuple_data_type = assert_cast(data_type.get()); - auto nested_types = tuple_data_type->getElements(); - if (nested_types.size() != struct_schema.getFields().size()) - { - error_message += "Tuple and Struct types have different sizes"; - return false; - } - - bool have_explicit_names = tuple_data_type->haveExplicitNames(); - const auto & nested_names = tuple_data_type->getElementNames(); - for (uint32_t i = 0; i != nested_names.size(); ++i) - { - if (have_explicit_names) - { - KJ_IF_MAYBE (field, struct_schema.findFieldByName(nested_names[i])) - { - if (!checkCapnProtoType(field->getType(), nested_types[tuple_data_type->getPositionByName(nested_names[i])], mode, error_message, nested_names[i])) - return false; - } - else - { - error_message += "CapnProto struct doesn't contain a field with name " + nested_names[i]; - return false; - } - } - else if (!checkCapnProtoType(struct_schema.getFields()[i].getType(), nested_types[tuple_data_type->getPositionByName(nested_names[i])], mode, error_message, nested_names[i])) - return false; - } - - return true; -} - -static bool checkArrayType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message, const String & column_name) -{ - if (!capnp_type.isList()) - return false; - auto list_schema = capnp_type.asList(); - auto nested_type = assert_cast(data_type.get())->getNestedType(); - - auto [field_name, nested_name] = splitCapnProtoFieldName(column_name); - if (!nested_name.empty() && list_schema.getElementType().isStruct()) - { - auto struct_schema = list_schema.getElementType().asStruct(); - KJ_IF_MAYBE(field, struct_schema.findFieldByName(nested_name)) - 
return checkCapnProtoType(field->getType(), nested_type, mode, error_message, nested_name); - - error_message += "Element type of List {} doesn't contain field with name " + nested_name; - return false; - } - - return checkCapnProtoType(list_schema.getElementType(), nested_type, mode, error_message, column_name); -} - -static bool checkMapType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message) -{ - /// We output/input Map type as follow CapnProto schema - /// - /// struct Map { - /// struct Entry { - /// key @0: Key; - /// value @1: Value; - /// } - /// entries @0 :List(Entry); - /// } - - if (!capnp_type.isStruct()) - return false; - auto struct_schema = capnp_type.asStruct(); - - if (checkIfStructContainsUnnamedUnion(struct_schema)) - { - error_message += "CapnProto struct contains unnamed union"; - return false; - } - - if (struct_schema.getFields().size() != 1) - { - error_message += "CapnProto struct that represents Map type can contain only one field"; - return false; - } - - const auto & field_type = struct_schema.getFields()[0].getType(); - if (!field_type.isList()) - { - error_message += "Field of CapnProto struct that represents Map is not a list"; - return false; - } - - auto list_element_type = field_type.asList().getElementType(); - if (!list_element_type.isStruct()) - { - error_message += "Field of CapnProto struct that represents Map is not a list of structs"; - return false; - } - - auto key_value_struct = list_element_type.asStruct(); - if (checkIfStructContainsUnnamedUnion(key_value_struct)) - { - error_message += "CapnProto struct contains unnamed union"; - return false; - } - - if (key_value_struct.getFields().size() != 2) - { - error_message += "Key-value structure for Map struct should have exactly 2 fields"; - return false; - } - - const auto & map_type = assert_cast(*data_type); - DataTypes types = {map_type.getKeyType(), map_type.getValueType()}; - Names names = {"key", "value"}; - - for (size_t i = 0; i != types.size(); ++i) - { - KJ_IF_MAYBE(field, key_value_struct.findFieldByName(names[i])) - { - if (!checkCapnProtoType(field->getType(), types[i], mode, error_message, names[i])) - return false; - } - else - { - error_message += R"(Key-value structure for Map struct should have exactly 2 fields with names "key" and "value")"; - return false; - } - } - - return true; -} - -static bool isCapnInteger(const capnp::Type & capnp_type) -{ - return capnp_type.isInt8() || capnp_type.isUInt8() || capnp_type.isInt16() || capnp_type.isUInt16() || capnp_type.isInt32() - || capnp_type.isUInt32() || capnp_type.isInt64() || capnp_type.isUInt64(); -} - -static bool checkCapnProtoType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message, const String & column_name) -{ - switch (data_type->getTypeId()) - { - case TypeIndex::UInt8: - return capnp_type.isBool() || isCapnInteger(capnp_type); - case TypeIndex::Int8: [[fallthrough]]; - case TypeIndex::Int16: [[fallthrough]]; - case TypeIndex::UInt16: [[fallthrough]]; - case TypeIndex::Int32: [[fallthrough]]; - case TypeIndex::UInt32: [[fallthrough]]; - case TypeIndex::Int64: [[fallthrough]]; - case TypeIndex::UInt64: - /// Allow integer conversions durin input/output. 
- return isCapnInteger(capnp_type); - case TypeIndex::Date: - return capnp_type.isUInt16(); - case TypeIndex::DateTime: [[fallthrough]]; - case TypeIndex::IPv4: - return capnp_type.isUInt32(); - case TypeIndex::Date32: [[fallthrough]]; - case TypeIndex::Decimal32: - return capnp_type.isInt32() || capnp_type.isUInt32(); - case TypeIndex::DateTime64: [[fallthrough]]; - case TypeIndex::Decimal64: - return capnp_type.isInt64() || capnp_type.isUInt64(); - case TypeIndex::Float32:[[fallthrough]]; - case TypeIndex::Float64: - /// Allow converting between Float32 and isFloat64 - return capnp_type.isFloat32() || capnp_type.isFloat64(); - case TypeIndex::Enum8: - return checkEnums(capnp_type, data_type, mode, INT8_MAX, error_message); - case TypeIndex::Enum16: - return checkEnums(capnp_type, data_type, mode, INT16_MAX, error_message); - case TypeIndex::Int128: [[fallthrough]]; - case TypeIndex::UInt128: [[fallthrough]]; - case TypeIndex::Int256: [[fallthrough]]; - case TypeIndex::UInt256: [[fallthrough]]; - case TypeIndex::Decimal128: [[fallthrough]]; - case TypeIndex::Decimal256: - return capnp_type.isData(); - case TypeIndex::Tuple: - return checkTupleType(capnp_type, data_type, mode, error_message); - case TypeIndex::Nullable: - { - auto result = checkNullableType(capnp_type, data_type, mode, error_message, column_name); - if (!result) - error_message += "Nullable can be represented only as a named union of type Void and nested type"; - return result; - } - case TypeIndex::Array: - return checkArrayType(capnp_type, data_type, mode, error_message, column_name); - case TypeIndex::LowCardinality: - return checkCapnProtoType(capnp_type, assert_cast(data_type.get())->getDictionaryType(), mode, error_message, column_name); - case TypeIndex::FixedString: [[fallthrough]]; - case TypeIndex::IPv6: [[fallthrough]]; - case TypeIndex::String: - return capnp_type.isText() || capnp_type.isData(); - case TypeIndex::Map: - return checkMapType(capnp_type, data_type, mode, error_message); - default: - return false; - } -} - -capnp::DynamicValue::Reader getReaderByColumnName(const capnp::DynamicStruct::Reader & struct_reader, const String & name) -{ - auto [field_name, nested_name] = splitCapnProtoFieldName(name); - KJ_IF_MAYBE(field, struct_reader.getSchema().findFieldByName(field_name)) - { - capnp::DynamicValue::Reader field_reader; - try - { - field_reader = struct_reader.get(*field); - } - catch (const kj::Exception & e) - { - throw Exception(ErrorCodes::INCORRECT_DATA, - "Cannot extract field value from struct by provided schema, error: " - "{} Perhaps the data was generated by another schema", String(e.getDescription().cStr())); - } - - if (nested_name.empty()) - return field_reader; - - /// Support reading Nested as List of Structs. 
- if (field_reader.getType() == capnp::DynamicValue::LIST) - { - auto list_schema = field->getType().asList(); - if (!list_schema.getElementType().isStruct()) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Element type of List {} is not a struct", field_name); - - auto struct_schema = list_schema.getElementType().asStruct(); - KJ_IF_MAYBE(nested_field, struct_schema.findFieldByName(nested_name)) - return field_reader; - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Element type of List {} doesn't contain field with name \"{}\"", field_name, nested_name); - } - - if (field_reader.getType() != capnp::DynamicValue::STRUCT) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Field {} is not a struct", field_name); - - return getReaderByColumnName(field_reader.as(), nested_name); - } - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Capnproto struct doesn't contain field with name {}", field_name); -} - -std::pair getStructBuilderAndFieldByColumnName(capnp::DynamicStruct::Builder struct_builder, const String & name) -{ - auto [field_name, nested_name] = splitCapnProtoFieldName(name); - KJ_IF_MAYBE(field, struct_builder.getSchema().findFieldByName(field_name)) - { - if (nested_name.empty()) - return {struct_builder, *field}; - - auto field_builder = struct_builder.get(*field); - - /// Support reading Nested as List of Structs. - if (field_builder.getType() == capnp::DynamicValue::LIST) - { - auto list_schema = field->getType().asList(); - if (!list_schema.getElementType().isStruct()) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Element type of List {} is not a struct", field_name); - - auto struct_schema = list_schema.getElementType().asStruct(); - KJ_IF_MAYBE(nested_field, struct_schema.findFieldByName(nested_name)) - return {struct_builder, *field}; - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Element type of List {} doesn't contain field with name \"{}\"", field_name, nested_name); - } - - if (field_builder.getType() != capnp::DynamicValue::STRUCT) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Field {} is not a struct", field_name); - - return getStructBuilderAndFieldByColumnName(field_builder.as(), nested_name); - } - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Capnproto struct doesn't contain field with name {}", field_name); -} - -static std::pair getFieldByName(const capnp::StructSchema & schema, const String & name) -{ - auto [field_name, nested_name] = splitCapnProtoFieldName(name); - KJ_IF_MAYBE(field, schema.findFieldByName(field_name)) - { - if (nested_name.empty()) - return {*field, name}; - - /// Support reading Nested as List of Structs. 
- if (field->getType().isList()) - { - auto list_schema = field->getType().asList(); - if (!list_schema.getElementType().isStruct()) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Element type of List {} is not a struct", field_name); - - auto struct_schema = list_schema.getElementType().asStruct(); - KJ_IF_MAYBE(nested_field, struct_schema.findFieldByName(nested_name)) - return {*field, name}; - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Element type of List {} doesn't contain field with name \"{}\"", field_name, nested_name); - } - - if (!field->getType().isStruct()) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Field {} is not a struct", field_name); - - return getFieldByName(field->getType().asStruct(), nested_name); - } - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Capnproto schema doesn't contain field with name {}", field_name); -} - -void checkCapnProtoSchemaStructure(const capnp::StructSchema & schema, const Block & header, FormatSettings::EnumComparingMode mode) -{ - /// Firstly check that struct doesn't contain unnamed union, because we don't support it. - if (checkIfStructContainsUnnamedUnion(schema)) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Schema contains unnamed union that is not supported"); - auto names_and_types = header.getNamesAndTypesList(); - String additional_error_message; - for (auto & [name, type] : names_and_types) - { - auto [field, field_name] = getFieldByName(schema, name); - if (!checkCapnProtoType(field.getType(), type, mode, additional_error_message, field_name)) - { - auto e = Exception( - ErrorCodes::CAPN_PROTO_BAD_CAST, - "Cannot convert ClickHouse type {} to CapnProto type {}", - type->getName(), - getCapnProtoFullTypeName(field.getType())); - if (!additional_error_message.empty()) - e.addMessage(additional_error_message); - throw std::move(e); - } - } -} - -template -static DataTypePtr getEnumDataTypeFromEnumerants(const capnp::EnumSchema::EnumerantList & enumerants) -{ - std::vector> values; - for (auto enumerant : enumerants) - values.emplace_back(enumerant.getProto().getName(), ValueType(enumerant.getOrdinal())); - return std::make_shared>(std::move(values)); -} - -static DataTypePtr getEnumDataTypeFromEnumSchema(const capnp::EnumSchema & enum_schema) -{ - auto enumerants = enum_schema.getEnumerants(); - if (enumerants.size() < 128) - return getEnumDataTypeFromEnumerants(enumerants); - if (enumerants.size() < 32768) - return getEnumDataTypeFromEnumerants(enumerants); - - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "ClickHouse supports only 8 and 16-bit Enums"); -} - -static DataTypePtr getDataTypeFromCapnProtoType(const capnp::Type & capnp_type, bool skip_unsupported_fields) -{ - switch (capnp_type.which()) - { - case capnp::schema::Type::INT8: - return std::make_shared(); - case capnp::schema::Type::INT16: - return std::make_shared(); - case capnp::schema::Type::INT32: - return std::make_shared(); - case capnp::schema::Type::INT64: - return std::make_shared(); - case capnp::schema::Type::BOOL: [[fallthrough]]; - case capnp::schema::Type::UINT8: - return std::make_shared(); - case capnp::schema::Type::UINT16: - return std::make_shared(); - case capnp::schema::Type::UINT32: - return std::make_shared(); - case capnp::schema::Type::UINT64: - return std::make_shared(); - case capnp::schema::Type::FLOAT32: - return std::make_shared(); - case capnp::schema::Type::FLOAT64: - return std::make_shared(); - case capnp::schema::Type::DATA: [[fallthrough]]; - case capnp::schema::Type::TEXT: - return std::make_shared(); - 
case capnp::schema::Type::ENUM: - return getEnumDataTypeFromEnumSchema(capnp_type.asEnum()); - case capnp::schema::Type::LIST: - { - auto list_schema = capnp_type.asList(); - auto nested_type = getDataTypeFromCapnProtoType(list_schema.getElementType(), skip_unsupported_fields); - if (!nested_type) - return nullptr; - return std::make_shared(nested_type); - } - case capnp::schema::Type::STRUCT: - { - auto struct_schema = capnp_type.asStruct(); - - - if (struct_schema.getFields().size() == 0) - { - if (skip_unsupported_fields) - return nullptr; - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Empty messages are not supported"); - } - - /// Check if it can be Nullable. - if (checkIfStructIsNamedUnion(struct_schema)) - { - auto fields = struct_schema.getUnionFields(); - if (fields.size() != 2 || (!fields[0].getType().isVoid() && !fields[1].getType().isVoid())) - { - if (skip_unsupported_fields) - return nullptr; - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unions are not supported"); - } - auto value_type = fields[0].getType().isVoid() ? fields[1].getType() : fields[0].getType(); - if (value_type.isStruct() || value_type.isList()) - { - if (skip_unsupported_fields) - return nullptr; - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Tuples and Lists cannot be inside Nullable"); - } - - auto nested_type = getDataTypeFromCapnProtoType(value_type, skip_unsupported_fields); - if (!nested_type) - return nullptr; - return std::make_shared(nested_type); - } - - if (checkIfStructContainsUnnamedUnion(struct_schema)) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unnamed union is not supported"); - - /// Treat Struct as Tuple. - DataTypes nested_types; - Names nested_names; - for (auto field : struct_schema.getNonUnionFields()) - { - auto nested_type = getDataTypeFromCapnProtoType(field.getType(), skip_unsupported_fields); - if (!nested_type) - continue; - nested_names.push_back(field.getProto().getName()); - nested_types.push_back(nested_type); - } - if (nested_types.empty()) - return nullptr; - return std::make_shared(std::move(nested_types), std::move(nested_names)); - } - default: - { - if (skip_unsupported_fields) - return nullptr; - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unsupported CapnProtoType: {}", getCapnProtoFullTypeName(capnp_type)); - } - } -} - -NamesAndTypesList capnProtoSchemaToCHSchema(const capnp::StructSchema & schema, bool skip_unsupported_fields) -{ - if (checkIfStructContainsUnnamedUnion(schema)) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unnamed union is not supported"); - - NamesAndTypesList names_and_types; - for (auto field : schema.getNonUnionFields()) - { - auto name = field.getProto().getName(); - auto type = getDataTypeFromCapnProtoType(field.getType(), skip_unsupported_fields); - if (type) - names_and_types.emplace_back(name, type); - } - if (names_and_types.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Cannot convert CapnProto schema to ClickHouse table schema, all fields have unsupported types"); - - return names_and_types; -} - -} - -#endif diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 3a2e818d540..384c6a725dc 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -325,16 +325,16 @@ struct FormatSettings /// For capnProto format we should determine how to /// compare ClickHouse Enum and Enum from schema. - enum class EnumComparingMode + enum class CapnProtoEnumComparingMode { BY_NAMES, // Names in enums should be the same, values can be different. 
BY_NAMES_CASE_INSENSITIVE, // Case-insensitive name comparison. BY_VALUES, // Values should be the same, names can be different. }; - struct + struct CapnProto { - EnumComparingMode enum_comparing_mode = EnumComparingMode::BY_VALUES; + CapnProtoEnumComparingMode enum_comparing_mode = CapnProtoEnumComparingMode::BY_VALUES; bool skip_fields_with_unsupported_types_in_schema_inference = false; } capn_proto; diff --git a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp index 2f84e9bde3c..e686ae86997 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp @@ -9,23 +9,6 @@ #include #include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - namespace DB { @@ -35,16 +18,14 @@ namespace ErrorCodes extern const int INCORRECT_DATA; } -CapnProtoRowInputFormat::CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params_, const FormatSchemaInfo & info, const FormatSettings & format_settings_) - : IRowInputFormat(std::move(header), in_, std::move(params_)) +CapnProtoRowInputFormat::CapnProtoRowInputFormat(ReadBuffer & in_, Block header_, Params params_, const FormatSchemaInfo & info, const FormatSettings & format_settings) + : IRowInputFormat(std::move(header_), in_, std::move(params_)) , parser(std::make_shared()) - , format_settings(format_settings_) - , column_types(getPort().getHeader().getDataTypes()) - , column_names(getPort().getHeader().getNames()) { // Parse the schema and fetch the root object - root = parser->getMessageSchema(info); - checkCapnProtoSchemaStructure(root, getPort().getHeader(), format_settings.capn_proto.enum_comparing_mode); + schema = parser->getMessageSchema(info); + const auto & header = getPort().getHeader(); + serializer = std::make_unique(header.getDataTypes(), header.getNames(), schema, format_settings.capn_proto); } kj::Array CapnProtoRowInputFormat::readMessage() @@ -82,213 +63,6 @@ kj::Array CapnProtoRowInputFormat::readMessage() return msg; } -static void insertInteger(IColumn & column, const DataTypePtr & column_type, UInt64 value) -{ - switch (column_type->getTypeId()) - { - case TypeIndex::Int8: - assert_cast(column).insertValue(value); - break; - case TypeIndex::UInt8: - assert_cast(column).insertValue(value); - break; - case TypeIndex::Int16: - assert_cast(column).insertValue(value); - break; - case TypeIndex::Date: [[fallthrough]]; - case TypeIndex::UInt16: - assert_cast(column).insertValue(value); - break; - case TypeIndex::Int32: - assert_cast(column).insertValue(static_cast(value)); - break; - case TypeIndex::DateTime: [[fallthrough]]; - case TypeIndex::UInt32: - assert_cast(column).insertValue(static_cast(value)); - break; - case TypeIndex::IPv4: - assert_cast(column).insertValue(IPv4(static_cast(value))); - break; - case TypeIndex::Int64: - assert_cast(column).insertValue(value); - break; - case TypeIndex::UInt64: - assert_cast(column).insertValue(value); - break; - case TypeIndex::DateTime64: - assert_cast &>(column).insertValue(value); - break; - case TypeIndex::Decimal32: - assert_cast &>(column).insertValue(static_cast(value)); - break; - case TypeIndex::Decimal64: - assert_cast &>(column).insertValue(value); - break; - default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Column type {} cannot be parsed from integer", column_type->getName()); - } -} - -static void insertFloat(IColumn & column, const 
DataTypePtr & column_type, Float64 value) -{ - switch (column_type->getTypeId()) - { - case TypeIndex::Float32: - assert_cast(column).insertValue(static_cast(value)); - break; - case TypeIndex::Float64: - assert_cast(column).insertValue(value); - break; - default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Column type is not a float."); - } -} - -template -static void insertData(IColumn & column, const DataTypePtr & column_type, Value value) -{ - if (column_type->haveMaximumSizeOfValue() && value.size() != column_type->getSizeOfValueInMemory()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", column_type->getName(), value.size()); - - column.insertData(reinterpret_cast(value.begin()), value.size()); -} - -template -static void insertEnum(IColumn & column, const DataTypePtr & column_type, const capnp::DynamicEnum & enum_value, FormatSettings::EnumComparingMode enum_comparing_mode) -{ - auto enumerant = *kj::_::readMaybe(enum_value.getEnumerant()); - auto enum_type = assert_cast *>(column_type.get()); - DataTypePtr nested_type = std::make_shared>(); - switch (enum_comparing_mode) - { - case FormatSettings::EnumComparingMode::BY_VALUES: - insertInteger(column, nested_type, Int64(enumerant.getOrdinal())); - return; - case FormatSettings::EnumComparingMode::BY_NAMES: - insertInteger(column, nested_type, Int64(enum_type->getValue(String(enumerant.getProto().getName())))); - return; - case FormatSettings::EnumComparingMode::BY_NAMES_CASE_INSENSITIVE: - { - /// Find the same enum name case insensitive. - String enum_name = enumerant.getProto().getName(); - for (auto & name : enum_type->getAllRegisteredNames()) - { - if (compareEnumNames(name, enum_name, enum_comparing_mode)) - { - insertInteger(column, nested_type, Int64(enum_type->getValue(name))); - break; - } - } - } - } -} - -static void insertValue(IColumn & column, const DataTypePtr & column_type, const String & column_name, const capnp::DynamicValue::Reader & value, FormatSettings::EnumComparingMode enum_comparing_mode) -{ - if (column_type->lowCardinality()) - { - auto & lc_column = assert_cast(column); - auto tmp_column = lc_column.getDictionary().getNestedColumn()->cloneEmpty(); - auto dict_type = assert_cast(column_type.get())->getDictionaryType(); - insertValue(*tmp_column, dict_type, column_name, value, enum_comparing_mode); - lc_column.insertFromFullColumn(*tmp_column, 0); - return; - } - - switch (value.getType()) - { - case capnp::DynamicValue::Type::INT: - insertInteger(column, column_type, value.as()); - break; - case capnp::DynamicValue::Type::UINT: - insertInteger(column, column_type, value.as()); - break; - case capnp::DynamicValue::Type::FLOAT: - insertFloat(column, column_type, value.as()); - break; - case capnp::DynamicValue::Type::BOOL: - insertInteger(column, column_type, UInt64(value.as())); - break; - case capnp::DynamicValue::Type::DATA: - insertData(column, column_type, value.as()); - break; - case capnp::DynamicValue::Type::TEXT: - insertData(column, column_type, value.as()); - break; - case capnp::DynamicValue::Type::ENUM: - if (column_type->getTypeId() == TypeIndex::Enum8) - insertEnum(column, column_type, value.as(), enum_comparing_mode); - else - insertEnum(column, column_type, value.as(), enum_comparing_mode); - break; - case capnp::DynamicValue::LIST: - { - auto list_value = value.as(); - auto & column_array = assert_cast(column); - auto & offsets = column_array.getOffsets(); - offsets.push_back(offsets.back() + list_value.size()); - - auto & nested_column = 
column_array.getData(); - auto nested_type = assert_cast(column_type.get())->getNestedType(); - for (const auto & nested_value : list_value) - insertValue(nested_column, nested_type, column_name, nested_value, enum_comparing_mode); - break; - } - case capnp::DynamicValue::Type::STRUCT: - { - auto struct_value = value.as(); - if (column_type->isNullable()) - { - auto & nullable_column = assert_cast(column); - auto field = *kj::_::readMaybe(struct_value.which()); - if (field.getType().isVoid()) - nullable_column.insertDefault(); - else - { - auto & nested_column = nullable_column.getNestedColumn(); - auto nested_type = assert_cast(column_type.get())->getNestedType(); - auto nested_value = struct_value.get(field); - insertValue(nested_column, nested_type, column_name, nested_value, enum_comparing_mode); - nullable_column.getNullMapData().push_back(0); - } - } - else if (isTuple(column_type)) - { - auto & tuple_column = assert_cast(column); - const auto * tuple_type = assert_cast(column_type.get()); - bool have_explicit_names = tuple_type->haveExplicitNames(); - auto struct_schema = struct_value.getSchema(); - for (uint32_t i = 0; i != tuple_column.tupleSize(); ++i) - insertValue( - tuple_column.getColumn(i), - tuple_type->getElements()[i], - tuple_type->getElementNames()[i], - struct_value.get(have_explicit_names ? struct_schema.getFieldByName(tuple_type->getElementNames()[i]) : struct_schema.getFields()[i]), - enum_comparing_mode); - } - else if (isMap(column_type)) - { - const auto & map_type = assert_cast(*column_type); - DataTypes key_value_types = {map_type.getKeyType(), map_type.getValueType()}; - Names key_value_names = {"key", "value"}; - auto entries_type = std::make_shared(std::make_shared(key_value_types, key_value_names)); - auto & entries_column = assert_cast(column).getNestedColumn(); - auto entries_field = struct_value.getSchema().getFields()[0]; - insertValue(entries_column, entries_type, column_name, struct_value.get(entries_field), enum_comparing_mode); - } - else - { - /// It can be nested column from Nested type. 
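// Editorial aside: the LIST handling above mirrors how ClickHouse lays out Array columns —
// a flat nested data column plus a cumulative offsets column — which is why reading a list is
// "push offsets.back() + list size, then recurse into the nested column". A minimal standalone
// sketch of that bookkeeping (illustrative names only, not the actual ColumnArray API):
#include <cstdint>
#include <cstdio>
#include <vector>

struct ArrayColumnSketch
{
    std::vector<int64_t> data;      // flattened elements of all rows
    std::vector<uint64_t> offsets;  // offsets[i] = end position of row i inside `data`

    void appendRow(const std::vector<int64_t> & row)
    {
        data.insert(data.end(), row.begin(), row.end());
        uint64_t prev = offsets.empty() ? 0 : offsets.back();
        offsets.push_back(prev + row.size());  // same pattern as offsets.push_back(offsets.back() + list_value.size())
    }
};

int main()
{
    ArrayColumnSketch col;
    col.appendRow({1, 2, 3});
    col.appendRow({});   // an empty list still contributes an offsets entry
    col.appendRow({42});
    for (size_t i = 0; i != col.offsets.size(); ++i)
    {
        uint64_t begin = (i == 0) ? 0 : col.offsets[i - 1];
        std::printf("row %zu has %llu element(s)\n", i, static_cast<unsigned long long>(col.offsets[i] - begin));
    }
    return 0;
}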
- auto [field_name, nested_name] = splitCapnProtoFieldName(column_name); - insertValue(column, column_type, nested_name, struct_value.get(nested_name), enum_comparing_mode); - } - break; - } - default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected CapnProto value type."); - } -} - bool CapnProtoRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &) { if (in->eof()) @@ -298,12 +72,8 @@ bool CapnProtoRowInputFormat::readRow(MutableColumns & columns, RowReadExtension { auto array = readMessage(); capnp::FlatArrayMessageReader msg(array); - auto root_reader = msg.getRoot(root); - for (size_t i = 0; i != columns.size(); ++i) - { - auto value = getReaderByColumnName(root_reader, column_names[i]); - insertValue(*columns[i], column_types[i], column_names[i], value, format_settings.capn_proto.enum_comparing_mode); - } + auto root_reader = msg.getRoot(schema); + serializer->readRow(columns, root_reader); } catch (const kj::Exception & e) { @@ -343,7 +113,14 @@ void registerInputFormatCapnProto(FormatFactory & factory) factory.markFormatSupportsSubsetOfColumns("CapnProto"); factory.registerFileExtension("capnp", "CapnProto"); factory.registerAdditionalInfoForSchemaCacheGetter( - "CapnProto", [](const FormatSettings & settings) { return fmt::format("format_schema={}", settings.schema.format_schema); }); + "CapnProto", + [](const FormatSettings & settings) + { + return fmt::format( + "format_schema={}, skip_fields_with_unsupported_types_in_schema_inference={}", + settings.schema.format_schema, + settings.capn_proto.skip_fields_with_unsupported_types_in_schema_inference); + }); } void registerCapnProtoSchemaReader(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h index cf23f22b643..06e94da123f 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h +++ b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h @@ -4,7 +4,8 @@ #if USE_CAPNP #include -#include +#include +#include #include #include @@ -33,10 +34,8 @@ private: kj::Array readMessage(); std::shared_ptr parser; - capnp::StructSchema root; - const FormatSettings format_settings; - DataTypes column_types; - Names column_names; + capnp::StructSchema schema; + std::unique_ptr serializer; }; class CapnProtoSchemaReader : public IExternalSchemaReader diff --git a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp index 0225680b396..7dd18be27f4 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp @@ -1,28 +1,13 @@ #include #if USE_CAPNP -#include +#include #include +#include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - namespace DB { @@ -45,252 +30,25 @@ CapnProtoRowOutputFormat::CapnProtoRowOutputFormat( WriteBuffer & out_, const Block & header_, const FormatSchemaInfo & info, - const FormatSettings & format_settings_) - : IRowOutputFormat(header_, out_), column_names(header_.getNames()), column_types(header_.getDataTypes()), output_stream(std::make_unique(out_)), format_settings(format_settings_) + const FormatSettings & format_settings) + : IRowOutputFormat(header_, out_) + , column_names(header_.getNames()) + , column_types(header_.getDataTypes()) + , output_stream(std::make_unique(out_)) { schema = schema_parser.getMessageSchema(info); - 
checkCapnProtoSchemaStructure(schema, getPort(PortKind::Main).getHeader(), format_settings.capn_proto.enum_comparing_mode); -} - -template -static capnp::DynamicEnum getDynamicEnum( - const ColumnPtr & column, - const DataTypePtr & data_type, - size_t row_num, - const capnp::EnumSchema & enum_schema, - FormatSettings::EnumComparingMode mode) -{ - const auto * enum_data_type = assert_cast *>(data_type.get()); - EnumValue enum_value = column->getInt(row_num); - if (mode == FormatSettings::EnumComparingMode::BY_VALUES) - return capnp::DynamicEnum(enum_schema, enum_value); - - auto enum_name = enum_data_type->getNameForValue(enum_value); - for (const auto enumerant : enum_schema.getEnumerants()) - { - if (compareEnumNames(String(enum_name), enumerant.getProto().getName(), mode)) - return capnp::DynamicEnum(enumerant); - } - - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot convert CLickHouse Enum value to CapnProto Enum"); -} - -static capnp::DynamicValue::Builder initStructFieldBuilder(const ColumnPtr & column, size_t row_num, capnp::DynamicStruct::Builder & struct_builder, capnp::StructSchema::Field field) -{ - if (const auto * array_column = checkAndGetColumn(*column)) - { - size_t size = array_column->getOffsets()[row_num] - array_column->getOffsets()[row_num - 1]; - return struct_builder.init(field, static_cast(size)); - } - - if (field.getType().isStruct()) - return struct_builder.init(field); - - return struct_builder.get(field); -} - -static std::optional convertToDynamicValue( - const ColumnPtr & column, - const DataTypePtr & data_type, - size_t row_num, - const String & column_name, - capnp::DynamicValue::Builder builder, - FormatSettings::EnumComparingMode enum_comparing_mode, - std::vector> & temporary_text_data_storage) -{ - /// Here we don't do any types validation, because we did it in CapnProtoRowOutputFormat constructor. - - if (data_type->lowCardinality()) - { - const auto * lc_column = assert_cast(column.get()); - const auto & dict_type = assert_cast(data_type.get())->getDictionaryType(); - size_t index = lc_column->getIndexAt(row_num); - return convertToDynamicValue(lc_column->getDictionary().getNestedColumn(), dict_type, index, column_name, builder, enum_comparing_mode, temporary_text_data_storage); - } - - switch (builder.getType()) - { - case capnp::DynamicValue::Type::INT: - return capnp::DynamicValue::Reader(column->getInt(row_num)); - case capnp::DynamicValue::Type::UINT: - { - /// IPv4 column doesn't support getUInt method. 
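// Editorial aside: getDynamicEnum() above applies the enum_comparing_mode setting when
// translating a ClickHouse Enum element to a Cap'n Proto enumerant. A standalone sketch of
// what the three modes (BY_VALUES, BY_NAMES, BY_NAMES_CASE_INSENSITIVE) mean; the type and
// function names below are illustrative, not the ClickHouse/Cap'n Proto API:
#include <cctype>
#include <cstdio>
#include <string>

enum class EnumMatchMode { ByValues, ByNames, ByNamesCaseInsensitive };

static bool equalsCaseInsensitive(const std::string & a, const std::string & b)
{
    if (a.size() != b.size())
        return false;
    for (size_t i = 0; i != a.size(); ++i)
        if (std::tolower(static_cast<unsigned char>(a[i])) != std::tolower(static_cast<unsigned char>(b[i])))
            return false;
    return true;
}

/// Decide whether a ClickHouse enum element matches a Cap'n Proto enumerant under a given mode.
static bool enumElementsMatch(
    const std::string & ch_name, int64_t ch_value,
    const std::string & capnp_name, uint16_t capnp_ordinal,
    EnumMatchMode mode)
{
    switch (mode)
    {
        case EnumMatchMode::ByValues:                // values must agree, names may differ
            return ch_value == static_cast<int64_t>(capnp_ordinal);
        case EnumMatchMode::ByNames:                 // names must agree exactly, values may differ
            return ch_name == capnp_name;
        case EnumMatchMode::ByNamesCaseInsensitive:  // names must agree up to character case
            return equalsCaseInsensitive(ch_name, capnp_name);
    }
    return false;
}

int main()
{
    // ClickHouse side: Enum8('Hello' = 1); Cap'n Proto side: enumerant "hello" with ordinal 0.
    std::printf("%d\n", enumElementsMatch("Hello", 1, "hello", 0, EnumMatchMode::ByNamesCaseInsensitive)); // 1
    std::printf("%d\n", enumElementsMatch("Hello", 1, "hello", 0, EnumMatchMode::ByNames));                // 0
    std::printf("%d\n", enumElementsMatch("Hello", 1, "hello", 0, EnumMatchMode::ByValues));               // 0
    return 0;
}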
- if (isIPv4(data_type)) - return capnp::DynamicValue::Reader(assert_cast(column.get())->getElement(row_num)); - return capnp::DynamicValue::Reader(column->getUInt(row_num)); - } - case capnp::DynamicValue::Type::BOOL: - return capnp::DynamicValue::Reader(column->getBool(row_num)); - case capnp::DynamicValue::Type::FLOAT: - return capnp::DynamicValue::Reader(column->getFloat64(row_num)); - case capnp::DynamicValue::Type::ENUM: - { - auto enum_schema = builder.as().getSchema(); - if (data_type->getTypeId() == TypeIndex::Enum8) - return capnp::DynamicValue::Reader( - getDynamicEnum(column, data_type, row_num, enum_schema, enum_comparing_mode)); - return capnp::DynamicValue::Reader( - getDynamicEnum(column, data_type, row_num, enum_schema, enum_comparing_mode)); - } - case capnp::DynamicValue::Type::DATA: - { - auto data = column->getDataAt(row_num); - return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); - } - case capnp::DynamicValue::Type::TEXT: - { - /// In TEXT type data should be null-terminated, but ClickHouse String data could not be. - /// To make data null-terminated we should copy it to temporary String object, but - /// capnp::Text::Reader works only with pointer to the data and it's size, so we should - /// guarantee that new String object life time is longer than capnp::Text::Reader life time. - /// To do this we store new String object in a temporary storage, passed in this function - /// by reference. We use unique_ptr instead of just String to avoid pointers - /// invalidation on vector reallocation. - temporary_text_data_storage.push_back(std::make_unique(column->getDataAt(row_num))); - auto & data = temporary_text_data_storage.back(); - return capnp::DynamicValue::Reader(capnp::Text::Reader(data->data(), data->size())); - } - case capnp::DynamicValue::Type::STRUCT: - { - auto struct_builder = builder.as(); - auto nested_struct_schema = struct_builder.getSchema(); - /// Struct can represent Tuple, Nullable (named union with two fields) or single column when it contains one nested column. - if (data_type->isNullable()) - { - const auto * nullable_type = assert_cast(data_type.get()); - const auto * nullable_column = assert_cast(column.get()); - auto fields = nested_struct_schema.getUnionFields(); - if (nullable_column->isNullAt(row_num)) - { - auto null_field = fields[0].getType().isVoid() ? fields[0] : fields[1]; - struct_builder.set(null_field, capnp::Void()); - } - else - { - auto value_field = fields[0].getType().isVoid() ? fields[1] : fields[0]; - struct_builder.clear(value_field); - const auto & nested_column = nullable_column->getNestedColumnPtr(); - auto value_builder = initStructFieldBuilder(nested_column, row_num, struct_builder, value_field); - auto value = convertToDynamicValue(nested_column, nullable_type->getNestedType(), row_num, column_name, value_builder, enum_comparing_mode, temporary_text_data_storage); - if (value) - struct_builder.set(value_field, *value); - } - } - else if (isTuple(data_type)) - { - const auto * tuple_data_type = assert_cast(data_type.get()); - const auto & nested_types = tuple_data_type->getElements(); - const auto & nested_names = tuple_data_type->getElementNames(); - const auto & nested_columns = assert_cast(column.get())->getColumns(); - bool have_explicit_names = tuple_data_type->haveExplicitNames(); - for (uint32_t i = 0; i != nested_names.size(); ++i) - { - capnp::StructSchema::Field nested_field = have_explicit_names ? 
nested_struct_schema.getFieldByName(nested_names[i]) : nested_struct_schema.getFields()[i]; - auto field_builder = initStructFieldBuilder(nested_columns[i], row_num, struct_builder, nested_field); - auto value = convertToDynamicValue(nested_columns[i], nested_types[i], row_num, nested_names[i], field_builder, enum_comparing_mode, temporary_text_data_storage); - if (value) - struct_builder.set(nested_field, *value); - } - } - else if (isMap(data_type)) - { - /// We output Map type as follow CapnProto schema - /// - /// struct Map { - /// struct Entry { - /// key @0: Key; - /// value @1: Value; - /// } - /// entries @0 :List(Entry); - /// } - /// - /// And we don't need to check that struct have this form here because we checked it before. - const auto & map_type = assert_cast(*data_type); - DataTypes key_value_types = {map_type.getKeyType(), map_type.getValueType()}; - Names key_value_names = {"key", "value"}; - auto entries_type = std::make_shared(std::make_shared(key_value_types, key_value_names)); - - /// Nested column in Map is actually Array(Tuple), so we can output it according to "entries" field schema. - const auto & entries_column = assert_cast(column.get())->getNestedColumnPtr(); - - auto entries_field = nested_struct_schema.getFields()[0]; - auto field_builder = initStructFieldBuilder(entries_column, row_num, struct_builder, entries_field); - auto entries_value = convertToDynamicValue(entries_column, entries_type, row_num, column_name, field_builder, enum_comparing_mode, temporary_text_data_storage); - if (entries_value) - struct_builder.set(entries_field, *entries_value); - } - else - { - /// It can be nested column from Nested type. - auto [field_name, nested_name] = splitCapnProtoFieldName(column_name); - auto nested_field = nested_struct_schema.getFieldByName(nested_name); - auto field_builder = initStructFieldBuilder(column, row_num, struct_builder, nested_field); - auto value = convertToDynamicValue(column, data_type, row_num, nested_name, field_builder, enum_comparing_mode, temporary_text_data_storage); - if (value) - struct_builder.set(nested_field, *value); - } - return std::nullopt; - } - case capnp::DynamicValue::Type::LIST: - { - auto list_builder = builder.as(); - const auto * array_column = assert_cast(column.get()); - const auto & nested_column = array_column->getDataPtr(); - const auto & nested_type = assert_cast(data_type.get())->getNestedType(); - const auto & offsets = array_column->getOffsets(); - auto offset = offsets[row_num - 1]; - size_t size = offsets[row_num] - offset; - - const auto * nested_array_column = checkAndGetColumn(*nested_column); - for (unsigned i = 0; i != static_cast(size); ++i) - { - capnp::DynamicValue::Builder value_builder; - /// For nested arrays we need to initialize nested list builder. 
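// Editorial aside: the TEXT case earlier in this function keeps each copied value in a
// std::unique_ptr<String> stored in a side vector, because capnp::Text::Reader only borrows a
// pointer and size while the source column bytes are neither null-terminated nor guaranteed to
// outlive the reader. A standalone sketch of that ownership pattern (illustrative names, not
// the ClickHouse API): the heap buffers are owned by unique_ptr, so handed-out views stay valid
// even when the vector reallocates.
#include <cstdio>
#include <memory>
#include <string>
#include <string_view>
#include <vector>

class TextStash
{
public:
    /// Copy the raw bytes into an owned, null-terminated std::string and return a stable view.
    std::string_view hold(std::string_view raw)
    {
        storage.push_back(std::make_unique<std::string>(raw));
        return *storage.back();
    }

private:
    std::vector<std::unique_ptr<std::string>> storage;
};

int main()
{
    TextStash stash;
    std::vector<std::string_view> views;
    for (int i = 0; i < 1000; ++i)                       // enough pushes to force reallocations
        views.push_back(stash.hold("value_" + std::to_string(i)));
    std::printf("%s ... %s\n", views.front().data(), views.back().data());  // views are still valid
    return 0;
}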
- if (nested_array_column) - { - const auto & nested_offset = nested_array_column->getOffsets(); - size_t nested_array_size = nested_offset[offset + i] - nested_offset[offset + i - 1]; - value_builder = list_builder.init(i, static_cast(nested_array_size)); - } - else - value_builder = list_builder[i]; - - auto value = convertToDynamicValue(nested_column, nested_type, offset + i, column_name, value_builder, enum_comparing_mode, temporary_text_data_storage); - if (value) - list_builder.set(i, *value); - } - return std::nullopt; - } - default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected CapnProto type."); - } + const auto & header = getPort(PortKind::Main).getHeader(); + serializer = std::make_unique(header.getDataTypes(), header.getNames(), schema, format_settings.capn_proto); + capnp::MallocMessageBuilder message; } void CapnProtoRowOutputFormat::write(const Columns & columns, size_t row_num) { capnp::MallocMessageBuilder message; - /// Temporary storage for data that will be outputted in fields with CapnProto type TEXT. - /// See comment in convertToDynamicValue() for more details. - std::vector> temporary_text_data_storage; capnp::DynamicStruct::Builder root = message.initRoot(schema); - - /// Some columns can share same field builder. For example when we have - /// column with Nested type that was flattened into several columns. - std::unordered_map field_builders; - for (size_t i = 0; i != columns.size(); ++i) - { - auto [struct_builder, field] = getStructBuilderAndFieldByColumnName(root, column_names[i]); - if (!field_builders.contains(field.getIndex())) - { - auto field_builder = initStructFieldBuilder(columns[i], row_num, struct_builder, field); - field_builders[field.getIndex()] = field_builder; - } - auto value = convertToDynamicValue(columns[i], column_types[i], row_num, column_names[i], field_builders[field.getIndex()], format_settings.capn_proto.enum_comparing_mode, temporary_text_data_storage); - if (value) - struct_builder.set(field, *value); - } - + serializer->writeRow(columns, std::move(root), row_num); capnp::writeMessage(*output_stream, message); + } void registerOutputFormatCapnProto(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h index 5cc7099d4c7..dd9dcc6b340 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h +++ b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h @@ -3,15 +3,17 @@ #include "config.h" #if USE_CAPNP -#include -#include -#include -#include -#include -#include +# include +# include +# include +# include +# include +# include +# include namespace DB { + class CapnProtoOutputStream : public kj::OutputStream { public: @@ -43,8 +45,9 @@ private: DataTypes column_types; capnp::StructSchema schema; std::unique_ptr output_stream; - const FormatSettings format_settings; CapnProtoSchemaParser schema_parser; + std::unique_ptr serializer; + }; } diff --git a/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp b/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp index 9777f2361a2..6098923a195 100644 --- a/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp +++ b/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp @@ -88,7 +88,14 @@ void registerInputFormatProtobufList(FormatFactory & factory) }); factory.markFormatSupportsSubsetOfColumns("ProtobufList"); factory.registerAdditionalInfoForSchemaCacheGetter( - "ProtobufList", [](const FormatSettings & settings) { return fmt::format("format_schema={}", 
settings.schema.format_schema); }); + "ProtobufList", + [](const FormatSettings & settings) + { + return fmt::format( + "format_schema={}, skip_fields_with_unsupported_types_in_schema_inference={}", + settings.schema.format_schema, + settings.protobuf.skip_fields_with_unsupported_types_in_schema_inference); + }); } void registerProtobufListSchemaReader(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp b/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp index ee60501dba5..126f3673571 100644 --- a/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp @@ -128,7 +128,14 @@ void registerProtobufSchemaReader(FormatFactory & factory) for (const auto & name : {"Protobuf", "ProtobufSingle"}) factory.registerAdditionalInfoForSchemaCacheGetter( - name, [](const FormatSettings & settings) { return fmt::format("format_schema={}", settings.schema.format_schema); }); + name, + [](const FormatSettings & settings) + { + return fmt::format( + "format_schema={}, skip_fields_with_unsupported_types_in_schema_inference={}", + settings.schema.format_schema, + settings.protobuf.skip_fields_with_unsupported_types_in_schema_inference); + }); } } diff --git a/tests/queries/0_stateless/02030_capnp_format.sh b/tests/queries/0_stateless/02030_capnp_format.sh index c15d6fe442e..625104fb590 100755 --- a/tests/queries/0_stateless/02030_capnp_format.sh +++ b/tests/queries/0_stateless/02030_capnp_format.sh @@ -96,8 +96,8 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a_b U $CLICKHOUSE_CLIENT --query="SELECT number AS a_b, number + 1 AS a_c_d, number + 2 AS a_c_e_f FROM numbers(5) FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nested_tuples:Message'" > $CAPN_PROTO_FILE $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a Tuple(b UInt64, c Tuple(d UInt64, e Tuple(f UInt64)))') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_nested_tuples:Message'" -$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a Tuple(bb UInt64, c Tuple(d UInt64, e Tuple(f UInt64)))') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_nested_tuples:Message'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; -$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a Tuple(b UInt64, c Tuple(d UInt64, e Tuple(ff UInt64)))') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_nested_tuples:Message'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; +$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a Tuple(bb UInt64, c Tuple(d UInt64, e Tuple(f UInt64)))') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_nested_tuples:Message'" 2>&1 | grep -F -q "THERE_IS_NO_COLUMN" && echo 'OK' || echo 'FAIL'; +$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a Tuple(b UInt64, c Tuple(d UInt64, e Tuple(ff UInt64)))') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_nested_tuples:Message'" 2>&1 | grep -F -q "THERE_IS_NO_COLUMN" && echo 'OK' || echo 'FAIL'; $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'string String') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_simple_types:Message'" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL'; diff --git a/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.reference 
b/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.reference new file mode 100644 index 00000000000..f34c857e2f6 --- /dev/null +++ b/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.reference @@ -0,0 +1 @@ +42 (42,42) diff --git a/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.sh b/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.sh new file mode 100755 index 00000000000..c3835948437 --- /dev/null +++ b/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel, no-replicated-database + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +SCHEMADIR=$CURDIR/format_schemas +$CLICKHOUSE_LOCAL -q "select 42 as Field1, (42, 42)::Tuple(Field1 UInt32, Field2 UInt32) as Nested format CapnProto settings format_schema='$SCHEMADIR/02735_case_insensitive_names_matching:Message'" | $CLICKHOUSE_LOCAL --input-format CapnProto --structure "Field1 UInt32, Nested Tuple(Field1 UInt32, Field2 UInt32)" -q "select * from table" --format_schema="$SCHEMADIR/02735_case_insensitive_names_matching:Message" + diff --git a/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.reference b/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.reference new file mode 100644 index 00000000000..b6e6d485929 --- /dev/null +++ b/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.reference @@ -0,0 +1,3 @@ +(42,(42,42),[(42,42),(24,24)]) [(42,(42,42),[(42,42),(24,24)]),(24,(24,24),[(24,24),(42,42)])] +42 42 42 +[42,24] [42,24] [42,24] [[42,24],[24,42]] [[42,24],[24,42]] diff --git a/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.sh b/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.sh new file mode 100755 index 00000000000..c669be2ed33 --- /dev/null +++ b/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel, no-replicated-database + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +SCHEMADIR=$CURDIR/format_schemas +DATA_FILE=02736_$CLICKHOUSE_TEST_UNIQUE_NAME.bin + +$CLICKHOUSE_LOCAL -q "select tuple(42, tuple(42, 42), [tuple(42, 42), tuple(24, 24)]) as nested, [tuple(42, tuple(42, 42), [tuple(42, 42), tuple(24, 24)]), tuple(24, tuple(24, 24), [tuple(24, 24), tuple(42, 42)])] as nestedList format CapnProto settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" > $DATA_FILE + +$CLICKHOUSE_LOCAL -q "select * from file($DATA_FILE, CapnProto) settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" + +$CLICKHOUSE_LOCAL -q "select 42 as nested_field1, 42 as nested_nested_field1, 42 as nested_nested_field2 format CapnProto settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" > $DATA_FILE + +$CLICKHOUSE_LOCAL -q "select * from file($DATA_FILE, CapnProto, 'nested_field1 UInt32, nested_nested_field1 UInt32, nested_nested_field2 UInt32') settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" + +$CLICKHOUSE_LOCAL -q "select [42, 24] as nestedList_field1, [42, 24] as nestedList_nested_field1, [42, 24] as nestedList_nested_field2, [[42, 24], [24, 42]] as nestedList_nestedList_field1, [[42, 24], [24, 42]] as nestedList_nestedList_field2 format CapnProto settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" > $DATA_FILE + +$CLICKHOUSE_LOCAL -q "select * from file($DATA_FILE, CapnProto, 'nestedList_field1 Array(UInt32), nestedList_nested_field1 Array(UInt32), nestedList_nested_field2 Array(UInt32), nestedList_nestedList_field1 Array(Array(UInt32)), nestedList_nestedList_field2 Array(Array(UInt32))') settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" + +rm $DATA_FILE + diff --git a/tests/queries/0_stateless/format_schemas/02735_case_insensitive_names_matching.capnp b/tests/queries/0_stateless/format_schemas/02735_case_insensitive_names_matching.capnp new file mode 100644 index 00000000000..6b12aab081a --- /dev/null +++ b/tests/queries/0_stateless/format_schemas/02735_case_insensitive_names_matching.capnp @@ -0,0 +1,13 @@ +@0x9ef128e10a8010b8; + +struct Nested +{ + field1 @0 : UInt32; + field2 @1 : UInt32; +} + +struct Message +{ + field1 @0 : UInt32; + nested @1 : Nested; +} diff --git a/tests/queries/0_stateless/format_schemas/02736_nested_structures.capnp b/tests/queries/0_stateless/format_schemas/02736_nested_structures.capnp new file mode 100644 index 00000000000..a03eb27f383 --- /dev/null +++ b/tests/queries/0_stateless/format_schemas/02736_nested_structures.capnp @@ -0,0 +1,21 @@ +@0x9ef128e10a8010b8; + +struct Nested2 +{ + field1 @0 : UInt32; + field2 @1 : UInt32; +} + +struct Nested +{ + field1 @0 : UInt32; + nested @1 : Nested2; + nestedList @2 : List(Nested2); +} + +struct Message +{ + nested @0 : Nested; + nestedList @1 : List(Nested); +} + From 167516b6b088dea2e6b44a6e81caf2c088f2481e Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 10 May 2023 21:07:56 +0200 Subject: [PATCH 084/722] Fix style --- src/Formats/CapnProtoSerializer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index e0c8ae2a79a..c31623286d0 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -26,7 +26,7 @@ namespace DB namespace ErrorCodes { extern const int THERE_IS_NO_COLUMN; - extern const int BAD_TYPE_OF_FIELD; + extern const int LOGICAL_ERROR; extern const int CAPN_PROTO_BAD_CAST; extern const 
int INCORRECT_DATA; extern const int ILLEGAL_COLUMN; @@ -293,7 +293,7 @@ namespace return capnp::DynamicValue::Reader(capnp::DynamicEnum(enumerant)); } - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot convert CLickHouse Enum value to CapnProto Enum"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot convert ClickHouse Enum value to CapnProto Enum"); } void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override From 344885dd27fbc652a5e93040ead764cf88232bbf Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 10 May 2023 21:08:12 +0200 Subject: [PATCH 085/722] Fix style --- src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp index e686ae86997..c056ee2b4a4 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp @@ -14,7 +14,6 @@ namespace DB namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int INCORRECT_DATA; } From e3cb1e40e4830d9b5499c2c410e1d12add4cc90f Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 10 May 2023 21:08:31 +0200 Subject: [PATCH 086/722] Fix style --- src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp index 7dd18be27f4..66a7160dd89 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp @@ -11,12 +11,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - - CapnProtoOutputStream::CapnProtoOutputStream(WriteBuffer & out_) : out(out_) { } From 07630ef43fd40f46dfba9adca487c3b69ca2ad3c Mon Sep 17 00:00:00 2001 From: zvonand Date: Thu, 11 May 2023 01:10:34 +0200 Subject: [PATCH 087/722] upd --- src/Client/ClientBase.cpp | 4 +--- src/Client/Connection.cpp | 8 ++++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 8b5db85fc02..fad9494ba4b 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1082,9 +1082,7 @@ void ClientBase::onProgress(const Progress & value) void ClientBase::onTimezoneUpdate(const String & tz) { - Settings settings; - settings.session_timezone = tz; - global_context->applySettingsChanges(settings.changes()); + global_context->setSetting("session_timezone", tz); } diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 062f05105aa..86585d805d9 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -355,10 +355,10 @@ void Connection::receiveHello() nonce.emplace(read_nonce); } } - else if (packet_type == Protocol::Server::TimezoneUpdate) - { - // skip this packet at hello, will receive and process it later - } +// else if (packet_type == Protocol::Server::TimezoneUpdate) +// { +// // skip this packet at hello, will receive and process it later +// } else if (packet_type == Protocol::Server::Exception) receiveException()->rethrow(); else From 58bdcc29315a712e1255c13d31669f4545de9edb Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 10 May 2023 23:55:13 +0000 Subject: [PATCH 088/722] allow to cast IPv6 to IPv4 for address in proper mapping block --- 
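The "proper mapping block" referred to in this commit is the IPv4-mapped IPv6 range ::ffff:0:0/96 (RFC 4291): only addresses whose first 12 bytes are 00..00 ff ff carry an IPv4 address in their last 4 bytes, so only those may be cast to IPv4. A rough standalone sketch of that check, assuming a 16-byte address in network byte order and deliberately ignoring the little-endian column-layout handling and the matchIPv6Subnet helper the patch itself uses:

#include <array>
#include <cstdint>
#include <cstdio>
#include <optional>

/// Return the embedded IPv4 bytes if `addr` lies in ::ffff:0:0/96, otherwise std::nullopt.
static std::optional<std::array<uint8_t, 4>> extractMappedIPv4(const std::array<uint8_t, 16> & addr)
{
    static constexpr std::array<uint8_t, 12> prefix{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff};
    for (size_t i = 0; i != prefix.size(); ++i)
        if (addr[i] != prefix[i])
            return std::nullopt;
    return std::array<uint8_t, 4>{addr[12], addr[13], addr[14], addr[15]};
}

int main()
{
    // ::ffff:192.0.2.1 is convertible; 2001:db8::1 is not.
    std::array<uint8_t, 16> mapped{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 0, 2, 1};
    std::array<uint8_t, 16> plain{0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};

    for (const auto & addr : {mapped, plain})
    {
        if (auto v4 = extractMappedIPv4(addr))
            std::printf("IPv4-mapped: %u.%u.%u.%u\n",
                        static_cast<unsigned>((*v4)[0]), static_cast<unsigned>((*v4)[1]),
                        static_cast<unsigned>((*v4)[2]), static_cast<unsigned>((*v4)[3]));
        else
            std::printf("not in ::ffff:0:0/96, conversion to IPv4 would be rejected\n");
    }
    return 0;
}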
src/Functions/FunctionsConversion.h | 30 ++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 9c4085f9745..9cdd09780e3 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -57,6 +57,7 @@ #include #include #include +#include #include @@ -217,13 +218,13 @@ struct ConvertImpl } else if constexpr ( (std::is_same_v != std::is_same_v) - && !(is_any_of || is_any_of) + && !(is_any_of || is_any_of) ) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported", TypeName, TypeName); } - else if constexpr (std::is_same_v != std::is_same_v) + else if constexpr (std::is_same_v != std::is_same_v && !std::is_same_v) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion between numeric types and IPv6 is not supported. " @@ -304,7 +305,30 @@ struct ConvertImpl } else { - if constexpr (std::is_same_v && std::is_same_v) + if constexpr (std::is_same_v && std::is_same_v) + { + const uint8_t ip4_cidr[] {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}; + const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); + if (!matchIPv6Subnet(src, ip4_cidr, 96)) + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 in column {} is not in IPv4 mapping block", named_from.column->getName()); + + uint8_t * dst = reinterpret_cast(&vec_to[i].toUnderType()); + if constexpr (std::endian::native == std::endian::little) + { + dst[0] = src[15]; + dst[1] = src[14]; + dst[2] = src[13]; + dst[3] = src[12]; + } + else + { + dst[3] = src[15]; + dst[2] = src[14]; + dst[1] = src[13]; + dst[0] = src[12]; + } + } + else if constexpr (std::is_same_v && std::is_same_v) vec_to[i] = static_cast(static_cast(vec_from[i])); else vec_to[i] = static_cast(vec_from[i]); From 288988b59912173883daa757fff394fc5f40b497 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 11 May 2023 12:08:50 +0000 Subject: [PATCH 089/722] Fix build --- src/Formats/CapnProtoSerializer.cpp | 6 ++++++ src/Formats/CapnProtoSerializer.h | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index c31623286d0..00ccfc7717d 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -1,3 +1,7 @@ +#include "config.h" + +#if USE_CAPNP + #include #include #include @@ -1216,3 +1220,5 @@ void CapnProtoSerializer::readRow(MutableColumns & columns, capnp::DynamicStruct CapnProtoSerializer::~CapnProtoSerializer() = default; } + +#endif diff --git a/src/Formats/CapnProtoSerializer.h b/src/Formats/CapnProtoSerializer.h index efae797875b..692f5e5301f 100644 --- a/src/Formats/CapnProtoSerializer.h +++ b/src/Formats/CapnProtoSerializer.h @@ -1,5 +1,7 @@ #pragma once +#if USE_CAPNP + #include #include @@ -23,3 +25,5 @@ private: }; } + +#endif From 108e256578574b26f8adeb3916b15238f0557ee9 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 11 May 2023 16:17:52 +0000 Subject: [PATCH 090/722] allow to cast IPv4 to IPv6 --- src/Functions/FunctionsConversion.h | 32 ++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 9cdd09780e3..5bf59f33cb5 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -218,13 +218,13 @@ struct ConvertImpl } else if constexpr ( (std::is_same_v != 
std::is_same_v) - && !(is_any_of || is_any_of) + && !(is_any_of || is_any_of) ) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported", TypeName, TypeName); } - else if constexpr (std::is_same_v != std::is_same_v && !std::is_same_v) + else if constexpr (std::is_same_v != std::is_same_v && !(std::is_same_v || std::is_same_v)) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion between numeric types and IPv6 is not supported. " @@ -322,10 +322,32 @@ struct ConvertImpl } else { - dst[3] = src[15]; - dst[2] = src[14]; - dst[1] = src[13]; dst[0] = src[12]; + dst[1] = src[13]; + dst[2] = src[14]; + dst[3] = src[15]; + } + } + else if constexpr (std::is_same_v && std::is_same_v) + { + const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); + uint8_t * dst = reinterpret_cast(&vec_to[i].toUnderType()); + std::memset(dst, '\0', IPV6_BINARY_LENGTH); + dst[10] = dst[11] = 0xff; + + if constexpr (std::endian::native == std::endian::little) + { + dst[12] = src[3]; + dst[13] = src[2]; + dst[14] = src[1]; + dst[15] = src[0]; + } + else + { + dst[12] = src[0]; + dst[13] = src[1]; + dst[14] = src[2]; + dst[15] = src[3]; } } else if constexpr (std::is_same_v && std::is_same_v) From 60b69601e9b1e3563eb43bf4ea1deee582e088fa Mon Sep 17 00:00:00 2001 From: zvonand Date: Fri, 12 May 2023 00:27:11 +0200 Subject: [PATCH 091/722] update docs --- .../server-configuration-parameters/settings.md | 4 ++++ docs/en/operations/settings/settings.md | 16 +++++++++++----- .../functions/date-time-functions.md | 14 +++++++++++--- .../server-configuration-parameters/settings.md | 4 ++++ docs/ru/operations/settings/settings.md | 12 ++++++++---- .../functions/date-time-functions.md | 13 +++++++++++-- 6 files changed, 49 insertions(+), 14 deletions(-) diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index e3ca04f5b9b..36ddf6faad0 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1612,6 +1612,10 @@ The time zone is necessary for conversions between String and DateTime formats w Asia/Istanbul ``` +**See also** + +- [session_timezone](../settings/settings.md#session_timezone) + ## tcp_port {#server_configuration_parameters-tcp_port} Port for communicating with clients over the TCP protocol. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index cc5f292f677..2a929acd5f2 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4036,23 +4036,25 @@ Use this setting only for backward compatibility if your use cases depend on old ## session_timezone {#session_timezone} -If specified, sets an implicit timezone (instead of server-default). All DateTime/DateTime64 values (and/or functions results) that have no explicit timezone specified are treated as having this timezone instead of default. -Setting this to `''` (empty string) effectively resets implicit timezone to server timezone. +If specified, sets an implicit timezone (instead of [server default](../server-configuration-parameters/settimgs.md#server_configuration_parameters-timezone). +All DateTime/DateTime64 values (and/or functions results) that have no explicit timezone specified are treated as having this timezone instead of default. +A value of `''` (empty string) configures the session timezone to the server default timezone. 
+ Examples: -```clickhouse +```sql SELECT timeZone(), serverTimezone() FORMAT TSV Europe/Berlin Europe/Berlin ``` -```clickhouse +```sql SELECT timeZone(), serverTimezone() SETTINGS session_timezone = 'Asia/Novosibirsk' FORMAT TSV Asia/Novosibirsk Europe/Berlin ``` -```clickhouse +```sql SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS session_timezone = 'America/Denver' FORMAT TSV 1999-12-13 07:23:23.123 @@ -4064,6 +4066,10 @@ Possible values: Default value: `''`. +**See also** + +- [timezone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone) + ## final {#final} Automatically applies [FINAL](../../sql-reference/statements/select/from.md#final-modifier) modifier to all tables in a query, to tables where [FINAL](../../sql-reference/statements/select/from.md#final-modifier) is applicable, including joined tables and tables in sub-queries, and diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 596a2c509cd..9207a135c67 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -139,7 +139,7 @@ makeDateTime32(year, month, day, hour, minute, second[, fraction[, precision[, t ## timeZone -Returns the default timezone of the server for current session. This can be modified using `SET session_timezone = 'New/Value'` +Returns the default timezone of the current session, i.e. the value of setting [session_timezone](../../operations/settings/settings.md#session_timezone). If the function is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard, otherwise it produces a constant value. **Syntax** @@ -156,9 +156,13 @@ Alias: `timezone`. Type: [String](../../sql-reference/data-types/string.md). +**See also** + +- [serverTimeZone](#serverTimeZone) + ## serverTimeZone -Returns the actual timezone in which the server runs in. +Returns the default timezone of the server, i.e. the value of setting [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value. **Syntax** @@ -175,6 +179,10 @@ Alias: `ServerTimezone`, `servertimezone`. Type: [String](../../sql-reference/data-types/string.md). +**See also** + +- [timeZone](#timeZone) + ## toTimeZone Converts a date or date with time to the specified time zone. Does not change the internal value (number of unix seconds) of the data, only the value's time zone attribute and the value's string representation changes. @@ -408,7 +416,7 @@ Result: ``` :::note -The return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday`, `timeSlot` functions described below is determined by the configuration parameter [enable_extended_results_for_datetime_functions](../../operations/settings/settings.md#enable-extended-results-for-datetime-functions) which is `0` by default. +Thes return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday`, `timeSlot` functions described below is determined by the configuration parameter [enable_extended_results_for_datetime_functions](../../operations/settings/settings.md#enable-extended-results-for-datetime-functions) which is `0` by default. 
Behavior for * `enable_extended_results_for_datetime_functions = 0`: Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime`. Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime`. Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results. diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index 787153d4d19..33db6df0fdd 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -1355,6 +1355,10 @@ Parameters: Europe/Moscow ``` +**См. также** + +- [session_timezone](../settings/settings.md#session_timezone) + ## tcp_port {#server_configuration_parameters-tcp_port} Порт для взаимодействия с клиентами по протоколу TCP. diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 585a3995afe..56bfbf8a57f 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -4077,23 +4077,23 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_ca ## session_timezone {#session_timezone} -Задаёт значение часового пояса (session_timezone) по умолчанию для текущей сессии вместо часового пояса сервера. То есть, все значения DateTime/DateTime64, для которых явно не задан параметр timezone, будут интерпретированы как относящиеся к указанной зоне. +Задаёт значение часового пояса (session_timezone) по умолчанию для текущей сессии вместо [часового пояса сервера](../server-configuration-parameters/settimgs.md#server_configuration_parameters-timezone). То есть, все значения DateTime/DateTime64, для которых явно не задан параметр timezone, будут интерпретированы как относящиеся к указанной зоне. При значении настройки `''` (пустая строка), будет совпадать с часовым поясом сервера. Примеры: -```clickhouse +```sql SELECT timeZone(), serverTimezone() FORMAT TSV Europe/Berlin Europe/Berlin ``` -```clickhouse +```sql SELECT timeZone(), serverTimezone() SETTINGS session_timezone = 'Asia/Novosibirsk' FORMAT TSV Asia/Novosibirsk Europe/Berlin ``` -```clickhouse +```sql SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS session_timezone = 'America/Denver' FORMAT TSV 1999-12-13 07:23:23.123 @@ -4104,3 +4104,7 @@ SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zuric - Любая зона из `system.time_zones`, например `Europe/Berlin`, `UTC` или `Zulu` Значение по умолчанию: `''`. + +**Смотрите также** + +- [timezone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone) \ No newline at end of file diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 3e378c08308..2d9f96c3199 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -26,7 +26,8 @@ SELECT ## timeZone {#timezone} -Возвращает часовой пояс сервера, считающийся умолчанием для текущей сессии. 
Можно изменить значение с помощью `SET session_timezone = 'New/Timezone''` +Возвращает часовой пояс сервера, считающийся умолчанием для текущей сессии: значение параметра [session_timezone](../../operations/settings/settings.md#session_timezone), если установлено. + Если функция вызывается в контексте распределенной таблицы, то она генерирует обычный столбец со значениями, актуальными для каждого шарда. Иначе возвращается константа. **Синтаксис** @@ -43,9 +44,13 @@ timeZone() Тип: [String](../../sql-reference/data-types/string.md). +**Смотрите также** + +- [serverTimeZone](#servertimezone) + ## serverTimeZone {#servertimezone} -Возвращает (истинный) часовой пояс сервера, в котором тот работает. +Возвращает часовой пояс сервера по умолчанию, в т.ч. установленный [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) Если функция вызывается в контексте распределенной таблицы, то она генерирует обычный столбец со значениями, актуальными для каждого шарда. Иначе возвращается константа. **Синтаксис** @@ -62,6 +67,10 @@ serverTimeZone() Тип: [String](../../sql-reference/data-types/string.md). +**Смотрите также** + +- [timeZone](#timezone) + ## toTimeZone {#totimezone} Переводит дату или дату с временем в указанный часовой пояс. Часовой пояс - это атрибут типов `Date` и `DateTime`. Внутреннее значение (количество секунд) поля таблицы или результирующего столбца не изменяется, изменяется тип поля и, соответственно, его текстовое отображение. From c3af36915f049794a9c44c55ebb6a6bc950eadc8 Mon Sep 17 00:00:00 2001 From: zvonand Date: Fri, 12 May 2023 01:29:34 +0200 Subject: [PATCH 092/722] fixed docs 2 --- docs/en/operations/settings/settings.md | 2 +- docs/ru/operations/settings/settings.md | 2 +- src/DataTypes/Serializations/SerializationDate.cpp | 3 ++- src/DataTypes/Serializations/SerializationDate.h | 7 +++++-- src/DataTypes/Serializations/SerializationDate32.cpp | 3 ++- src/DataTypes/Serializations/SerializationDate32.h | 7 +++++-- src/IO/ReadHelpers.h | 3 +-- 7 files changed, 17 insertions(+), 10 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 2a929acd5f2..e796ea83a6f 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4036,7 +4036,7 @@ Use this setting only for backward compatibility if your use cases depend on old ## session_timezone {#session_timezone} -If specified, sets an implicit timezone (instead of [server default](../server-configuration-parameters/settimgs.md#server_configuration_parameters-timezone). +If specified, sets an implicit timezone (instead of [server default](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone). All DateTime/DateTime64 values (and/or functions results) that have no explicit timezone specified are treated as having this timezone instead of default. A value of `''` (empty string) configures the session timezone to the server default timezone. 
diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 56bfbf8a57f..98486847fd9 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -4077,7 +4077,7 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_ca ## session_timezone {#session_timezone} -Задаёт значение часового пояса (session_timezone) по умолчанию для текущей сессии вместо [часового пояса сервера](../server-configuration-parameters/settimgs.md#server_configuration_parameters-timezone). То есть, все значения DateTime/DateTime64, для которых явно не задан параметр timezone, будут интерпретированы как относящиеся к указанной зоне. +Задаёт значение часового пояса (session_timezone) по умолчанию для текущей сессии вместо [часового пояса сервера](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone). То есть, все значения DateTime/DateTime64, для которых явно не задан параметр timezone, будут интерпретированы как относящиеся к указанной зоне. При значении настройки `''` (пустая строка), будет совпадать с часовым поясом сервера. Примеры: diff --git a/src/DataTypes/Serializations/SerializationDate.cpp b/src/DataTypes/Serializations/SerializationDate.cpp index 8b4956f7826..1ed48fdd31d 100644 --- a/src/DataTypes/Serializations/SerializationDate.cpp +++ b/src/DataTypes/Serializations/SerializationDate.cpp @@ -80,7 +80,8 @@ void SerializationDate::deserializeTextCSV(IColumn & column, ReadBuffer & istr, readCSV(value, istr, time_zone); assert_cast(column).getData().push_back(value); } -SerializationDate::SerializationDate(const TimezoneMixin & time_zone_) : TimezoneMixin(time_zone_) + +SerializationDate::SerializationDate(const DateLUTImpl & time_zone_) : time_zone(time_zone_) { } diff --git a/src/DataTypes/Serializations/SerializationDate.h b/src/DataTypes/Serializations/SerializationDate.h index c4e57470673..4d6a6fa36ec 100644 --- a/src/DataTypes/Serializations/SerializationDate.h +++ b/src/DataTypes/Serializations/SerializationDate.h @@ -6,10 +6,10 @@ namespace DB { -class SerializationDate final : public SerializationNumber, public TimezoneMixin +class SerializationDate final : public SerializationNumber { public: - explicit SerializationDate(const TimezoneMixin & time_zone_ = TimezoneMixin()); + explicit SerializationDate(const DateLUTImpl & time_zone_ = DateLUT::instance()); void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override; @@ -21,6 +21,9 @@ public: void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override; void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + +protected: + const DateLUTImpl & time_zone; }; } diff --git a/src/DataTypes/Serializations/SerializationDate32.cpp b/src/DataTypes/Serializations/SerializationDate32.cpp index 8dcaee8d266..851710de839 100644 --- a/src/DataTypes/Serializations/SerializationDate32.cpp +++ b/src/DataTypes/Serializations/SerializationDate32.cpp @@ -78,7 +78,8 @@ void SerializationDate32::deserializeTextCSV(IColumn & column, ReadBuffer & istr readCSV(value, istr); assert_cast(column).getData().push_back(value.getExtenedDayNum()); } 
-SerializationDate32::SerializationDate32(const TimezoneMixin & time_zone_) : TimezoneMixin(time_zone_) + +SerializationDate32::SerializationDate32(const DateLUTImpl & time_zone_) : time_zone(time_zone_) { } } diff --git a/src/DataTypes/Serializations/SerializationDate32.h b/src/DataTypes/Serializations/SerializationDate32.h index e8e8f1a74d6..6b6e5442240 100644 --- a/src/DataTypes/Serializations/SerializationDate32.h +++ b/src/DataTypes/Serializations/SerializationDate32.h @@ -5,10 +5,10 @@ namespace DB { -class SerializationDate32 final : public SerializationNumber, public TimezoneMixin +class SerializationDate32 final : public SerializationNumber { public: - explicit SerializationDate32(const TimezoneMixin & time_zone_ = TimezoneMixin()); + explicit SerializationDate32(const DateLUTImpl & time_zone_ = DateLUT::instance()); void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override; @@ -20,5 +20,8 @@ public: void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override; void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + +protected: + const DateLUTImpl & time_zone; }; } diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index ea565d11914..3bd9275322e 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -1160,8 +1160,7 @@ inline void readText(is_floating_point auto & x, ReadBuffer & buf) { readFloatTe inline void readText(String & x, ReadBuffer & buf) { readEscapedString(x, buf); } -inline void readText(DayNum & x, ReadBuffer & buf) { readDateText(x, buf); } -inline void readText(DayNum & x, ReadBuffer & buf, const DateLUTImpl & time_zone) { readDateText(x, buf, time_zone); } +inline void readText(DayNum & x, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { readDateText(x, buf, time_zone); } inline void readText(LocalDate & x, ReadBuffer & buf) { readDateText(x, buf); } inline void readText(LocalDateTime & x, ReadBuffer & buf) { readDateTimeText(x, buf); } From 1daa9811222c80f2d957556d32be4f3309034e4b Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 12 May 2023 16:12:01 +0200 Subject: [PATCH 093/722] Fix special builds --- src/Formats/CapnProtoSerializer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index 00ccfc7717d..091e70da656 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -1007,7 +1007,7 @@ namespace catch (Exception & e) { e.addMessage("(while converting column {})", column_name); - throw e; + throw std::move(e); } } @@ -1015,7 +1015,7 @@ namespace { assert(builder); auto & struct_builder = assert_cast(*builder); - if (auto tuple_column = typeid_cast(column.get())) + if (auto * tuple_column = typeid_cast(column.get())) writeRow(tuple_column->getColumnsCopy(), struct_builder, row_num); else writeRow(Columns{column}, struct_builder, row_num); From 24067ea977b6e4484f68efba8858ba8d0ad1cd6b Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 12 May 2023 15:54:50 +0000 Subject: [PATCH 094/722] allow conversion for toIPv4OrDefault --- src/Functions/FunctionsCodingIP.h | 81 
+++++++++++++++++++++++++++++ src/Functions/FunctionsConversion.h | 20 +++++++ 2 files changed, 101 insertions(+) diff --git a/src/Functions/FunctionsCodingIP.h b/src/Functions/FunctionsCodingIP.h index d02cc81f608..bd53fa7e043 100644 --- a/src/Functions/FunctionsCodingIP.h +++ b/src/Functions/FunctionsCodingIP.h @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -16,6 +17,7 @@ namespace ErrorCodes extern const int CANNOT_PARSE_IPV4; extern const int CANNOT_PARSE_IPV6; extern const int ILLEGAL_COLUMN; + extern const int CANNOT_CONVERT_TYPE; } enum class IPStringToNumExceptionMode : uint8_t @@ -296,4 +298,83 @@ ColumnPtr convertToIPv4(ColumnPtr column, const PaddedPODArray * null_map return col_res; } +template +ColumnPtr convertIPv6ToIPv4(ColumnPtr column, const PaddedPODArray * null_map = nullptr) +{ + const ColumnIPv6 * column_ipv6 = checkAndGetColumn(column.get()); + + if (!column_ipv6) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column type {}. Expected IPv6.", column->getName()); + + size_t column_size = column_ipv6->size(); + + ColumnUInt8::MutablePtr col_null_map_to; + ColumnUInt8::Container * vec_null_map_to = nullptr; + + if constexpr (exception_mode == IPStringToNumExceptionMode::Null) + { + col_null_map_to = ColumnUInt8::create(column_size, false); + vec_null_map_to = &col_null_map_to->getData(); + } + + const uint8_t ip4_cidr[] {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}; + + auto col_res = ToColumn::create(); + auto & vec_res = col_res->getData(); + vec_res.resize(column_size); + const auto & vec_src = column_ipv6->getData(); + + for (size_t i = 0; i < vec_res.size(); ++i) + { + const uint8_t * src = reinterpret_cast(&vec_src[i]); + uint8_t * dst = reinterpret_cast(&vec_res[i]); + + if (null_map && (*null_map)[i]) + { + std::memset(dst, '\0', IPV4_BINARY_LENGTH); + if constexpr (exception_mode == IPStringToNumExceptionMode::Null) + (*vec_null_map_to)[i] = true; + continue; + } + + if (!matchIPv6Subnet(src, ip4_cidr, 96)) + { + if constexpr (exception_mode == IPStringToNumExceptionMode::Throw) + { + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 in column {} is not in IPv4 mapping block", column->getName()); + } + else if constexpr (exception_mode == IPStringToNumExceptionMode::Default) + { + std::memset(dst, '\0', IPV4_BINARY_LENGTH); + } + else if constexpr (exception_mode == IPStringToNumExceptionMode::Null) + { + (*vec_null_map_to)[i] = true; + std::memset(dst, '\0', IPV4_BINARY_LENGTH); + } + continue; + } + + if constexpr (std::endian::native == std::endian::little) + { + dst[0] = src[15]; + dst[1] = src[14]; + dst[2] = src[13]; + dst[3] = src[12]; + } + else + { + dst[0] = src[12]; + dst[1] = src[13]; + dst[2] = src[14]; + dst[3] = src[15]; + } + } + + if constexpr (exception_mode == IPStringToNumExceptionMode::Null) + return ColumnNullable::create(std::move(col_res), std::move(col_null_map_to)); + + return col_res; +} + } diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 5bf59f33cb5..4d4efc84df1 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -4035,6 +4035,26 @@ private: return true; } } + else if constexpr (WhichDataType(FromDataType::type_id).isIPv6() && WhichDataType(ToDataType::type_id).isIPv4()) + { + ret = [cast_ipv4_ipv6_default_on_conversion_error_value, requested_result_is_nullable]( + ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * 
column_nullable, size_t) + -> ColumnPtr + { + if (!WhichDataType(result_type).isIPv4()) + throw Exception( + ErrorCodes::TYPE_MISMATCH, "Wrong result type {}. Expected IPv4", result_type->getName()); + + const auto * null_map = column_nullable ? &column_nullable->getNullMapData() : nullptr; + if (cast_ipv4_ipv6_default_on_conversion_error_value || requested_result_is_nullable) + return convertIPv6ToIPv4(arguments[0].column, null_map); + else + return convertIPv6ToIPv4(arguments[0].column, null_map); + }; + + return true; + } + if constexpr (WhichDataType(ToDataType::type_id).isStringOrFixedString()) { if (from_type->getCustomSerialization()) From 7698776d2aae48880e45d8a791b7474b5ef20999 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 12 May 2023 18:53:51 +0200 Subject: [PATCH 095/722] Fix special build --- src/Formats/CapnProtoSchema.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Formats/CapnProtoSchema.cpp b/src/Formats/CapnProtoSchema.cpp index 22518d5061a..f9ab88d39ed 100644 --- a/src/Formats/CapnProtoSchema.cpp +++ b/src/Formats/CapnProtoSchema.cpp @@ -151,7 +151,7 @@ namespace { template - static DataTypePtr getEnumDataTypeFromEnumerants(const capnp::EnumSchema::EnumerantList & enumerants) + DataTypePtr getEnumDataTypeFromEnumerants(const capnp::EnumSchema::EnumerantList & enumerants) { std::vector> values; for (auto enumerant : enumerants) @@ -159,7 +159,7 @@ namespace return std::make_shared>(std::move(values)); } - static DataTypePtr getEnumDataTypeFromEnumSchema(const capnp::EnumSchema & enum_schema) + DataTypePtr getEnumDataTypeFromEnumSchema(const capnp::EnumSchema & enum_schema) { auto enumerants = enum_schema.getEnumerants(); if (enumerants.size() < 128) @@ -170,7 +170,7 @@ namespace throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "ClickHouse supports only 8 and 16-bit Enums"); } - static DataTypePtr getDataTypeFromCapnProtoType(const capnp::Type & capnp_type, bool skip_unsupported_fields) + DataTypePtr getDataTypeFromCapnProtoType(const capnp::Type & capnp_type, bool skip_unsupported_fields) { switch (capnp_type.which()) { From c9f90fb9adba16ad1131a16d46a6efefdd532379 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 12 May 2023 18:54:38 +0200 Subject: [PATCH 096/722] Fix special build --- src/Formats/CapnProtoSerializer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index 091e70da656..ff3880976c7 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -158,7 +158,7 @@ namespace }; template - static std::unique_ptr createIntegerSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + std::unique_ptr createIntegerSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) { switch (capnp_type.which()) { @@ -1015,7 +1015,7 @@ namespace { assert(builder); auto & struct_builder = assert_cast(*builder); - if (auto * tuple_column = typeid_cast(column.get())) + if (const auto * tuple_column = typeid_cast(column.get())) writeRow(tuple_column->getColumnsCopy(), struct_builder, row_num); else writeRow(Columns{column}, struct_builder, row_num); From fc857aa2dbf297d2681af16ddfbafb47739db854 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 15 May 2023 03:06:03 +0000 Subject: [PATCH 097/722] tests added --- 
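The conversion added in the hunks above only succeeds for IPv4-mapped IPv6 addresses (the `::ffff:0:0/96` block); for anything else the helper either throws `CANNOT_CONVERT_TYPE`, substitutes `0.0.0.0`, or marks the row as `NULL`, depending on the exception mode chosen by the caller. A short sketch of the expected behaviour (addresses are arbitrary examples, not copied from the tests that follow):

```sql
-- inside the IPv4-mapped block: converts cleanly
SELECT toIPv4(toIPv6('::ffff:10.0.0.1'));

-- outside ::ffff:0:0/96: toIPv4 is expected to throw CANNOT_CONVERT_TYPE,
-- while toIPv4OrDefault falls back to 0.0.0.0
SELECT toIPv4(toIPv6('2001:db8::1'));           -- error
SELECT toIPv4OrDefault(toIPv6('2001:db8::1'));  -- 0.0.0.0
```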
.../queries/0_stateless/02234_cast_to_ip_address.reference | 4 ++++ tests/queries/0_stateless/02234_cast_to_ip_address.sql | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.reference b/tests/queries/0_stateless/02234_cast_to_ip_address.reference index 9023b36a9bf..fa9c6bd0f94 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.reference +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.reference @@ -11,6 +11,10 @@ IPv4 functions 127.0.0.1 127.0.0.1 -- +1.2.3.4 +1.2.3.4 +0.0.0.0 +-- 127.0.0.1 -- 0 diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.sql b/tests/queries/0_stateless/02234_cast_to_ip_address.sql index 6c65fe86cc9..28f1afff57f 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.sql +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.sql @@ -20,6 +20,13 @@ SELECT toIPv4OrNull('127.0.0.1'); SELECT '--'; +SELECT toIPv4(toIPv6('::ffff:1.2.3.4')); +SELECT toIPv4(toIPv6('::afff:1.2.3.4')); --{serverError CANNOT_CONVERT_TYPE} +SELECT toIPv4OrDefault(toIPv6('::ffff:1.2.3.4')); +SELECT toIPv4OrDefault(toIPv6('::afff:1.2.3.4')); + +SELECT '--'; + SELECT cast('test' , 'IPv4'); --{serverError CANNOT_PARSE_IPV4} SELECT cast('127.0.0.1' , 'IPv4'); From e5aa3fcc8fdf8bbc45756728f11a9a55c2f17fb6 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 17 May 2023 19:09:13 +0000 Subject: [PATCH 098/722] Add queries with enabled analyzer --- .../0_stateless/01655_plan_optimizations.sh | 44 ++++++++++++++++--- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index ec856c9bf27..7c299f9cc26 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -26,11 +26,17 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> filter should be pushed down after aggregating, column after aggregation is const" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " explain actions = 1 select s, y, y != 0 from (select sum(x) as s, y from ( select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter\|COLUMN Const(UInt8) -> notEquals(y, 0)" +echo "> (analyzer) filter should be pushed down after aggregating, column after aggregation is const" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " + explain actions = 1 select s, y, y != 0 from (select sum(x) as s, y from ( + select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 + settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter\|COLUMN Const(UInt8) -> notEquals(y_1, 0_UInt8)" $CLICKHOUSE_CLIENT -q " select s, y, y != 0 from (select sum(x) as s, y from ( select number as x, number + 1 as y from numbers(10)) group by y @@ -38,12 +44,19 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> one condition of filter should be pushed down after aggregating, other condition is aliased" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s != 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 
0)\|ALIAS notEquals(s, 4) :: 1 -> and(notEquals(y, 0), notEquals(s, 4))" +echo "> (analyzer) one condition of filter should be pushed down after aggregating, other condition is aliased" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " + explain actions = 1 select s, y from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s != 4 + settings enable_optimize_predicate_expression=0" | + grep -o "Aggregating\|Filter column\|Filter column: notEquals(y_1, 0_UInt8)\|ALIAS notEquals(s_0, 4_UInt8) :: 0 -> and(notEquals(y_1, 0_UInt8), notEquals(s_0, 4_UInt8))" $CLICKHOUSE_CLIENT -q " select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y @@ -51,12 +64,19 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> one condition of filter should be pushed down after aggregating, other condition is casted" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION and(minus(s, 4) :: 1, 1 :: 3) -> and(notEquals(y, 0), minus(s, 4)) UInt8 : 2" +echo "> (analyzer) one condition of filter should be pushed down after aggregating, other condition is casted" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " + explain actions = 1 select s, y from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s - 4 + settings enable_optimize_predicate_expression=0" | + grep -o "Aggregating\|Filter column\|Filter column: notEquals(y_1, 0_UInt8)\|FUNCTION and(minus(s_0, 4_UInt8) :: 0, 1 :: 3) -> and(notEquals(y_1, 0_UInt8), minus(s_0, 4_UInt8)) UInt8 : 2" $CLICKHOUSE_CLIENT -q " select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y @@ -64,12 +84,19 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> one condition of filter should be pushed down after aggregating, other two conditions are ANDed" -$CLICKHOUSE_CLIENT --convert_query_to_cnf=0 -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 --convert_query_to_cnf=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 8 and s - 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION and(minus(s, 8) :: 1, minus(s, 4) :: 2) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4))" +echo "> (analyzer) one condition of filter should be pushed down after aggregating, other two conditions are ANDed" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 --convert_query_to_cnf=0 -q " + explain actions = 1 select s, y from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s - 8 and s - 4 + settings enable_optimize_predicate_expression=0" | + grep -o "Aggregating\|Filter column\|Filter column: notEquals(y_1, 0_UInt8)\|FUNCTION and(minus(s_0, 8_UInt8) :: 0, minus(s_0, 4_UInt8) :: 2) -> and(notEquals(y_1, 0_UInt8), minus(s_0, 8_UInt8), minus(s_0, 4_UInt8))" $CLICKHOUSE_CLIENT -q " select s, y from ( select sum(x) as s, y from 
(select number as x, number + 1 as y from numbers(10)) group by y @@ -77,12 +104,19 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> two conditions of filter should be pushed down after aggregating and ANDed, one condition is aliased" -$CLICKHOUSE_CLIENT --convert_query_to_cnf=0 -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 --convert_query_to_cnf=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s != 8 and y - 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: and(notEquals(y, 0), minus(y, 4))\|ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4))" +echo "> (analyzer) two conditions of filter should be pushed down after aggregating and ANDed, one condition is aliased" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 --convert_query_to_cnf=0 -q " + explain actions = 1 select s, y from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s != 8 and y - 4 + settings enable_optimize_predicate_expression=0" | + grep -o "Aggregating\|Filter column\|Filter column: and(notEquals(y_1, 0_UInt8), minus(y_1, 4_UInt8))\|ALIAS notEquals(s_0, 8_UInt8) :: 0 -> and(notEquals(y_1, 0_UInt8), notEquals(s_0, 8_UInt8), minus(y_1, 4_UInt8))" $CLICKHOUSE_CLIENT -q " select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y From c85c3afa1f50307d9a92d24559fe9628fe8cee37 Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Sun, 7 May 2023 12:18:52 +0000 Subject: [PATCH 099/722] Added option to rename files, loaded via TableFunctionFile, after success processing --- src/Client/ClientBase.cpp | 1 + src/Common/FileRenamer.cpp | 99 ++++++++++++++++++++++++ src/Common/FileRenamer.h | 39 ++++++++++ src/Core/Settings.h | 2 + src/Storages/StorageFile.cpp | 64 +++++++++++++++ src/Storages/StorageFile.h | 10 ++- src/TableFunctions/TableFunctionFile.cpp | 1 + 7 files changed, 214 insertions(+), 2 deletions(-) create mode 100644 src/Common/FileRenamer.cpp create mode 100644 src/Common/FileRenamer.h diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 77a93a25e9b..571637c6005 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1361,6 +1361,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des columns_description_for_query, ConstraintsDescription{}, String{}, + {}, }; StoragePtr storage = std::make_shared(in_file, global_context->getUserFilesPath(), args); storage->startup(); diff --git a/src/Common/FileRenamer.cpp b/src/Common/FileRenamer.cpp new file mode 100644 index 00000000000..7a19c50a0d1 --- /dev/null +++ b/src/Common/FileRenamer.cpp @@ -0,0 +1,99 @@ +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace fs = std::filesystem; + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +FileRenamer::FileRenamer() = default; + +FileRenamer::FileRenamer(const String & renaming_rule) + : rule(renaming_rule) +{ + FileRenamer::validateRenamingRule(rule, true); +} + +String FileRenamer::generateNewFilename(const String & filename) const +{ + // Split filename and extension + String file_base = fs::path(filename).stem(); + String file_ext = fs::path(filename).extension(); + + // Get current timestamp in 
microseconds + String timestamp; + if (rule.find("%t") != String::npos) + { + auto now = std::chrono::system_clock::now(); + std::stringstream ss; + ss << timeInMicroseconds(now); + timestamp = ss.str(); + } + + // Define placeholders and their corresponding values + std::map placeholders = + { + {"%f", file_base}, + {"%e", file_ext}, + {"%t", timestamp}, + {"%%", "%"} + }; + + // Replace placeholders with their actual values + String new_name = rule; + for (const auto & [placeholder, value] : placeholders) + boost::replace_all(new_name, placeholder, value); + + return new_name; +} + +bool FileRenamer::isEmpty() const +{ + return rule.empty(); +} + +bool FileRenamer::validateRenamingRule(const String & rule, bool throw_on_error) +{ + // Check if the rule contains invalid placeholders + re2::RE2 invalid_placeholder_pattern("^([^%]|%[fet%])*$"); + if (!re2::RE2::FullMatch(rule, invalid_placeholder_pattern)) + { + if (throw_on_error) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid renaming rule: Allowed placeholders only %f, %e, %t, and %%"); + return false; + } + + // Replace valid placeholders with empty strings and count remaining percentage signs. + String replaced_rule = rule; + boost::replace_all(replaced_rule, "%f", ""); + boost::replace_all(replaced_rule, "%e", ""); + boost::replace_all(replaced_rule, "%t", ""); + if (std::count(replaced_rule.begin(), replaced_rule.end(), '%') % 2) + { + if (throw_on_error) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid renaming rule: Odd number of consecutive percentage signs"); + return false; + } + + return true; +} + + +} // DB diff --git a/src/Common/FileRenamer.h b/src/Common/FileRenamer.h new file mode 100644 index 00000000000..c062978d6f6 --- /dev/null +++ b/src/Common/FileRenamer.h @@ -0,0 +1,39 @@ +#pragma once + +#include +#include + +namespace DB +{ + +/** + * The FileRenamer class provides functionality for renaming files based on given pattern with placeholders + * The supported placeholders are: + * %f - Original filename without extension ("sample") + * %e - Original file extension with dot (".csv") + * %t - Timestamp (in microseconds) + * %% - Percentage sign ("%") + * + * Example: + * Pattern - "processed_%f_%t%e" + * Original filename - "sample.csv" + * New filename - "processed_sample_1683405960646224.csv" + */ +class FileRenamer +{ +public: + FileRenamer(); + + FileRenamer(const String & renaming_rule); + + String generateNewFilename(const String & filename) const; + + bool isEmpty() const; + + static bool validateRenamingRule(const String & rule, bool throw_on_error = false); + +private: + String rule; +}; + +} // DB diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 69546011770..d0ce641efb5 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -713,6 +713,8 @@ class IColumn; M(String, workload, "default", "Name of workload to be used to access resources", 0) \ M(Milliseconds, storage_system_stack_trace_pipe_read_timeout_ms, 100, "Maximum time to read from a pipe for receiving information from the threads when querying the `system.stack_trace` table. 
This setting is used for testing purposes and not meant to be changed by users.", 0) \ \ + M(String, rename_files_after_processing, "", "Rename successfully processed files according to the specified pattern; Pattern can include the following placeholders: `%f` (original filename without extension), `%e` (file extension with dot), `%t` (current timestamp in µs), and `%%` (% sign)", 0) \ + \ M(Bool, parallelize_output_from_storages, true, "Parallelize output for reading step from storage. It allows parallelizing query processing right after reading from storage if possible", 0) \ M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \ M(String, ann_index_select_query_params, "", "Parameters passed to ANN indexes in SELECT queries, the format is 'param1=x, param2=y, ...'", 0) \ diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 425fe6bee31..2ea8da1a873 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -76,6 +77,7 @@ namespace ErrorCodes extern const int UNKNOWN_IDENTIFIER; extern const int INCORRECT_FILE_NAME; extern const int FILE_DOESNT_EXIST; + extern const int FILE_ALREADY_EXISTS; extern const int TIMEOUT_EXCEEDED; extern const int INCOMPATIBLE_COLUMNS; extern const int CANNOT_STAT; @@ -460,6 +462,8 @@ StorageFile::StorageFile(const std::string & table_path_, const std::string & us else path_for_partitioned_write = table_path_; + file_renamer = FileRenamer(args.rename_after_processing); + setStorageMetadata(args); } @@ -593,9 +597,68 @@ public: shared_lock = std::shared_lock(storage->rwlock, getLockTimeout(context)); if (!shared_lock) throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Lock timeout exceeded"); + storage->readers_counter.fetch_add(1, std::memory_order_release); } } + + /** + * If specified option --rename_files_after_processing and files created by TableFunctionFile + * Last reader will rename files according to specified patten if desctuctor of reader was called without uncaught exceptions + */ + void beforeDestroy() + { + if (storage->file_renamer.isEmpty()) + return; + + int32_t cnt = storage->readers_counter.fetch_sub(1, std::memory_order_acq_rel); + + if (std::uncaught_exceptions() == 0 && cnt == 1 && !storage->was_renamed) + { + shared_lock.unlock(); + auto exclusive_lock = std::unique_lock{storage->rwlock, getLockTimeout(context)}; + + if (!exclusive_lock) + return; + if (storage->readers_counter.load(std::memory_order_acquire) != 0 || storage->was_renamed) + return; + + for (auto & file_path_ref : storage->paths) { + try + { + auto file_path = fs::path(file_path_ref); + String new_filename = storage->file_renamer.generateNewFilename(file_path.filename().string()); + file_path.replace_filename(new_filename); + + // Normalize new path + file_path = file_path.lexically_normal(); + + // Checking access rights + checkCreationIsAllowed(context, context->getUserFilesPath(), file_path, true); + + // Checking an existing of new file + if (fs::exists(file_path)) + throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "File {} already exists", file_path.string()); + + fs::rename(fs::path(file_path_ref), file_path); + file_path_ref = file_path.string(); + storage->was_renamed = true; + } + catch (const std::exception & e) + { + // Cannot throw exception from destructor, will write only error + LOG_ERROR(&Poco::Logger::get("~StorageFileSource"), "Failed to rename file {}: {}", file_path_ref, 
e.what()); + continue; + } + } + } + } + + ~StorageFileSource() override + { + beforeDestroy(); + } + String getName() const override { return storage->getName(); @@ -1217,6 +1280,7 @@ void registerStorageFile(StorageFactory & factory) factory_args.columns, factory_args.constraints, factory_args.comment, + {}, }; ASTs & engine_args_ast = factory_args.engine_args; diff --git a/src/Storages/StorageFile.h b/src/Storages/StorageFile.h index 53ce7eeaaf6..0513864fd0f 100644 --- a/src/Storages/StorageFile.h +++ b/src/Storages/StorageFile.h @@ -2,12 +2,11 @@ #include #include - +#include #include #include - namespace DB { @@ -23,6 +22,8 @@ public: const ColumnsDescription & columns; const ConstraintsDescription & constraints; const String & comment; + + const std::string rename_after_processing; }; /// From file descriptor @@ -139,6 +140,11 @@ private: std::unique_ptr read_buffer_from_fd; std::unique_ptr peekable_read_buffer_from_fd; std::atomic has_peekable_read_buffer_from_fd = false; + + // Counts the number of readers + std::atomic readers_counter = 0; + FileRenamer file_renamer; + bool was_renamed = false; }; } diff --git a/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp index ff64bb3dc67..0e49f26db40 100644 --- a/src/TableFunctions/TableFunctionFile.cpp +++ b/src/TableFunctions/TableFunctionFile.cpp @@ -75,6 +75,7 @@ StoragePtr TableFunctionFile::getStorage(const String & source, columns, ConstraintsDescription{}, String{}, + global_context->getSettingsRef().rename_files_after_processing, }; if (fd >= 0) return std::make_shared(fd, args); From 7b3964ff7a63ce6a4027f0dfb8f06c019239ac1c Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Sun, 7 May 2023 14:59:40 +0000 Subject: [PATCH 100/722] Added test --- .../02732_rename_after_processing.reference | 17 ++++ .../02732_rename_after_processing.sh | 77 +++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 tests/queries/0_stateless/02732_rename_after_processing.reference create mode 100755 tests/queries/0_stateless/02732_rename_after_processing.sh diff --git a/tests/queries/0_stateless/02732_rename_after_processing.reference b/tests/queries/0_stateless/02732_rename_after_processing.reference new file mode 100644 index 00000000000..26e152f6b10 --- /dev/null +++ b/tests/queries/0_stateless/02732_rename_after_processing.reference @@ -0,0 +1,17 @@ +4 +processed_tmp1.csv +OK +10 10 +processed_tmp2.csv +OK +8 +processed_tmp3_1.csv +processed_tmp3_2.csv +OK +OK +4 +OK +OK +tmp5.csv +OK +tmp5.csv diff --git a/tests/queries/0_stateless/02732_rename_after_processing.sh b/tests/queries/0_stateless/02732_rename_after_processing.sh new file mode 100755 index 00000000000..05fbfb716ec --- /dev/null +++ b/tests/queries/0_stateless/02732_rename_after_processing.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +# Tags: no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +# see 01658_read_file_to_stringcolumn.sh +CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') + +# Prepare data +mkdir -p ${CLICKHOUSE_USER_FILES_PATH} +echo '"id","str","int","text"' > ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv +echo '1,"abc",123,"abacaba"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv +echo '2,"def",456,"bacabaa"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv +echo '3,"story",78912,"acabaab"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv +echo '4,"history",21321321,"cabaaba"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv + +cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp1.csv +cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp2.csv +cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp3_1.csv +cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp3_2.csv +cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp4.csv +cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp5.csv + +### Checking that renaming works + +# simple select +${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f%e" -q "SELECT COUNT(*) FROM file('tmp1.csv')" +ls ${CLICKHOUSE_USER_FILES_PATH} | grep "processed_tmp1.csv" +if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp1.csv" ]; then + echo "OK" +fi + +# select with multiple file() calls +${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f%e" --multiline -q """ +SELECT + sum(a.id) as aid, + sum(b.id) as bid +FROM file('tmp2.csv') AS a +INNER JOIN file('tmp2.csv') AS b +ON a.text = b.text +""" +ls ${CLICKHOUSE_USER_FILES_PATH} | grep "processed_tmp2.csv" +if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp2.csv" ]; then + echo "OK" +fi + +# rename multiple files +${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f%e" -q "SELECT COUNT(*) FROM file('tmp3*.csv')" +ls ${CLICKHOUSE_USER_FILES_PATH} | grep "processed_tmp3_1.csv" +ls ${CLICKHOUSE_USER_FILES_PATH} | grep "processed_tmp3_2.csv" +if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp3_1.csv" ]; then + echo "OK" +fi +if [ ! 
-e "${CLICKHOUSE_USER_FILES_PATH}/tmp3_2.csv" ]; then + echo "OK" +fi + +# check timestamp placeholder +${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f_%t.csv" -q "SELECT COUNT(*) FROM file('tmp4.csv')" +ls ${CLICKHOUSE_USER_FILES_PATH} | grep -E "^processed_tmp4_[0-9]+\.csv$" > /dev/null && echo "OK" + +### Checking errors + +# cannot overwrite an existing file +${CLICKHOUSE_CLIENT} --rename-files-after-processing="tmp.csv" -q "SELECT COUNT(*) FROM file('tmp5.csv')" \ + 2>&1| grep "already exists" > /dev/null && echo "OK" +ls ${CLICKHOUSE_USER_FILES_PATH} | grep "tmp5.csv" + +# сannot move file from user_files +${CLICKHOUSE_CLIENT} --rename-files-after-processing="../%f%e" -q "SELECT COUNT(*) FROM file('tmp5.csv')" \ + 2>&1| grep "is not inside" > /dev/null && echo "OK" +ls ${CLICKHOUSE_USER_FILES_PATH} | grep "tmp5.csv" + +# Clean +rm -rd $CLICKHOUSE_USER_FILES_PATH \ No newline at end of file From 2b68a6a22a84329f639af953e1fdaad9a6e21584 Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Sun, 7 May 2023 17:43:34 +0000 Subject: [PATCH 101/722] Fix style --- src/Common/FileRenamer.cpp | 5 +-- src/Storages/StorageFile.cpp | 5 ++- .../02732_rename_after_processing.reference | 8 ++-- .../02732_rename_after_processing.sh | 43 +++++++++++++------ 4 files changed, 39 insertions(+), 22 deletions(-) diff --git a/src/Common/FileRenamer.cpp b/src/Common/FileRenamer.cpp index 7a19c50a0d1..3473d543c00 100644 --- a/src/Common/FileRenamer.cpp +++ b/src/Common/FileRenamer.cpp @@ -8,7 +8,6 @@ #include #include #include -#include #include #include @@ -42,9 +41,7 @@ String FileRenamer::generateNewFilename(const String & filename) const if (rule.find("%t") != String::npos) { auto now = std::chrono::system_clock::now(); - std::stringstream ss; - ss << timeInMicroseconds(now); - timestamp = ss.str(); + timestamp = std::to_string(timeInMicroseconds(now)); } // Define placeholders and their corresponding values diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 2ea8da1a873..06af0a00953 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -604,7 +604,7 @@ public: /** * If specified option --rename_files_after_processing and files created by TableFunctionFile - * Last reader will rename files according to specified patten if desctuctor of reader was called without uncaught exceptions + * Last reader will rename files according to specified pattern if desctuctor of reader was called without uncaught exceptions */ void beforeDestroy() { @@ -623,7 +623,8 @@ public: if (storage->readers_counter.load(std::memory_order_acquire) != 0 || storage->was_renamed) return; - for (auto & file_path_ref : storage->paths) { + for (auto & file_path_ref : storage->paths) + { try { auto file_path = fs::path(file_path_ref); diff --git a/tests/queries/0_stateless/02732_rename_after_processing.reference b/tests/queries/0_stateless/02732_rename_after_processing.reference index 26e152f6b10..2f6ccfc1c5e 100644 --- a/tests/queries/0_stateless/02732_rename_after_processing.reference +++ b/tests/queries/0_stateless/02732_rename_after_processing.reference @@ -1,14 +1,14 @@ 4 processed_tmp1.csv -OK +!tmp1.csv 10 10 processed_tmp2.csv -OK +!tmp2.csv 8 processed_tmp3_1.csv processed_tmp3_2.csv -OK -OK +!tmp3_1.csv +!tmp3_2.csv 4 OK OK diff --git a/tests/queries/0_stateless/02732_rename_after_processing.sh b/tests/queries/0_stateless/02732_rename_after_processing.sh index 05fbfb716ec..93bad2eac7d 100755 --- a/tests/queries/0_stateless/02732_rename_after_processing.sh +++ 
b/tests/queries/0_stateless/02732_rename_after_processing.sh @@ -27,9 +27,11 @@ cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp5.csv # simple select ${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f%e" -q "SELECT COUNT(*) FROM file('tmp1.csv')" -ls ${CLICKHOUSE_USER_FILES_PATH} | grep "processed_tmp1.csv" +if [ -e "${CLICKHOUSE_USER_FILES_PATH}/processed_tmp1.csv" ]; then + echo "processed_tmp1.csv" +fi if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp1.csv" ]; then - echo "OK" + echo "!tmp1.csv" fi # select with multiple file() calls @@ -41,37 +43,54 @@ FROM file('tmp2.csv') AS a INNER JOIN file('tmp2.csv') AS b ON a.text = b.text """ -ls ${CLICKHOUSE_USER_FILES_PATH} | grep "processed_tmp2.csv" +if [ -e "${CLICKHOUSE_USER_FILES_PATH}/processed_tmp2.csv" ]; then + echo "processed_tmp2.csv" +fi if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp2.csv" ]; then - echo "OK" + echo "!tmp2.csv" fi # rename multiple files ${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f%e" -q "SELECT COUNT(*) FROM file('tmp3*.csv')" -ls ${CLICKHOUSE_USER_FILES_PATH} | grep "processed_tmp3_1.csv" -ls ${CLICKHOUSE_USER_FILES_PATH} | grep "processed_tmp3_2.csv" +if [ -e "${CLICKHOUSE_USER_FILES_PATH}/processed_tmp3_1.csv" ]; then + echo "processed_tmp3_1.csv" +fi +if [ -e "${CLICKHOUSE_USER_FILES_PATH}/processed_tmp3_2.csv" ]; then + echo "processed_tmp3_2.csv" +fi if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp3_1.csv" ]; then - echo "OK" + echo "!tmp3_1.csv" fi if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp3_2.csv" ]; then - echo "OK" + echo "!tmp3_2.csv" fi # check timestamp placeholder ${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f_%t.csv" -q "SELECT COUNT(*) FROM file('tmp4.csv')" -ls ${CLICKHOUSE_USER_FILES_PATH} | grep -E "^processed_tmp4_[0-9]+\.csv$" > /dev/null && echo "OK" +# ls ${CLICKHOUSE_USER_FILES_PATH} | grep -E "^processed_tmp4_[0-9]+\.csv$" > /dev/null && echo "OK" +rg="processed_tmp4_[0-9]+\.csv" +for x in "${CLICKHOUSE_USER_FILES_PATH}"/processed*; do + if [[ $x =~ $rg ]]; then + echo "OK" + break + fi; +done ### Checking errors # cannot overwrite an existing file ${CLICKHOUSE_CLIENT} --rename-files-after-processing="tmp.csv" -q "SELECT COUNT(*) FROM file('tmp5.csv')" \ 2>&1| grep "already exists" > /dev/null && echo "OK" -ls ${CLICKHOUSE_USER_FILES_PATH} | grep "tmp5.csv" +if [ -e "${CLICKHOUSE_USER_FILES_PATH}/tmp5.csv" ]; then + echo "tmp5.csv" +fi # сannot move file from user_files ${CLICKHOUSE_CLIENT} --rename-files-after-processing="../%f%e" -q "SELECT COUNT(*) FROM file('tmp5.csv')" \ 2>&1| grep "is not inside" > /dev/null && echo "OK" -ls ${CLICKHOUSE_USER_FILES_PATH} | grep "tmp5.csv" +if [ -e "${CLICKHOUSE_USER_FILES_PATH}/tmp5.csv" ]; then + echo "tmp5.csv" +fi # Clean -rm -rd $CLICKHOUSE_USER_FILES_PATH \ No newline at end of file +rm -rd $CLICKHOUSE_USER_FILES_PATH From eb7b48aab20172b79b1573c05c8a4baa02fe0804 Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Tue, 16 May 2023 18:48:39 +0000 Subject: [PATCH 102/722] Fix test issues --- .../02732_rename_after_processing.reference | 4 + .../02732_rename_after_processing.sh | 85 +++++++++++-------- 2 files changed, 55 insertions(+), 34 deletions(-) diff --git a/tests/queries/0_stateless/02732_rename_after_processing.reference b/tests/queries/0_stateless/02732_rename_after_processing.reference index 2f6ccfc1c5e..39cdb677e09 100644 --- a/tests/queries/0_stateless/02732_rename_after_processing.reference +++ b/tests/queries/0_stateless/02732_rename_after_processing.reference 
@@ -15,3 +15,7 @@ OK tmp5.csv OK tmp5.csv +OK +tmp5.csv +OK +tmp5.csv diff --git a/tests/queries/0_stateless/02732_rename_after_processing.sh b/tests/queries/0_stateless/02732_rename_after_processing.sh index 93bad2eac7d..dbf2427d2dc 100755 --- a/tests/queries/0_stateless/02732_rename_after_processing.sh +++ b/tests/queries/0_stateless/02732_rename_after_processing.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash -# Tags: no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -9,28 +8,30 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') # Prepare data -mkdir -p ${CLICKHOUSE_USER_FILES_PATH} -echo '"id","str","int","text"' > ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv -echo '1,"abc",123,"abacaba"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv -echo '2,"def",456,"bacabaa"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv -echo '3,"story",78912,"acabaab"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv -echo '4,"history",21321321,"cabaaba"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv +unique_name=${CLICKHOUSE_TEST_UNIQUE_NAME} +tmp_dir=${CLICKHOUSE_USER_FILES_PATH}/${unique_name} +mkdir -p $tmp_dir +echo '"id","str","int","text"' > ${tmp_dir}/tmp.csv +echo '1,"abc",123,"abacaba"' >> ${tmp_dir}/tmp.csv +echo '2,"def",456,"bacabaa"' >> ${tmp_dir}/tmp.csv +echo '3,"story",78912,"acabaab"' >> ${tmp_dir}/tmp.csv +echo '4,"history",21321321,"cabaaba"' >> ${tmp_dir}/tmp.csv -cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp1.csv -cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp2.csv -cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp3_1.csv -cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp3_2.csv -cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp4.csv -cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp5.csv +cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp1.csv +cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp2.csv +cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp3_1.csv +cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp3_2.csv +cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp4.csv +cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp5.csv ### Checking that renaming works # simple select -${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f%e" -q "SELECT COUNT(*) FROM file('tmp1.csv')" -if [ -e "${CLICKHOUSE_USER_FILES_PATH}/processed_tmp1.csv" ]; then +${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f%e" -q "SELECT COUNT(*) FROM file('${unique_name}/tmp1.csv')" +if [ -e "${tmp_dir}/processed_tmp1.csv" ]; then echo "processed_tmp1.csv" fi -if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp1.csv" ]; then +if [ ! -e "${tmp_dir}/tmp1.csv" ]; then echo "!tmp1.csv" fi @@ -39,37 +40,37 @@ ${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f%e" --multilin SELECT sum(a.id) as aid, sum(b.id) as bid -FROM file('tmp2.csv') AS a -INNER JOIN file('tmp2.csv') AS b +FROM file('${unique_name}/tmp2.csv') AS a +INNER JOIN file('${unique_name}/tmp2.csv') AS b ON a.text = b.text """ -if [ -e "${CLICKHOUSE_USER_FILES_PATH}/processed_tmp2.csv" ]; then +if [ -e "${tmp_dir}/processed_tmp2.csv" ]; then echo "processed_tmp2.csv" fi -if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp2.csv" ]; then +if [ ! 
-e "${tmp_dir}/tmp2.csv" ]; then echo "!tmp2.csv" fi # rename multiple files -${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f%e" -q "SELECT COUNT(*) FROM file('tmp3*.csv')" -if [ -e "${CLICKHOUSE_USER_FILES_PATH}/processed_tmp3_1.csv" ]; then +${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f%e" -q "SELECT COUNT(*) FROM file('${unique_name}/tmp3*.csv')" +if [ -e "${tmp_dir}/processed_tmp3_1.csv" ]; then echo "processed_tmp3_1.csv" fi -if [ -e "${CLICKHOUSE_USER_FILES_PATH}/processed_tmp3_2.csv" ]; then +if [ -e "${tmp_dir}/processed_tmp3_2.csv" ]; then echo "processed_tmp3_2.csv" fi -if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp3_1.csv" ]; then +if [ ! -e "${tmp_dir}/tmp3_1.csv" ]; then echo "!tmp3_1.csv" fi -if [ ! -e "${CLICKHOUSE_USER_FILES_PATH}/tmp3_2.csv" ]; then +if [ ! -e "${tmp_dir}/tmp3_2.csv" ]; then echo "!tmp3_2.csv" fi # check timestamp placeholder -${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f_%t.csv" -q "SELECT COUNT(*) FROM file('tmp4.csv')" -# ls ${CLICKHOUSE_USER_FILES_PATH} | grep -E "^processed_tmp4_[0-9]+\.csv$" > /dev/null && echo "OK" +${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f_%t.csv" -q "SELECT COUNT(*) FROM file('${unique_name}/tmp4.csv')" +# ls ${tmp_dir} | grep -E "^processed_tmp4_[0-9]+\.csv$" > /dev/null && echo "OK" rg="processed_tmp4_[0-9]+\.csv" -for x in "${CLICKHOUSE_USER_FILES_PATH}"/processed*; do +for x in "${tmp_dir}"/processed*; do if [[ $x =~ $rg ]]; then echo "OK" break @@ -79,18 +80,34 @@ done ### Checking errors # cannot overwrite an existing file -${CLICKHOUSE_CLIENT} --rename-files-after-processing="tmp.csv" -q "SELECT COUNT(*) FROM file('tmp5.csv')" \ +${CLICKHOUSE_CLIENT} --rename-files-after-processing="tmp.csv" -q "SELECT COUNT(*) FROM file('${unique_name}/tmp5.csv')" \ 2>&1| grep "already exists" > /dev/null && echo "OK" -if [ -e "${CLICKHOUSE_USER_FILES_PATH}/tmp5.csv" ]; then +if [ -e "${tmp_dir}/tmp5.csv" ]; then echo "tmp5.csv" fi -# сannot move file from user_files -${CLICKHOUSE_CLIENT} --rename-files-after-processing="../%f%e" -q "SELECT COUNT(*) FROM file('tmp5.csv')" \ +# сannot move file outside user_files +${CLICKHOUSE_CLIENT} --rename-files-after-processing="../../%f%e" -q "SELECT COUNT(*) FROM file('${unique_name}/tmp5.csv')" \ 2>&1| grep "is not inside" > /dev/null && echo "OK" -if [ -e "${CLICKHOUSE_USER_FILES_PATH}/tmp5.csv" ]; then +if [ -e "${tmp_dir}/tmp5.csv" ]; then + echo "tmp5.csv" +fi + +# check invalid placeholders + +# unknown type of placeholder (%k) +${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f_%k" -q "SELECT COUNT(*) FROM file('${unique_name}/tmp5.csv')" \ + 2>&1| grep "Allowed placeholders only" > /dev/null && echo "OK" +if [ -e "${tmp_dir}/tmp5.csv" ]; then + echo "tmp5.csv" +fi + +# dd number of consecutive percentage signs after replace valid placeholders +${CLICKHOUSE_CLIENT} --rename-files-after-processing="processed_%f_%%%%e" -q "SELECT COUNT(*) FROM file('${unique_name}/tmp5.csv')" \ + 2>&1| grep "Odd number of consecutive percentage signs" > /dev/null && echo "OK" +if [ -e "${tmp_dir}/tmp5.csv" ]; then echo "tmp5.csv" fi # Clean -rm -rd $CLICKHOUSE_USER_FILES_PATH +rm -rd $tmp_dir From 5e33dd5d5c9eace011d4c729e3c760c017955de9 Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Wed, 17 May 2023 19:14:46 +0000 Subject: [PATCH 103/722] Added chmod for tmp dir and files in test --- tests/queries/0_stateless/02732_rename_after_processing.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git 
a/tests/queries/0_stateless/02732_rename_after_processing.sh b/tests/queries/0_stateless/02732_rename_after_processing.sh index dbf2427d2dc..4c87070cd35 100755 --- a/tests/queries/0_stateless/02732_rename_after_processing.sh +++ b/tests/queries/0_stateless/02732_rename_after_processing.sh @@ -11,12 +11,18 @@ CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from unique_name=${CLICKHOUSE_TEST_UNIQUE_NAME} tmp_dir=${CLICKHOUSE_USER_FILES_PATH}/${unique_name} mkdir -p $tmp_dir +rm -rf ${tmp_dir}/* + +chmod 777 ${tmp_dir} + echo '"id","str","int","text"' > ${tmp_dir}/tmp.csv echo '1,"abc",123,"abacaba"' >> ${tmp_dir}/tmp.csv echo '2,"def",456,"bacabaa"' >> ${tmp_dir}/tmp.csv echo '3,"story",78912,"acabaab"' >> ${tmp_dir}/tmp.csv echo '4,"history",21321321,"cabaaba"' >> ${tmp_dir}/tmp.csv +chmod 777 ${tmp_dir}/tmp.csv + cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp1.csv cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp2.csv cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp3_1.csv From 4550a705f97497607a627f5cca8f265b650abdc1 Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Wed, 17 May 2023 19:49:17 +0000 Subject: [PATCH 104/722] Fix style --- tests/queries/0_stateless/02732_rename_after_processing.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02732_rename_after_processing.sh b/tests/queries/0_stateless/02732_rename_after_processing.sh index 4c87070cd35..c4f80d3462b 100755 --- a/tests/queries/0_stateless/02732_rename_after_processing.sh +++ b/tests/queries/0_stateless/02732_rename_after_processing.sh @@ -11,7 +11,7 @@ CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from unique_name=${CLICKHOUSE_TEST_UNIQUE_NAME} tmp_dir=${CLICKHOUSE_USER_FILES_PATH}/${unique_name} mkdir -p $tmp_dir -rm -rf ${tmp_dir}/* +rm -rf ${tmp_dir:?}/* chmod 777 ${tmp_dir} From 900d50633d9c541eff5ac05bdef19cd84e3d6c16 Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Wed, 17 May 2023 20:15:26 +0000 Subject: [PATCH 105/722] retrigger checks From 5368355c659ef2f502ea9787593700c1ae03067d Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Fri, 19 May 2023 16:05:51 +0000 Subject: [PATCH 106/722] Marked test as broken due to fail of the new analyzer --- tests/broken_tests.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/broken_tests.txt b/tests/broken_tests.txt index e61c1316e17..cef8f68b210 100644 --- a/tests/broken_tests.txt +++ b/tests/broken_tests.txt @@ -137,3 +137,4 @@ 01600_parts_types_metrics_long 01287_max_execution_speed 02703_row_policy_for_database +02732_rename_after_processing From f459ac5517bcac49c7e6d583fca827504d2b0aa1 Mon Sep 17 00:00:00 2001 From: zvonand Date: Sat, 20 May 2023 01:38:35 +0200 Subject: [PATCH 107/722] resolve ambiguity by adding a section to docs --- docs/en/operations/settings/settings.md | 20 ++++++++++++++++++++ docs/ru/operations/settings/settings.md | 20 ++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index cf9209e182f..9cca1ee5ec3 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4077,6 +4077,26 @@ SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zuric 1999-12-13 07:23:23.123 ``` +:::warning +The way this setting affects parsing of Date or DateTime types may seem non-obvious, see example and explanation below: +::: + +```sql +CREATE TABLE test_tz (`d` DateTime('UTC')) ENGINE = Memory AS SELECT toDateTime('2000-01-01 00:00:00', 
'UTC'); + +SELECT *, timezone() FROM test_tz WHERE d = toDateTime('2000-01-01 00:00:00') SETTINGS session_timezone = 'Asia/Novosibirsk' +0 rows in set. + +SELECT *, timezone() FROM test_tz WHERE d = '2000-01-01 00:00:00' SETTINGS session_timezone = 'Asia/Novosibirsk' +┌───────────────────d─┬─timezone()───────┐ +│ 2000-01-01 00:00:00 │ Asia/Novosibirsk │ +└─────────────────────┴──────────────────┘ +``` + +This happens due to different parsing pipelines: + - `toDateTime('2000-01-01 00:00:00')` creates a new DateTime in a usual way, and thus `session_timezone` setting from query context is applied. + - `2000-01-01 00:00:00` is parsed to a DateTime inheriting type of `d` column, including DateTime's time zone, and `session_timezone` has no impact on this value. + Possible values: - Any timezone name from `system.time_zones`, e.g. `Europe/Berlin`, `UTC` or `Zulu` diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 98486847fd9..607082054cc 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -4105,6 +4105,26 @@ SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zuric Значение по умолчанию: `''`. +:::warning +То, как этот параметр влияет на парсинг значений типа Date или DateTime, может показаться неочевидным. Пример и пояснение см. ниже: +::: + +```sql +CREATE TABLE test_tz (`d` DateTime('UTC')) ENGINE = Memory AS SELECT toDateTime('2000-01-01 00:00:00', 'UTC'); + +SELECT *, timezone() FROM test_tz WHERE d = toDateTime('2000-01-01 00:00:00') SETTINGS session_timezone = 'Asia/Novosibirsk' +0 rows in set. + +SELECT *, timezone() FROM test_tz WHERE d = '2000-01-01 00:00:00' SETTINGS session_timezone = 'Asia/Novosibirsk' +┌───────────────────d─┬─timezone()───────┐ +│ 2000-01-01 00:00:00 │ Asia/Novosibirsk │ +└─────────────────────┴──────────────────┘ +``` + +Это происходит из-за различного происхождения значения, используемого для сравнения: +- `toDateTime('2000-01-01 00:00:00')` создаёт значение типа `DateTime` как и в любом другом случае, в том числе применяет параметр `session_timezone` из контекста запроса, +- `2000-01-01 00:00:00` парсится в `DateTime` того же типа, что и колонка `d` (в том числе с той же `timezone`), и параметр `session_timezone` в данном случае не учитывается. + **Смотрите также** - [timezone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone) \ No newline at end of file From af5793b26e0ee8fc02201f8d0439b5a15a019e7f Mon Sep 17 00:00:00 2001 From: zvonand Date: Sat, 20 May 2023 16:38:45 +0200 Subject: [PATCH 108/722] fix incode docs --- src/Functions/serverConstants.cpp | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/Functions/serverConstants.cpp b/src/Functions/serverConstants.cpp index 1460fc16265..9cd43be50c4 100644 --- a/src/Functions/serverConstants.cpp +++ b/src/Functions/serverConstants.cpp @@ -160,31 +160,33 @@ REGISTER_FUNCTION(TcpPort) REGISTER_FUNCTION(Timezone) { - factory.registerFunction({ - R"( + factory.registerFunction( + FunctionDocumentation{ + .description=R"( Returns the default timezone for current session. Used as default timezone for parsing DateTime|DateTime64 without explicitly specified timezone. 
Can be changed with SET timezone = 'New/Tz' [example:timezone] -)", - Documentation::Examples{{"timezone", "SELECT timezone();"}}, - Documentation::Categories{"Constant", "Miscellaneous"} - }); - factory.registerAlias("timeZone", "timezone"); + )", + .examples{{"timezone", "SELECT timezone();", ""}}, + .categories{"Constant", "Miscellaneous"} +}); +factory.registerAlias("timeZone", "timezone"); } REGISTER_FUNCTION(ServerTimezone) { - factory.registerFunction({ - R"( + factory.registerFunction( + FunctionDocumentation{ + .description=R"( Returns the timezone name in which server operates. [example:serverTimezone] -)", - Documentation::Examples{{"serverTimezone", "SELECT serverTimezone();"}}, - Documentation::Categories{"Constant", "Miscellaneous"} - }); + )", + .examples{{"serverTimezone", "SELECT serverTimezone();", ""}}, + .categories{"Constant", "Miscellaneous"} +}); factory.registerAlias("serverTimeZone", "serverTimezone"); factory.registerAlias("servertimezone", "serverTimezone"); } From 24320f8f93f56aa9a7088c4daf80a066facdc5b6 Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Sun, 21 May 2023 15:58:29 +0000 Subject: [PATCH 109/722] fixed bad pattern in tests --- .../0_stateless/02722_database_filesystem.sh | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/tests/queries/0_stateless/02722_database_filesystem.sh b/tests/queries/0_stateless/02722_database_filesystem.sh index 80f97af693e..7466141d3e3 100755 --- a/tests/queries/0_stateless/02722_database_filesystem.sh +++ b/tests/queries/0_stateless/02722_database_filesystem.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash -# Tags: no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -9,19 +8,21 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') # Prepare data -mkdir -p ${CLICKHOUSE_USER_FILES_PATH}/tmp/ -echo '"id","str","int","text"' > ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv -echo '1,"abc",123,"abacaba"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv -echo '2,"def",456,"bacabaa"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv -echo '3,"story",78912,"acabaab"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv -echo '4,"history",21321321,"cabaaba"' >> ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv +unique_name=${CLICKHOUSE_TEST_UNIQUE_NAME} +user_files_tmp_dir=${CLICKHOUSE_USER_FILES_PATH}/${unique_name} +mkdir -p ${user_files_tmp_dir}/tmp/ +echo '"id","str","int","text"' > ${user_files_tmp_dir}/tmp.csv +echo '1,"abc",123,"abacaba"' >> ${user_files_tmp_dir}/tmp.csv +echo '2,"def",456,"bacabaa"' >> ${user_files_tmp_dir}/tmp.csv +echo '3,"story",78912,"acabaab"' >> ${user_files_tmp_dir}/tmp.csv +echo '4,"history",21321321,"cabaaba"' >> ${user_files_tmp_dir}/tmp.csv tmp_dir=${CLICKHOUSE_TEST_UNIQUE_NAME} [[ -d $tmp_dir ]] && rm -rd $tmp_dir mkdir $tmp_dir -cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${tmp_dir}/tmp.csv -cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp/tmp.csv -cp ${CLICKHOUSE_USER_FILES_PATH}/tmp.csv ${CLICKHOUSE_USER_FILES_PATH}/tmp.myext +cp ${user_files_tmp_dir}/tmp.csv ${tmp_dir}/tmp.csv +cp ${user_files_tmp_dir}/tmp.csv ${user_files_tmp_dir}/tmp/tmp.csv +cp ${user_files_tmp_dir}/tmp.csv ${user_files_tmp_dir}/tmp.myext ################# echo "Test 1: create filesystem database and check implicit calls" @@ -31,8 +32,8 @@ CREATE DATABASE test1 ENGINE = 
Filesystem; """ echo $? ${CLICKHOUSE_CLIENT} --query "SHOW DATABASES" | grep "test1" -${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`tmp.csv\`;" -${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`tmp/tmp.csv\`;" +${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`${unique_name}/tmp.csv\`;" +${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`${unique_name}/tmp/tmp.csv\`;" ${CLICKHOUSE_LOCAL} -q "SELECT COUNT(*) FROM \"${tmp_dir}/tmp.csv\"" ################# @@ -62,9 +63,9 @@ CREATE DATABASE test2 ENGINE = Filesystem('relative_unknown_dir'); ${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`tmp2.csv\`;" 2>&1| grep -F "Code: 107" > /dev/null && echo "OK" # BAD_ARGUMENTS: Cannot determine the file format by it's extension -${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`tmp.myext\`;" 2>&1| grep -F "Code: 36" > /dev/null && echo "OK" +${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`${unique_name}/tmp.myext\`;" 2>&1| grep -F "Code: 36" > /dev/null && echo "OK" # Clean ${CLICKHOUSE_CLIENT} --query "DROP DATABASE test1;" rm -rd $tmp_dir -rm -rd $CLICKHOUSE_USER_FILES_PATH +rm -rd $user_files_tmp_dir From 8c816a5c4a97044b1d3d902145ef4eefecd7beb8 Mon Sep 17 00:00:00 2001 From: zvonand Date: Mon, 22 May 2023 01:00:40 +0200 Subject: [PATCH 110/722] update --- src/Common/DateLUT.h | 37 ++++++++++++++++--------------- src/Core/Settings.h | 2 +- src/Functions/serverConstants.cpp | 4 +--- src/IO/ReadHelpers.h | 4 ++-- 4 files changed, 23 insertions(+), 24 deletions(-) diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index 59b280240ea..23698331afe 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -17,30 +17,24 @@ class DateLUT : private boost::noncopyable { public: - /// Return singleton DateLUTImpl instance for server's (native) time zone. - static ALWAYS_INLINE const DateLUTImpl & serverTimezoneInstance() - { - const auto & date_lut = getInstance(); - return *date_lut.default_impl.load(std::memory_order_acquire); - } - - /// Return singleton DateLUTImpl instance for timezone set by `timezone` setting for current session is used. - /// If it is not set, server's timezone (the one which server has) is being used. + /// Return singleton DateLUTImpl instance for session timezone. + /// The session timezone is configured by a session setting. + /// If not set (empty string), it is the server timezone. 
static ALWAYS_INLINE const DateLUTImpl & instance() { const auto & date_lut = getInstance(); if (DB::CurrentThread::isInitialized()) { - std::string effective_time_zone; - const auto query_context = DB::CurrentThread::get().getQueryContext(); + std::string context_timezone; + const DB::ContextPtr query_context = DB::CurrentThread::get().getQueryContext(); if (query_context) { - effective_time_zone = extractTimezoneFromContext(query_context); + context_timezone = extractTimezoneFromContext(query_context); - if (!effective_time_zone.empty()) - return date_lut.getImplementation(effective_time_zone); + if (!context_timezone.empty()) + return date_lut.getImplementation(context_timezone); } /// Timezone is passed in query_context, but on CH-Client we have no query context, @@ -48,10 +42,10 @@ public: const auto global_context = DB::CurrentThread::get().getGlobalContext(); if (global_context) { - effective_time_zone = extractTimezoneFromContext(global_context); + context_timezone = extractTimezoneFromContext(global_context); - if (!effective_time_zone.empty()) - return date_lut.getImplementation(effective_time_zone); + if (!context_timezone.empty()) + return date_lut.getImplementation(context_timezone); } } @@ -67,6 +61,13 @@ public: return date_lut.getImplementation(time_zone); } + // Return singleton DateLUTImpl for the server time zone. + static ALWAYS_INLINE const DateLUTImpl & serverTimezoneInstance() + { + const auto & date_lut = getInstance(); + return *date_lut.default_impl.load(std::memory_order_acquire); + } + static void setDefaultTimezone(const std::string & time_zone) { auto & date_lut = getInstance(); @@ -80,7 +81,7 @@ protected: private: static DateLUT & getInstance(); - static std::string extractTimezoneFromContext(const DB::ContextPtr query_context); + static std::string extractTimezoneFromContext(DB::ContextPtr query_context); const DateLUTImpl & getImplementation(const std::string & time_zone) const; diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 90063f8efd4..2d766e8e18f 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -747,7 +747,7 @@ class IColumn; M(Bool, allow_experimental_undrop_table_query, false, "Allow to use undrop query to restore dropped table in a limited time", 0) \ M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \ M(UInt64, extract_kvp_max_pairs_per_row, 1000, "Max number pairs that can be produced by extractKeyValuePairs function. Used to safeguard against consuming too much memory.", 0) \ - M(Timezone, session_timezone, "", "Use specified timezone for interpreting Date and DateTime instead of server's timezone in current session.", 0) \ + M(Timezone, session_timezone, "", "The default timezone for the current session. The server default timezone, if empty.", 0) \ // End of COMMON_SETTINGS // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS. diff --git a/src/Functions/serverConstants.cpp b/src/Functions/serverConstants.cpp index 9cd43be50c4..d3e1e6e10fe 100644 --- a/src/Functions/serverConstants.cpp +++ b/src/Functions/serverConstants.cpp @@ -60,7 +60,7 @@ namespace }; - /// Returns default timezone for current session. + /// Returns timezone for current session. class FunctionTimezone : public FunctionConstantBase { public: @@ -187,8 +187,6 @@ Returns the timezone name in which server operates. 
.examples{{"serverTimezone", "SELECT serverTimezone();", ""}}, .categories{"Constant", "Miscellaneous"} }); - factory.registerAlias("serverTimeZone", "serverTimezone"); - factory.registerAlias("servertimezone", "serverTimezone"); } REGISTER_FUNCTION(Uptime) diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 3bd9275322e..cbe18e11c9a 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -706,7 +706,7 @@ inline void convertToDayNum(DayNum & date, ExtendedDayNum & from) } template -inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) +inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut) { static constexpr bool throw_exception = std::is_same_v; @@ -723,7 +723,7 @@ inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLU } template -inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) +inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut) { static constexpr bool throw_exception = std::is_same_v; From 183f90e45a7601e5ad4af63b24dabfc506a637ae Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Mon, 22 May 2023 02:02:09 +0000 Subject: [PATCH 111/722] Update MongoDB protocol --- .../Foundation/include/Poco/BinaryReader.h | 3 + .../Foundation/include/Poco/BinaryWriter.h | 5 + base/poco/Foundation/src/BinaryReader.cpp | 25 ++ base/poco/Foundation/src/BinaryWriter.cpp | 11 +- base/poco/MongoDB/CMakeLists.txt | 1 + .../poco/MongoDB/include/Poco/MongoDB/Array.h | 36 +- .../MongoDB/include/Poco/MongoDB/Binary.h | 2 +- .../MongoDB/include/Poco/MongoDB/Connection.h | 22 +- .../MongoDB/include/Poco/MongoDB/Cursor.h | 3 + .../MongoDB/include/Poco/MongoDB/Database.h | 83 +++- .../MongoDB/include/Poco/MongoDB/Document.h | 27 +- .../MongoDB/include/Poco/MongoDB/Element.h | 6 +- .../include/Poco/MongoDB/JavaScriptCode.h | 2 +- .../include/Poco/MongoDB/MessageHeader.h | 11 +- .../MongoDB/include/Poco/MongoDB/MongoDB.h | 12 + .../MongoDB/include/Poco/MongoDB/ObjectId.h | 2 +- .../include/Poco/MongoDB/OpMsgCursor.h | 96 ++++ .../include/Poco/MongoDB/OpMsgMessage.h | 163 +++++++ .../Poco/MongoDB/PoolableConnectionFactory.h | 16 + .../include/Poco/MongoDB/RegularExpression.h | 2 +- .../include/Poco/MongoDB/ResponseMessage.h | 3 + base/poco/MongoDB/src/Array.cpp | 4 +- base/poco/MongoDB/src/Connection.cpp | 26 ++ base/poco/MongoDB/src/Cursor.cpp | 6 + base/poco/MongoDB/src/Database.cpp | 48 +- base/poco/MongoDB/src/DeleteRequest.cpp | 4 +- base/poco/MongoDB/src/Document.cpp | 14 +- base/poco/MongoDB/src/Element.cpp | 2 +- base/poco/MongoDB/src/GetMoreRequest.cpp | 2 +- base/poco/MongoDB/src/InsertRequest.cpp | 2 +- base/poco/MongoDB/src/KillCursorsRequest.cpp | 2 +- base/poco/MongoDB/src/Message.cpp | 2 +- base/poco/MongoDB/src/MessageHeader.cpp | 12 +- base/poco/MongoDB/src/ObjectId.cpp | 2 +- base/poco/MongoDB/src/OpMsgCursor.cpp | 187 ++++++++ base/poco/MongoDB/src/OpMsgMessage.cpp | 412 ++++++++++++++++++ base/poco/MongoDB/src/QueryRequest.cpp | 6 +- base/poco/MongoDB/src/RegularExpression.cpp | 4 +- base/poco/MongoDB/src/ReplicaSet.cpp | 6 +- base/poco/MongoDB/src/RequestMessage.cpp | 4 +- base/poco/MongoDB/src/ResponseMessage.cpp | 20 +- base/poco/MongoDB/src/UpdateRequest.cpp | 2 +- .../runner/compose/docker_compose_mongo.yml | 2 +- .../compose/docker_compose_mongo_secure.yml | 2 +- src/Dictionaries/MongoDBDictionarySource.cpp 
| 15 +- src/Dictionaries/MongoDBDictionarySource.h | 1 - src/Processors/Sources/MongoDBSource.cpp | 76 +++- src/Processors/Sources/MongoDBSource.h | 32 +- src/Storages/StorageMongoDB.cpp | 34 +- .../integration/test_storage_mongodb/test.py | 42 +- 50 files changed, 1399 insertions(+), 103 deletions(-) create mode 100644 base/poco/MongoDB/include/Poco/MongoDB/OpMsgCursor.h create mode 100644 base/poco/MongoDB/include/Poco/MongoDB/OpMsgMessage.h create mode 100644 base/poco/MongoDB/src/OpMsgCursor.cpp create mode 100644 base/poco/MongoDB/src/OpMsgMessage.cpp diff --git a/base/poco/Foundation/include/Poco/BinaryReader.h b/base/poco/Foundation/include/Poco/BinaryReader.h index 4042b507a2f..2b9bca29944 100644 --- a/base/poco/Foundation/include/Poco/BinaryReader.h +++ b/base/poco/Foundation/include/Poco/BinaryReader.h @@ -117,6 +117,9 @@ public: void readRaw(char * buffer, std::streamsize length); /// Reads length bytes of raw data into buffer. + void readCString(std::string& value); + /// Reads zero-terminated C-string into value. + void readBOM(); /// Reads a byte-order mark from the stream and configures /// the reader for the encountered byte order. diff --git a/base/poco/Foundation/include/Poco/BinaryWriter.h b/base/poco/Foundation/include/Poco/BinaryWriter.h index aa280d4ccab..a35d76d84bc 100644 --- a/base/poco/Foundation/include/Poco/BinaryWriter.h +++ b/base/poco/Foundation/include/Poco/BinaryWriter.h @@ -56,6 +56,8 @@ public: LITTLE_ENDIAN_BYTE_ORDER = 3 /// little-endian byte-order }; + static const std::streamsize DEFAULT_MAX_CSTR_LENGTH { 1024 }; + BinaryWriter(std::ostream & ostr, StreamByteOrder byteOrder = NATIVE_BYTE_ORDER); /// Creates the BinaryWriter. @@ -131,6 +133,9 @@ public: void writeRaw(const char * buffer, std::streamsize length); /// Writes length raw bytes from the given buffer to the stream. + void writeCString(const char* cString, std::streamsize maxLength = DEFAULT_MAX_CSTR_LENGTH); + /// Writes zero-terminated C-string. + void writeBOM(); /// Writes a byte-order mark to the stream. A byte order mark is /// a 16-bit integer with a value of 0xFEFF, written in host byte-order. 
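(Illustrative note, not part of the patch: the new BinaryReader::readCString() and BinaryWriter::writeCString() helpers are introduced because the BSON/OP_MSG wire format uses zero-terminated "cstring" fields, for example the payload-section identifier handled in OpMsgMessage::send() and read() further below. A minimal round-trip sketch, assuming only the declarations added above and a linked Poco Foundation library; the stream and value names are illustrative, not from the patch:)

#include <sstream>
#include <iostream>
#include "Poco/BinaryReader.h"
#include "Poco/BinaryWriter.h"

int main()
{
    std::stringstream ss;

    Poco::BinaryWriter writer(ss, Poco::BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER);
    writer.writeCString("documents");      // writes the raw bytes followed by a single '\0'
    writer.flush();

    Poco::BinaryReader reader(ss, Poco::BinaryReader::LITTLE_ENDIAN_BYTE_ORDER);
    std::string identifier;
    reader.readCString(identifier);        // reads up to and consumes the terminating '\0'
    std::cout << identifier << std::endl;  // prints "documents"
    return 0;
}
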
diff --git a/base/poco/Foundation/src/BinaryReader.cpp b/base/poco/Foundation/src/BinaryReader.cpp index f2961e03966..37ec2bc9040 100644 --- a/base/poco/Foundation/src/BinaryReader.cpp +++ b/base/poco/Foundation/src/BinaryReader.cpp @@ -274,6 +274,31 @@ void BinaryReader::readRaw(char* buffer, std::streamsize length) } +void BinaryReader::readCString(std::string& value) +{ + value.clear(); + if (!_istr.good()) + { + return; + } + value.reserve(256); + while (true) + { + char c; + _istr.get(c); + if (!_istr.good()) + { + break; + } + if (c == '\0') + { + break; + } + value += c; + } +} + + void BinaryReader::readBOM() { UInt16 bom; diff --git a/base/poco/Foundation/src/BinaryWriter.cpp b/base/poco/Foundation/src/BinaryWriter.cpp index 6db5ab7cb90..c3fcabc4374 100644 --- a/base/poco/Foundation/src/BinaryWriter.cpp +++ b/base/poco/Foundation/src/BinaryWriter.cpp @@ -271,7 +271,7 @@ BinaryWriter& BinaryWriter::operator << (const std::string& value) BinaryWriter& BinaryWriter::operator << (const char* value) { poco_check_ptr (value); - + if (_pTextConverter) { std::string converted; @@ -332,6 +332,15 @@ void BinaryWriter::writeRaw(const char* buffer, std::streamsize length) } +void BinaryWriter::writeCString(const char* cString, std::streamsize maxLength) +{ + const std::size_t len = ::strnlen(cString, maxLength); + writeRaw(cString, len); + static const char zero = '\0'; + _ostr.write(&zero, sizeof(zero)); +} + + void BinaryWriter::writeBOM() { UInt16 value = 0xFEFF; diff --git a/base/poco/MongoDB/CMakeLists.txt b/base/poco/MongoDB/CMakeLists.txt index 8f5c6be2cae..bb6f90ed8f5 100644 --- a/base/poco/MongoDB/CMakeLists.txt +++ b/base/poco/MongoDB/CMakeLists.txt @@ -13,3 +13,4 @@ target_compile_options (_poco_mongodb target_include_directories (_poco_mongodb SYSTEM PUBLIC "include") target_link_libraries (_poco_mongodb PUBLIC Poco::Net) + diff --git a/base/poco/MongoDB/include/Poco/MongoDB/Array.h b/base/poco/MongoDB/include/Poco/MongoDB/Array.h index 4ed9cdd87ee..8a30c785b2d 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/Array.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/Array.h @@ -33,7 +33,7 @@ namespace MongoDB /// This class represents a BSON Array. { public: - typedef SharedPtr Ptr; + using Ptr = SharedPtr; Array(); /// Creates an empty Array. @@ -41,8 +41,31 @@ namespace MongoDB virtual ~Array(); /// Destroys the Array. + // Document template functions available for backward compatibility + using Document::add; + using Document::get; + template - T get(int pos) const + Document & add(T value) + /// Creates an element with the name from the current pos and value and + /// adds it to the array document. + /// + /// The active document is returned to allow chaining of the add methods. + { + return Document::add(Poco::NumberFormatter::format(size()), value); + } + + Document & add(const char * value) + /// Creates an element with a name from the current pos and value and + /// adds it to the array document. + /// + /// The active document is returned to allow chaining of the add methods. + { + return Document::add(Poco::NumberFormatter::format(size()), value); + } + + template + T get(std::size_t pos) const /// Returns the element at the given index and tries to convert /// it to the template type. If the element is not found, a /// Poco::NotFoundException will be thrown. 
If the element cannot be @@ -52,7 +75,7 @@ namespace MongoDB } template - T get(int pos, const T & deflt) const + T get(std::size_t pos, const T & deflt) const /// Returns the element at the given index and tries to convert /// it to the template type. If the element is not found, or /// has the wrong type, the deflt argument will be returned. @@ -60,12 +83,12 @@ namespace MongoDB return Document::get(Poco::NumberFormatter::format(pos), deflt); } - Element::Ptr get(int pos) const; + Element::Ptr get(std::size_t pos) const; /// Returns the element at the given index. /// An empty element will be returned if the element is not found. template - bool isType(int pos) const + bool isType(std::size_t pos) const /// Returns true if the type of the element equals the TypeId of ElementTrait, /// otherwise false. { @@ -74,6 +97,9 @@ namespace MongoDB std::string toString(int indent = 0) const; /// Returns a string representation of the Array. + + private: + friend void BSONReader::read(Array::Ptr & to); }; diff --git a/base/poco/MongoDB/include/Poco/MongoDB/Binary.h b/base/poco/MongoDB/include/Poco/MongoDB/Binary.h index 1005cb000f5..aad8736e8b6 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/Binary.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/Binary.h @@ -40,7 +40,7 @@ namespace MongoDB /// A Binary stores its data in a Poco::Buffer. { public: - typedef SharedPtr Ptr; + using Ptr = SharedPtr; Binary(); /// Creates an empty Binary with subtype 0. diff --git a/base/poco/MongoDB/include/Poco/MongoDB/Connection.h b/base/poco/MongoDB/include/Poco/MongoDB/Connection.h index dcb813b75bc..cf679d530aa 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/Connection.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/Connection.h @@ -18,6 +18,7 @@ #define MongoDB_Connection_INCLUDED +#include "Poco/MongoDB/OpMsgMessage.h" #include "Poco/MongoDB/RequestMessage.h" #include "Poco/MongoDB/ResponseMessage.h" #include "Poco/Mutex.h" @@ -39,7 +40,7 @@ namespace MongoDB /// for more information on the wire protocol. { public: - typedef Poco::SharedPtr Ptr; + using Ptr = Poco::SharedPtr; class MongoDB_API SocketFactory { @@ -90,7 +91,7 @@ namespace MongoDB Poco::Net::SocketAddress address() const; /// Returns the address of the MongoDB server. - + const std::string & uri() const; /// Returns the uri on which the connection was made. @@ -145,6 +146,21 @@ namespace MongoDB /// Use this when a response is expected: only a "query" or "getmore" /// request will return a response. + void sendRequest(OpMsgMessage & request, OpMsgMessage & response); + /// Sends a request to the MongoDB server and receives the response + /// using newer wire protocol with OP_MSG. + + void sendRequest(OpMsgMessage & request); + /// Sends an unacknowledged request to the MongoDB server using newer + /// wire protocol with OP_MSG. + /// No response is sent by the server. + + void readResponse(OpMsgMessage & response); + /// Reads additional response data when previous message's flag moreToCome + /// indicates that server will send more data. + /// NOTE: See comments in OpMsgCursor code. 
+ + protected: void connect(); @@ -164,7 +180,7 @@ namespace MongoDB } inline const std::string & Connection::uri() const { - return _uri; + return _uri; } diff --git a/base/poco/MongoDB/include/Poco/MongoDB/Cursor.h b/base/poco/MongoDB/include/Poco/MongoDB/Cursor.h index 4aed9fe64fb..8849d737a62 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/Cursor.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/Cursor.h @@ -40,6 +40,9 @@ namespace MongoDB Cursor(const std::string & fullCollectionName, QueryRequest::Flags flags = QueryRequest::QUERY_DEFAULT); /// Creates a Cursor for the given database and collection ("database.collection"), using the specified flags. + Cursor(const Document & aggregationResponse); + /// Creates a Cursor for the given aggregation query response. + virtual ~Cursor(); /// Destroys the Cursor. diff --git a/base/poco/MongoDB/include/Poco/MongoDB/Database.h b/base/poco/MongoDB/include/Poco/MongoDB/Database.h index 62aea632b08..3334a673df6 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/Database.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/Database.h @@ -26,6 +26,8 @@ #include "Poco/MongoDB/QueryRequest.h" #include "Poco/MongoDB/UpdateRequest.h" +#include "Poco/MongoDB/OpMsgCursor.h" +#include "Poco/MongoDB/OpMsgMessage.h" namespace Poco { @@ -45,6 +47,9 @@ namespace MongoDB virtual ~Database(); /// Destroys the Database. + const std::string & name() const; + /// Database name + bool authenticate( Connection & connection, const std::string & username, @@ -62,34 +67,49 @@ namespace MongoDB /// May throw a Poco::ProtocolException if authentication fails for a reason other than /// invalid credentials. + Document::Ptr queryBuildInfo(Connection & connection) const; + /// Queries server build info (all wire protocols) + + Document::Ptr queryServerHello(Connection & connection) const; + /// Queries hello response from server (all wire protocols) + Int64 count(Connection & connection, const std::string & collectionName) const; - /// Sends a count request for the given collection to MongoDB. + /// Sends a count request for the given collection to MongoDB. (old wire protocol) /// /// If the command fails, -1 is returned. Poco::SharedPtr createCommand() const; - /// Creates a QueryRequest for a command. + /// Creates a QueryRequest for a command. (old wire protocol) Poco::SharedPtr createCountRequest(const std::string & collectionName) const; /// Creates a QueryRequest to count the given collection. - /// The collectionname must not contain the database name. + /// The collectionname must not contain the database name. (old wire protocol) Poco::SharedPtr createDeleteRequest(const std::string & collectionName) const; /// Creates a DeleteRequest to delete documents in the given collection. - /// The collectionname must not contain the database name. + /// The collectionname must not contain the database name. (old wire protocol) Poco::SharedPtr createInsertRequest(const std::string & collectionName) const; /// Creates an InsertRequest to insert new documents in the given collection. - /// The collectionname must not contain the database name. + /// The collectionname must not contain the database name. (old wire protocol) Poco::SharedPtr createQueryRequest(const std::string & collectionName) const; - /// Creates a QueryRequest. + /// Creates a QueryRequest. (old wire protocol) /// The collectionname must not contain the database name. Poco::SharedPtr createUpdateRequest(const std::string & collectionName) const; - /// Creates an UpdateRequest. + /// Creates an UpdateRequest. 
(old wire protocol) /// The collectionname must not contain the database name. + Poco::SharedPtr createOpMsgMessage(const std::string & collectionName) const; + /// Creates OpMsgMessage. (new wire protocol) + + Poco::SharedPtr createOpMsgMessage() const; + /// Creates OpMsgMessage for database commands that do not require collection as an argument. (new wire protocol) + + Poco::SharedPtr createOpMsgCursor(const std::string & collectionName) const; + /// Creates OpMsgCursor. (new wire protocol) + Poco::MongoDB::Document::Ptr ensureIndex( Connection & connection, const std::string & collection, @@ -100,14 +120,16 @@ namespace MongoDB int version = 0, int ttl = 0); /// Creates an index. The document returned is the result of a getLastError call. - /// For more info look at the ensureIndex information on the MongoDB website. + /// For more info look at the ensureIndex information on the MongoDB website. (old wire protocol) Document::Ptr getLastErrorDoc(Connection & connection) const; /// Sends the getLastError command to the database and returns the error document. + /// (old wire protocol) std::string getLastError(Connection & connection) const; /// Sends the getLastError command to the database and returns the err element /// from the error document. When err is null, an empty string is returned. + /// (old wire protocol) static const std::string AUTH_MONGODB_CR; /// Default authentication mechanism prior to MongoDB 3.0. @@ -115,6 +137,27 @@ namespace MongoDB static const std::string AUTH_SCRAM_SHA1; /// Default authentication mechanism for MongoDB 3.0. + enum WireVersion + /// Wire version as reported by the command hello. + /// See details in MongoDB github, repository specifications. + /// @see queryServerHello + { + VER_26 = 1, + VER_26_2 = 2, + VER_30 = 3, + VER_32 = 4, + VER_34 = 5, + VER_36 = 6, ///< First wire version that supports OP_MSG + VER_40 = 7, + VER_42 = 8, + VER_44 = 9, + VER_50 = 13, + VER_51 = 14, ///< First wire version that supports only OP_MSG + VER_52 = 15, + VER_53 = 16, + VER_60 = 17 + }; + protected: bool authCR(Connection & connection, const std::string & username, const std::string & password); bool authSCRAM(Connection & connection, const std::string & username, const std::string & password); @@ -127,6 +170,12 @@ namespace MongoDB // // inlines // + inline const std::string & Database::name() const + { + return _dbname; + } + + inline Poco::SharedPtr Database::createCommand() const { Poco::SharedPtr cmd = createQueryRequest("$cmd"); @@ -158,6 +207,24 @@ namespace MongoDB return new Poco::MongoDB::UpdateRequest(_dbname + '.' + collectionName); } + // -- New wire protocol commands + + inline Poco::SharedPtr Database::createOpMsgMessage(const std::string & collectionName) const + { + return new Poco::MongoDB::OpMsgMessage(_dbname, collectionName); + } + + inline Poco::SharedPtr Database::createOpMsgMessage() const + { + // Collection name for database commands is not needed. 
+ return createOpMsgMessage(""); + } + + inline Poco::SharedPtr Database::createOpMsgCursor(const std::string & collectionName) const + { + return new Poco::MongoDB::OpMsgCursor(_dbname, collectionName); + } + } } // namespace Poco::MongoDB diff --git a/base/poco/MongoDB/include/Poco/MongoDB/Document.h b/base/poco/MongoDB/include/Poco/MongoDB/Document.h index 12889663827..9e1df349e20 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/Document.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/Document.h @@ -31,6 +31,7 @@ namespace Poco namespace MongoDB { + class Array; class ElementFindByName { @@ -48,8 +49,8 @@ namespace MongoDB /// Represents a MongoDB (BSON) document. { public: - typedef SharedPtr Ptr; - typedef std::vector Vector; + using Ptr = SharedPtr; + using Vector = std::vector; Document(); /// Creates an empty Document. @@ -86,6 +87,10 @@ namespace MongoDB /// Unlike the other add methods, this method returns /// a reference to the new document. + Array & addNewArray(const std::string & name); + /// Create a new array and add it to this document. + /// Method returns a reference to the new array. + void clear(); /// Removes all elements from the document. @@ -95,7 +100,7 @@ namespace MongoDB bool empty() const; /// Returns true if the document doesn't contain any documents. - bool exists(const std::string & name); + bool exists(const std::string & name) const; /// Returns true if the document has an element with the given name. template @@ -158,6 +163,9 @@ namespace MongoDB /// return an Int64. When the element is not found, a /// Poco::NotFoundException will be thrown. + bool remove(const std::string & name); + /// Removes an element from the document. + template bool isType(const std::string & name) const /// Returns true when the type of the element equals the TypeId of ElementTrait. @@ -227,12 +235,23 @@ namespace MongoDB } - inline bool Document::exists(const std::string & name) + inline bool Document::exists(const std::string & name) const { return std::find_if(_elements.begin(), _elements.end(), ElementFindByName(name)) != _elements.end(); } + inline bool Document::remove(const std::string & name) + { + auto it = std::find_if(_elements.begin(), _elements.end(), ElementFindByName(name)); + if (it == _elements.end()) + return false; + + _elements.erase(it); + return true; + } + + inline std::size_t Document::size() const { return _elements.size(); diff --git a/base/poco/MongoDB/include/Poco/MongoDB/Element.h b/base/poco/MongoDB/include/Poco/MongoDB/Element.h index b5592bd0e0b..26525d7d02b 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/Element.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/Element.h @@ -45,7 +45,7 @@ namespace MongoDB /// Represents an Element of a Document or an Array. { public: - typedef Poco::SharedPtr Ptr; + using Ptr = Poco::SharedPtr; explicit Element(const std::string & name); /// Creates the Element with the given name. @@ -80,7 +80,7 @@ namespace MongoDB } - typedef std::list ElementSet; + using ElementSet = std::list; template @@ -266,7 +266,7 @@ namespace MongoDB } - typedef Nullable NullValue; + using NullValue = Nullable; // BSON Null Value diff --git a/base/poco/MongoDB/include/Poco/MongoDB/JavaScriptCode.h b/base/poco/MongoDB/include/Poco/MongoDB/JavaScriptCode.h index df1edc16817..c0f584b7c19 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/JavaScriptCode.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/JavaScriptCode.h @@ -35,7 +35,7 @@ namespace MongoDB /// Represents JavaScript type in BSON. 
{ public: - typedef SharedPtr Ptr; + using Ptr = SharedPtr; JavaScriptCode(); /// Creates an empty JavaScriptCode object. diff --git a/base/poco/MongoDB/include/Poco/MongoDB/MessageHeader.h b/base/poco/MongoDB/include/Poco/MongoDB/MessageHeader.h index 2b88e30fc74..98f45e876c1 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/MessageHeader.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/MessageHeader.h @@ -28,6 +28,9 @@ namespace MongoDB { + class Message; // Required to disambiguate friend declaration in MessageHeader. + + class MongoDB_API MessageHeader /// Represents the message header which is always prepended to a /// MongoDB request or response message. @@ -37,14 +40,18 @@ namespace MongoDB enum OpCode { + // Opcodes deprecated in MongoDB 5.0 OP_REPLY = 1, - OP_MSG = 1000, OP_UPDATE = 2001, OP_INSERT = 2002, OP_QUERY = 2004, OP_GET_MORE = 2005, OP_DELETE = 2006, - OP_KILL_CURSORS = 2007 + OP_KILL_CURSORS = 2007, + + /// Opcodes supported in MongoDB 5.1 and later + OP_COMPRESSED = 2012, + OP_MSG = 2013 }; explicit MessageHeader(OpCode); diff --git a/base/poco/MongoDB/include/Poco/MongoDB/MongoDB.h b/base/poco/MongoDB/include/Poco/MongoDB/MongoDB.h index 253f1f8ab27..de246ddc9dd 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/MongoDB.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/MongoDB.h @@ -33,6 +33,13 @@ // +#if defined(_WIN32) && defined(POCO_DLL) +# if defined(MongoDB_EXPORTS) +# define MongoDB_API __declspec(dllexport) +# else +# define MongoDB_API __declspec(dllimport) +# endif +#endif #if !defined(MongoDB_API) @@ -47,6 +54,11 @@ // // Automatically link MongoDB library. // +#if defined(_MSC_VER) +# if !defined(POCO_NO_AUTOMATIC_LIBS) && !defined(MongoDB_EXPORTS) +# pragma comment(lib, "PocoMongoDB" POCO_LIB_SUFFIX) +# endif +#endif #endif // MongoDBMongoDB_INCLUDED diff --git a/base/poco/MongoDB/include/Poco/MongoDB/ObjectId.h b/base/poco/MongoDB/include/Poco/MongoDB/ObjectId.h index 76bb190db48..8a335320ea0 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/ObjectId.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/ObjectId.h @@ -44,7 +44,7 @@ namespace MongoDB /// as its value. { public: - typedef SharedPtr Ptr; + using Ptr = SharedPtr; explicit ObjectId(const std::string & id); /// Creates an ObjectId from a string. diff --git a/base/poco/MongoDB/include/Poco/MongoDB/OpMsgCursor.h b/base/poco/MongoDB/include/Poco/MongoDB/OpMsgCursor.h new file mode 100644 index 00000000000..a465a71bb1c --- /dev/null +++ b/base/poco/MongoDB/include/Poco/MongoDB/OpMsgCursor.h @@ -0,0 +1,96 @@ +// +// OpMsgCursor.h +// +// Library: MongoDB +// Package: MongoDB +// Module: OpMsgCursor +// +// Definition of the OpMsgCursor class. +// +// Copyright (c) 2012, Applied Informatics Software Engineering GmbH. +// and Contributors. +// +// SPDX-License-Identifier: BSL-1.0 +// + + +#ifndef MongoDB_OpMsgCursor_INCLUDED +#define MongoDB_OpMsgCursor_INCLUDED + + +#include "Poco/MongoDB/Connection.h" +#include "Poco/MongoDB/MongoDB.h" +#include "Poco/MongoDB/OpMsgMessage.h" + +namespace Poco +{ +namespace MongoDB +{ + + + class MongoDB_API OpMsgCursor : public Document + /// OpMsgCursor is an helper class for querying multiple documents using OpMsgMessage. + { + public: + OpMsgCursor(const std::string & dbname, const std::string & collectionName); + /// Creates a OpMsgCursor for the given database and collection. + + virtual ~OpMsgCursor(); + /// Destroys the OpMsgCursor. 
+ + void setEmptyFirstBatch(bool empty); + /// Empty first batch is used to get error response faster with little server processing + + bool emptyFirstBatch() const; + + void setBatchSize(Int32 batchSize); + /// Set non-default batch size + + Int32 batchSize() const; + /// Current batch size (zero or negative number indicates default batch size) + + Int64 cursorID() const; + + OpMsgMessage & next(Connection & connection); + /// Tries to get the next documents. As long as response message has a + /// cursor ID next can be called to retrieve the next bunch of documents. + /// + /// The cursor must be killed (see kill()) when not all documents are needed. + + OpMsgMessage & query(); + /// Returns the associated query. + + void kill(Connection & connection); + /// Kills the cursor and reset it so that it can be reused. + + private: + OpMsgMessage _query; + OpMsgMessage _response; + + bool _emptyFirstBatch{false}; + Int32 _batchSize{-1}; + /// Batch size used in the cursor. Zero or negative value means that default shall be used. + + Int64 _cursorID{0}; + }; + + + // + // inlines + // + inline OpMsgMessage & OpMsgCursor::query() + { + return _query; + } + + inline Int64 OpMsgCursor::cursorID() const + { + return _cursorID; + } + + +} +} // namespace Poco::MongoDB + + +#endif // MongoDB_OpMsgCursor_INCLUDED diff --git a/base/poco/MongoDB/include/Poco/MongoDB/OpMsgMessage.h b/base/poco/MongoDB/include/Poco/MongoDB/OpMsgMessage.h new file mode 100644 index 00000000000..699c7fc4e12 --- /dev/null +++ b/base/poco/MongoDB/include/Poco/MongoDB/OpMsgMessage.h @@ -0,0 +1,163 @@ +// +// OpMsgMessage.h +// +// Library: MongoDB +// Package: MongoDB +// Module: OpMsgMessage +// +// Definition of the OpMsgMessage class. +// +// Copyright (c) 2022, Applied Informatics Software Engineering GmbH. +// and Contributors. +// +// SPDX-License-Identifier: BSL-1.0 +// + + +#ifndef MongoDB_OpMsgMessage_INCLUDED +#define MongoDB_OpMsgMessage_INCLUDED + + +#include "Poco/MongoDB/Document.h" +#include "Poco/MongoDB/Message.h" +#include "Poco/MongoDB/MongoDB.h" + +#include + +namespace Poco +{ +namespace MongoDB +{ + + + class MongoDB_API OpMsgMessage : public Message + /// This class represents a request/response (OP_MSG) to send requests and receive responses to/from MongoDB. 
+ { + public: + // Constants for most often used MongoDB commands that can be sent using OP_MSG + // For complete list see: https://www.mongodb.com/docs/manual/reference/command/ + + // Query and write + static const std::string CMD_INSERT; + static const std::string CMD_DELETE; + static const std::string CMD_UPDATE; + static const std::string CMD_FIND; + static const std::string CMD_FIND_AND_MODIFY; + static const std::string CMD_GET_MORE; + + // Aggregation + static const std::string CMD_AGGREGATE; + static const std::string CMD_COUNT; + static const std::string CMD_DISTINCT; + static const std::string CMD_MAP_REDUCE; + + // Replication and administration + static const std::string CMD_HELLO; + static const std::string CMD_REPL_SET_GET_STATUS; + static const std::string CMD_REPL_SET_GET_CONFIG; + + static const std::string CMD_CREATE; + static const std::string CMD_CREATE_INDEXES; + static const std::string CMD_DROP; + static const std::string CMD_DROP_DATABASE; + static const std::string CMD_KILL_CURSORS; + static const std::string CMD_LIST_DATABASES; + static const std::string CMD_LIST_INDEXES; + + // Diagnostic + static const std::string CMD_BUILD_INFO; + static const std::string CMD_COLL_STATS; + static const std::string CMD_DB_STATS; + static const std::string CMD_HOST_INFO; + + + enum Flags : UInt32 + { + MSG_FLAGS_DEFAULT = 0, + + MSG_CHECKSUM_PRESENT = (1 << 0), + + MSG_MORE_TO_COME = (1 << 1), + /// Sender will send another message and is not prepared for overlapping messages + + MSG_EXHAUST_ALLOWED = (1 << 16) + /// Client is prepared for multiple replies (using the moreToCome bit) to this request + }; + + OpMsgMessage(); + /// Creates an OpMsgMessage for response. + + OpMsgMessage(const std::string & databaseName, const std::string & collectionName, UInt32 flags = MSG_FLAGS_DEFAULT); + /// Creates an OpMsgMessage for requests. + + virtual ~OpMsgMessage(); + + const std::string & databaseName() const; + + const std::string & collectionName() const; + + void setCommandName(const std::string & command); + /// Sets the command name and clears the command document + + void setCursor(Poco::Int64 cursorID, Poco::Int32 batchSize = -1); + /// Sets the command "getMore" for the cursor id with batch size (if it is not negative). + + const std::string & commandName() const; + /// Current command name. + + void setAcknowledgedRequest(bool ack); + /// Set false to create request that does not return response. + /// It has effect only for commands that write or delete documents. + /// Default is true (request returns acknowledge response). + + bool acknowledgedRequest() const; + + UInt32 flags() const; + + Document & body(); + /// Access to body document. + /// Additional query arguments shall be added after setting the command name. + + const Document & body() const; + + Document::Vector & documents(); + /// Documents prepared for request or retrieved in response. + + const Document::Vector & documents() const; + /// Documents prepared for request or retrieved in response. + + bool responseOk() const; + /// Reads "ok" status from the response message. + + void clear(); + /// Clears the message. + + void send(std::ostream & ostr); + /// Writes the request to stream. + + void read(std::istream & istr); + /// Reads the response from the stream. 
+ + private: + enum PayloadType : UInt8 + { + PAYLOAD_TYPE_0 = 0, + PAYLOAD_TYPE_1 = 1 + }; + + std::string _databaseName; + std::string _collectionName; + UInt32 _flags{MSG_FLAGS_DEFAULT}; + std::string _commandName; + bool _acknowledged{true}; + + Document _body; + Document::Vector _documents; + }; + + +} +} // namespace Poco::MongoDB + + +#endif // MongoDB_OpMsgMessage_INCLUDED diff --git a/base/poco/MongoDB/include/Poco/MongoDB/PoolableConnectionFactory.h b/base/poco/MongoDB/include/Poco/MongoDB/PoolableConnectionFactory.h index 9d35c728e5e..53f4a5127ef 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/PoolableConnectionFactory.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/PoolableConnectionFactory.h @@ -94,7 +94,23 @@ namespace MongoDB operator Connection::Ptr() { return _connection; } +#if defined(POCO_ENABLE_CPP11) + // Disable copy to prevent unwanted release of resources: C++11 way + PooledConnection(const PooledConnection &) = delete; + PooledConnection & operator=(const PooledConnection &) = delete; + + // Enable move semantics + PooledConnection(PooledConnection && other) = default; + PooledConnection & operator=(PooledConnection &&) = default; +#endif + private: +#if !defined(POCO_ENABLE_CPP11) + // Disable copy to prevent unwanted release of resources: pre C++11 way + PooledConnection(const PooledConnection &); + PooledConnection & operator=(const PooledConnection &); +#endif + Poco::ObjectPool & _pool; Connection::Ptr _connection; }; diff --git a/base/poco/MongoDB/include/Poco/MongoDB/RegularExpression.h b/base/poco/MongoDB/include/Poco/MongoDB/RegularExpression.h index b9a8694d321..244b8c14163 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/RegularExpression.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/RegularExpression.h @@ -33,7 +33,7 @@ namespace MongoDB /// Represents a regular expression in BSON format. { public: - typedef SharedPtr Ptr; + using Ptr = SharedPtr; RegularExpression(); /// Creates an empty RegularExpression. diff --git a/base/poco/MongoDB/include/Poco/MongoDB/ResponseMessage.h b/base/poco/MongoDB/include/Poco/MongoDB/ResponseMessage.h index 132859cc75f..9cb92cb16c4 100644 --- a/base/poco/MongoDB/include/Poco/MongoDB/ResponseMessage.h +++ b/base/poco/MongoDB/include/Poco/MongoDB/ResponseMessage.h @@ -38,6 +38,9 @@ namespace MongoDB ResponseMessage(); /// Creates an empty ResponseMessage. + ResponseMessage(const Int64 & cursorID); + /// Creates an ResponseMessage for existing cursor ID. + virtual ~ResponseMessage(); /// Destroys the ResponseMessage. 
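(Illustrative note, not part of the patch: a rough end-to-end sketch of how the OP_MSG classes declared above fit together. The server address, database name "db", collection name "col" and the filter field are placeholder assumptions; the implementations of these calls follow in the src/ changes below.)

#include <iostream>
#include "Poco/MongoDB/Connection.h"
#include "Poco/MongoDB/Database.h"
#include "Poco/MongoDB/OpMsgMessage.h"

int main()
{
    Poco::MongoDB::Connection connection("localhost", 27017);
    Poco::MongoDB::Database db("db");

    // Build a "find" command on db.col and send it with the new OP_MSG overload.
    Poco::SharedPtr<Poco::MongoDB::OpMsgMessage> request = db.createOpMsgMessage("col");
    request->setCommandName(Poco::MongoDB::OpMsgMessage::CMD_FIND);
    request->body().addNewDocument("filter").add("status", std::string("active"));

    Poco::MongoDB::OpMsgMessage response;
    connection.sendRequest(*request, response);

    if (response.responseOk())
    {
        // For "find" the server returns the results in cursor.firstBatch;
        // OpMsgMessage::read() (see below) copies that batch into documents().
        for (auto & doc : response.documents())
            std::cout << doc->toString(2) << std::endl;
    }
    return 0;
}

(For paging through larger result sets, OpMsgCursor wraps the same request/response pair and reissues "getMore" via setCursor() until the server reports a zero cursor id.)
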
diff --git a/base/poco/MongoDB/src/Array.cpp b/base/poco/MongoDB/src/Array.cpp index c6d96d1371d..6fff0994d82 100644 --- a/base/poco/MongoDB/src/Array.cpp +++ b/base/poco/MongoDB/src/Array.cpp @@ -20,7 +20,7 @@ namespace Poco { namespace MongoDB { -Array::Array(): +Array::Array(): Document() { } @@ -31,7 +31,7 @@ Array::~Array() } -Element::Ptr Array::get(int pos) const +Element::Ptr Array::get(std::size_t pos) const { std::string name = Poco::NumberFormatter::format(pos); return Document::get(name); diff --git a/base/poco/MongoDB/src/Connection.cpp b/base/poco/MongoDB/src/Connection.cpp index 38c31d2250a..fa20887054b 100644 --- a/base/poco/MongoDB/src/Connection.cpp +++ b/base/poco/MongoDB/src/Connection.cpp @@ -319,4 +319,30 @@ void Connection::sendRequest(RequestMessage& request, ResponseMessage& response) } +void Connection::sendRequest(OpMsgMessage& request, OpMsgMessage& response) +{ + Poco::Net::SocketOutputStream sos(_socket); + request.send(sos); + + response.clear(); + readResponse(response); +} + + +void Connection::sendRequest(OpMsgMessage& request) +{ + request.setAcknowledgedRequest(false); + Poco::Net::SocketOutputStream sos(_socket); + request.send(sos); +} + + +void Connection::readResponse(OpMsgMessage& response) +{ + Poco::Net::SocketInputStream sis(_socket); + response.read(sis); +} + + + } } // Poco::MongoDB diff --git a/base/poco/MongoDB/src/Cursor.cpp b/base/poco/MongoDB/src/Cursor.cpp index 69031e0ab65..ef7a4ca961d 100644 --- a/base/poco/MongoDB/src/Cursor.cpp +++ b/base/poco/MongoDB/src/Cursor.cpp @@ -33,6 +33,12 @@ Cursor::Cursor(const std::string& fullCollectionName, QueryRequest::Flags flags) } +Cursor::Cursor(const Document& aggregationResponse) : + _query(aggregationResponse.get("cursor")->get("ns")), + _response(aggregationResponse.get("cursor")->get("id")) +{ +} + Cursor::~Cursor() { try diff --git a/base/poco/MongoDB/src/Database.cpp b/base/poco/MongoDB/src/Database.cpp index 2b31523bdc4..1a0d3cfe559 100644 --- a/base/poco/MongoDB/src/Database.cpp +++ b/base/poco/MongoDB/src/Database.cpp @@ -334,6 +334,50 @@ bool Database::authSCRAM(Connection& connection, const std::string& username, co } +Document::Ptr Database::queryBuildInfo(Connection& connection) const +{ + // build info can be issued on "config" system database + Poco::SharedPtr request = createCommand(); + request->selector().add("buildInfo", 1); + + Poco::MongoDB::ResponseMessage response; + connection.sendRequest(*request, response); + + Document::Ptr buildInfo; + if ( response.documents().size() > 0 ) + { + buildInfo = response.documents()[0]; + } + else + { + throw Poco::ProtocolException("Didn't get a response from the buildinfo command"); + } + return buildInfo; +} + + +Document::Ptr Database::queryServerHello(Connection& connection) const +{ + // hello can be issued on "config" system database + Poco::SharedPtr request = createCommand(); + request->selector().add("hello", 1); + + Poco::MongoDB::ResponseMessage response; + connection.sendRequest(*request, response); + + Document::Ptr hello; + if ( response.documents().size() > 0 ) + { + hello = response.documents()[0]; + } + else + { + throw Poco::ProtocolException("Didn't get a response from the hello command"); + } + return hello; +} + + Int64 Database::count(Connection& connection, const std::string& collectionName) const { Poco::SharedPtr countRequest = createCountRequest(collectionName); @@ -390,7 +434,7 @@ Document::Ptr Database::getLastErrorDoc(Connection& connection) const { Document::Ptr errorDoc; - Poco::SharedPtr request = 
createQueryRequest("$cmd"); + Poco::SharedPtr request = createCommand(); request->setNumberToReturn(1); request->selector().add("getLastError", 1); @@ -420,7 +464,7 @@ std::string Database::getLastError(Connection& connection) const Poco::SharedPtr Database::createCountRequest(const std::string& collectionName) const { - Poco::SharedPtr request = createQueryRequest("$cmd"); + Poco::SharedPtr request = createCommand(); request->setNumberToReturn(1); request->selector().add("count", collectionName); return request; diff --git a/base/poco/MongoDB/src/DeleteRequest.cpp b/base/poco/MongoDB/src/DeleteRequest.cpp index 67a88c33302..ba75beb55fb 100644 --- a/base/poco/MongoDB/src/DeleteRequest.cpp +++ b/base/poco/MongoDB/src/DeleteRequest.cpp @@ -20,8 +20,8 @@ namespace MongoDB { DeleteRequest::DeleteRequest(const std::string& collectionName, DeleteRequest::Flags flags): - RequestMessage(MessageHeader::OP_DELETE), - _flags(flags), + RequestMessage(MessageHeader::OP_DELETE), + _flags(flags), _fullCollectionName(collectionName), _selector() { diff --git a/base/poco/MongoDB/src/Document.cpp b/base/poco/MongoDB/src/Document.cpp index 114fc993891..f7c5c9c5dc6 100644 --- a/base/poco/MongoDB/src/Document.cpp +++ b/base/poco/MongoDB/src/Document.cpp @@ -35,6 +35,14 @@ Document::~Document() } +Array& Document::addNewArray(const std::string& name) +{ + Array::Ptr newArray = new Array(); + add(name, newArray); + return *newArray; +} + + Element::Ptr Document::get(const std::string& name) const { Element::Ptr element; @@ -84,7 +92,7 @@ void Document::read(BinaryReader& reader) while (type != '\0') { Element::Ptr element; - + std::string name = BSONReader(reader).readCString(); switch (type) @@ -198,7 +206,7 @@ void Document::write(BinaryWriter& writer) else { std::stringstream sstream; - Poco::BinaryWriter tempWriter(sstream); + Poco::BinaryWriter tempWriter(sstream, BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER); for (ElementSet::iterator it = _elements.begin(); it != _elements.end(); ++it) { tempWriter << static_cast((*it)->type()); @@ -207,7 +215,7 @@ void Document::write(BinaryWriter& writer) element->write(tempWriter); } tempWriter.flush(); - + Poco::Int32 len = static_cast(5 + sstream.tellp()); /* 5 = sizeof(len) + 0-byte */ writer << len; writer.writeRaw(sstream.str()); diff --git a/base/poco/MongoDB/src/Element.cpp b/base/poco/MongoDB/src/Element.cpp index 89629e0503e..f91ce264493 100644 --- a/base/poco/MongoDB/src/Element.cpp +++ b/base/poco/MongoDB/src/Element.cpp @@ -24,7 +24,7 @@ Element::Element(const std::string& name) : _name(name) } -Element::~Element() +Element::~Element() { } diff --git a/base/poco/MongoDB/src/GetMoreRequest.cpp b/base/poco/MongoDB/src/GetMoreRequest.cpp index f8a6b73c6ad..2c1f6909eb7 100644 --- a/base/poco/MongoDB/src/GetMoreRequest.cpp +++ b/base/poco/MongoDB/src/GetMoreRequest.cpp @@ -21,7 +21,7 @@ namespace MongoDB { GetMoreRequest::GetMoreRequest(const std::string& collectionName, Int64 cursorID): - RequestMessage(MessageHeader::OP_GET_MORE), + RequestMessage(MessageHeader::OP_GET_MORE), _fullCollectionName(collectionName), _numberToReturn(100), _cursorID(cursorID) diff --git a/base/poco/MongoDB/src/InsertRequest.cpp b/base/poco/MongoDB/src/InsertRequest.cpp index ec8dc9cf94a..65be5654b3e 100644 --- a/base/poco/MongoDB/src/InsertRequest.cpp +++ b/base/poco/MongoDB/src/InsertRequest.cpp @@ -20,7 +20,7 @@ namespace MongoDB { InsertRequest::InsertRequest(const std::string& collectionName, Flags flags): - RequestMessage(MessageHeader::OP_INSERT), + 
RequestMessage(MessageHeader::OP_INSERT), _flags(flags), _fullCollectionName(collectionName) { diff --git a/base/poco/MongoDB/src/KillCursorsRequest.cpp b/base/poco/MongoDB/src/KillCursorsRequest.cpp index 6baa0e0be8f..448002aa16a 100644 --- a/base/poco/MongoDB/src/KillCursorsRequest.cpp +++ b/base/poco/MongoDB/src/KillCursorsRequest.cpp @@ -37,7 +37,7 @@ void KillCursorsRequest::buildRequest(BinaryWriter& writer) for (std::vector::iterator it = _cursors.begin(); it != _cursors.end(); ++it) { writer << *it; - } + } } diff --git a/base/poco/MongoDB/src/Message.cpp b/base/poco/MongoDB/src/Message.cpp index c29d282d15a..7b1cb23bab6 100644 --- a/base/poco/MongoDB/src/Message.cpp +++ b/base/poco/MongoDB/src/Message.cpp @@ -19,7 +19,7 @@ namespace Poco { namespace MongoDB { -Message::Message(MessageHeader::OpCode opcode): +Message::Message(MessageHeader::OpCode opcode): _header(opcode) { } diff --git a/base/poco/MongoDB/src/MessageHeader.cpp b/base/poco/MongoDB/src/MessageHeader.cpp index 222121243db..b472bcec465 100644 --- a/base/poco/MongoDB/src/MessageHeader.cpp +++ b/base/poco/MongoDB/src/MessageHeader.cpp @@ -20,10 +20,10 @@ namespace Poco { namespace MongoDB { -MessageHeader::MessageHeader(OpCode opCode): - _messageLength(0), - _requestID(0), - _responseTo(0), +MessageHeader::MessageHeader(OpCode opCode): + _messageLength(0), + _requestID(0), + _responseTo(0), _opCode(opCode) { } @@ -42,7 +42,7 @@ void MessageHeader::read(BinaryReader& reader) Int32 opCode; reader >> opCode; - _opCode = (OpCode) opCode; + _opCode = static_cast(opCode); if (!reader.good()) { @@ -56,7 +56,7 @@ void MessageHeader::write(BinaryWriter& writer) writer << _messageLength; writer << _requestID; writer << _responseTo; - writer << (Int32) _opCode; + writer << static_cast(_opCode); } diff --git a/base/poco/MongoDB/src/ObjectId.cpp b/base/poco/MongoDB/src/ObjectId.cpp index 3065a2ffc30..0125c246c2d 100644 --- a/base/poco/MongoDB/src/ObjectId.cpp +++ b/base/poco/MongoDB/src/ObjectId.cpp @@ -32,7 +32,7 @@ ObjectId::ObjectId(const std::string& id) poco_assert_dbg(id.size() == 24); const char* p = id.c_str(); - for (std::size_t i = 0; i < 12; ++i) + for (std::size_t i = 0; i < 12; ++i) { _id[i] = fromHex(p); p += 2; diff --git a/base/poco/MongoDB/src/OpMsgCursor.cpp b/base/poco/MongoDB/src/OpMsgCursor.cpp new file mode 100644 index 00000000000..bc95851ae33 --- /dev/null +++ b/base/poco/MongoDB/src/OpMsgCursor.cpp @@ -0,0 +1,187 @@ +// +// OpMsgCursor.cpp +// +// Library: MongoDB +// Package: MongoDB +// Module: OpMsgCursor +// +// Copyright (c) 2022, Applied Informatics Software Engineering GmbH. +// and Contributors. +// +// SPDX-License-Identifier: BSL-1.0 +// + + +#include "Poco/MongoDB/OpMsgCursor.h" +#include "Poco/MongoDB/Array.h" + +// +// NOTE: +// +// MongoDB specification indicates that the flag MSG_EXHAUST_ALLOWED shall be +// used in the request when the receiver is ready to receive multiple messages +// without sending additional requests in between. Sender (MongoDB) indicates +// that more messages follow with flag MSG_MORE_TO_COME. +// +// It seems that this does not work properly. MSG_MORE_TO_COME is set and reading +// next messages sometimes works, however often the data is missing in response +// or the message header contains wrong message length and reading blocks. +// Opcode in the header is correct. +// +// Using MSG_EXHAUST_ALLOWED is therefore currently disabled. 
+// +// It seems that related JIRA ticket is: +// +// https://jira.mongodb.org/browse/SERVER-57297 +// +// https://github.com/mongodb/specifications/blob/master/source/message/OP_MSG.rst +// + +#define MONGODB_EXHAUST_ALLOWED_WORKS false + +namespace Poco { +namespace MongoDB { + + +static const std::string keyCursor {"cursor"}; +static const std::string keyFirstBatch {"firstBatch"}; +static const std::string keyNextBatch {"nextBatch"}; + +static Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc); + + +OpMsgCursor::OpMsgCursor(const std::string& db, const std::string& collection): +#if MONGODB_EXHAUST_ALLOWED_WORKS + _query(db, collection, OpMsgMessage::MSG_EXHAUST_ALLOWED) +#else + _query(db, collection) +#endif +{ +} + +OpMsgCursor::~OpMsgCursor() +{ + try + { + poco_assert_dbg(_cursorID == 0); + } + catch (...) + { + } +} + + +void OpMsgCursor::setEmptyFirstBatch(bool empty) +{ + _emptyFirstBatch = empty; +} + + +bool OpMsgCursor::emptyFirstBatch() const +{ + return _emptyFirstBatch; +} + + +void OpMsgCursor::setBatchSize(Int32 batchSize) +{ + _batchSize = batchSize; +} + + +Int32 OpMsgCursor::batchSize() const +{ + return _batchSize; +} + + +OpMsgMessage& OpMsgCursor::next(Connection& connection) +{ + if (_cursorID == 0) + { + _response.clear(); + + if (_emptyFirstBatch || _batchSize > 0) + { + Int32 bsize = _emptyFirstBatch ? 0 : _batchSize; + if (_query.commandName() == OpMsgMessage::CMD_FIND) + { + _query.body().add("batchSize", bsize); + } + else if (_query.commandName() == OpMsgMessage::CMD_AGGREGATE) + { + auto& cursorDoc = _query.body().addNewDocument("cursor"); + cursorDoc.add("batchSize", bsize); + } + } + + connection.sendRequest(_query, _response); + + const auto& rdoc = _response.body(); + _cursorID = cursorIdFromResponse(rdoc); + } + else + { +#if MONGODB_EXHAUST_ALLOWED_WORKS + std::cout << "Response flags: " << _response.flags() << std::endl; + if (_response.flags() & OpMsgMessage::MSG_MORE_TO_COME) + { + std::cout << "More to come. 
Reading more response: " << std::endl; + _response.clear(); + connection.readResponse(_response); + } + else +#endif + { + _response.clear(); + _query.setCursor(_cursorID, _batchSize); + connection.sendRequest(_query, _response); + } + } + + const auto& rdoc = _response.body(); + _cursorID = cursorIdFromResponse(rdoc); + + return _response; +} + + +void OpMsgCursor::kill(Connection& connection) +{ + _response.clear(); + if (_cursorID != 0) + { + _query.setCommandName(OpMsgMessage::CMD_KILL_CURSORS); + + MongoDB::Array::Ptr cursors = new MongoDB::Array(); + cursors->add(_cursorID); + _query.body().add("cursors", cursors); + + connection.sendRequest(_query, _response); + + const auto killed = _response.body().get("cursorsKilled", nullptr); + if (!killed || killed->size() != 1 || killed->get(0, -1) != _cursorID) + { + throw Poco::ProtocolException("Cursor not killed as expected: " + std::to_string(_cursorID)); + } + + _cursorID = 0; + _query.clear(); + _response.clear(); + } +} + + +Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc) +{ + Poco::Int64 id {0}; + auto cursorDoc = doc.get(keyCursor, nullptr); + if(cursorDoc) + { + id = cursorDoc->get("id", 0); + } + return id; +} + + +} } // Namespace Poco::MongoDB diff --git a/base/poco/MongoDB/src/OpMsgMessage.cpp b/base/poco/MongoDB/src/OpMsgMessage.cpp new file mode 100644 index 00000000000..2b55772ca59 --- /dev/null +++ b/base/poco/MongoDB/src/OpMsgMessage.cpp @@ -0,0 +1,412 @@ +// +// OpMsgMessage.cpp +// +// Library: MongoDB +// Package: MongoDB +// Module: OpMsgMessage +// +// Copyright (c) 2022, Applied Informatics Software Engineering GmbH. +// and Contributors. +// +// SPDX-License-Identifier: BSL-1.0 +// + +#include "Poco/MongoDB/OpMsgMessage.h" +#include "Poco/MongoDB/MessageHeader.h" +#include "Poco/MongoDB/Array.h" +#include "Poco/StreamCopier.h" +#include "Poco/Logger.h" + +#define POCO_MONGODB_DUMP false + +namespace Poco { +namespace MongoDB { + +// Query and write +const std::string OpMsgMessage::CMD_INSERT { "insert" }; +const std::string OpMsgMessage::CMD_DELETE { "delete" }; +const std::string OpMsgMessage::CMD_UPDATE { "update" }; +const std::string OpMsgMessage::CMD_FIND { "find" }; +const std::string OpMsgMessage::CMD_FIND_AND_MODIFY { "findAndModify" }; +const std::string OpMsgMessage::CMD_GET_MORE { "getMore" }; + +// Aggregation +const std::string OpMsgMessage::CMD_AGGREGATE { "aggregate" }; +const std::string OpMsgMessage::CMD_COUNT { "count" }; +const std::string OpMsgMessage::CMD_DISTINCT { "distinct" }; +const std::string OpMsgMessage::CMD_MAP_REDUCE { "mapReduce" }; + +// Replication and administration +const std::string OpMsgMessage::CMD_HELLO { "hello" }; +const std::string OpMsgMessage::CMD_REPL_SET_GET_STATUS { "replSetGetStatus" }; +const std::string OpMsgMessage::CMD_REPL_SET_GET_CONFIG { "replSetGetConfig" }; + +const std::string OpMsgMessage::CMD_CREATE { "create" }; +const std::string OpMsgMessage::CMD_CREATE_INDEXES { "createIndexes" }; +const std::string OpMsgMessage::CMD_DROP { "drop" }; +const std::string OpMsgMessage::CMD_DROP_DATABASE { "dropDatabase" }; +const std::string OpMsgMessage::CMD_KILL_CURSORS { "killCursors" }; +const std::string OpMsgMessage::CMD_LIST_DATABASES { "listDatabases" }; +const std::string OpMsgMessage::CMD_LIST_INDEXES { "listIndexes" }; + +// Diagnostic +const std::string OpMsgMessage::CMD_BUILD_INFO { "buildInfo" }; +const std::string OpMsgMessage::CMD_COLL_STATS { "collStats" }; +const std::string OpMsgMessage::CMD_DB_STATS { "dbStats" }; +const std::string 
OpMsgMessage::CMD_HOST_INFO { "hostInfo" }; + + +static const std::string& commandIdentifier(const std::string& command); + /// Commands have different names for the payload that is sent in a separate section + + +static const std::string keyCursor {"cursor"}; +static const std::string keyFirstBatch {"firstBatch"}; +static const std::string keyNextBatch {"nextBatch"}; + + +OpMsgMessage::OpMsgMessage() : + Message(MessageHeader::OP_MSG) +{ +} + + +OpMsgMessage::OpMsgMessage(const std::string& databaseName, const std::string& collectionName, UInt32 flags) : + Message(MessageHeader::OP_MSG), + _databaseName(databaseName), + _collectionName(collectionName), + _flags(flags) +{ +} + + +OpMsgMessage::~OpMsgMessage() +{ +} + +const std::string& OpMsgMessage::databaseName() const +{ + return _databaseName; +} + + +const std::string& OpMsgMessage::collectionName() const +{ + return _collectionName; +} + + +void OpMsgMessage::setCommandName(const std::string& command) +{ + _commandName = command; + _body.clear(); + + // IMPORTANT: Command name must be first + if (_collectionName.empty()) + { + // Collection is not specified. It is assumed that this particular command does + // not need it. + _body.add(_commandName, Int32(1)); + } + else + { + _body.add(_commandName, _collectionName); + } + _body.add("$db", _databaseName); +} + + +void OpMsgMessage::setCursor(Poco::Int64 cursorID, Poco::Int32 batchSize) +{ + _commandName = OpMsgMessage::CMD_GET_MORE; + _body.clear(); + + // IMPORTANT: Command name must be first + _body.add(_commandName, cursorID); + _body.add("$db", _databaseName); + _body.add("collection", _collectionName); + if (batchSize > 0) + { + _body.add("batchSize", batchSize); + } +} + + +const std::string& OpMsgMessage::commandName() const +{ + return _commandName; +} + + +void OpMsgMessage::setAcknowledgedRequest(bool ack) +{ + const auto& id = commandIdentifier(_commandName); + if (id.empty()) + return; + + _acknowledged = ack; + + auto writeConcern = _body.get("writeConcern", nullptr); + if (writeConcern) + writeConcern->remove("w"); + + if (ack) + { + _flags = _flags & (~MSG_MORE_TO_COME); + } + else + { + _flags = _flags | MSG_MORE_TO_COME; + if (!writeConcern) + _body.addNewDocument("writeConcern").add("w", 0); + else + writeConcern->add("w", 0); + } + +} + + +bool OpMsgMessage::acknowledgedRequest() const +{ + return _acknowledged; +} + + +UInt32 OpMsgMessage::flags() const +{ + return _flags; +} + + +Document& OpMsgMessage::body() +{ + return _body; +} + + +const Document& OpMsgMessage::body() const +{ + return _body; +} + + +Document::Vector& OpMsgMessage::documents() +{ + return _documents; +} + + +const Document::Vector& OpMsgMessage::documents() const +{ + return _documents; +} + + +bool OpMsgMessage::responseOk() const +{ + Poco::Int64 ok {false}; + if (_body.exists("ok")) + { + ok = _body.getInteger("ok"); + } + return (ok != 0); +} + + +void OpMsgMessage::clear() +{ + _flags = MSG_FLAGS_DEFAULT; + _commandName.clear(); + _body.clear(); + _documents.clear(); +} + + +void OpMsgMessage::send(std::ostream& ostr) +{ + BinaryWriter socketWriter(ostr, BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER); + + // Serialise the body + std::stringstream ss; + BinaryWriter writer(ss, BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER); + writer << _flags; + + writer << PAYLOAD_TYPE_0; + _body.write(writer); + + if (!_documents.empty()) + { + // Serialise attached documents + + std::stringstream ssdoc; + BinaryWriter wdoc(ssdoc, BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER); + for (auto& doc: _documents) + { + 
doc->write(wdoc); + } + wdoc.flush(); + + const std::string& identifier = commandIdentifier(_commandName); + const Poco::Int32 size = static_cast(sizeof(size) + identifier.size() + 1 + ssdoc.tellp()); + writer << PAYLOAD_TYPE_1; + writer << size; + writer.writeCString(identifier.c_str()); + StreamCopier::copyStream(ssdoc, ss); + } + writer.flush(); + +#if POCO_MONGODB_DUMP + const std::string section = ss.str(); + std::string dump; + Logger::formatDump(dump, section.data(), section.length()); + std::cout << dump << std::endl; +#endif + + messageLength(static_cast(ss.tellp())); + + _header.write(socketWriter); + StreamCopier::copyStream(ss, ostr); + + ostr.flush(); +} + + +void OpMsgMessage::read(std::istream& istr) +{ + std::string message; + { + BinaryReader reader(istr, BinaryReader::LITTLE_ENDIAN_BYTE_ORDER); + _header.read(reader); + + poco_assert_dbg(_header.opCode() == _header.OP_MSG); + + const std::streamsize remainingSize {_header.getMessageLength() - _header.MSG_HEADER_SIZE }; + message.reserve(remainingSize); + +#if POCO_MONGODB_DUMP + std::cout + << "Message hdr: " << _header.getMessageLength() << " " << remainingSize << " " + << _header.opCode() << " " << _header.getRequestID() << " " << _header.responseTo() + << std::endl; +#endif + + reader.readRaw(remainingSize, message); + +#if POCO_MONGODB_DUMP + std::string dump; + Logger::formatDump(dump, message.data(), message.length()); + std::cout << dump << std::endl; +#endif + } + // Read complete message and then interpret it. + + std::istringstream msgss(message); + BinaryReader reader(msgss, BinaryReader::LITTLE_ENDIAN_BYTE_ORDER); + + Poco::UInt8 payloadType {0xFF}; + + reader >> _flags; + reader >> payloadType; + poco_assert_dbg(payloadType == PAYLOAD_TYPE_0); + + _body.read(reader); + + // Read next sections from the buffer + while (msgss.good()) + { + // NOTE: Not tested yet with database, because it returns everything in the body. + // Does MongoDB ever return documents as Payload type 1? + reader >> payloadType; + if (!msgss.good()) + { + break; + } + poco_assert_dbg(payloadType == PAYLOAD_TYPE_1); +#if POCO_MONGODB_DUMP + std::cout << "section payload: " << payloadType << std::endl; +#endif + + Poco::Int32 sectionSize {0}; + reader >> sectionSize; + poco_assert_dbg(sectionSize > 0); + +#if POCO_MONGODB_DUMP + std::cout << "section size: " << sectionSize << std::endl; +#endif + std::streamoff offset = sectionSize - sizeof(sectionSize); + std::streampos endOfSection = msgss.tellg() + offset; + + std::string identifier; + reader.readCString(identifier); +#if POCO_MONGODB_DUMP + std::cout << "section identifier: " << identifier << std::endl; +#endif + + // Loop to read documents from this section. + while (msgss.tellg() < endOfSection) + { +#if POCO_MONGODB_DUMP + std::cout << "section doc: " << msgss.tellg() << " " << endOfSection << std::endl; +#endif + Document::Ptr doc = new Document(); + doc->read(reader); + _documents.push_back(doc); + if (msgss.tellg() < 0) + { + break; + } + } + } + + // Extract documents from the cursor batch if they are there. 
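+    // Cursor-based commands (find, aggregate, getMore) nest their result documents in the body
+    // under "cursor.firstBatch" (first reply) or "cursor.nextBatch" (replies to getMore).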
+ MongoDB::Array::Ptr batch; + auto curDoc = _body.get(keyCursor, nullptr); + if (curDoc) + { + batch = curDoc->get(keyFirstBatch, nullptr); + if (!batch) + { + batch = curDoc->get(keyNextBatch, nullptr); + } + } + if (batch) + { + for(std::size_t i = 0; i < batch->size(); i++) + { + const auto& d = batch->get(i, nullptr); + if (d) + { + _documents.push_back(d); + } + } + } + +} + +const std::string& commandIdentifier(const std::string& command) +{ + // Names of identifiers for commands that send bulk documents in the request + // The identifier is set in the section type 1. + static std::map identifiers { + { OpMsgMessage::CMD_INSERT, "documents" }, + { OpMsgMessage::CMD_DELETE, "deletes" }, + { OpMsgMessage::CMD_UPDATE, "updates" }, + + // Not sure if create index can send document section + { OpMsgMessage::CMD_CREATE_INDEXES, "indexes" } + }; + + const auto i = identifiers.find(command); + if (i != identifiers.end()) + { + return i->second; + } + + // This likely means that documents are incorrectly set for a command + // that does not send list of documents in section type 1. + static const std::string emptyIdentifier; + return emptyIdentifier; +} + + +} } // namespace Poco::MongoDB diff --git a/base/poco/MongoDB/src/QueryRequest.cpp b/base/poco/MongoDB/src/QueryRequest.cpp index 7044335ba30..6d7d23a8456 100644 --- a/base/poco/MongoDB/src/QueryRequest.cpp +++ b/base/poco/MongoDB/src/QueryRequest.cpp @@ -20,10 +20,10 @@ namespace MongoDB { QueryRequest::QueryRequest(const std::string& collectionName, QueryRequest::Flags flags): - RequestMessage(MessageHeader::OP_QUERY), - _flags(flags), + RequestMessage(MessageHeader::OP_QUERY), + _flags(flags), _fullCollectionName(collectionName), - _numberToSkip(0), + _numberToSkip(0), _numberToReturn(100), _selector(), _returnFieldSelector() diff --git a/base/poco/MongoDB/src/RegularExpression.cpp b/base/poco/MongoDB/src/RegularExpression.cpp index e95e7da82e1..5f7eb6bb51b 100644 --- a/base/poco/MongoDB/src/RegularExpression.cpp +++ b/base/poco/MongoDB/src/RegularExpression.cpp @@ -25,8 +25,8 @@ RegularExpression::RegularExpression() } -RegularExpression::RegularExpression(const std::string& pattern, const std::string& options): - _pattern(pattern), +RegularExpression::RegularExpression(const std::string& pattern, const std::string& options): + _pattern(pattern), _options(options) { } diff --git a/base/poco/MongoDB/src/ReplicaSet.cpp b/base/poco/MongoDB/src/ReplicaSet.cpp index b56fea49311..fce2f2bdada 100644 --- a/base/poco/MongoDB/src/ReplicaSet.cpp +++ b/base/poco/MongoDB/src/ReplicaSet.cpp @@ -21,7 +21,7 @@ namespace Poco { namespace MongoDB { -ReplicaSet::ReplicaSet(const std::vector &addresses): +ReplicaSet::ReplicaSet(const std::vector &addresses): _addresses(addresses) { } @@ -81,8 +81,8 @@ Connection::Ptr ReplicaSet::isMaster(const Net::SocketAddress& address) { conn = 0; } - - return 0; + + return 0; } diff --git a/base/poco/MongoDB/src/RequestMessage.cpp b/base/poco/MongoDB/src/RequestMessage.cpp index 6391d966198..999ed8a6ba1 100644 --- a/base/poco/MongoDB/src/RequestMessage.cpp +++ b/base/poco/MongoDB/src/RequestMessage.cpp @@ -21,7 +21,7 @@ namespace Poco { namespace MongoDB { -RequestMessage::RequestMessage(MessageHeader::OpCode opcode): +RequestMessage::RequestMessage(MessageHeader::OpCode opcode): Message(opcode) { } @@ -35,7 +35,7 @@ RequestMessage::~RequestMessage() void RequestMessage::send(std::ostream& ostr) { std::stringstream ss; - BinaryWriter requestWriter(ss); + BinaryWriter requestWriter(ss, 
BinaryWriter::LITTLE_ENDIAN_BYTE_ORDER); buildRequest(requestWriter); requestWriter.flush(); diff --git a/base/poco/MongoDB/src/ResponseMessage.cpp b/base/poco/MongoDB/src/ResponseMessage.cpp index 3254ace63e6..e8216767494 100644 --- a/base/poco/MongoDB/src/ResponseMessage.cpp +++ b/base/poco/MongoDB/src/ResponseMessage.cpp @@ -21,10 +21,20 @@ namespace MongoDB { ResponseMessage::ResponseMessage(): - Message(MessageHeader::OP_REPLY), - _responseFlags(0), - _cursorID(0), - _startingFrom(0), + Message(MessageHeader::OP_REPLY), + _responseFlags(0), + _cursorID(0), + _startingFrom(0), + _numberReturned(0) +{ +} + + +ResponseMessage::ResponseMessage(const Int64& cursorID): + Message(MessageHeader::OP_REPLY), + _responseFlags(0), + _cursorID(cursorID), + _startingFrom(0), _numberReturned(0) { } @@ -50,7 +60,7 @@ void ResponseMessage::read(std::istream& istr) clear(); BinaryReader reader(istr, BinaryReader::LITTLE_ENDIAN_BYTE_ORDER); - + _header.read(reader); reader >> _responseFlags; diff --git a/base/poco/MongoDB/src/UpdateRequest.cpp b/base/poco/MongoDB/src/UpdateRequest.cpp index 2af4621ff64..7477fc752d5 100644 --- a/base/poco/MongoDB/src/UpdateRequest.cpp +++ b/base/poco/MongoDB/src/UpdateRequest.cpp @@ -20,7 +20,7 @@ namespace MongoDB { UpdateRequest::UpdateRequest(const std::string& collectionName, UpdateRequest::Flags flags): - RequestMessage(MessageHeader::OP_UPDATE), + RequestMessage(MessageHeader::OP_UPDATE), _flags(flags), _fullCollectionName(collectionName), _selector(), diff --git a/docker/test/integration/runner/compose/docker_compose_mongo.yml b/docker/test/integration/runner/compose/docker_compose_mongo.yml index 9a6eae6ca8c..60361e9e98d 100644 --- a/docker/test/integration/runner/compose/docker_compose_mongo.yml +++ b/docker/test/integration/runner/compose/docker_compose_mongo.yml @@ -1,7 +1,7 @@ version: '2.3' services: mongo1: - image: mongo:5.0 + image: mongo:5.1 restart: always environment: MONGO_INITDB_ROOT_USERNAME: root diff --git a/docker/test/integration/runner/compose/docker_compose_mongo_secure.yml b/docker/test/integration/runner/compose/docker_compose_mongo_secure.yml index 193e5d26568..f5b0ffed130 100644 --- a/docker/test/integration/runner/compose/docker_compose_mongo_secure.yml +++ b/docker/test/integration/runner/compose/docker_compose_mongo_secure.yml @@ -1,7 +1,7 @@ version: '2.3' services: mongo1: - image: mongo:3.6 + image: mongo:3.5 restart: always environment: MONGO_INITDB_ROOT_USERNAME: root diff --git a/src/Dictionaries/MongoDBDictionarySource.cpp b/src/Dictionaries/MongoDBDictionarySource.cpp index b7e342f3c80..46910fa9f6a 100644 --- a/src/Dictionaries/MongoDBDictionarySource.cpp +++ b/src/Dictionaries/MongoDBDictionarySource.cpp @@ -170,7 +170,7 @@ MongoDBDictionarySource::~MongoDBDictionarySource() = default; QueryPipeline MongoDBDictionarySource::loadAll() { - return QueryPipeline(std::make_shared(connection, createCursor(db, collection, sample_block), sample_block, max_block_size)); + return QueryPipeline(std::make_shared(connection, db, collection, Poco::MongoDB::Document{}, sample_block, max_block_size)); } QueryPipeline MongoDBDictionarySource::loadIds(const std::vector & ids) @@ -178,7 +178,7 @@ QueryPipeline MongoDBDictionarySource::loadIds(const std::vector & ids) if (!dict_struct.id) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'id' is required for selective loading"); - auto cursor = createCursor(db, collection, sample_block); + Poco::MongoDB::Document query; /** NOTE: While building array, Poco::MongoDB requires passing of different 
unused element names, along with values. * In general, Poco::MongoDB is quite inefficient and bulky. @@ -188,9 +188,9 @@ QueryPipeline MongoDBDictionarySource::loadIds(const std::vector & ids) for (const UInt64 id : ids) ids_array->add(DB::toString(id), static_cast(id)); - cursor->query().selector().addNewDocument(dict_struct.id->name).add("$in", ids_array); + query.addNewDocument(dict_struct.id->name).add("$in", ids_array); - return QueryPipeline(std::make_shared(connection, std::move(cursor), sample_block, max_block_size)); + return QueryPipeline(std::make_shared(connection, db, collection, query, sample_block, max_block_size)); } @@ -199,8 +199,7 @@ QueryPipeline MongoDBDictionarySource::loadKeys(const Columns & key_columns, con if (!dict_struct.key) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'key' is required for selective loading"); - auto cursor = createCursor(db, collection, sample_block); - + Poco::MongoDB::Document query; Poco::MongoDB::Array::Ptr keys_array(new Poco::MongoDB::Array); for (const auto row_idx : requested_rows) @@ -254,9 +253,9 @@ QueryPipeline MongoDBDictionarySource::loadKeys(const Columns & key_columns, con } /// If more than one key we should use $or - cursor->query().selector().add("$or", keys_array); + query.add("$or", keys_array); - return QueryPipeline(std::make_shared(connection, std::move(cursor), sample_block, max_block_size)); + return QueryPipeline(std::make_shared(connection, db, collection, query, sample_block, max_block_size)); } std::string MongoDBDictionarySource::toString() const diff --git a/src/Dictionaries/MongoDBDictionarySource.h b/src/Dictionaries/MongoDBDictionarySource.h index fefcb1bff9f..6d93bc6c090 100644 --- a/src/Dictionaries/MongoDBDictionarySource.h +++ b/src/Dictionaries/MongoDBDictionarySource.h @@ -16,7 +16,6 @@ namespace Util namespace MongoDB { class Connection; - class Cursor; } } diff --git a/src/Processors/Sources/MongoDBSource.cpp b/src/Processors/Sources/MongoDBSource.cpp index 279a842143f..94b9cb7ad64 100644 --- a/src/Processors/Sources/MongoDBSource.cpp +++ b/src/Processors/Sources/MongoDBSource.cpp @@ -3,10 +3,12 @@ #include #include +#include +#include #include #include +#include #include -#include #include #include @@ -365,27 +367,79 @@ namespace } -std::unique_ptr createCursor(const std::string & database, const std::string & collection, const Block & sample_block_to_select) +bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_) { - auto cursor = std::make_unique(database, collection); + Poco::MongoDB::Database db("config"); + Poco::MongoDB::Document::Ptr doc = db.queryServerHello(connection_); + auto _wireVersion = doc->getInteger("maxWireVersion"); + return _wireVersion < Poco::MongoDB::Database::WireVersion::VER_36; +} + + +MongoDBCursor::MongoDBCursor( + const std::string & database, + const std::string & collection, + const Block & sample_block_to_select, + const Poco::MongoDB::Document & query, + Poco::MongoDB::Connection & connection) + : is_wire_protocol_old(isMongoDBWireProtocolOld(connection)) +{ + Poco::MongoDB::Document projection; /// Looks like selecting _id column is implicit by default. 
if (!sample_block_to_select.has("_id")) - cursor->query().returnFieldSelector().add("_id", 0); + projection.add("_id", 0); for (const auto & column : sample_block_to_select) - cursor->query().returnFieldSelector().add(column.name, 1); - return cursor; + projection.add(column.name, 1); + + if (is_wire_protocol_old) + { + old_cursor = std::make_unique(database, collection); + old_cursor->query().selector() = query; + old_cursor->query().returnFieldSelector() = projection; + } + else + { + new_cursor = std::make_unique(database, collection); + new_cursor->query().setCommandName(Poco::MongoDB::OpMsgMessage::CMD_FIND); + new_cursor->query().body().addNewDocument("filter") = query; + new_cursor->query().body().addNewDocument("projection") = projection; + } } +Poco::MongoDB::Document::Vector MongoDBCursor::nextDocuments(Poco::MongoDB::Connection & connection) +{ + if (is_wire_protocol_old) + { + auto response = old_cursor->next(connection); + cursorID_ = response.cursorID(); + return std::move(response.documents()); + } + else + { + auto response = new_cursor->next(connection); + cursorID_ = new_cursor->cursorID(); + return std::move(response.documents()); + } +} + +Int64 MongoDBCursor::cursorID() +{ + return cursorID_; +} + + MongoDBSource::MongoDBSource( std::shared_ptr & connection_, - std::unique_ptr cursor_, + const String & database_name_, + const String & collection_name_, + const Poco::MongoDB::Document & query_, const Block & sample_block, UInt64 max_block_size_) : ISource(sample_block.cloneEmpty()) , connection(connection_) - , cursor{std::move(cursor_)} + , cursor(database_name_, collection_name_, sample_block, query_, *connection_) , max_block_size{max_block_size_} { description.init(sample_block); @@ -412,9 +466,9 @@ Chunk MongoDBSource::generate() size_t num_rows = 0; while (num_rows < max_block_size) { - Poco::MongoDB::ResponseMessage & response = cursor->next(*connection); + auto documents = cursor.nextDocuments(*connection); - for (auto & document : response.documents()) + for (auto & document : documents) { if (document->exists("ok") && document->exists("$err") && document->exists("code") && document->getInteger("ok") == 0) @@ -458,7 +512,7 @@ Chunk MongoDBSource::generate() } } - if (response.cursorID() == 0) + if (cursor.cursorID() == 0) { all_read = true; break; diff --git a/src/Processors/Sources/MongoDBSource.h b/src/Processors/Sources/MongoDBSource.h index d4681d2c05f..f816ccfd1c9 100644 --- a/src/Processors/Sources/MongoDBSource.h +++ b/src/Processors/Sources/MongoDBSource.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include @@ -14,7 +15,9 @@ namespace Poco namespace MongoDB { class Connection; + class Document; class Cursor; + class OpMsgCursor; } } @@ -30,7 +33,28 @@ struct MongoDBArrayInfo void authenticate(Poco::MongoDB::Connection & connection, const std::string & database, const std::string & user, const std::string & password); -std::unique_ptr createCursor(const std::string & database, const std::string & collection, const Block & sample_block_to_select); +bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_); + +class MongoDBCursor +{ +public: + MongoDBCursor( + const std::string & database, + const std::string & collection, + const Block & sample_block_to_select, + const Poco::MongoDB::Document & query, + Poco::MongoDB::Connection & connection); + + Poco::MongoDB::Document::Vector nextDocuments(Poco::MongoDB::Connection & connection); + + Int64 cursorID(); + +private: + const bool is_wire_protocol_old; + std::unique_ptr 
old_cursor; + std::unique_ptr new_cursor; + Int64 cursorID_ = 0; +}; /// Converts MongoDB Cursor to a stream of Blocks class MongoDBSource final : public ISource @@ -38,7 +62,9 @@ class MongoDBSource final : public ISource public: MongoDBSource( std::shared_ptr & connection_, - std::unique_ptr cursor_, + const String & database_name_, + const String & collection_name_, + const Poco::MongoDB::Document & query_, const Block & sample_block, UInt64 max_block_size_); @@ -50,7 +76,7 @@ private: Chunk generate() override; std::shared_ptr connection; - std::unique_ptr cursor; + MongoDBCursor cursor; const UInt64 max_block_size; ExternalResultDescription description; bool all_read = false; diff --git a/src/Storages/StorageMongoDB.cpp b/src/Storages/StorageMongoDB.cpp index 63b8c2d00a1..2a1d7e80c07 100644 --- a/src/Storages/StorageMongoDB.cpp +++ b/src/Storages/StorageMongoDB.cpp @@ -99,6 +99,7 @@ public: , db_name(db_name_) , metadata_snapshot{metadata_snapshot_} , connection(connection_) + , is_wire_protocol_old(isMongoDBWireProtocolOld(*connection_)) { } @@ -107,7 +108,7 @@ public: void consume(Chunk chunk) override { Poco::MongoDB::Database db(db_name); - Poco::MongoDB::Document::Ptr index = new Poco::MongoDB::Document(); + Poco::MongoDB::Document::Vector documents; auto block = getHeader().cloneWithColumns(chunk.detachColumns()); @@ -118,20 +119,35 @@ public: const auto data_types = block.getDataTypes(); const auto data_names = block.getNames(); - std::vector row(num_cols); + documents.reserve(num_rows); + for (const auto i : collections::range(0, num_rows)) { + Poco::MongoDB::Document::Ptr document = new Poco::MongoDB::Document(); + for (const auto j : collections::range(0, num_cols)) { WriteBufferFromOwnString ostr; data_types[j]->getDefaultSerialization()->serializeText(*columns[j], i, ostr, FormatSettings{}); - row[j] = ostr.str(); - index->add(data_names[j], row[j]); + document->add(data_names[j], ostr.str()); } + + documents.push_back(std::move(document)); + } + + if (is_wire_protocol_old) + { + Poco::SharedPtr insert_request = db.createInsertRequest(collection_name); + insert_request->documents() = std::move(documents); + connection->sendRequest(*insert_request); + } + else + { + Poco::SharedPtr insert_request = db.createOpMsgMessage(collection_name); + insert_request->setCommandName(Poco::MongoDB::OpMsgMessage::CMD_INSERT); + insert_request->documents() = std::move(documents); + connection->sendRequest(*insert_request); } - Poco::SharedPtr insert_request = db.createInsertRequest(collection_name); - insert_request->documents().push_back(index); - connection->sendRequest(*insert_request); } private: @@ -139,6 +155,8 @@ private: String db_name; StorageMetadataPtr metadata_snapshot; std::shared_ptr connection; + + const bool is_wire_protocol_old; }; @@ -162,7 +180,7 @@ Pipe StorageMongoDB::read( sample_block.insert({ column_data.type, column_data.name }); } - return Pipe(std::make_shared(connection, createCursor(database_name, collection_name, sample_block), sample_block, max_block_size)); + return Pipe(std::make_shared(connection, database_name, collection_name, Poco::MongoDB::Document{}, sample_block, max_block_size)); } SinkToStoragePtr StorageMongoDB::write(const ASTPtr & /* query */, const StorageMetadataPtr & metadata_snapshot, ContextPtr /* context */) diff --git a/tests/integration/test_storage_mongodb/test.py b/tests/integration/test_storage_mongodb/test.py index 6ba5520704d..e6e77c64515 100644 --- a/tests/integration/test_storage_mongodb/test.py +++ 
b/tests/integration/test_storage_mongodb/test.py @@ -71,6 +71,39 @@ def test_simple_select(started_cluster): simple_mongo_table.drop() +def test_simple_select_from_view(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + simple_mongo_table_view = db.create_collection( + "simple_table_view", viewOn="simple_table" + ) + + node = started_cluster.instances["node"] + node.query( + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo2:27017', 'test', 'simple_table_view', 'root', 'clickhouse')" + ) + + assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" + assert ( + node.query("SELECT sum(key) FROM simple_mongo_table") + == str(sum(range(0, 100))) + "\n" + ) + + assert ( + node.query("SELECT data from simple_mongo_table where key = 42") + == hex(42 * 42) + "\n" + ) + node.query("DROP TABLE simple_mongo_table") + simple_mongo_table_view.drop() + simple_mongo_table.drop() + + @pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_arrays(started_cluster): mongo_connection = get_mongo_connection(started_cluster) @@ -411,13 +444,16 @@ def test_simple_insert_select(started_cluster): node.query( "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" ) - node.query("INSERT INTO simple_mongo_table SELECT 1, 'kek'") + node.query( + "INSERT INTO simple_mongo_table SELECT number, 'kek' || toString(number) FROM numbers(10)" + ) assert ( - node.query("SELECT data from simple_mongo_table where key = 1").strip() == "kek" + node.query("SELECT data from simple_mongo_table where key = 7").strip() + == "kek7" ) node.query("INSERT INTO simple_mongo_table(key) SELECT 12") - assert int(node.query("SELECT count() from simple_mongo_table")) == 2 + assert int(node.query("SELECT count() from simple_mongo_table")) == 11 assert ( node.query("SELECT data from simple_mongo_table where key = 12").strip() == "" ) From 9ea0575ff8c7ac04d46ec93d012debf01eda55c5 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 22 May 2023 11:24:29 +0000 Subject: [PATCH 112/722] Update: rest tests which output is differ with enabled analyzer --- .../01655_plan_optimizations.reference | 55 ++++++++++++++++- .../0_stateless/01655_plan_optimizations.sh | 60 ++++++++++++++++--- 2 files changed, 107 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index 48d99647b43..9796d2e4f82 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -1,5 +1,4 @@ Too many optimizations applied to query plan -Too many optimizations applied to query plan > sipHash should be calculated after filtration FUNCTION sipHash64 Filter column: equals @@ -27,6 +26,11 @@ COLUMN Const(UInt8) -> notEquals(y, 0) Aggregating Filter Filter +> (analyzer) filter should be pushed down after aggregating, column after aggregation is const +COLUMN Const(UInt8) -> notEquals(y_1, 0_UInt8) +Aggregating +Filter +Filter 0 1 1 1 2 1 2 3 1 @@ -42,6 +46,11 @@ Filter column ALIAS notEquals(s, 4) :: 1 -> and(notEquals(y, 0), notEquals(s, 4)) Aggregating Filter column: 
notEquals(y, 0) +> (analyzer) one condition of filter should be pushed down after aggregating, other condition is aliased +Filter column +ALIAS notEquals(s_0, 4_UInt8) :: 0 -> and(notEquals(y_1, 0_UInt8), notEquals(s_0, 4_UInt8)) +Aggregating +Filter column: notEquals(y_1, 0_UInt8) 0 1 1 2 2 3 @@ -56,6 +65,11 @@ Filter column FUNCTION and(minus(s, 4) :: 1, 1 :: 3) -> and(notEquals(y, 0), minus(s, 4)) UInt8 : 2 Aggregating Filter column: notEquals(y, 0) +> (analyzer) one condition of filter should be pushed down after aggregating, other condition is casted +Filter column +FUNCTION and(minus(s_0, 4_UInt8) :: 0, 1 :: 3) -> and(notEquals(y_1, 0_UInt8), minus(s_0, 4_UInt8)) UInt8 : 2 +Aggregating +Filter column: notEquals(y_1, 0_UInt8) 0 1 1 2 2 3 @@ -70,6 +84,11 @@ Filter column FUNCTION and(minus(s, 8) :: 1, minus(s, 4) :: 2) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4)) Aggregating Filter column: notEquals(y, 0) +> (analyzer) one condition of filter should be pushed down after aggregating, other two conditions are ANDed +Filter column +FUNCTION and(minus(s_0, 8_UInt8) :: 0, minus(s_0, 4_UInt8) :: 2) -> and(notEquals(y_1, 0_UInt8), minus(s_0, 8_UInt8), minus(s_0, 4_UInt8)) +Aggregating +Filter column: notEquals(y_1, 0_UInt8) 0 1 1 2 2 3 @@ -83,6 +102,11 @@ Filter column ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4)) Aggregating Filter column: and(notEquals(y, 0), minus(y, 4)) +> (analyzer) two conditions of filter should be pushed down after aggregating and ANDed, one condition is aliased +Filter column +ALIAS notEquals(s_0, 8_UInt8) :: 0 -> and(notEquals(y_1, 0_UInt8), notEquals(s_0, 8_UInt8), minus(y_1, 4_UInt8)) +Aggregating +Filter column: and(notEquals(y_1, 0_UInt8), minus(y_1, 4_UInt8)) 0 1 1 2 2 3 @@ -95,11 +119,19 @@ Filter column: and(notEquals(y, 0), minus(y, 4)) Filter column: and(notEquals(y, 2), notEquals(x, 0)) ARRAY JOIN x Filter column: notEquals(y, 2) +> (analyzer) filter is split, one part is filtered before ARRAY JOIN +Filter column: and(notEquals(y_1, 2_UInt8), notEquals(x_0, 0_UInt8)) +ARRAY JOIN x_0 +Filter column: notEquals(y_1, 2_UInt8) 1 3 > filter is pushed down before Distinct Distinct Distinct Filter column: notEquals(y, 2) +> (analyzer) filter is pushed down before Distinct +Distinct +Distinct +Filter column: notEquals(y_1, 2_UInt8) 0 0 0 1 1 0 @@ -108,12 +140,20 @@ Filter column: notEquals(y, 2) Sorting Sorting Filter column: and(notEquals(x, 0), notEquals(y, 0)) +> (analyzer) filter is pushed down before sorting steps +Sorting +Sorting +Filter column: and(notEquals(x_0, 0_UInt8), notEquals(y_1, 0_UInt8)) 1 2 1 1 > filter is pushed down before TOTALS HAVING and aggregating TotalsHaving Aggregating Filter column: notEquals(y, 2) +> (analyzer) filter is pushed down before TOTALS HAVING and aggregating +TotalsHaving +Aggregating +Filter column: notEquals(y_0, 2_UInt8) 0 12 1 15 3 10 @@ -129,12 +169,18 @@ Filter Join Filter column: notEquals(number, 1) Join +> (analyzer) one condition of filter is pushed down before LEFT JOIN +Join +Filter column: notEquals(l.number_0, 1_UInt8) 0 0 3 3 > one condition of filter is pushed down before INNER JOIN Join Filter column: notEquals(number, 1) Join +> (analyzer) one condition of filter is pushed down before INNER JOIN +Join +Filter column: notEquals(l.number_0, 1_UInt8) 3 3 > filter is pushed down before UNION Union @@ -149,5 +195,12 @@ FUNCTION sipHash64 Sorting Expression (Before ORDER BY) FUNCTION plus +> (analyzer) function calculation should be done after sorting and limit (if 
possible) +> Expression should be divided into two subexpressions and only one of them should be moved after Sorting +Expression ((Project names + (Before ORDER BY + (Projection + Change column names to column identifiers)) [lifted up part])) +FUNCTION sipHash64 +Sorting +Expression ((Before ORDER BY + (Projection + Change column names to column identifiers))) +FUNCTION plus > this query should be executed without throwing an exception 0 diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index 7c299f9cc26..d68c2c8b414 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -124,11 +124,17 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> filter is split, one part is filtered before ARRAY JOIN" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " explain actions = 1 select x, y from ( select range(number) as x, number + 1 as y from numbers(3) ) array join x where y != 2 and x != 0" | grep -o "Filter column: and(notEquals(y, 2), notEquals(x, 0))\|ARRAY JOIN x\|Filter column: notEquals(y, 2)" +echo "> (analyzer) filter is split, one part is filtered before ARRAY JOIN" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " + explain actions = 1 select x, y from ( + select range(number) as x, number + 1 as y from numbers(3) + ) array join x where y != 2 and x != 0" | + grep -o "Filter column: and(notEquals(y_1, 2_UInt8), notEquals(x_0, 0_UInt8))\|ARRAY JOIN x_0\|Filter column: notEquals(y_1, 2_UInt8)" $CLICKHOUSE_CLIENT -q " select x, y from ( select range(number) as x, number + 1 as y from numbers(3) @@ -148,12 +154,19 @@ $CLICKHOUSE_CLIENT -q " # settings enable_optimize_predicate_expression=0" echo "> filter is pushed down before Distinct" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " explain actions = 1 select x, y from ( select distinct x, y from (select number % 2 as x, number % 3 as y from numbers(10)) ) where y != 2 settings enable_optimize_predicate_expression=0" | grep -o "Distinct\|Filter column: notEquals(y, 2)" +echo "> (analyzer) filter is pushed down before Distinct" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " + explain actions = 1 select x, y from ( + select distinct x, y from (select number % 2 as x, number % 3 as y from numbers(10)) + ) where y != 2 + settings enable_optimize_predicate_expression=0" | + grep -o "Distinct\|Filter column: notEquals(y_1, 2_UInt8)" $CLICKHOUSE_CLIENT -q " select x, y from ( select distinct x, y from (select number % 2 as x, number % 3 as y from numbers(10)) @@ -161,12 +174,19 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> filter is pushed down before sorting steps" -$CLICKHOUSE_CLIENT --convert_query_to_cnf=0 -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 --convert_query_to_cnf=0 -q " explain actions = 1 select x, y from ( select number % 2 as x, number % 3 as y from numbers(6) order by y desc ) where x != 0 and y != 0 settings enable_optimize_predicate_expression = 0" | grep -o "Sorting\|Filter column: and(notEquals(x, 0), notEquals(y, 0))" +echo "> (analyzer) filter is pushed down before sorting steps" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 --convert_query_to_cnf=0 -q " + explain actions = 1 select x, y from ( + select number % 2 as x, number % 3 as y from numbers(6) order by y desc + ) where x != 0 and y != 0 + settings 
enable_optimize_predicate_expression = 0" | + grep -o "Sorting\|Filter column: and(notEquals(x_0, 0_UInt8), notEquals(y_1, 0_UInt8))" $CLICKHOUSE_CLIENT -q " select x, y from ( select number % 2 as x, number % 3 as y from numbers(6) order by y desc @@ -174,12 +194,19 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression = 0" echo "> filter is pushed down before TOTALS HAVING and aggregating" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " explain actions = 1 select * from ( select y, sum(x) from (select number as x, number % 4 as y from numbers(10)) group by y with totals ) where y != 2 settings enable_optimize_predicate_expression=0" | grep -o "TotalsHaving\|Aggregating\|Filter column: notEquals(y, 2)" +echo "> (analyzer) filter is pushed down before TOTALS HAVING and aggregating" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " + explain actions = 1 select * from ( + select y, sum(x) from (select number as x, number % 4 as y from numbers(10)) group by y with totals + ) where y != 2 + settings enable_optimize_predicate_expression=0" | + grep -o "TotalsHaving\|Aggregating\|Filter column: notEquals(y_0, 2_UInt8)" $CLICKHOUSE_CLIENT -q " select * from ( select y, sum(x) from (select number as x, number % 4 as y from numbers(10)) group by y with totals @@ -197,24 +224,38 @@ $CLICKHOUSE_CLIENT -q " ) where number != 2 settings enable_optimize_predicate_expression=0" echo "> one condition of filter is pushed down before LEFT JOIN" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " explain actions = 1 select number as a, r.b from numbers(4) as l any left join ( select number + 2 as b from numbers(3) ) as r on a = r.b where a != 1 and b != 2 settings enable_optimize_predicate_expression = 0" | grep -o "Join\|Filter column: notEquals(number, 1)" +echo "> (analyzer) one condition of filter is pushed down before LEFT JOIN" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " + explain actions = 1 + select number as a, r.b from numbers(4) as l any left join ( + select number + 2 as b from numbers(3) + ) as r on a = r.b where a != 1 and b != 2 settings enable_optimize_predicate_expression = 0" | + grep -o "Join\|Filter column: notEquals(l.number_0, 1_UInt8)" $CLICKHOUSE_CLIENT -q " select number as a, r.b from numbers(4) as l any left join ( select number + 2 as b from numbers(3) ) as r on a = r.b where a != 1 and b != 2 settings enable_optimize_predicate_expression = 0" | sort echo "> one condition of filter is pushed down before INNER JOIN" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " explain actions = 1 select number as a, r.b from numbers(4) as l any inner join ( select number + 2 as b from numbers(3) ) as r on a = r.b where a != 1 and b != 2 settings enable_optimize_predicate_expression = 0" | grep -o "Join\|Filter column: notEquals(number, 1)" +echo "> (analyzer) one condition of filter is pushed down before INNER JOIN" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " + explain actions = 1 + select number as a, r.b from numbers(4) as l any inner join ( + select number + 2 as b from numbers(3) + ) as r on a = r.b where a != 1 and b != 2 settings enable_optimize_predicate_expression = 0" | + grep -o "Join\|Filter column: notEquals(l.number_0, 1_UInt8)" $CLICKHOUSE_CLIENT -q " select number as a, r.b from numbers(4) as l any inner join ( select number + 2 as b from numbers(3) @@ -233,7 +274,12 @@ $CLICKHOUSE_CLIENT -q " echo "> function calculation should be 
done after sorting and limit (if possible)" echo "> Expression should be divided into two subexpressions and only one of them should be moved after Sorting" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " + explain actions = 1 select number as n, sipHash64(n) from numbers(100) order by number + 1 limit 5" | + sed 's/^ *//g' | grep -o "^ *\(Expression (.*Before ORDER BY.*)\|Sorting\|FUNCTION \w\+\)" +echo "> (analyzer) function calculation should be done after sorting and limit (if possible)" +echo "> Expression should be divided into two subexpressions and only one of them should be moved after Sorting" +$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " explain actions = 1 select number as n, sipHash64(n) from numbers(100) order by number + 1 limit 5" | sed 's/^ *//g' | grep -o "^ *\(Expression (.*Before ORDER BY.*)\|Sorting\|FUNCTION \w\+\)" echo "> this query should be executed without throwing an exception" From 90f4b1777832a8e6c3a343671be091c727f3e065 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Mon, 22 May 2023 15:45:18 +0000 Subject: [PATCH 113/722] Fix build & test --- .../test/integration/runner/compose/docker_compose_mongo.yml | 4 ++-- src/Processors/Sources/MongoDBSource.cpp | 2 +- src/Processors/Sources/MongoDBSource.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/test/integration/runner/compose/docker_compose_mongo.yml b/docker/test/integration/runner/compose/docker_compose_mongo.yml index 60361e9e98d..8cdcbc421e8 100644 --- a/docker/test/integration/runner/compose/docker_compose_mongo.yml +++ b/docker/test/integration/runner/compose/docker_compose_mongo.yml @@ -1,7 +1,7 @@ version: '2.3' services: mongo1: - image: mongo:5.1 + image: mongo:6.0 restart: always environment: MONGO_INITDB_ROOT_USERNAME: root @@ -11,7 +11,7 @@ services: command: --profile=2 --verbose mongo2: - image: mongo:5.0 + image: mongo:6.0 restart: always ports: - ${MONGO_NO_CRED_EXTERNAL_PORT:-27017}:${MONGO_NO_CRED_INTERNAL_PORT:-27017} diff --git a/src/Processors/Sources/MongoDBSource.cpp b/src/Processors/Sources/MongoDBSource.cpp index 94b9cb7ad64..74dfa13158c 100644 --- a/src/Processors/Sources/MongoDBSource.cpp +++ b/src/Processors/Sources/MongoDBSource.cpp @@ -424,7 +424,7 @@ Poco::MongoDB::Document::Vector MongoDBCursor::nextDocuments(Poco::MongoDB::Conn } } -Int64 MongoDBCursor::cursorID() +Int64 MongoDBCursor::cursorID() const { return cursorID_; } diff --git a/src/Processors/Sources/MongoDBSource.h b/src/Processors/Sources/MongoDBSource.h index f816ccfd1c9..2bc5481e20b 100644 --- a/src/Processors/Sources/MongoDBSource.h +++ b/src/Processors/Sources/MongoDBSource.h @@ -47,7 +47,7 @@ public: Poco::MongoDB::Document::Vector nextDocuments(Poco::MongoDB::Connection & connection); - Int64 cursorID(); + Int64 cursorID() const; private: const bool is_wire_protocol_old; From b8305503d89783b6700ae2c43f69b96798181b03 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 22 May 2023 19:07:18 +0200 Subject: [PATCH 114/722] more flexible cleanup thread scheduling --- base/base/interpolate.h | 5 + src/Storages/MergeTree/IMergeTreeDataPart.cpp | 10 +- src/Storages/MergeTree/IMergeTreeDataPart.h | 3 + src/Storages/MergeTree/MergeTreeData.cpp | 26 ++- src/Storages/MergeTree/MergeTreeData.h | 6 + src/Storages/MergeTree/MergeTreeSettings.h | 4 +- .../ReplicatedMergeTreeCleanupThread.cpp | 171 +++++++++++++++--- .../ReplicatedMergeTreeCleanupThread.h | 28 ++- .../MergeTree/SimpleMergeSelector.cpp | 8 +- 
src/Storages/StorageReplicatedMergeTree.cpp | 9 +- src/Storages/StorageReplicatedMergeTree.h | 4 +- tests/config/config.d/merge_tree.xml | 2 + .../test.py | 3 +- .../test_broken_part_during_merge/test.py | 2 +- .../test.py | 6 +- tests/integration/test_drop_replica/test.py | 15 +- tests/integration/test_jbod_balancer/test.py | 1 + tests/integration/test_jbod_ha/test.py | 1 + tests/integration/test_lost_part/test.py | 8 +- tests/integration/test_multiple_disks/test.py | 6 +- .../test_old_parts_finally_removed/test.py | 5 +- .../test_parts_delete_zookeeper/test.py | 2 +- .../integration/test_recovery_replica/test.py | 2 +- tests/integration/test_storage_nats/test.py | 3 +- .../integration/test_storage_rabbitmq/test.py | 6 +- tests/integration/test_system_metrics/test.py | 4 +- tests/integration/test_ttl_replicated/test.py | 3 +- ..._replace_partition_from_table_zookeeper.sh | 8 +- .../00652_replicated_mutations_zookeeper.sh | 6 +- ...ated_minimalistic_part_header_zookeeper.sh | 6 +- ...0953_zookeeper_suetin_deduplication_bug.sh | 2 +- .../00988_parallel_parts_removal.sql | 4 +- ...tem_parts_race_condition_zookeeper_long.sh | 10 +- ...tem_parts_race_condition_drop_zookeeper.sh | 3 +- ...034_move_partition_from_table_zookeeper.sh | 6 +- ...ent_move_partition_from_table_zookeeper.sh | 3 +- ...076_parallel_alter_replicated_zookeeper.sh | 3 +- ...9_parallel_alter_detach_table_zookeeper.sh | 5 +- .../01103_optimize_drop_race_zookeeper.sh | 4 +- .../0_stateless/01158_zookeeper_log_long.sql | 2 +- ...nactive_replica_cleanup_nodes_zookeeper.sh | 6 +- ...e_condition_rename_clear_zookeeper_long.sh | 4 +- .../01509_parallel_quorum_and_merge_long.sh | 3 +- ...nt_ttl_and_normal_merges_zookeeper_long.sh | 3 +- .../0_stateless/02067_lost_part_s3.sql | 12 +- .../02370_lost_part_intersecting_merges.sh | 2 +- .../02396_system_parts_race_condition_rm.sh | 8 +- ...397_system_parts_race_condition_drop_rm.sh | 4 +- .../02432_s3_parallel_parts_cleanup.sql | 4 +- .../02448_clone_replica_lost_part.sql | 6 +- ..._projection_and_mutation_work_together.sql | 6 +- .../02515_cleanup_async_insert_block_ids.sh | 2 +- 52 files changed, 353 insertions(+), 112 deletions(-) diff --git a/base/base/interpolate.h b/base/base/interpolate.h index 1d4fc0b6257..4c27f70c95b 100644 --- a/base/base/interpolate.h +++ b/base/base/interpolate.h @@ -11,3 +11,8 @@ constexpr double interpolateExponential(double min, double max, double ratio) assert(min > 0 && ratio >= 0 && ratio <= 1); return min * std::pow(max / min, ratio); } + +constexpr double interpolateLinear(double min, double max, double ratio) +{ + return std::lerp(min, max, ratio); +} diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index d27b03fff44..3d2b6ecc540 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -211,9 +211,9 @@ void IMergeTreeDataPart::MinMaxIndex::appendFiles(const MergeTreeData & data, St } -static void incrementStateMetric(MergeTreeDataPartState state) +void IMergeTreeDataPart::incrementStateMetric(MergeTreeDataPartState state_) const { - switch (state) + switch (state_) { case MergeTreeDataPartState::Temporary: CurrentMetrics::add(CurrentMetrics::PartsTemporary); @@ -227,6 +227,7 @@ static void incrementStateMetric(MergeTreeDataPartState state) CurrentMetrics::add(CurrentMetrics::PartsCommitted); return; case MergeTreeDataPartState::Outdated: + storage.total_outdated_parts_count.fetch_add(1, std::memory_order_relaxed); 
CurrentMetrics::add(CurrentMetrics::PartsOutdated); return; case MergeTreeDataPartState::Deleting: @@ -238,9 +239,9 @@ static void incrementStateMetric(MergeTreeDataPartState state) } } -static void decrementStateMetric(MergeTreeDataPartState state) +void IMergeTreeDataPart::decrementStateMetric(MergeTreeDataPartState state_) const { - switch (state) + switch (state_) { case MergeTreeDataPartState::Temporary: CurrentMetrics::sub(CurrentMetrics::PartsTemporary); @@ -254,6 +255,7 @@ static void decrementStateMetric(MergeTreeDataPartState state) CurrentMetrics::sub(CurrentMetrics::PartsCommitted); return; case MergeTreeDataPartState::Outdated: + storage.total_outdated_parts_count.fetch_sub(1, std::memory_order_relaxed); CurrentMetrics::sub(CurrentMetrics::PartsOutdated); return; case MergeTreeDataPartState::Deleting: diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 388d96314c0..ecc1523b6c0 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -623,6 +623,9 @@ private: /// for this column with default parameters. CompressionCodecPtr detectDefaultCompressionCodec() const; + void incrementStateMetric(MergeTreeDataPartState state) const; + void decrementStateMetric(MergeTreeDataPartState state) const; + mutable MergeTreeDataPartState state{MergeTreeDataPartState::Temporary}; /// This ugly flag is needed for debug assertions only diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index b21f44baeb5..5cfc4c577dc 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -81,6 +81,7 @@ #include #include +#include #include #include @@ -4311,6 +4312,29 @@ size_t MergeTreeData::getActivePartsCount() const } +size_t MergeTreeData::getOutdatedPartsCount() const +{ + return total_outdated_parts_count.load(std::memory_order_relaxed); +} + +size_t MergeTreeData::getNumberOfOutdatedPartsWithExpiredRemovalTime() const +{ + size_t res = 0; + + auto time_now = time(nullptr); + + auto parts_lock = lockParts(); + auto outdated_parts_range = getDataPartsStateRange(DataPartState::Outdated); + for (const auto & part : outdated_parts_range) + { + auto part_remove_time = part->remove_time.load(std::memory_order_relaxed); + if (part_remove_time <= time_now && time_now - part_remove_time >= getSettings()->old_parts_lifetime.totalSeconds() && part.unique()) + ++res; + } + + return res; +} + std::pair MergeTreeData::getMaxPartsCountAndSizeForPartitionWithState(DataPartState state) const { auto lock = lockParts(); @@ -4519,7 +4543,7 @@ void MergeTreeData::delayMutationOrThrowIfNeeded(Poco::Event * until, const Cont size_t allowed_mutations_over_threshold = num_mutations_to_throw - num_mutations_to_delay; double delay_factor = std::min(static_cast(mutations_over_threshold) / allowed_mutations_over_threshold, 1.0); - size_t delay_milliseconds = static_cast(std::lerp(settings->min_delay_to_mutate_ms, settings->max_delay_to_mutate_ms, delay_factor)); + size_t delay_milliseconds = static_cast(interpolateLinear(settings->min_delay_to_mutate_ms, settings->max_delay_to_mutate_ms, delay_factor)); ProfileEvents::increment(ProfileEvents::DelayedMutations); ProfileEvents::increment(ProfileEvents::DelayedMutationsMilliseconds, delay_milliseconds); diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 5488ce72631..4a71c24e6d3 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ 
b/src/Storages/MergeTree/MergeTreeData.h @@ -532,6 +532,10 @@ public: size_t getActivePartsCount() const; + size_t getOutdatedPartsCount() const; + + size_t getNumberOfOutdatedPartsWithExpiredRemovalTime() const; + /// Returns a pair with: max number of parts in partition across partitions; sum size of parts inside that partition. /// (if there are multiple partitions with max number of parts, the sum size of parts is returned for arbitrary of them) std::pair getMaxPartsCountAndSizeForPartitionWithState(DataPartState state) const; @@ -1491,6 +1495,8 @@ private: std::atomic total_active_size_rows = 0; std::atomic total_active_size_parts = 0; + mutable std::atomic total_outdated_parts_count = 0; + // Record all query ids which access the table. It's guarded by `query_id_set_mutex` and is always mutable. mutable std::set query_id_set TSA_GUARDED_BY(query_id_set_mutex); mutable std::mutex query_id_set_mutex; diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index c9e81ce9103..78d703e795c 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -120,8 +120,10 @@ struct Settings; \ /** Check delay of replicas settings. */ \ M(UInt64, min_relative_delay_to_measure, 120, "Calculate relative replica delay only if absolute delay is not less that this value.", 0) \ - M(UInt64, cleanup_delay_period, 30, "Period to clean old queue logs, blocks hashes and parts.", 0) \ + M(UInt64, cleanup_delay_period, 30, "Minimum period to clean old queue logs, blocks hashes and parts.", 0) \ + M(UInt64, max_cleanup_delay_period, 300, "Maximum period to clean old queue logs, blocks hashes and parts.", 0) \ M(UInt64, cleanup_delay_period_random_add, 10, "Add uniformly distributed value from 0 to x seconds to cleanup_delay_period to avoid thundering herd effect and subsequent DoS of ZooKeeper in case of very large number of tables.", 0) \ + M(UInt64, cleanup_thread_preferred_points_per_iteration, 150, "Preferred batch size for background cleanup (points are abstract but 1 point is approximately equivalent to 1 inserted block).", 0) \ M(UInt64, min_relative_delay_to_close, 300, "Minimal delay from other replicas to close, stop serving requests and not return Ok during status check.", 0) \ M(UInt64, min_absolute_delay_to_close, 0, "Minimal absolute delay to close, stop serving requests and not return Ok during status check.", 0) \ M(UInt64, enable_vertical_merge_algorithm, 1, "Enable usage of Vertical merge algorithm.", 0) \ diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 0409cadc1e9..35a860ebb42 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -25,19 +25,22 @@ ReplicatedMergeTreeCleanupThread::ReplicatedMergeTreeCleanupThread(StorageReplic : storage(storage_) , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeCleanupThread)") , log(&Poco::Logger::get(log_name)) + , sleep_ms(storage.getSettings()->cleanup_delay_period * 1000) { task = storage.getContext()->getSchedulePool().createTask(log_name, [this]{ run(); }); } void ReplicatedMergeTreeCleanupThread::run() { - auto storage_settings = storage.getSettings(); - const auto sleep_ms = storage_settings->cleanup_delay_period * 1000 - + std::uniform_int_distribution(0, storage_settings->cleanup_delay_period_random_add * 1000)(rng); + SCOPE_EXIT({ 
is_running.store(false, std::memory_order_relaxed); });
+    is_running.store(true, std::memory_order_relaxed);
+    auto storage_settings = storage.getSettings();
+
+    Float32 cleanup_points = 0;
     try
     {
-        iterate();
+        cleanup_points = iterate();
     }
     catch (const Coordination::Exception & e)
     {
@@ -51,39 +54,144 @@ void ReplicatedMergeTreeCleanupThread::run()
         tryLogCurrentException(log, __PRETTY_FUNCTION__);
     }
 
+    UInt64 prev_timestamp = prev_cleanup_timestamp_ms.load(std::memory_order_relaxed);
+    UInt64 now_ms = clock_gettime_ns_adjusted(prev_timestamp * 1'000'000) / 1'000'000;
+
+    /// Do not adjust sleep_ms on the first run after starting the server
+    if (prev_timestamp && storage_settings->cleanup_thread_preferred_points_per_iteration)
+    {
+        /// We don't want to run the task too often when the table was barely changed and there's almost nothing to clean up.
+        /// But we cannot simply sleep max_cleanup_delay_period (300s) when nothing was cleaned up and cleanup_delay_period (30s)
+        /// when we removed something, because inserting one part per 30s will lead to running cleanup each 30s just to remove one part.
+        /// So we need some interpolation based on preferred batch size.
+        auto expected_cleanup_points = storage_settings->cleanup_thread_preferred_points_per_iteration;
+
+        /// How long should we sleep to remove cleanup_thread_preferred_points_per_iteration on the next iteration?
+        Float32 ratio = cleanup_points / expected_cleanup_points;
+        if (ratio == 0)
+            sleep_ms = storage_settings->max_cleanup_delay_period * 1000;
+        else
+            sleep_ms = static_cast(sleep_ms / ratio);
+
+        if (sleep_ms < storage_settings->cleanup_delay_period * 1000)
+            sleep_ms = storage_settings->cleanup_delay_period * 1000;
+        if (storage_settings->max_cleanup_delay_period * 1000 < sleep_ms)
+            sleep_ms = storage_settings->max_cleanup_delay_period * 1000;
+
+        UInt64 interval_ms = now_ms - prev_timestamp;
+        LOG_TRACE(log, "Scheduling next cleanup after {}ms (points: {}, interval: {}ms, ratio: {}, points per minute: {})",
+            sleep_ms, cleanup_points, interval_ms, ratio, cleanup_points / interval_ms * 60'000);
+    }
+    prev_cleanup_timestamp_ms.store(now_ms, std::memory_order_relaxed);
+
+    sleep_ms += std::uniform_int_distribution(0, storage_settings->cleanup_delay_period_random_add * 1000)(rng);
     task->scheduleAfter(sleep_ms);
 }
 
-
-void ReplicatedMergeTreeCleanupThread::iterate()
+void ReplicatedMergeTreeCleanupThread::wakeupEarlierIfNeeded()
 {
-    storage.clearOldPartsAndRemoveFromZK();
+    /// It may happen that the table was idle for a long time, but then a user started to aggressively insert (or mutate) data.
+    /// In this case, sleep_ms was set to the highest possible value, the task is not going to wake up soon,
+    /// but the number of objects to clean up is growing. We need to wake up the task earlier.
+    auto storage_settings = storage.getSettings();
+    if (!storage_settings->cleanup_thread_preferred_points_per_iteration)
+        return;
+
+    /// The number of other objects (logs, blocks, etc) is usually correlated with the number of Outdated parts.
+    /// Do not wake up unless we have too many.
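+    /// "Too many" means at least two preferred batches worth of work; getOutdatedPartsCount() only reads
+    /// an atomic counter, so this early check is cheap compared to getNumberOfOutdatedPartsWithExpiredRemovalTime().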
+    size_t number_of_outdated_objects = storage.getOutdatedPartsCount();
+    if (number_of_outdated_objects < storage_settings->cleanup_thread_preferred_points_per_iteration * 2)
+        return;
+
+    /// A race condition is possible here, but it's okay
+    if (is_running.load(std::memory_order_relaxed))
+        return;
+
+    /// Do not re-check all parts too often (avoid constantly calling getNumberOfOutdatedPartsWithExpiredRemovalTime())
+    if (!wakeup_check_timer.compareAndRestart(storage_settings->cleanup_delay_period / 4))
+        return;
+
+    UInt64 prev_run_timestamp_ms = prev_cleanup_timestamp_ms.load(std::memory_order_relaxed);
+    UInt64 now_ms = clock_gettime_ns_adjusted(prev_run_timestamp_ms * 1'000'000) / 1'000'000;
+    if (!prev_run_timestamp_ms || now_ms <= prev_run_timestamp_ms)
+        return;
+
+    /// Don't run it more often than cleanup_delay_period
+    UInt64 seconds_passed = (now_ms - prev_run_timestamp_ms) / 1000;
+    if (seconds_passed < storage_settings->cleanup_delay_period)
+        return;
+
+    /// Do not count parts that cannot be removed anyway. Do not wake up unless we have too many.
+    number_of_outdated_objects = storage.getNumberOfOutdatedPartsWithExpiredRemovalTime();
+    if (number_of_outdated_objects < storage_settings->cleanup_thread_preferred_points_per_iteration * 2)
+        return;
+
+    LOG_TRACE(log, "Waking up cleanup thread because there are {} outdated objects and previous cleanup finished {}s ago",
+        number_of_outdated_objects, seconds_passed);
+
+    wakeup();
+}
+
+
+Float32 ReplicatedMergeTreeCleanupThread::iterate()
+{
+    size_t cleaned_logs = 0;
+    Float32 cleaned_blocks = 0;
+    size_t cleaned_other = 0;
+    size_t cleaned_part_like = 0;
+    size_t cleaned_parts = storage.clearOldPartsAndRemoveFromZK();
+
+    auto storage_settings = storage.getSettings();
     {
         auto lock = storage.lockForShare(RWLockImpl::NO_QUERY, storage.getSettings()->lock_acquire_timeout_for_background_operations);
         /// Both use relative_data_path which changes during rename, so we
         /// do it under share lock
-        storage.clearOldWriteAheadLogs();
-        storage.clearOldTemporaryDirectories(storage.getSettings()->temporary_directories_lifetime.totalSeconds());
+        cleaned_other += storage.clearOldWriteAheadLogs();
+        cleaned_part_like += storage.clearOldTemporaryDirectories(storage.getSettings()->temporary_directories_lifetime.totalSeconds());
         if (storage.getSettings()->merge_tree_enable_clear_old_broken_detached)
-            storage.clearOldBrokenPartsFromDetachedDirectory();
+            cleaned_part_like += storage.clearOldBrokenPartsFromDetachedDirectory();
     }
 
     /// This is loose condition: no problem if we actually had lost leadership at this moment
     /// and two replicas will try to do cleanup simultaneously.
if (storage.is_leader) { - clearOldLogs(); - auto storage_settings = storage.getSettings(); - clearOldBlocks("blocks", storage_settings->replicated_deduplication_window_seconds, storage_settings->replicated_deduplication_window, cached_block_stats_for_sync_inserts); - clearOldBlocks("async_blocks", storage_settings->replicated_deduplication_window_seconds_for_async_inserts, storage_settings->replicated_deduplication_window_for_async_inserts, cached_block_stats_for_async_inserts); - clearOldMutations(); - storage.clearEmptyParts(); + cleaned_logs = clearOldLogs(); + size_t normal_blocks = clearOldBlocks("blocks", storage_settings->replicated_deduplication_window_seconds, + storage_settings->replicated_deduplication_window, cached_block_stats_for_sync_inserts); + + size_t async_blocks = clearOldBlocks("async_blocks", + storage_settings->replicated_deduplication_window_seconds_for_async_inserts, + storage_settings->replicated_deduplication_window_for_async_inserts, + cached_block_stats_for_async_inserts); + + /// Many async blocks are transformed into one ordinary block + Float32 async_blocks_per_block = static_cast(storage_settings->replicated_deduplication_window) / + (storage_settings->replicated_deduplication_window_for_async_inserts + 1); + cleaned_blocks = (normal_blocks + async_blocks * async_blocks_per_block) / 2; + + cleaned_other += clearOldMutations(); + cleaned_part_like += storage.clearEmptyParts(); } + + /// We need to measure the number of removed objects somehow (for better scheduling), + /// but just summing the number of removed async blocks, logs, and empty parts does not make any sense. + /// So we are trying to (approximately) measure the number of inserted blocks/parts, so we will be able to compare apples to apples. + + /// Each inserted block produces 3 objects that have to be cleaned up: one block, one log entry and one part. + /// A few new parts get merged together producing one log entry and one part. + + /// Other objects (like mutations and WALs) are much more rare than Outdated parts (because mutations usually produce + /// many Outdated parts, and WALs usually contain many parts too). We count then as one part for simplicity. 
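Since many async block hashes collapse into a single ordinary block, the code above scales async removals by replicated_deduplication_window / (replicated_deduplication_window_for_async_inserts + 1) before averaging them with the ordinary count. A small illustration with example values for the two windows (chosen for round numbers, not read from the patch):

#include <cstdio>

// Illustrative arithmetic for the block-normalization step in iterate().
int main()
{
    float dedup_window = 100;          // replicated_deduplication_window
    float dedup_window_async = 10000;  // ..._for_async_inserts
    float async_blocks_per_block = dedup_window / (dedup_window_async + 1);

    float normal_blocks = 50;          // removed ordinary block hashes
    float async_blocks = 5000;         // removed async block hashes
    float cleaned_blocks = (normal_blocks + async_blocks * async_blocks_per_block) / 2;

    // 5000 async removals count roughly like 50 ordinary ones here,
    // so cleaned_blocks ends up close to the ordinary count.
    std::printf("cleaned_blocks = %.1f\n", cleaned_blocks);
    return 0;
}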
+ + constexpr Float32 parts_number_amplification = 1.3f; /// Assuming we merge 4-5 parts each time + Float32 cleaned_inserted_parts = (cleaned_blocks + (cleaned_logs + cleaned_parts) / parts_number_amplification) / 3; + return cleaned_inserted_parts + cleaned_part_like + cleaned_other; } -void ReplicatedMergeTreeCleanupThread::clearOldLogs() +size_t ReplicatedMergeTreeCleanupThread::clearOldLogs() { auto zookeeper = storage.getZooKeeper(); auto storage_settings = storage.getSettings(); @@ -102,7 +210,7 @@ void ReplicatedMergeTreeCleanupThread::clearOldLogs() size_t min_replicated_logs_to_keep = static_cast(storage_settings->min_replicated_logs_to_keep * ratio); if (static_cast(children_count) < min_replicated_logs_to_keep) - return; + return 0; Strings replicas = zookeeper->getChildren(storage.zookeeper_path + "/replicas", &stat); @@ -114,7 +222,7 @@ void ReplicatedMergeTreeCleanupThread::clearOldLogs() Strings entries = zookeeper->getChildren(storage.zookeeper_path + "/log"); if (entries.empty()) - return; + return 0; ::sort(entries.begin(), entries.end()); @@ -227,7 +335,7 @@ void ReplicatedMergeTreeCleanupThread::clearOldLogs() entries.erase(std::lower_bound(entries.begin(), entries.end(), "log-" + padIndex(min_saved_log_pointer)), entries.end()); if (entries.empty()) - return; + return 0; markLostReplicas( host_versions_lost_replicas, @@ -268,6 +376,8 @@ void ReplicatedMergeTreeCleanupThread::clearOldLogs() if (i != 0) LOG_DEBUG(log, "Removed {} old log entries: {} - {}", i, entries[0], entries[i - 1]); + + return i; } @@ -323,7 +433,7 @@ struct ReplicatedMergeTreeCleanupThread::NodeWithStat } }; -void ReplicatedMergeTreeCleanupThread::clearOldBlocks(const String & blocks_dir_name, UInt64 window_seconds, UInt64 window_size, NodeCTimeAndVersionCache & cached_block_stats) +size_t ReplicatedMergeTreeCleanupThread::clearOldBlocks(const String & blocks_dir_name, UInt64 window_seconds, UInt64 window_size, NodeCTimeAndVersionCache & cached_block_stats) { auto zookeeper = storage.getZooKeeper(); @@ -331,7 +441,7 @@ void ReplicatedMergeTreeCleanupThread::clearOldBlocks(const String & blocks_dir_ getBlocksSortedByTime(blocks_dir_name, *zookeeper, timed_blocks, cached_block_stats); if (timed_blocks.empty()) - return; + return 0; /// Use ZooKeeper's first node (last according to time) timestamp as "current" time. Int64 current_time = timed_blocks.front().ctime; @@ -350,7 +460,7 @@ void ReplicatedMergeTreeCleanupThread::clearOldBlocks(const String & blocks_dir_ auto num_nodes_to_delete = timed_blocks.end() - first_outdated_block; if (!num_nodes_to_delete) - return; + return 0; auto last_outdated_block = timed_blocks.end() - 1; LOG_TRACE(log, "Will clear {} old blocks from {} (ctime {}) to {} (ctime {})", num_nodes_to_delete, @@ -388,6 +498,7 @@ void ReplicatedMergeTreeCleanupThread::clearOldBlocks(const String & blocks_dir_ } LOG_TRACE(log, "Cleared {} old blocks from ZooKeeper", num_nodes_to_delete); + return num_nodes_to_delete; } @@ -456,17 +567,17 @@ void ReplicatedMergeTreeCleanupThread::getBlocksSortedByTime(const String & bloc } -void ReplicatedMergeTreeCleanupThread::clearOldMutations() +size_t ReplicatedMergeTreeCleanupThread::clearOldMutations() { auto storage_settings = storage.getSettings(); if (!storage_settings->finished_mutations_to_keep) - return; + return 0; if (storage.queue.countFinishedMutations() <= storage_settings->finished_mutations_to_keep) { /// Not strictly necessary, but helps to avoid unnecessary ZooKeeper requests. 
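As a made-up illustration of the formula above: with cleaned_blocks = 50, cleaned_logs = 60 and cleaned_parts = 70, the amplification step gives (50 + (60 + 70) / 1.3) / 3 = 50 inserted-part equivalents; adding, say, 5 part-like and 2 other removals yields 57 points, which is the value the scheduler in run() compares against cleanup_thread_preferred_points_per_iteration.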
/// If even this replica hasn't finished enough mutations yet, then we don't need to clean anything. - return; + return 0; } auto zookeeper = storage.getZooKeeper(); @@ -481,7 +592,7 @@ void ReplicatedMergeTreeCleanupThread::clearOldMutations() // No Need to check return value to delete mutations. zookeeper->tryGet(storage.zookeeper_path + "/replicas/" + replica + "/mutation_pointer", pointer); if (pointer.empty()) - return; /// One replica hasn't done anything yet so we can't delete any mutations. + return 0; /// One replica hasn't done anything yet so we can't delete any mutations. min_pointer = std::min(parse(pointer), min_pointer); } @@ -492,11 +603,11 @@ void ReplicatedMergeTreeCleanupThread::clearOldMutations() entries.erase(std::upper_bound(entries.begin(), entries.end(), padIndex(min_pointer)), entries.end()); /// Do not remove last `storage_settings->finished_mutations_to_keep` entries. if (entries.size() <= storage_settings->finished_mutations_to_keep) - return; + return 0; entries.erase(entries.end() - storage_settings->finished_mutations_to_keep, entries.end()); if (entries.empty()) - return; + return 0; Coordination::Requests ops; size_t batch_start_i = 0; @@ -526,6 +637,8 @@ void ReplicatedMergeTreeCleanupThread::clearOldMutations() ops.clear(); } } + + return entries.size(); } } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h index 76b9ee4a575..57de7944970 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -31,6 +32,8 @@ public: void stop() { task->deactivate(); } + void wakeupEarlierIfNeeded(); + private: StorageReplicatedMergeTree & storage; String log_name; @@ -38,11 +41,20 @@ private: BackgroundSchedulePool::TaskHolder task; pcg64 rng{randomSeed()}; - void run(); - void iterate(); + UInt64 sleep_ms; - /// Remove old records from ZooKeeper. - void clearOldLogs(); + std::atomic prev_cleanup_timestamp_ms = 0; + std::atomic is_running = false; + + AtomicStopwatch wakeup_check_timer; + + void run(); + + /// Returns a number this is directly proportional to the number of cleaned up blocks + Float32 iterate(); + + /// Remove old records from ZooKeeper. Returns the number of removed logs + size_t clearOldLogs(); /// The replica is marked as "lost" if it is inactive and its log pointer /// is far behind and we are not going to keep logs for it. @@ -52,11 +64,11 @@ private: size_t replicas_count, const zkutil::ZooKeeperPtr & zookeeper); using NodeCTimeAndVersionCache = std::map>; - /// Remove old block hashes from ZooKeeper. This is done by the leader replica. - void clearOldBlocks(const String & blocks_dir_name, UInt64 window_seconds, UInt64 window_size, NodeCTimeAndVersionCache & cached_block_stats); + /// Remove old block hashes from ZooKeeper. This is done by the leader replica. Returns the number of removed blocks + size_t clearOldBlocks(const String & blocks_dir_name, UInt64 window_seconds, UInt64 window_size, NodeCTimeAndVersionCache & cached_block_stats); - /// Remove old mutations that are done from ZooKeeper. This is done by the leader replica. - void clearOldMutations(); + /// Remove old mutations that are done from ZooKeeper. This is done by the leader replica. 
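The mutation cleanup above keeps everything newer than the slowest replica's mutation pointer and always retains the last finished_mutations_to_keep entries. A sketch of that trimming step on the sorted list of mutation znode names; the standalone function and its name are hypothetical, and padIndex() is only assumed to be the zero-padding helper used for those names:

#include <algorithm>
#include <cstddef>
#include <string>
#include <vector>

// Sketch of the entry-trimming logic in clearOldMutations(): `entries` is
// the ascending list of mutation znode names and `min_pointer_name` is
// padIndex(min_pointer), the smallest pointer across all replicas.
std::vector<std::string> selectMutationsToRemove(
    std::vector<std::string> entries,
    const std::string & min_pointer_name,
    std::size_t finished_mutations_to_keep)
{
    // Drop everything newer than what every replica has already executed.
    entries.erase(
        std::upper_bound(entries.begin(), entries.end(), min_pointer_name),
        entries.end());

    // Always keep the most recent finished mutations around.
    if (entries.size() <= finished_mutations_to_keep)
        return {};
    entries.erase(entries.end() - finished_mutations_to_keep, entries.end());
    return entries;
}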
Returns the number of removed mutations + size_t clearOldMutations(); NodeCTimeAndVersionCache cached_block_stats_for_sync_inserts; NodeCTimeAndVersionCache cached_block_stats_for_async_inserts; diff --git a/src/Storages/MergeTree/SimpleMergeSelector.cpp b/src/Storages/MergeTree/SimpleMergeSelector.cpp index af3373fd175..7e7539f71d5 100644 --- a/src/Storages/MergeTree/SimpleMergeSelector.cpp +++ b/src/Storages/MergeTree/SimpleMergeSelector.cpp @@ -28,7 +28,7 @@ struct Estimator { double difference = std::abs(log2(static_cast(sum_size) / size_prev_at_left)); if (difference < settings.heuristic_to_align_parts_max_absolute_difference_in_powers_of_two) - current_score *= std::lerp(settings.heuristic_to_align_parts_max_score_adjustment, 1, + current_score *= interpolateLinear(settings.heuristic_to_align_parts_max_score_adjustment, 1, difference / settings.heuristic_to_align_parts_max_absolute_difference_in_powers_of_two); } @@ -115,8 +115,8 @@ bool allow( // std::cerr << "size_normalized: " << size_normalized << "\n"; /// Calculate boundaries for age - double min_age_to_lower_base = std::lerp(settings.min_age_to_lower_base_at_min_size, settings.min_age_to_lower_base_at_max_size, size_normalized); - double max_age_to_lower_base = std::lerp(settings.max_age_to_lower_base_at_min_size, settings.max_age_to_lower_base_at_max_size, size_normalized); + double min_age_to_lower_base = interpolateLinear(settings.min_age_to_lower_base_at_min_size, settings.min_age_to_lower_base_at_max_size, size_normalized); + double max_age_to_lower_base = interpolateLinear(settings.max_age_to_lower_base_at_min_size, settings.max_age_to_lower_base_at_max_size, size_normalized); // std::cerr << "min_age_to_lower_base: " << min_age_to_lower_base << "\n"; // std::cerr << "max_age_to_lower_base: " << max_age_to_lower_base << "\n"; @@ -137,7 +137,7 @@ bool allow( // std::cerr << "combined_ratio: " << combined_ratio << "\n"; - double lowered_base = std::lerp(settings.base, 2.0, combined_ratio); + double lowered_base = interpolateLinear(settings.base, 2.0, combined_ratio); // std::cerr << "------- lowered_base: " << lowered_base << "\n"; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index d9c8f09ccf1..2b948e1fd60 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3147,6 +3147,8 @@ bool StorageReplicatedMergeTree::processQueueEntry(ReplicatedMergeTreeQueue::Sel bool StorageReplicatedMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assignee) { + cleanup_thread.wakeupEarlierIfNeeded(); + /// If replication queue is stopped exit immediately as we successfully executed the task if (queue.actions_blocker.isCancelled()) return false; @@ -6589,7 +6591,7 @@ bool StorageReplicatedMergeTree::hasLightweightDeletedMask() const return has_lightweight_delete_parts.load(std::memory_order_relaxed); } -void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() +size_t StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() { auto table_lock = lockForShare( RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); @@ -6598,8 +6600,9 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() /// Now these parts are in Deleting state. If we fail to remove some of them we must roll them back to Outdated state. /// Otherwise they will not be deleted. 
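The SimpleMergeSelector change above swaps std::lerp for interpolateLinear, which is assumed here to be a plain affine blend with the same (min, max, ratio) argument order. A self-contained check of that assumed equivalence (a sketch, not the actual ClickHouse helper):

#include <cassert>
#include <cmath>

// Assumed semantics of interpolateLinear(): an affine blend between min
// and max, matching std::lerp at and between the endpoints.
static double interpolateLinearSketch(double min, double max, double ratio)
{
    return min + (max - min) * ratio;
}

int main()
{
    assert(std::fabs(interpolateLinearSketch(2.0, 8.0, 0.0) - 2.0) < 1e-9);
    assert(std::fabs(interpolateLinearSketch(2.0, 8.0, 1.0) - 8.0) < 1e-9);
    assert(std::fabs(interpolateLinearSketch(2.0, 8.0, 0.5) - std::lerp(2.0, 8.0, 0.5)) < 1e-9);
    return 0;
}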
DataPartsVector parts = grabOldParts(); + size_t total_parts_to_remove = parts.size(); if (parts.empty()) - return; + return total_parts_to_remove; DataPartsVector parts_to_delete_only_from_filesystem; // Only duplicates DataPartsVector parts_to_delete_completely; // All parts except duplicates @@ -6707,6 +6710,8 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() /// Otherwise nobody will try to remove them again (see grabOldParts). delete_parts_from_fs_and_rollback_in_case_of_error(parts_to_remove_from_filesystem, "old"); } + + return total_parts_to_remove; } diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 29b6a4d6817..01b86dd1425 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -342,8 +342,8 @@ public: private: std::atomic_bool are_restoring_replica {false}; - /// Delete old parts from disk and from ZooKeeper. - void clearOldPartsAndRemoveFromZK(); + /// Delete old parts from disk and from ZooKeeper. Returns the number of removed parts + size_t clearOldPartsAndRemoveFromZK(); template friend class ReplicatedMergeTreeSinkImpl; diff --git a/tests/config/config.d/merge_tree.xml b/tests/config/config.d/merge_tree.xml index 43bdb6aa07b..5521e5ba515 100644 --- a/tests/config/config.d/merge_tree.xml +++ b/tests/config/config.d/merge_tree.xml @@ -1,5 +1,7 @@ 8 + 60 + 10 diff --git a/tests/integration/test_broken_detached_part_clean_up/test.py b/tests/integration/test_broken_detached_part_clean_up/test.py index 5b18fa34494..9a70ebe0d48 100644 --- a/tests/integration/test_broken_detached_part_clean_up/test.py +++ b/tests/integration/test_broken_detached_part_clean_up/test.py @@ -141,7 +141,8 @@ def test_remove_broken_detached_part_replicated_merge_tree(started_cluster): merge_tree_enable_clear_old_broken_detached=1, merge_tree_clear_old_broken_detached_parts_ttl_timeout_seconds=5, cleanup_delay_period=1, - cleanup_delay_period_random_add=0; + cleanup_delay_period_random_add=0, + cleanup_thread_preferred_points_per_iteration=0; """ ) diff --git a/tests/integration/test_broken_part_during_merge/test.py b/tests/integration/test_broken_part_during_merge/test.py index f4110844466..26962236869 100644 --- a/tests/integration/test_broken_part_during_merge/test.py +++ b/tests/integration/test_broken_part_during_merge/test.py @@ -25,7 +25,7 @@ def test_merge_and_part_corruption(started_cluster): """ CREATE TABLE replicated_mt(date Date, id UInt32, value Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') ORDER BY id - SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1; + SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0; """.format( replica=node1.name ) diff --git a/tests/integration/test_consistant_parts_after_move_partition/test.py b/tests/integration/test_consistant_parts_after_move_partition/test.py index 63a51472773..91fa884c093 100644 --- a/tests/integration/test_consistant_parts_after_move_partition/test.py +++ b/tests/integration/test_consistant_parts_after_move_partition/test.py @@ -14,11 +14,13 @@ def initialize_database(nodes, shard): CREATE TABLE `{database}`.src (p UInt64, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard1{shard}/replicated', '{replica}') ORDER BY d PARTITION BY p - SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + 
SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, + cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0; CREATE TABLE `{database}`.dest (p UInt64, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard2{shard}/replicated', '{replica}') ORDER BY d PARTITION BY p - SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, + cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0; """.format( shard=shard, replica=node.name, database=CLICKHOUSE_DATABASE ) diff --git a/tests/integration/test_drop_replica/test.py b/tests/integration/test_drop_replica/test.py index e87edb0a578..0941e664982 100644 --- a/tests/integration/test_drop_replica/test.py +++ b/tests/integration/test_drop_replica/test.py @@ -11,7 +11,8 @@ def fill_nodes(nodes, shard): CREATE DATABASE test; CREATE TABLE test.test_table(date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) + SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0; """.format( shard=shard, replica=node.name ) @@ -22,7 +23,8 @@ def fill_nodes(nodes, shard): CREATE DATABASE test1; CREATE TABLE test1.test_table(date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test1/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test1/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) + SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0; """.format( shard=shard, replica=node.name ) @@ -33,7 +35,8 @@ def fill_nodes(nodes, shard): CREATE DATABASE test2; CREATE TABLE test2.test_table(date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test2/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test2/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) + SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0; """.format( shard=shard, replica=node.name ) @@ -44,7 +47,8 @@ def fill_nodes(nodes, shard): CREATE DATABASE test3; CREATE TABLE test3.test_table(date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test3/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY 
toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test3/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) + SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0; """.format( shard=shard, replica=node.name ) @@ -55,7 +59,8 @@ def fill_nodes(nodes, shard): CREATE DATABASE test4; CREATE TABLE test4.test_table(date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test4/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test4/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) + SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0; """.format( shard=shard, replica=node.name ) diff --git a/tests/integration/test_jbod_balancer/test.py b/tests/integration/test_jbod_balancer/test.py index df34a075d5a..4797eec5381 100644 --- a/tests/integration/test_jbod_balancer/test.py +++ b/tests/integration/test_jbod_balancer/test.py @@ -134,6 +134,7 @@ def test_replicated_balanced_merge_fetch(start_cluster): old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 2, + cleanup_thread_preferred_points_per_iteration=0, min_bytes_to_rebalance_partition_over_jbod = 1024, max_bytes_to_merge_at_max_space_in_pool = 4096 """.format( diff --git a/tests/integration/test_jbod_ha/test.py b/tests/integration/test_jbod_ha/test.py index 5cbb5989ff3..033d751912a 100644 --- a/tests/integration/test_jbod_ha/test.py +++ b/tests/integration/test_jbod_ha/test.py @@ -58,6 +58,7 @@ def test_jbod_ha(start_cluster): old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 2, + cleanup_thread_preferred_points_per_iteration=0, max_bytes_to_merge_at_max_space_in_pool = 4096 """.format( i diff --git a/tests/integration/test_lost_part/test.py b/tests/integration/test_lost_part/test.py index dd4c2105d55..44cd19fd1fb 100644 --- a/tests/integration/test_lost_part/test.py +++ b/tests/integration/test_lost_part/test.py @@ -42,7 +42,7 @@ def test_lost_part_same_replica(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt0 (id UInt64, date Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{node.name}') ORDER BY tuple() PARTITION BY date " - "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1" + "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" ) node1.query("SYSTEM STOP MERGES mt0") @@ -109,7 +109,7 @@ def test_lost_part_other_replica(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt1 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', '{node.name}') ORDER BY tuple() " - "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1" + "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" ) node1.query("SYSTEM STOP MERGES mt1") @@ -178,7 +178,7 @@ def test_lost_part_mutation(start_cluster): for 
node in [node1, node2]: node.query( f"CREATE TABLE mt2 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t2', '{node.name}') ORDER BY tuple() " - "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1" + "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" ) node1.query("SYSTEM STOP MERGES mt2") @@ -241,7 +241,7 @@ def test_lost_last_part(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt3 (id UInt64, p String) ENGINE ReplicatedMergeTree('/clickhouse/tables/t3', '{node.name}') " - "ORDER BY tuple() PARTITION BY p SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1" + "ORDER BY tuple() PARTITION BY p SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" ) node1.query("SYSTEM STOP MERGES mt3") diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index 0e51df017b2..54e7f6dd8ee 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -1528,7 +1528,8 @@ def test_simple_replication_and_moves(start_cluster): s1 String ) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_table_for_moves', '{}') ORDER BY tuple() - SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=2 + SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, + cleanup_delay_period=1, cleanup_delay_period_random_add=2, cleanup_thread_preferred_points_per_iteration=0 """.format( i + 1 ) @@ -1609,7 +1610,8 @@ def test_download_appropriate_disk(start_cluster): s1 String ) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_table_for_download', '{}') ORDER BY tuple() - SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=2 + SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, + cleanup_delay_period=1, cleanup_delay_period_random_add=2, cleanup_thread_preferred_points_per_iteration=0 """.format( i + 1 ) diff --git a/tests/integration/test_old_parts_finally_removed/test.py b/tests/integration/test_old_parts_finally_removed/test.py index 5347d433419..cbd701588d5 100644 --- a/tests/integration/test_old_parts_finally_removed/test.py +++ b/tests/integration/test_old_parts_finally_removed/test.py @@ -27,7 +27,8 @@ def started_cluster(): def test_part_finally_removed(started_cluster): node1.query( - "CREATE TABLE drop_outdated_part (Key UInt64) ENGINE = ReplicatedMergeTree('/table/d', '1') ORDER BY tuple() SETTINGS old_parts_lifetime=10, cleanup_delay_period=10, cleanup_delay_period_random_add=1" + "CREATE TABLE drop_outdated_part (Key UInt64) ENGINE = ReplicatedMergeTree('/table/d', '1') ORDER BY tuple() " + "SETTINGS old_parts_lifetime=10, cleanup_delay_period=10, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" ) node1.query("INSERT INTO drop_outdated_part VALUES (1)") @@ -44,7 +45,7 @@ def test_part_finally_removed(started_cluster): ) node1.query( - "ALTER TABLE drop_outdated_part MODIFY SETTING old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1" + "ALTER TABLE drop_outdated_part MODIFY SETTING old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" ) for i in range(60): diff --git 
a/tests/integration/test_parts_delete_zookeeper/test.py b/tests/integration/test_parts_delete_zookeeper/test.py index a78aefa4595..9fd07e7b65d 100644 --- a/tests/integration/test_parts_delete_zookeeper/test.py +++ b/tests/integration/test_parts_delete_zookeeper/test.py @@ -21,7 +21,7 @@ def start_cluster(): CREATE DATABASE test; CREATE TABLE test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/replicated', 'node1') - ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS old_parts_lifetime=4, cleanup_delay_period=1; + ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS old_parts_lifetime=4, cleanup_delay_period=1, cleanup_thread_preferred_points_per_iteration=0; """ ) diff --git a/tests/integration/test_recovery_replica/test.py b/tests/integration/test_recovery_replica/test.py index 0a63da4db22..582e018f5d2 100644 --- a/tests/integration/test_recovery_replica/test.py +++ b/tests/integration/test_recovery_replica/test.py @@ -4,7 +4,7 @@ import pytest from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry -SETTINGS = "SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0" +SETTINGS = "SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0" def fill_nodes(nodes): diff --git a/tests/integration/test_storage_nats/test.py b/tests/integration/test_storage_nats/test.py index 1d7e046864b..4d7e4cf813d 100644 --- a/tests/integration/test_storage_nats/test.py +++ b/tests/integration/test_storage_nats/test.py @@ -931,7 +931,8 @@ def test_nats_overloaded_insert(nats_cluster): CREATE TABLE test.view_overload (key UInt64, value UInt64) ENGINE = MergeTree ORDER BY key - SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3; + SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3, + cleanup_thread_preferred_points_per_iteration=0; CREATE MATERIALIZED VIEW test.consumer_overload TO test.view_overload AS SELECT * FROM test.nats_consume; """ diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py index 4e1e28373e3..b4dcf86e0ba 100644 --- a/tests/integration/test_storage_rabbitmq/test.py +++ b/tests/integration/test_storage_rabbitmq/test.py @@ -642,7 +642,8 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): CREATE TABLE test.view (key UInt64, value UInt64, channel_id String) ENGINE = MergeTree ORDER BY key - SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3; + SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3, + cleanup_thread_preferred_points_per_iteration=0; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT *, _channel_id AS channel_id FROM test.rabbitmq; """ @@ -1116,7 +1117,8 @@ def test_rabbitmq_direct_exchange(rabbitmq_cluster): CREATE TABLE test.destination(key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key - SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3; + SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3, + cleanup_thread_preferred_points_per_iteration=0; """ ) diff --git a/tests/integration/test_system_metrics/test.py b/tests/integration/test_system_metrics/test.py index 9ebe198a109..338622b824e 100644 --- 
a/tests/integration/test_system_metrics/test.py +++ b/tests/integration/test_system_metrics/test.py @@ -13,7 +13,9 @@ def fill_nodes(nodes, shard): CREATE DATABASE test; CREATE TABLE test.test_table(date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) + SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, + cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0; """.format( shard=shard, replica=node.name ) diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index a3e7d6e4b8b..4ea4472b812 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -422,7 +422,8 @@ def test_ttl_empty_parts(started_cluster): ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_empty_parts', '{replica}') ORDER BY id SETTINGS max_bytes_to_merge_at_min_space_in_pool = 1, max_bytes_to_merge_at_max_space_in_pool = 1, - cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, old_parts_lifetime = 1 + cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime = 1 """.format( replica=node.name diff --git a/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh index a0a3416e406..399511db701 100755 --- a/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh @@ -36,8 +36,12 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst_r1;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst_r2;" $CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY k;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r1 (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst_1', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r2 (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst_1', '2') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r1 (p UInt64, k String, d UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst_1', '1') PARTITION BY p ORDER BY k +SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r2 (p UInt64, k String, d UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst_1', '2') PARTITION BY p ORDER BY k +SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (0, '0', 1);" 
$CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (1, '0', 1);" diff --git a/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh index 1f5bcbdc0d0..d8b1bdec328 100755 --- a/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh +++ b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh @@ -56,11 +56,13 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS mutations_cleaner_r2 SYNC" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_cleaner_r1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutations_cleaner', 'r1') ORDER BY x SETTINGS \ finished_mutations_to_keep = 2, cleanup_delay_period = 1, - cleanup_delay_period_random_add = 0" + cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_cleaner_r2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutations_cleaner', 'r2') ORDER BY x SETTINGS \ finished_mutations_to_keep = 2, cleanup_delay_period = 1, - cleanup_delay_period_random_add = 0" + cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0" # Insert some data ${CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --query="INSERT INTO mutations_cleaner_r1(x) VALUES (1), (2), (3), (4), (5)" diff --git a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh index 5fc3fa460e6..bab2304cec2 100755 --- a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh +++ b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh @@ -20,13 +20,15 @@ CREATE TABLE part_header_r1(x UInt32, y UInt32) SETTINGS use_minimalistic_part_header_in_zookeeper = 0, old_parts_lifetime = 1, cleanup_delay_period = 0, - cleanup_delay_period_random_add = 0; + cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0; CREATE TABLE part_header_r2(x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/{shard}', '2{replica}') ORDER BY x SETTINGS use_minimalistic_part_header_in_zookeeper = 1, old_parts_lifetime = 1, cleanup_delay_period = 0, - cleanup_delay_period_random_add = 0; + cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0; SELECT '*** Test fetches ***'; INSERT INTO part_header_r1 VALUES (1, 1); diff --git a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh index c713c7c4926..ad0146b9d99 100755 --- a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh +++ b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh @@ -22,7 +22,7 @@ CREATE TABLE elog ( ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/{shard}', '{replica}') PARTITION BY date ORDER BY (engine_id) -SETTINGS replicated_deduplication_window = 2, cleanup_delay_period=4, cleanup_delay_period_random_add=0;" +SETTINGS replicated_deduplication_window = 2, cleanup_delay_period=4, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 1, 'hello')" $CLICKHOUSE_CLIENT --query="INSERT INTO elog 
VALUES (toDate('2018-10-01'), 2, 'hello')" diff --git a/tests/queries/0_stateless/00988_parallel_parts_removal.sql b/tests/queries/0_stateless/00988_parallel_parts_removal.sql index bff9bbe6d8d..5bd31ba1baa 100644 --- a/tests/queries/0_stateless/00988_parallel_parts_removal.sql +++ b/tests/queries/0_stateless/00988_parallel_parts_removal.sql @@ -1,6 +1,8 @@ DROP TABLE IF EXISTS mt; -CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS max_part_removal_threads = 16, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, old_parts_lifetime = 1, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000; +CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x + SETTINGS max_part_removal_threads = 16, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime = 1, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000; SYSTEM STOP MERGES mt; diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh index 5b1c50262bf..e0b7ab29924 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh @@ -13,8 +13,14 @@ $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS alter_table0; DROP TABLE IF EXISTS alter_table1; - CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50 + 100)); - CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50 + 200)); + CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50 + 100)); + CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50 + 200)); " function thread1() diff --git a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh index f4f38ad9c83..811681794a5 100755 --- 
a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh +++ b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh @@ -58,7 +58,8 @@ function thread6() $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS alter_table_$REPLICA; CREATE TABLE alter_table_$REPLICA (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$REPLICA') ORDER BY a PARTITION BY b % 10 - SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50));"; + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50));"; sleep 0.$RANDOM; done } diff --git a/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh index 5e9e69d999d..e0a84323dbd 100755 --- a/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh @@ -28,7 +28,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS src;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst;" $CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/src1', '1') PARTITION BY p ORDER BY k;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst1', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst1', '1') PARTITION BY p ORDER BY k +SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (0, '0', 1);" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (1, '0', 1);" @@ -58,7 +59,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE dst;" $CLICKHOUSE_CLIENT --query="SELECT 'MOVE incompatible schema missing column';" $CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/src2', '1') PARTITION BY p ORDER BY (d, p);" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst2', '1') PARTITION BY p ORDER BY (d, p) SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst2', '1') PARTITION BY p ORDER BY (d, p) +SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (0, '0', 1);" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (1, '0', 1);" diff --git a/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh 
b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh index 8ef03be02b6..06a460f3600 100755 --- a/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh @@ -11,7 +11,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS src;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst;" $CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/src', '1') PARTITION BY p ORDER BY k;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst', '1') PARTITION BY p ORDER BY k +SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" function thread1() { diff --git a/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh index 7f53bf2a627..5f69427c0cd 100755 --- a/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh +++ b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh @@ -31,7 +31,8 @@ for i in $(seq $REPLICAS); do max_replicated_merges_in_queue = 1000, temporary_directories_lifetime = 10, cleanup_delay_period = 3, - cleanup_delay_period_random_add = 0" + cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0" done $CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_mutate_mt_1 SELECT number, number + 10, toString(number) from numbers(10)" diff --git a/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh index aec27792603..e508b77a0c2 100755 --- a/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh +++ b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh @@ -12,7 +12,10 @@ for i in $(seq $REPLICAS); do done for i in $(seq $REPLICAS); do - $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_alter_detach_$i (key UInt64, value1 UInt8, value2 UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_alter_detach', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000,temporary_directories_lifetime=10,cleanup_delay_period=3,cleanup_delay_period_random_add=0" + $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_alter_detach_$i (key UInt64, value1 UInt8, value2 UInt8) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_alter_detach', '$i') ORDER BY key + SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000, + temporary_directories_lifetime=10,cleanup_delay_period=3,cleanup_delay_period_random_add=0,cleanup_thread_preferred_points_per_iteration=0" done $CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_alter_detach_1 SELECT number, number + 10, number from numbers(10)" diff --git 
a/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh index 95f8dfc0377..3461283b5ea 100755 --- a/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh +++ b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh @@ -27,7 +27,9 @@ function thread3() { while true; do $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS concurrent_optimize_table; - CREATE TABLE concurrent_optimize_table (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_optimize_table', '1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0;"; + CREATE TABLE concurrent_optimize_table (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_optimize_table', '1') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0;"; sleep 0.$RANDOM; sleep 0.$RANDOM; sleep 0.$RANDOM; diff --git a/tests/queries/0_stateless/01158_zookeeper_log_long.sql b/tests/queries/0_stateless/01158_zookeeper_log_long.sql index 45771494af6..9b5ae7ad7c6 100644 --- a/tests/queries/0_stateless/01158_zookeeper_log_long.sql +++ b/tests/queries/0_stateless/01158_zookeeper_log_long.sql @@ -6,7 +6,7 @@ SET insert_keeper_fault_injection_probability=0; -- disable fault injection; par drop table if exists rmt sync; -- cleanup code will perform extra Exists -- (so the .reference will not match) -create table rmt (n int) engine=ReplicatedMergeTree('/test/01158/{database}/rmt', '1') order by n settings cleanup_delay_period=86400, replicated_can_become_leader=0; +create table rmt (n int) engine=ReplicatedMergeTree('/test/01158/{database}/rmt', '1') order by n settings cleanup_delay_period=86400, max_cleanup_delay_period=86400, replicated_can_become_leader=0; system sync replica rmt; insert into rmt values (1); insert into rmt values (1); diff --git a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh index 411705e0469..2d761df998e 100755 --- a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh +++ b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh @@ -13,8 +13,10 @@ SCALE=5000 $CLICKHOUSE_CLIENT -n --query " DROP TABLE IF EXISTS r1; DROP TABLE IF EXISTS r2; - CREATE TABLE r1 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{shard}', '1{replica}') ORDER BY x SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 1, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; - CREATE TABLE r2 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{shard}', '2{replica}') ORDER BY x SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 1, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; + CREATE TABLE r1 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{shard}', '1{replica}') ORDER BY x + SETTINGS old_parts_lifetime = 1, cleanup_delay_period 
= 0, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; + CREATE TABLE r2 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{shard}', '2{replica}') ORDER BY x + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; DETACH TABLE r2; " diff --git a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh index 80318ba67fb..c3c87eeaf8b 100755 --- a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh +++ b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh @@ -8,7 +8,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_renames0" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_renames50" -$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_renames0 (value UInt64, data String) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_rename', '1') ORDER BY tuple() SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 0" +$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_renames0 (value UInt64, data String) +ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_rename', '1') ORDER BY tuple() +SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0" $CLICKHOUSE_CLIENT --query "INSERT INTO table_for_renames0 SELECT number, toString(number) FROM numbers(1000)" diff --git a/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh b/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh index 445706e35bf..bf88ad0e0b2 100755 --- a/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh +++ b/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh @@ -13,7 +13,8 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parallel_q1 SYNC" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parallel_q2 SYNC" -$CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_q', 'r1') ORDER BY tuple() SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_q', 'r1') ORDER BY tuple() +SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0" $CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q2 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_q', 'r2') ORDER BY tuple() SETTINGS always_fetch_merged_part = 1" diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh index a3682a3a74b..5e1600a0673 100755 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -24,7 +24,8 @@ for i 
in $(seq 1 $NUM_REPLICAS); do ENGINE ReplicatedMergeTree('/test/01921_concurrent_ttl_and_normal_merges/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/ttl_table', '$i') ORDER BY tuple() TTL key + INTERVAL 1 SECOND - SETTINGS merge_with_ttl_timeout=1, max_replicated_merges_with_ttl_in_queue=100, max_number_of_merges_with_ttl_in_pool=100, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" + SETTINGS merge_with_ttl_timeout=1, max_replicated_merges_with_ttl_in_queue=100, max_number_of_merges_with_ttl_in_pool=100, + cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" done function optimize_thread diff --git a/tests/queries/0_stateless/02067_lost_part_s3.sql b/tests/queries/0_stateless/02067_lost_part_s3.sql index 12afdcd4421..7df15ab33c4 100644 --- a/tests/queries/0_stateless/02067_lost_part_s3.sql +++ b/tests/queries/0_stateless/02067_lost_part_s3.sql @@ -4,11 +4,17 @@ DROP TABLE IF EXISTS partslost_0; DROP TABLE IF EXISTS partslost_1; DROP TABLE IF EXISTS partslost_2; -CREATE TABLE partslost_0 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '0') ORDER BY tuple() SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 1; +CREATE TABLE partslost_0 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '0') ORDER BY tuple() + SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, + cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0; -CREATE TABLE partslost_1 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '1') ORDER BY tuple() SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 1; +CREATE TABLE partslost_1 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '1') ORDER BY tuple() + SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, + cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0; -CREATE TABLE partslost_2 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '2') ORDER BY tuple() SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 1; +CREATE TABLE partslost_2 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '2') ORDER BY tuple() + SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, + cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0; INSERT INTO partslost_0 SELECT toString(number) AS x from system.numbers LIMIT 10000; diff --git a/tests/queries/0_stateless/02370_lost_part_intersecting_merges.sh b/tests/queries/0_stateless/02370_lost_part_intersecting_merges.sh index bc297cbb963..e34163d0502 100755 --- a/tests/queries/0_stateless/02370_lost_part_intersecting_merges.sh +++ b/tests/queries/0_stateless/02370_lost_part_intersecting_merges.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT -q "drop table if exists rmt1 sync;" $CLICKHOUSE_CLIENT -q "drop table if exists rmt2 sync;" $CLICKHOUSE_CLIENT -q "create table rmt1 (n int) 
engine=ReplicatedMergeTree('/test/02369/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{database}', '1') order by n - settings cleanup_delay_period=0, cleanup_delay_period_random_add=0, old_parts_lifetime=0" + settings cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0" $CLICKHOUSE_CLIENT -q "create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02369/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{database}', '2') order by n" $CLICKHOUSE_CLIENT -q "system stop replicated sends rmt2" diff --git a/tests/queries/0_stateless/02396_system_parts_race_condition_rm.sh b/tests/queries/0_stateless/02396_system_parts_race_condition_rm.sh index 5df1a9ba095..e31a091ff45 100755 --- a/tests/queries/0_stateless/02396_system_parts_race_condition_rm.sh +++ b/tests/queries/0_stateless/02396_system_parts_race_condition_rm.sh @@ -15,8 +15,12 @@ $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS alter_table0; DROP TABLE IF EXISTS alter_table1; - CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0; - CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0 + CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0; + CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0 " function thread1() diff --git a/tests/queries/0_stateless/02397_system_parts_race_condition_drop_rm.sh b/tests/queries/0_stateless/02397_system_parts_race_condition_drop_rm.sh index 548179b94c9..39e513f6be4 100755 --- a/tests/queries/0_stateless/02397_system_parts_race_condition_drop_rm.sh +++ b/tests/queries/0_stateless/02397_system_parts_race_condition_drop_rm.sh @@ -58,7 +58,9 @@ function thread6() while true; do REPLICA=$(($RANDOM % 10)) $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS alter_table_$REPLICA; - CREATE TABLE alter_table_$REPLICA (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$REPLICA') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0;"; + CREATE TABLE alter_table_$REPLICA (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE 
= ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$REPLICA') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0;"; sleep 0.$RANDOM; done } diff --git a/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql b/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql index 88fb2cdf9b1..bab4bf7881c 100644 --- a/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql +++ b/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql @@ -8,7 +8,7 @@ drop table if exists rmt2; -- Disable compact parts, because we need hardlinks in mutations. create table rmt (n int, m int, k int) engine=ReplicatedMergeTree('/test/02432/{database}', '1') order by tuple() settings storage_policy = 's3_cache', allow_remote_fs_zero_copy_replication=1, - max_part_removal_threads=10, concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, + max_part_removal_threads=10, concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0, max_replicated_merges_in_queue=0, max_replicated_mutations_in_queue=0, min_bytes_for_wide_part=0, min_rows_for_wide_part=0; insert into rmt(n, m) values (1, 42); @@ -38,7 +38,7 @@ select count(), sum(n), sum(m) from rmt; -- New table can assign merges/mutations and can remove old parts create table rmt2 (n int, m int, k String) engine=ReplicatedMergeTree('/test/02432/{database}', '2') order by tuple() settings storage_policy = 's3_cache', allow_remote_fs_zero_copy_replication=1, - max_part_removal_threads=10, concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, + max_part_removal_threads=10, concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0, min_bytes_for_wide_part=0, min_rows_for_wide_part=0, max_replicated_merges_in_queue=1, old_parts_lifetime=0; diff --git a/tests/queries/0_stateless/02448_clone_replica_lost_part.sql b/tests/queries/0_stateless/02448_clone_replica_lost_part.sql index 4befe952a14..44303a1c532 100644 --- a/tests/queries/0_stateless/02448_clone_replica_lost_part.sql +++ b/tests/queries/0_stateless/02448_clone_replica_lost_part.sql @@ -5,9 +5,11 @@ SET insert_keeper_fault_injection_probability=0; -- disable fault injection; par drop table if exists rmt1; drop table if exists rmt2; create table rmt1 (n int) engine=ReplicatedMergeTree('/test/02448/{database}/rmt', '1') order by tuple() - settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, cleanup_delay_period=0, cleanup_delay_period_random_add=1, old_parts_lifetime=0, max_parts_to_merge_at_once=4; + settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, cleanup_delay_period=0, cleanup_delay_period_random_add=1, + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4; create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02448/{database}/rmt', '2') order by tuple() - settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, cleanup_delay_period=0, cleanup_delay_period_random_add=1, old_parts_lifetime=0, max_parts_to_merge_at_once=4; + settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, cleanup_delay_period=0, cleanup_delay_period_random_add=1, + 
cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4; -- insert part only on one replica system stop replicated sends rmt1; diff --git a/tests/queries/0_stateless/02494_zero_copy_and_projection_and_mutation_work_together.sql b/tests/queries/0_stateless/02494_zero_copy_and_projection_and_mutation_work_together.sql index 98427874160..b4504a55643 100644 --- a/tests/queries/0_stateless/02494_zero_copy_and_projection_and_mutation_work_together.sql +++ b/tests/queries/0_stateless/02494_zero_copy_and_projection_and_mutation_work_together.sql @@ -24,7 +24,8 @@ CREATE TABLE wikistat1 ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02494_zero_copy_and_projection', '1') ORDER BY (path, time) -SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, allow_remote_fs_zero_copy_replication=1, min_bytes_for_wide_part=0; +SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, allow_remote_fs_zero_copy_replication=1, min_bytes_for_wide_part=0; CREATE TABLE wikistat2 ( @@ -49,7 +50,8 @@ CREATE TABLE wikistat2 ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02494_zero_copy_and_projection', '2') ORDER BY (path, time) -SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, allow_remote_fs_zero_copy_replication=1, min_bytes_for_wide_part=0; +SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, allow_remote_fs_zero_copy_replication=1, min_bytes_for_wide_part=0; INSERT INTO wikistat1 SELECT toDateTime('2020-10-01 00:00:00'), 'hello', 'world', '/data/path', 10 from numbers(100); diff --git a/tests/queries/0_stateless/02515_cleanup_async_insert_block_ids.sh b/tests/queries/0_stateless/02515_cleanup_async_insert_block_ids.sh index 458a5e95faa..bc6e7eeb214 100755 --- a/tests/queries/0_stateless/02515_cleanup_async_insert_block_ids.sh +++ b/tests/queries/0_stateless/02515_cleanup_async_insert_block_ids.sh @@ -13,7 +13,7 @@ $CLICKHOUSE_CLIENT -n --query " CREATE TABLE t_async_insert_cleanup ( KeyID UInt32 ) Engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup', '{replica}') - ORDER BY (KeyID) SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, replicated_deduplication_window_for_async_inserts=10 + ORDER BY (KeyID) SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0, replicated_deduplication_window_for_async_inserts=10 " for i in {1..100}; do From dbf08b25fb8f569a33dc3a8b05862af9e61eb72a Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 23 May 2023 01:25:17 +0200 Subject: [PATCH 115/722] better scheduling of merge selecting task --- src/Storages/MergeTree/MergeTreeSettings.cpp | 24 +++ src/Storages/MergeTree/MergeTreeSettings.h | 4 +- .../ReplicatedMergeTreeCleanupThread.cpp | 2 +- src/Storages/StorageMergeTree.cpp | 3 +- src/Storages/StorageReplicatedMergeTree.cpp | 198 +++++++++++------- src/Storages/StorageReplicatedMergeTree.h | 2 + .../test.py | 3 +- .../test_merge_tree_empty_parts/test.py | 2 +- 8 files changed, 157 insertions(+), 81 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 479e50fdebb..6df841059b9 100644 --- 
a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -175,5 +175,29 @@ void MergeTreeSettings::sanityCheck(size_t background_pool_tasks) const min_bytes_to_rebalance_partition_over_jbod, max_bytes_to_merge_at_max_space_in_pool / 1024); } + + if (max_cleanup_delay_period < cleanup_delay_period) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "The value of max_cleanup_delay_period setting ({}) must be greater than the value of cleanup_delay_period setting ({})", + max_cleanup_delay_period, cleanup_delay_period); + } + + if (max_merge_selecting_sleep_ms < merge_selecting_sleep_ms) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "The value of max_merge_selecting_sleep_ms setting ({}) must be greater than the value of merge_selecting_sleep_ms setting ({})", + max_merge_selecting_sleep_ms, merge_selecting_sleep_ms); + } + + if (merge_selecting_sleep_slowdown_factor < 1.f) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "The value of merge_selecting_sleep_slowdown_factor setting ({}) cannot be less than 1.0", + merge_selecting_sleep_slowdown_factor); + } } } diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 78d703e795c..56860342038 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -57,7 +57,9 @@ struct Settings; M(Bool, fsync_part_directory, false, "Do fsync for part directory after all part operations (writes, renames, etc.).", 0) \ M(UInt64, non_replicated_deduplication_window, 0, "How many last blocks of hashes should be kept on disk (0 - disabled).", 0) \ M(UInt64, max_parts_to_merge_at_once, 100, "Max amount of parts which can be merged at once (0 - disabled). Doesn't affect OPTIMIZE FINAL query.", 0) \ - M(UInt64, merge_selecting_sleep_ms, 5000, "Sleep time for merge selecting when no part selected, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters", 0) \ + M(UInt64, merge_selecting_sleep_ms, 5000, "Maximum sleep time for merge selecting, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters", 0) \ + M(UInt64, max_merge_selecting_sleep_ms, 60000, "Maximum sleep time for merge selecting, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters", 0) \ + M(Float, merge_selecting_sleep_slowdown_factor, 1.2f, "The sleep time for merge selecting task is multiplied by this factor when there's nothing to merge and divided when a merge was assigned", 0) \ M(UInt64, merge_tree_clear_old_temporary_directories_interval_seconds, 60, "The period of executing the clear old temporary directories operation in background.", 0) \ M(UInt64, merge_tree_clear_old_parts_interval_seconds, 1, "The period of executing the clear old parts operation in background.", 0) \ M(UInt64, merge_tree_clear_old_broken_detached_parts_ttl_timeout_seconds, 1ULL * 3600 * 24 * 30, "Remove old broken detached parts in the background if they remained intouched for a specified by this setting period of time.", 0) \ diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 35a860ebb42..bcc4dc749fb 100644 --- 
a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -108,7 +108,7 @@ void ReplicatedMergeTreeCleanupThread::wakeupEarlierIfNeeded() return; /// Do not re-check all parts too often (avoid constantly calling getNumberOfOutdatedPartsWithExpiredRemovalTime()) - if (!wakeup_check_timer.compareAndRestart(storage_settings->cleanup_delay_period / 4)) + if (!wakeup_check_timer.compareAndRestart(storage_settings->cleanup_delay_period / 4.0)) return; UInt64 prev_run_timestamp_ms = prev_cleanup_timestamp_ms.load(std::memory_order_relaxed); diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 2c19d3ba122..cb8b78b4e0a 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1298,8 +1298,7 @@ bool StorageMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assign /// which is equal or more fresh than commands themselves. In extremely rare case it can happen that we will have alter /// in between we took snapshot above and selected commands. That is why we take new snapshot here. auto task = std::make_shared(*this, getInMemoryMetadataPtr(), mutate_entry, shared_lock, common_assignee_trigger); - assignee.scheduleMergeMutateTask(task); - return true; + return assignee.scheduleMergeMutateTask(task); } if (has_mutations) { diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 0698ab7bf38..a6152c22148 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -324,6 +325,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( /// Will be activated if we will achieve leader state. merge_selecting_task->deactivate(); + merge_selecting_sleep_ms = getSettings()->merge_selecting_sleep_ms; mutations_finalizing_task = getContext()->getSchedulePool().createTask( getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::mutationsFinalizingTask)", [this] { mutationsFinalizingTask(); }); @@ -414,6 +416,19 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( loadDataParts(skip_sanity_checks); + if (attach) + { + /// Provide better initial value of merge_selecting_sleep_ms on server startup + auto settings = getSettings(); + size_t max_parts_in_partition = getMaxPartsCountAndSizeForPartition().first; + if (settings->parts_to_delay_insert && max_parts_in_partition < settings->parts_to_delay_insert) + { + Float64 ratio = 1.0 - static_cast(max_parts_in_partition) / settings->parts_to_delay_insert; + merge_selecting_sleep_ms = static_cast(interpolateLinear(settings->merge_selecting_sleep_ms, + settings->max_merge_selecting_sleep_ms, ratio)); + } + } + if (!current_zookeeper) { if (!attach) @@ -3237,7 +3252,15 @@ void StorageReplicatedMergeTree::mergeSelectingTask() const bool cleanup = (storage_settings_ptr->clean_deleted_rows != CleanDeletedRows::Never); CreateMergeEntryResult create_result = CreateMergeEntryResult::Other; - try + enum class AttemptStatus + { + EntryCreated, + NeedRetry, + Limited, + CannotSelect, + }; + + auto try_assign_merge = [&]() -> AttemptStatus { /// We must select parts for merge under merge_selecting_mutex because other threads /// (OPTIMIZE queries) can assign new merges. 
@@ -3259,108 +3282,133 @@ void StorageReplicatedMergeTree::mergeSelectingTask() "Current background tasks memory usage: {}.", formatReadableSizeWithBinarySuffix(background_memory_tracker.getSoftLimit()), formatReadableSizeWithBinarySuffix(background_memory_tracker.get())); + return AttemptStatus::Limited; } - else if (merges_and_mutations_sum >= storage_settings_ptr->max_replicated_merges_in_queue) + + if (merges_and_mutations_sum >= storage_settings_ptr->max_replicated_merges_in_queue) { LOG_TRACE(log, "Number of queued merges ({}) and part mutations ({})" " is greater than max_replicated_merges_in_queue ({}), so won't select new parts to merge or mutate.", merges_and_mutations_queued.merges, merges_and_mutations_queued.mutations, storage_settings_ptr->max_replicated_merges_in_queue); + return AttemptStatus::Limited; } - else + + UInt64 max_source_parts_size_for_merge = merger_mutator.getMaxSourcePartsSizeForMerge( + storage_settings_ptr->max_replicated_merges_in_queue, merges_and_mutations_sum); + + UInt64 max_source_part_size_for_mutation = merger_mutator.getMaxSourcePartSizeForMutation(); + + bool merge_with_ttl_allowed = merges_and_mutations_queued.merges_with_ttl < storage_settings_ptr->max_replicated_merges_with_ttl_in_queue && + getTotalMergesWithTTLInMergeList() < storage_settings_ptr->max_number_of_merges_with_ttl_in_pool; + + auto future_merged_part = std::make_shared(); + if (storage_settings.get()->assign_part_uuids) + future_merged_part->uuid = UUIDHelpers::generateV4(); + + bool can_assign_merge = max_source_parts_size_for_merge > 0; + PartitionIdsHint partitions_to_merge_in; + if (can_assign_merge) { - UInt64 max_source_parts_size_for_merge = merger_mutator.getMaxSourcePartsSizeForMerge( - storage_settings_ptr->max_replicated_merges_in_queue, merges_and_mutations_sum); + auto lightweight_merge_pred = LocalMergePredicate(queue); + partitions_to_merge_in = merger_mutator.getPartitionsThatMayBeMerged( + max_source_parts_size_for_merge, lightweight_merge_pred, merge_with_ttl_allowed, NO_TRANSACTION_PTR); + if (partitions_to_merge_in.empty()) + can_assign_merge = false; + else + merge_pred.emplace(queue.getMergePredicate(zookeeper, partitions_to_merge_in)); + } - UInt64 max_source_part_size_for_mutation = merger_mutator.getMaxSourcePartSizeForMutation(); + if (can_assign_merge && + merger_mutator.selectPartsToMerge(future_merged_part, false, max_source_parts_size_for_merge, *merge_pred, + merge_with_ttl_allowed, NO_TRANSACTION_PTR, nullptr, &partitions_to_merge_in) == SelectPartsDecision::SELECTED) + { + create_result = createLogEntryToMergeParts( + zookeeper, + future_merged_part->parts, + future_merged_part->name, + future_merged_part->uuid, + future_merged_part->part_format, + deduplicate, + deduplicate_by_columns, + cleanup, + nullptr, + merge_pred->getVersion(), + future_merged_part->merge_type); - bool merge_with_ttl_allowed = merges_and_mutations_queued.merges_with_ttl < storage_settings_ptr->max_replicated_merges_with_ttl_in_queue && - getTotalMergesWithTTLInMergeList() < storage_settings_ptr->max_number_of_merges_with_ttl_in_pool; - auto future_merged_part = std::make_shared(); - if (storage_settings.get()->assign_part_uuids) - future_merged_part->uuid = UUIDHelpers::generateV4(); + if (create_result == CreateMergeEntryResult::Ok) + return AttemptStatus::EntryCreated; + if (create_result == CreateMergeEntryResult::LogUpdated) + return AttemptStatus::NeedRetry; + } - bool can_assign_merge = max_source_parts_size_for_merge > 0; - PartitionIdsHint partitions_to_merge_in; - 
if (can_assign_merge) + /// If there are many mutations in queue, it may happen, that we cannot enqueue enough merges to merge all new parts + if (max_source_part_size_for_mutation == 0 || merges_and_mutations_queued.mutations >= storage_settings_ptr->max_replicated_mutations_in_queue) + return AttemptStatus::Limited; + + if (queue.countMutations() > 0) + { + /// We don't need the list of committing blocks to choose a part to mutate + if (!merge_pred) + merge_pred.emplace(queue.getMergePredicate(zookeeper, PartitionIdsHint{})); + + /// Choose a part to mutate. + DataPartsVector data_parts = getDataPartsVectorForInternalUsage(); + for (const auto & part : data_parts) { - auto lightweight_merge_pred = LocalMergePredicate(queue); - partitions_to_merge_in = merger_mutator.getPartitionsThatMayBeMerged( - max_source_parts_size_for_merge, lightweight_merge_pred, merge_with_ttl_allowed, NO_TRANSACTION_PTR); - if (partitions_to_merge_in.empty()) - can_assign_merge = false; - else - merge_pred.emplace(queue.getMergePredicate(zookeeper, partitions_to_merge_in)); - } + if (part->getBytesOnDisk() > max_source_part_size_for_mutation) + continue; - if (can_assign_merge && - merger_mutator.selectPartsToMerge(future_merged_part, false, max_source_parts_size_for_merge, *merge_pred, - merge_with_ttl_allowed, NO_TRANSACTION_PTR, nullptr, &partitions_to_merge_in) == SelectPartsDecision::SELECTED) - { - create_result = createLogEntryToMergeParts( - zookeeper, - future_merged_part->parts, - future_merged_part->name, + std::optional> desired_mutation_version = merge_pred->getDesiredMutationVersion(part); + if (!desired_mutation_version) + continue; + + create_result = createLogEntryToMutatePart( + *part, future_merged_part->uuid, - future_merged_part->part_format, - deduplicate, - deduplicate_by_columns, - cleanup, - nullptr, - merge_pred->getVersion(), - future_merged_part->merge_type); - } - /// If there are many mutations in queue, it may happen, that we cannot enqueue enough merges to merge all new parts - else if (max_source_part_size_for_mutation > 0 && queue.countMutations() > 0 - && merges_and_mutations_queued.mutations < storage_settings_ptr->max_replicated_mutations_in_queue) - { - /// We don't need the list of committing blocks to choose a part to mutate - if (!merge_pred) - merge_pred.emplace(queue.getMergePredicate(zookeeper, PartitionIdsHint{})); + desired_mutation_version->first, + desired_mutation_version->second, + merge_pred->getVersion()); - /// Choose a part to mutate. - DataPartsVector data_parts = getDataPartsVectorForInternalUsage(); - for (const auto & part : data_parts) - { - if (part->getBytesOnDisk() > max_source_part_size_for_mutation) - continue; - - std::optional> desired_mutation_version = merge_pred->getDesiredMutationVersion(part); - if (!desired_mutation_version) - continue; - - create_result = createLogEntryToMutatePart( - *part, - future_merged_part->uuid, - desired_mutation_version->first, - desired_mutation_version->second, - merge_pred->getVersion()); - - if (create_result == CreateMergeEntryResult::Ok || - create_result == CreateMergeEntryResult::LogUpdated) - break; - } + if (create_result == CreateMergeEntryResult::Ok) + return AttemptStatus::EntryCreated; + if (create_result == CreateMergeEntryResult::LogUpdated) + return AttemptStatus::NeedRetry; } } + + return AttemptStatus::CannotSelect; + }; + + AttemptStatus result = AttemptStatus::CannotSelect; + try + { + result = try_assign_merge(); } catch (...) 
{ tryLogCurrentException(log, __PRETTY_FUNCTION__); } - if (!is_leader) - return; - if (create_result != CreateMergeEntryResult::Ok - && create_result != CreateMergeEntryResult::LogUpdated) - { - merge_selecting_task->scheduleAfter(storage_settings_ptr->merge_selecting_sleep_ms); - } + if (result == AttemptStatus::EntryCreated || result == AttemptStatus::NeedRetry) + merge_selecting_sleep_ms = static_cast(merge_selecting_sleep_ms / storage_settings_ptr->merge_selecting_sleep_slowdown_factor); + else if (result == AttemptStatus::CannotSelect) + merge_selecting_sleep_ms = static_cast(merge_selecting_sleep_ms * storage_settings_ptr->merge_selecting_sleep_slowdown_factor); + + if (merge_selecting_sleep_ms < storage_settings_ptr->merge_selecting_sleep_ms) + merge_selecting_sleep_ms = storage_settings_ptr->merge_selecting_sleep_ms; + if (merge_selecting_sleep_ms > storage_settings_ptr->max_merge_selecting_sleep_ms) + merge_selecting_sleep_ms = storage_settings_ptr->max_merge_selecting_sleep_ms; + + if (result == AttemptStatus::EntryCreated) + merge_selecting_task->schedule(); else { - merge_selecting_task->schedule(); + LOG_TRACE(log, "Scheduling next merge selecting task after {}ms", merge_selecting_sleep_ms); + merge_selecting_task->scheduleAfter(merge_selecting_sleep_ms); } } diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 01b86dd1425..5d877e4b7fa 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -456,6 +456,8 @@ private: /// It is acquired for each iteration of the selection of parts to merge or each OPTIMIZE query. std::mutex merge_selecting_mutex; + UInt64 merge_selecting_sleep_ms; + /// A task that marks finished mutations as done. BackgroundSchedulePool::TaskHolder mutations_finalizing_task; diff --git a/tests/integration/test_consistent_parts_after_clone_replica/test.py b/tests/integration/test_consistent_parts_after_clone_replica/test.py index 0c907340090..2771a874d68 100644 --- a/tests/integration/test_consistent_parts_after_clone_replica/test.py +++ b/tests/integration/test_consistent_parts_after_clone_replica/test.py @@ -13,7 +13,8 @@ def fill_nodes(nodes, shard): CREATE TABLE test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) - SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, + cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0; """.format( shard=shard, replica=node.name ) diff --git a/tests/integration/test_merge_tree_empty_parts/test.py b/tests/integration/test_merge_tree_empty_parts/test.py index 0f611408a67..212c0577c13 100644 --- a/tests/integration/test_merge_tree_empty_parts/test.py +++ b/tests/integration/test_merge_tree_empty_parts/test.py @@ -27,7 +27,7 @@ def test_empty_parts_alter_delete(started_cluster): "CREATE TABLE empty_parts_delete (d Date, key UInt64, value String) " "ENGINE = ReplicatedMergeTree('/clickhouse/tables/empty_parts_delete', 'r1') " "PARTITION BY toYYYYMM(d) ORDER BY key " - "SETTINGS old_parts_lifetime = 1" + "SETTINGS old_parts_lifetime = 1, cleanup_delay_period=0, cleanup_thread_preferred_points_per_iteration=0" ) node1.query("INSERT INTO empty_parts_delete VALUES (toDate('2020-10-10'), 1, 'a')") From 
c9aa3042b50ae1b691149ec9012c1521b01705ac Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 23 May 2023 02:28:23 +0200 Subject: [PATCH 116/722] fix --- .../02427_mutate_and_zero_copy_replication_zookeeper.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql b/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql index 9b0a52b8dbd..e7e0f2f6c59 100644 --- a/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql +++ b/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql @@ -9,7 +9,7 @@ CREATE TABLE mutate_and_zero_copy_replication1 ) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02427_mutate_and_zero_copy_replication/alter', '1') ORDER BY tuple() -SETTINGS old_parts_lifetime=0, cleanup_delay_period=300, cleanup_delay_period_random_add=300, min_bytes_for_wide_part = 0; +SETTINGS old_parts_lifetime=0, cleanup_delay_period=300, max_cleanup_delay_period=300, cleanup_delay_period_random_add=300, min_bytes_for_wide_part = 0; CREATE TABLE mutate_and_zero_copy_replication2 ( From 0fb9e63f76323ae60520df37e9a947c420664de9 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 23 May 2023 10:00:00 +0000 Subject: [PATCH 117/722] Fix and update broken_tests --- tests/broken_tests.txt | 2 -- tests/queries/0_stateless/01655_plan_optimizations.reference | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/broken_tests.txt b/tests/broken_tests.txt index 0b4efacba0b..fc60b820f93 100644 --- a/tests/broken_tests.txt +++ b/tests/broken_tests.txt @@ -59,7 +59,6 @@ 01615_random_one_shard_insertion 01624_soft_constraints 01651_bugs_from_15889 -01655_plan_optimizations 01656_test_query_log_factories_info 01681_bloom_filter_nullable_column 01700_system_zookeeper_path_in @@ -101,7 +100,6 @@ 02354_annoy 02366_union_decimal_conversion 02375_rocksdb_with_filters -02377_optimize_sorting_by_input_stream_properties_explain 02382_join_and_filtering_set 02402_merge_engine_with_view 02404_memory_bound_merging diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index 9796d2e4f82..34ea2bc20a3 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -1,4 +1,5 @@ Too many optimizations applied to query plan +Too many optimizations applied to query plan > sipHash should be calculated after filtration FUNCTION sipHash64 Filter column: equals From 84a97ca04a0f22becab1459bb1e557fe1a6104a8 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 23 May 2023 12:18:41 +0200 Subject: [PATCH 118/722] fix --- .../02427_mutate_and_zero_copy_replication_zookeeper.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql b/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql index e7e0f2f6c59..e3c8583ccf4 100644 --- a/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql +++ b/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql @@ -19,7 +19,7 @@ CREATE TABLE mutate_and_zero_copy_replication2 ) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02427_mutate_and_zero_copy_replication/alter', '2') ORDER BY tuple() -SETTINGS old_parts_lifetime=0, cleanup_delay_period=300, 
cleanup_delay_period_random_add=300; +SETTINGS old_parts_lifetime=0, cleanup_delay_period=300, max_cleanup_delay_period=300, cleanup_delay_period_random_add=300; INSERT INTO mutate_and_zero_copy_replication1 VALUES (1, '1', 1.0); From f3b4959e059640a9b786f421b3fe42f9a1fb4be6 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 23 May 2023 19:37:35 +0200 Subject: [PATCH 119/722] fix --- src/Storages/StorageReplicatedMergeTree.cpp | 7 +++++-- tests/integration/test_merge_tree_empty_parts/test.py | 2 +- .../queries/0_stateless/02448_clone_replica_lost_part.sql | 1 + 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index a6152c22148..fc90ff550c7 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3393,10 +3393,13 @@ void StorageReplicatedMergeTree::mergeSelectingTask() } + Float32 new_sleep_ms = merge_selecting_sleep_ms; if (result == AttemptStatus::EntryCreated || result == AttemptStatus::NeedRetry) - merge_selecting_sleep_ms = static_cast(merge_selecting_sleep_ms / storage_settings_ptr->merge_selecting_sleep_slowdown_factor); + new_sleep_ms /= storage_settings_ptr->merge_selecting_sleep_slowdown_factor; else if (result == AttemptStatus::CannotSelect) - merge_selecting_sleep_ms = static_cast(merge_selecting_sleep_ms * storage_settings_ptr->merge_selecting_sleep_slowdown_factor); + new_sleep_ms *= storage_settings_ptr->merge_selecting_sleep_slowdown_factor; + new_sleep_ms *= std::uniform_real_distribution(1.f, 1.1f)(thread_local_rng); + merge_selecting_sleep_ms = static_cast(new_sleep_ms); if (merge_selecting_sleep_ms < storage_settings_ptr->merge_selecting_sleep_ms) merge_selecting_sleep_ms = storage_settings_ptr->merge_selecting_sleep_ms; diff --git a/tests/integration/test_merge_tree_empty_parts/test.py b/tests/integration/test_merge_tree_empty_parts/test.py index 212c0577c13..c6a96f3ed1b 100644 --- a/tests/integration/test_merge_tree_empty_parts/test.py +++ b/tests/integration/test_merge_tree_empty_parts/test.py @@ -48,7 +48,7 @@ def test_empty_parts_summing(started_cluster): "CREATE TABLE empty_parts_summing (d Date, key UInt64, value Int64) " "ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/empty_parts_summing', 'r1') " "PARTITION BY toYYYYMM(d) ORDER BY key " - "SETTINGS old_parts_lifetime = 1" + "SETTINGS old_parts_lifetime = 1, cleanup_delay_period=0, cleanup_thread_preferred_points_per_iteration=0" ) node1.query("INSERT INTO empty_parts_summing VALUES (toDate('2020-10-10'), 1, 1)") diff --git a/tests/queries/0_stateless/02448_clone_replica_lost_part.sql b/tests/queries/0_stateless/02448_clone_replica_lost_part.sql index 44303a1c532..7ad25d75fbe 100644 --- a/tests/queries/0_stateless/02448_clone_replica_lost_part.sql +++ b/tests/queries/0_stateless/02448_clone_replica_lost_part.sql @@ -144,6 +144,7 @@ select sleep(2) format Null; -- increases probability of reproducing the issue -- rmt1 will mimic rmt2, but will not be able to fetch parts for a while system stop replicated sends rmt2; attach table rmt1; +system sync replica rmt1; -- rmt1 should not show the value (200) from dropped part select throwIf(n = 200) from rmt1 format Null; select 11, arraySort(groupArray(n)) from rmt2; From be49281044eba2be91c46666ce12a28da446585c Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Wed, 24 May 2023 00:48:09 +0000 Subject: [PATCH 120/722] Try to fix test --- 
.../test/integration/runner/compose/docker_compose_mongo.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/integration/runner/compose/docker_compose_mongo.yml b/docker/test/integration/runner/compose/docker_compose_mongo.yml index 8cdcbc421e8..9a6eae6ca8c 100644 --- a/docker/test/integration/runner/compose/docker_compose_mongo.yml +++ b/docker/test/integration/runner/compose/docker_compose_mongo.yml @@ -1,7 +1,7 @@ version: '2.3' services: mongo1: - image: mongo:6.0 + image: mongo:5.0 restart: always environment: MONGO_INITDB_ROOT_USERNAME: root @@ -11,7 +11,7 @@ services: command: --profile=2 --verbose mongo2: - image: mongo:6.0 + image: mongo:5.0 restart: always ports: - ${MONGO_NO_CRED_EXTERNAL_PORT:-27017}:${MONGO_NO_CRED_INTERNAL_PORT:-27017} From 79c5aa23585efb20d410dccd8036af968525a71b Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Wed, 24 May 2023 06:52:22 +0000 Subject: [PATCH 121/722] Remove test from broken_tests.txt --- tests/broken_tests.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/broken_tests.txt b/tests/broken_tests.txt index cef8f68b210..e61c1316e17 100644 --- a/tests/broken_tests.txt +++ b/tests/broken_tests.txt @@ -137,4 +137,3 @@ 01600_parts_types_metrics_long 01287_max_execution_speed 02703_row_policy_for_database -02732_rename_after_processing From 242c3bc9a971b1f9b76df57b7df1ac5d176fe274 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 24 May 2023 16:01:28 +0200 Subject: [PATCH 122/722] fix --- tests/integration/test_ttl_replicated/test.py | 22 ++++++++----------- .../02448_clone_replica_lost_part.sql | 7 +++--- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index 4ea4472b812..d78c00a9f9c 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -6,6 +6,7 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV, exec_query_with_retry from helpers.wait_for_helpers import wait_for_delete_inactive_parts from helpers.wait_for_helpers import wait_for_delete_empty_parts +from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", with_zookeeper=True) @@ -66,7 +67,8 @@ def test_ttl_columns(started_cluster): """ CREATE TABLE test_ttl(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_columns', '{replica}') - ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0; + ORDER BY id PARTITION BY toDayOfMonth(date) + SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0, , max_merge_selecting_sleep_ms=6000; """.format( replica=node.name ) @@ -99,7 +101,7 @@ def test_merge_with_ttl_timeout(started_cluster): CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) - SETTINGS min_bytes_for_wide_part=0; + SETTINGS min_bytes_for_wide_part=0, max_merge_selecting_sleep_ms=6000; """.format( replica=node.name, table=table ) @@ -134,14 +136,8 @@ def test_merge_with_ttl_timeout(started_cluster): ) ) - time.sleep(15) # TTL merges shall not happen. 
- - assert ( - node1.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "3\n" - ) - assert ( - node2.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "3\n" - ) + assert_eq_with_retry(node1, "SELECT countIf(a = 0) FROM {table}".format(table=table), "3\n") + assert_eq_with_retry(node2, "SELECT countIf(a = 0) FROM {table}".format(table=table), "3\n") def test_ttl_many_columns(started_cluster): @@ -155,7 +151,7 @@ def test_ttl_many_columns(started_cluster): _offset Int32 TTL date, _partition Int32 TTL date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_2', '{replica}') - ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0; + ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000; """.format( replica=node.name ) @@ -213,7 +209,7 @@ def test_ttl_table(started_cluster, delete_suffix): CREATE TABLE test_ttl(date DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) - TTL date + INTERVAL 1 DAY {delete_suffix} SETTINGS merge_with_ttl_timeout=0; + TTL date + INTERVAL 1 DAY {delete_suffix} SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000; """.format( replica=node.name, delete_suffix=delete_suffix ) @@ -304,7 +300,7 @@ def test_ttl_double_delete_rule_returns_error(started_cluster): CREATE TABLE test_ttl(date DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_double_delete', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) - TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0 + TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000 """.format( replica=node1.name ) diff --git a/tests/queries/0_stateless/02448_clone_replica_lost_part.sql b/tests/queries/0_stateless/02448_clone_replica_lost_part.sql index 7ad25d75fbe..1e99e1869cc 100644 --- a/tests/queries/0_stateless/02448_clone_replica_lost_part.sql +++ b/tests/queries/0_stateless/02448_clone_replica_lost_part.sql @@ -6,10 +6,12 @@ drop table if exists rmt1; drop table if exists rmt2; create table rmt1 (n int) engine=ReplicatedMergeTree('/test/02448/{database}/rmt', '1') order by tuple() settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, cleanup_delay_period=0, cleanup_delay_period_random_add=1, - cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4; + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4, + merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=500; create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02448/{database}/rmt', '2') order by tuple() settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, cleanup_delay_period=0, cleanup_delay_period_random_add=1, - cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4; + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4, + merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=500; -- insert part only on one replica system stop replicated sends rmt1; @@ -144,7 +146,6 @@ select sleep(2) format Null; -- increases probability of reproducing the issue -- rmt1 will mimic rmt2, but will not be able to fetch parts for a while system stop replicated sends rmt2; attach table rmt1; -system sync 
replica rmt1; -- rmt1 should not show the value (200) from dropped part select throwIf(n = 200) from rmt1 format Null; select 11, arraySort(groupArray(n)) from rmt2; From a237b8b83958abbb6976fdb72f67790c54442195 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 24 May 2023 14:19:37 +0000 Subject: [PATCH 123/722] Automatic style fix --- tests/integration/test_ttl_replicated/test.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index d78c00a9f9c..d681e81df3a 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -136,8 +136,12 @@ def test_merge_with_ttl_timeout(started_cluster): ) ) - assert_eq_with_retry(node1, "SELECT countIf(a = 0) FROM {table}".format(table=table), "3\n") - assert_eq_with_retry(node2, "SELECT countIf(a = 0) FROM {table}".format(table=table), "3\n") + assert_eq_with_retry( + node1, "SELECT countIf(a = 0) FROM {table}".format(table=table), "3\n" + ) + assert_eq_with_retry( + node2, "SELECT countIf(a = 0) FROM {table}".format(table=table), "3\n" + ) def test_ttl_many_columns(started_cluster): From 4c94b3d6bce6bf34a52e83f98b6fec312e4ba79b Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 24 May 2023 20:13:37 +0300 Subject: [PATCH 124/722] Update test.py --- tests/integration/test_ttl_replicated/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index d681e81df3a..7ba5a4359c7 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -68,7 +68,7 @@ def test_ttl_columns(started_cluster): CREATE TABLE test_ttl(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_columns', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) - SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0, , max_merge_selecting_sleep_ms=6000; + SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0, max_merge_selecting_sleep_ms=6000; """.format( replica=node.name ) From e66f6272d1dc76859251fde165b2d2d9664dce8f Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 10 May 2023 18:39:38 +0000 Subject: [PATCH 125/722] Refactor CapnProto format to improve input/output performance --- src/Core/Settings.h | 2 +- src/Core/SettingsEnums.cpp | 8 +- src/Core/SettingsEnums.h | 2 +- src/Formats/CapnProtoSchema.cpp | 298 ++++ .../{CapnProtoUtils.h => CapnProtoSchema.h} | 13 +- src/Formats/CapnProtoSerializer.cpp | 1218 +++++++++++++++++ src/Formats/CapnProtoSerializer.h | 25 + src/Formats/CapnProtoUtils.cpp | 734 ---------- src/Formats/FormatSettings.h | 6 +- .../Formats/Impl/CapnProtoRowInputFormat.cpp | 253 +--- .../Formats/Impl/CapnProtoRowInputFormat.h | 9 +- .../Formats/Impl/CapnProtoRowOutputFormat.cpp | 266 +--- .../Formats/Impl/CapnProtoRowOutputFormat.h | 17 +- .../Formats/Impl/ProtobufListInputFormat.cpp | 9 +- .../Formats/Impl/ProtobufRowInputFormat.cpp | 9 +- .../queries/0_stateless/02030_capnp_format.sh | 4 +- ...p_case_insensitive_names_matcing.reference | 1 + ...35_capnp_case_insensitive_names_matcing.sh | 10 + ...ing_and_writing_structure_fields.reference | 3 + ...36_reading_and_writing_structure_fields.sh | 24 + ...2735_case_insensitive_names_matching.capnp | 13 + .../02736_nested_structures.capnp | 21 + 22 files 
changed, 1686 insertions(+), 1259 deletions(-) create mode 100644 src/Formats/CapnProtoSchema.cpp rename src/Formats/{CapnProtoUtils.h => CapnProtoSchema.h} (59%) create mode 100644 src/Formats/CapnProtoSerializer.cpp create mode 100644 src/Formats/CapnProtoSerializer.h delete mode 100644 src/Formats/CapnProtoUtils.cpp create mode 100644 tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.reference create mode 100755 tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.sh create mode 100644 tests/queries/0_stateless/02736_reading_and_writing_structure_fields.reference create mode 100755 tests/queries/0_stateless/02736_reading_and_writing_structure_fields.sh create mode 100644 tests/queries/0_stateless/format_schemas/02735_case_insensitive_names_matching.capnp create mode 100644 tests/queries/0_stateless/format_schemas/02736_nested_structures.capnp diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 1df0a8af24f..2863cc9d7a7 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -962,7 +962,7 @@ class IColumn; M(Bool, output_format_orc_string_as_string, false, "Use ORC String type instead of Binary for String columns", 0) \ M(ORCCompression, output_format_orc_compression_method, "lz4", "Compression method for ORC output format. Supported codecs: lz4, snappy, zlib, zstd, none (uncompressed)", 0) \ \ - M(EnumComparingMode, format_capn_proto_enum_comparising_mode, FormatSettings::EnumComparingMode::BY_VALUES, "How to map ClickHouse Enum and CapnProto Enum", 0) \ + M(CapnProtoEnumComparingMode, format_capn_proto_enum_comparising_mode, FormatSettings::CapnProtoEnumComparingMode::BY_VALUES, "How to map ClickHouse Enum and CapnProto Enum", 0) \ \ M(String, input_format_mysql_dump_table_name, "", "Name of the table in MySQL dump from which to read data", 0) \ M(Bool, input_format_mysql_dump_map_column_names, true, "Match columns from table in MySQL dump and columns from ClickHouse table by names", 0) \ diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp index e0f16ea00db..a291a23c140 100644 --- a/src/Core/SettingsEnums.cpp +++ b/src/Core/SettingsEnums.cpp @@ -144,10 +144,10 @@ IMPLEMENT_SETTING_ENUM(TransactionsWaitCSNMode, ErrorCodes::BAD_ARGUMENTS, {"wait", TransactionsWaitCSNMode::WAIT}, {"wait_unknown", TransactionsWaitCSNMode::WAIT_UNKNOWN}}) -IMPLEMENT_SETTING_ENUM(EnumComparingMode, ErrorCodes::BAD_ARGUMENTS, - {{"by_names", FormatSettings::EnumComparingMode::BY_NAMES}, - {"by_values", FormatSettings::EnumComparingMode::BY_VALUES}, - {"by_names_case_insensitive", FormatSettings::EnumComparingMode::BY_NAMES_CASE_INSENSITIVE}}) +IMPLEMENT_SETTING_ENUM(CapnProtoEnumComparingMode, ErrorCodes::BAD_ARGUMENTS, + {{"by_names", FormatSettings::CapnProtoEnumComparingMode::BY_NAMES}, + {"by_values", FormatSettings::CapnProtoEnumComparingMode::BY_VALUES}, + {"by_names_case_insensitive", FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE}}) IMPLEMENT_SETTING_ENUM(EscapingRule, ErrorCodes::BAD_ARGUMENTS, {{"None", FormatSettings::EscapingRule::None}, diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h index 3ae7bfaa673..1c5be910ef7 100644 --- a/src/Core/SettingsEnums.h +++ b/src/Core/SettingsEnums.h @@ -188,7 +188,7 @@ enum class TransactionsWaitCSNMode DECLARE_SETTING_ENUM(TransactionsWaitCSNMode) -DECLARE_SETTING_ENUM_WITH_RENAME(EnumComparingMode, FormatSettings::EnumComparingMode) +DECLARE_SETTING_ENUM_WITH_RENAME(CapnProtoEnumComparingMode, FormatSettings::CapnProtoEnumComparingMode) 
DECLARE_SETTING_ENUM_WITH_RENAME(EscapingRule, FormatSettings::EscapingRule) diff --git a/src/Formats/CapnProtoSchema.cpp b/src/Formats/CapnProtoSchema.cpp new file mode 100644 index 00000000000..22518d5061a --- /dev/null +++ b/src/Formats/CapnProtoSchema.cpp @@ -0,0 +1,298 @@ +#include + +#if USE_CAPNP + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int CANNOT_PARSE_CAPN_PROTO_SCHEMA; + extern const int BAD_TYPE_OF_FIELD; + extern const int FILE_DOESNT_EXIST; + extern const int UNKNOWN_EXCEPTION; + extern const int CAPN_PROTO_BAD_TYPE; + extern const int BAD_ARGUMENTS; +} + +capnp::StructSchema CapnProtoSchemaParser::getMessageSchema(const FormatSchemaInfo & schema_info) +{ + capnp::ParsedSchema schema; + try + { + int fd; + KJ_SYSCALL(fd = open(schema_info.schemaDirectory().data(), O_RDONLY)); // NOLINT(bugprone-suspicious-semicolon) + auto schema_dir = kj::newDiskDirectory(kj::OsFileHandle(fd)); + schema = impl.parseFromDirectory(*schema_dir, kj::Path::parse(schema_info.schemaPath()), {}); + } + catch (const kj::Exception & e) + { + /// That's not good to determine the type of error by its description, but + /// this is the only way to do it here, because kj doesn't specify the type of error. + auto description = std::string_view(e.getDescription().cStr()); + if (description.find("No such file or directory") != String::npos || description.find("no such directory") != String::npos) + throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot open CapnProto schema, file {} doesn't exists", schema_info.absoluteSchemaPath()); + + if (description.find("Parse error") != String::npos) + throw Exception(ErrorCodes::CANNOT_PARSE_CAPN_PROTO_SCHEMA, "Cannot parse CapnProto schema {}:{}", schema_info.schemaPath(), e.getLine()); + + throw Exception(ErrorCodes::UNKNOWN_EXCEPTION, + "Unknown exception while parsing CapnProto schema: {}, schema dir and file: {}, {}", + description, schema_info.schemaDirectory(), schema_info.schemaPath()); + } + + auto message_maybe = schema.findNested(schema_info.messageName()); + auto * message_schema = kj::_::readMaybe(message_maybe); + if (!message_schema) + throw Exception(ErrorCodes::CANNOT_PARSE_CAPN_PROTO_SCHEMA, + "CapnProto schema doesn't contain message with name {}", schema_info.messageName()); + return message_schema->asStruct(); +} + +bool checkIfStructContainsUnnamedUnion(const capnp::StructSchema & struct_schema) +{ + return struct_schema.getFields().size() != struct_schema.getNonUnionFields().size(); +} + +bool checkIfStructIsNamedUnion(const capnp::StructSchema & struct_schema) +{ + return struct_schema.getFields().size() == struct_schema.getUnionFields().size(); +} + +/// Get full name of type for better exception messages. 
+String getCapnProtoFullTypeName(const capnp::Type & type) +{ + static const std::map capnp_simple_type_names = + { + {capnp::schema::Type::Which::BOOL, "Bool"}, + {capnp::schema::Type::Which::VOID, "Void"}, + {capnp::schema::Type::Which::INT8, "Int8"}, + {capnp::schema::Type::Which::INT16, "Int16"}, + {capnp::schema::Type::Which::INT32, "Int32"}, + {capnp::schema::Type::Which::INT64, "Int64"}, + {capnp::schema::Type::Which::UINT8, "UInt8"}, + {capnp::schema::Type::Which::UINT16, "UInt16"}, + {capnp::schema::Type::Which::UINT32, "UInt32"}, + {capnp::schema::Type::Which::UINT64, "UInt64"}, + {capnp::schema::Type::Which::FLOAT32, "Float32"}, + {capnp::schema::Type::Which::FLOAT64, "Float64"}, + {capnp::schema::Type::Which::TEXT, "Text"}, + {capnp::schema::Type::Which::DATA, "Data"}, + {capnp::schema::Type::Which::INTERFACE, "Interface"}, + {capnp::schema::Type::Which::ANY_POINTER, "AnyPointer"}, + }; + + switch (type.which()) + { + case capnp::schema::Type::Which::STRUCT: + { + auto struct_schema = type.asStruct(); + + auto non_union_fields = struct_schema.getNonUnionFields(); + std::vector non_union_field_names; + for (auto nested_field : non_union_fields) + non_union_field_names.push_back(String(nested_field.getProto().getName()) + " " + getCapnProtoFullTypeName(nested_field.getType())); + + auto union_fields = struct_schema.getUnionFields(); + std::vector union_field_names; + for (auto nested_field : union_fields) + union_field_names.push_back(String(nested_field.getProto().getName()) + " " + getCapnProtoFullTypeName(nested_field.getType())); + + String union_name = "Union(" + boost::algorithm::join(union_field_names, ", ") + ")"; + /// Check if the struct is a named union. + if (non_union_field_names.empty()) + return union_name; + + String type_name = "Struct(" + boost::algorithm::join(non_union_field_names, ", "); + /// Check if the struct contains unnamed union. 
+ if (!union_field_names.empty()) + type_name += ", " + union_name; + type_name += ")"; + return type_name; + } + case capnp::schema::Type::Which::LIST: + return "List(" + getCapnProtoFullTypeName(type.asList().getElementType()) + ")"; + case capnp::schema::Type::Which::ENUM: + { + auto enum_schema = type.asEnum(); + String enum_name = "Enum("; + auto enumerants = enum_schema.getEnumerants(); + for (unsigned i = 0; i != enumerants.size(); ++i) + { + enum_name += String(enumerants[i].getProto().getName()) + " = " + std::to_string(enumerants[i].getOrdinal()); + if (i + 1 != enumerants.size()) + enum_name += ", "; + } + enum_name += ")"; + return enum_name; + } + default: + auto it = capnp_simple_type_names.find(type.which()); + if (it == capnp_simple_type_names.end()) + throw Exception(ErrorCodes::BAD_TYPE_OF_FIELD, "Unknown CapnProto type"); + return it->second; + } +} + +namespace +{ + + template + static DataTypePtr getEnumDataTypeFromEnumerants(const capnp::EnumSchema::EnumerantList & enumerants) + { + std::vector> values; + for (auto enumerant : enumerants) + values.emplace_back(enumerant.getProto().getName(), ValueType(enumerant.getOrdinal())); + return std::make_shared>(std::move(values)); + } + + static DataTypePtr getEnumDataTypeFromEnumSchema(const capnp::EnumSchema & enum_schema) + { + auto enumerants = enum_schema.getEnumerants(); + if (enumerants.size() < 128) + return getEnumDataTypeFromEnumerants(enumerants); + if (enumerants.size() < 32768) + return getEnumDataTypeFromEnumerants(enumerants); + + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "ClickHouse supports only 8 and 16-bit Enums"); + } + + static DataTypePtr getDataTypeFromCapnProtoType(const capnp::Type & capnp_type, bool skip_unsupported_fields) + { + switch (capnp_type.which()) + { + case capnp::schema::Type::INT8: + return std::make_shared(); + case capnp::schema::Type::INT16: + return std::make_shared(); + case capnp::schema::Type::INT32: + return std::make_shared(); + case capnp::schema::Type::INT64: + return std::make_shared(); + case capnp::schema::Type::BOOL: [[fallthrough]]; + case capnp::schema::Type::UINT8: + return std::make_shared(); + case capnp::schema::Type::UINT16: + return std::make_shared(); + case capnp::schema::Type::UINT32: + return std::make_shared(); + case capnp::schema::Type::UINT64: + return std::make_shared(); + case capnp::schema::Type::FLOAT32: + return std::make_shared(); + case capnp::schema::Type::FLOAT64: + return std::make_shared(); + case capnp::schema::Type::DATA: [[fallthrough]]; + case capnp::schema::Type::TEXT: + return std::make_shared(); + case capnp::schema::Type::ENUM: + return getEnumDataTypeFromEnumSchema(capnp_type.asEnum()); + case capnp::schema::Type::LIST: + { + auto list_schema = capnp_type.asList(); + auto nested_type = getDataTypeFromCapnProtoType(list_schema.getElementType(), skip_unsupported_fields); + if (!nested_type) + return nullptr; + return std::make_shared(nested_type); + } + case capnp::schema::Type::STRUCT: + { + auto struct_schema = capnp_type.asStruct(); + + + if (struct_schema.getFields().size() == 0) + { + if (skip_unsupported_fields) + return nullptr; + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Empty messages are not supported"); + } + + /// Check if it can be Nullable. 
+ if (checkIfStructIsNamedUnion(struct_schema)) + { + auto fields = struct_schema.getUnionFields(); + if (fields.size() != 2 || (!fields[0].getType().isVoid() && !fields[1].getType().isVoid())) + { + if (skip_unsupported_fields) + return nullptr; + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unions are not supported"); + } + auto value_type = fields[0].getType().isVoid() ? fields[1].getType() : fields[0].getType(); + if (value_type.isStruct() || value_type.isList()) + { + if (skip_unsupported_fields) + return nullptr; + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Tuples and Lists cannot be inside Nullable"); + } + + auto nested_type = getDataTypeFromCapnProtoType(value_type, skip_unsupported_fields); + if (!nested_type) + return nullptr; + return std::make_shared(nested_type); + } + + if (checkIfStructContainsUnnamedUnion(struct_schema)) + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unnamed union is not supported"); + + /// Treat Struct as Tuple. + DataTypes nested_types; + Names nested_names; + for (auto field : struct_schema.getNonUnionFields()) + { + auto nested_type = getDataTypeFromCapnProtoType(field.getType(), skip_unsupported_fields); + if (!nested_type) + continue; + nested_names.push_back(field.getProto().getName()); + nested_types.push_back(nested_type); + } + if (nested_types.empty()) + return nullptr; + return std::make_shared(std::move(nested_types), std::move(nested_names)); + } + default: + { + if (skip_unsupported_fields) + return nullptr; + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unsupported CapnProtoType: {}", getCapnProtoFullTypeName(capnp_type)); + } + } +} + +} + +NamesAndTypesList capnProtoSchemaToCHSchema(const capnp::StructSchema & schema, bool skip_unsupported_fields) +{ + if (checkIfStructContainsUnnamedUnion(schema)) + throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unnamed union is not supported"); + + NamesAndTypesList names_and_types; + for (auto field : schema.getNonUnionFields()) + { + auto name = field.getProto().getName(); + auto type = getDataTypeFromCapnProtoType(field.getType(), skip_unsupported_fields); + if (type) + names_and_types.emplace_back(name, type); + } + if (names_and_types.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Cannot convert CapnProto schema to ClickHouse table schema, all fields have unsupported types"); + + return names_and_types; +} + +} + +#endif diff --git a/src/Formats/CapnProtoUtils.h b/src/Formats/CapnProtoSchema.h similarity index 59% rename from src/Formats/CapnProtoUtils.h rename to src/Formats/CapnProtoSchema.h index 2d8cdb418d7..225f6f56207 100644 --- a/src/Formats/CapnProtoUtils.h +++ b/src/Formats/CapnProtoSchema.h @@ -30,17 +30,14 @@ public: capnp::StructSchema getMessageSchema(const FormatSchemaInfo & schema_info); }; -std::pair splitCapnProtoFieldName(const String & name); +bool checkIfStructContainsUnnamedUnion(const capnp::StructSchema & struct_schema); +bool checkIfStructIsNamedUnion(const capnp::StructSchema & struct_schema); -bool compareEnumNames(const String & first, const String & second, FormatSettings::EnumComparingMode mode); - -std::pair getStructBuilderAndFieldByColumnName(capnp::DynamicStruct::Builder struct_builder, const String & name); - -capnp::DynamicValue::Reader getReaderByColumnName(const capnp::DynamicStruct::Reader & struct_reader, const String & name); - -void checkCapnProtoSchemaStructure(const capnp::StructSchema & schema, const Block & header, FormatSettings::EnumComparingMode mode); +/// Get full name of type for better exception messages. 
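+/// For example, a struct with fields `a :UInt64` and `b :Text` is printed as "Struct(a UInt64, b Text)".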
+String getCapnProtoFullTypeName(const capnp::Type & type); NamesAndTypesList capnProtoSchemaToCHSchema(const capnp::StructSchema & schema, bool skip_unsupported_fields); + } #endif diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp new file mode 100644 index 00000000000..e0c8ae2a79a --- /dev/null +++ b/src/Formats/CapnProtoSerializer.cpp @@ -0,0 +1,1218 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int THERE_IS_NO_COLUMN; + extern const int BAD_TYPE_OF_FIELD; + extern const int CAPN_PROTO_BAD_CAST; + extern const int INCORRECT_DATA; + extern const int ILLEGAL_COLUMN; +} + +namespace +{ + std::pair splitFieldName(const String & name) + { + const auto * begin = name.data(); + const auto * end = name.data() + name.size(); + const auto * it = find_first_symbols<'_', '.'>(begin, end); + String first = String(begin, it); + String second = it == end ? "" : String(it + 1, end); + return {first, second}; + } + + std::optional findFieldByName(const capnp::StructSchema & struct_schema, const String & name) + { + const auto & fields = struct_schema.getFields(); + for (auto field : fields) + { + auto field_name = String(field.getProto().getName()); + if (boost::to_lower_copy(name) == boost::to_lower_copy(field_name)) + return field; + } + return std::nullopt; + } + + [[noreturn]] void throwCannotConvert(const DataTypePtr & type, const String & name, const capnp::Type & capnp_type) + { + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\" with type {} to CapnProto type {}", + name, + type->getName(), + getCapnProtoFullTypeName(capnp_type)); + } + + struct FieldBuilder + { + virtual ~FieldBuilder() = default; + }; + + struct ListBuilder : public FieldBuilder + { + explicit ListBuilder(capnp::DynamicValue::Builder builder) : impl(builder.as()) + { + } + + capnp::DynamicList::Builder impl; + std::vector> nested_builders; + }; + + struct StructBuilder : public FieldBuilder + { + explicit StructBuilder(capnp::DynamicValue::Builder builder, size_t fields_size) : impl(builder.as()), field_builders(fields_size) + { + } + + explicit StructBuilder(capnp::DynamicStruct::Builder struct_builder, size_t fields_size) : impl(std::move(struct_builder)), field_builders(fields_size) + { + } + + capnp::DynamicStruct::Builder impl; + std::vector> field_builders; + }; + + std::unique_ptr initStructFieldBuilderIfNeeded(const ColumnPtr & column, size_t row_num, capnp::DynamicStruct::Builder & struct_builder, const capnp::StructSchema::Field & field, const capnp::Type & capnp_type, size_t nested_fields_size) + { + switch (capnp_type.which()) + { + case capnp::schema::Type::LIST: + { + const auto * array_column = assert_cast(column.get()); + size_t size = array_column->getOffsets()[row_num] - array_column->getOffsets()[row_num - 1]; + return std::make_unique(struct_builder.init(field, static_cast(size))); + } + case capnp::schema::Type::STRUCT: + { + return std::make_unique(struct_builder.init(field), nested_fields_size); + } + default: + return nullptr; + } + } + + class ICapnProtoSerializer + { + public: + virtual std::optional writeRow(const ColumnPtr & column, FieldBuilder * builder, size_t row_num) = 0; + virtual void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) = 0; + + virtual ~ICapnProtoSerializer() = 
default; + }; + + template + class CapnProtoIntegerSerializer : public ICapnProtoSerializer + { + public: + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::INT) + return capnp::DynamicValue::Reader(column->getInt(row_num)); + if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::UINT) + return capnp::DynamicValue::Reader(column->getUInt(row_num)); + return capnp::DynamicValue::Reader(column->getBool(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + NumericType value; + if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::INT) + value = static_cast(reader.as()); + else if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::UINT) + value = static_cast(reader.as()); + else if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::BOOL) + value = static_cast(reader.as()); + + if constexpr (is_bool_data_type) + assert_cast(column).insertValue(static_cast(value)); + else + assert_cast &>(column).insertValue(value); + } + }; + + template + static std::unique_ptr createIntegerSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + switch (capnp_type.which()) + { + case capnp::schema::Type::INT8: [[fallthrough]]; + case capnp::schema::Type::INT16: [[fallthrough]]; + case capnp::schema::Type::INT32: [[fallthrough]]; + case capnp::schema::Type::INT64: + return std::make_unique>(); + case capnp::schema::Type::UINT8: [[fallthrough]]; + case capnp::schema::Type::UINT16: [[fallthrough]]; + case capnp::schema::Type::UINT32: [[fallthrough]]; + case capnp::schema::Type::UINT64: + return std::make_unique>(); + case capnp::schema::Type::BOOL: + return std::make_unique>(); + default: + throwCannotConvert(data_type, column_name, capnp_type); + } + } + + template + class CapnProtoBigIntegerSerializer : public ICapnProtoSerializer + { + public: + CapnProtoBigIntegerSerializer(const DataTypePtr & data_type_, const String & column_name, const capnp::Type & capnp_type) : data_type(data_type_) + { + if (!capnp_type.isData()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + auto data = column->getDataAt(row_num); + return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto value = reader.as(); + if (value.size() != sizeof(NumericType)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", data_type->getName(), value.size()); + + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + + private: + DataTypePtr data_type; + }; + + template + class CapnProtoFloatSerializer : public ICapnProtoSerializer + { + public: + CapnProtoFloatSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isFloat32() && !capnp_type.isFloat64()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getFloat64(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast &>(column).insertValue(reader.as()); + } + }; + + template 
+    class CapnProtoEnumSerializer : public ICapnProtoSerializer
+    {
+    public:
+        CapnProtoEnumSerializer(
+            const DataTypePtr & data_type_,
+            const String & column_name,
+            const capnp::Type & capnp_type,
+            const FormatSettings::CapnProtoEnumComparingMode enum_comparing_mode_) : data_type(data_type_), enum_comparing_mode(enum_comparing_mode_)
+        {
+            if (!capnp_type.isEnum())
+                throwCannotConvert(data_type, column_name, capnp_type);
+
+            bool to_lower = enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE;
+            const auto * enum_type = assert_cast *>(data_type.get());
+            const auto & enum_values = dynamic_cast &>(*enum_type);
+
+            enum_schema = capnp_type.asEnum();
+            auto enumerants = enum_schema.getEnumerants();
+            constexpr auto max_value = std::is_same_v ? INT8_MAX : INT16_MAX;
+            if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES)
+            {
+                /// In CapnProto, Enum fields are numbered sequentially starting from zero.
+                if (enumerants.size() > max_value)
+                    throw Exception(
+                        ErrorCodes::CAPN_PROTO_BAD_CAST,
+                        "Enum from CapnProto schema contains values that are out of range for ClickHouse enum type {}",
+                        data_type->getName());
+
+                auto values = enum_values.getSetOfAllValues();
+                std::unordered_set capn_enum_values;
+                for (auto enumerant : enumerants)
+                    capn_enum_values.insert(EnumType(enumerant.getOrdinal()));
+                if (values != capn_enum_values)
+                    throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "The set of values in Enum from CapnProto schema is different from the set of values in ClickHouse Enum");
+            }
+            else
+            {
+                auto names = enum_values.getSetOfAllNames(to_lower);
+                std::unordered_set capn_enum_names;
+
+                for (auto enumerant : enumerants)
+                {
+                    String name = enumerant.getProto().getName();
+                    capn_enum_names.insert(to_lower ? boost::algorithm::to_lower_copy(name) : name);
+                }
+
+                if (names != capn_enum_names)
+                    throw Exception(
+                        ErrorCodes::CAPN_PROTO_BAD_CAST,
+                        "The set of names in Enum from CapnProto schema is different from the set of names in ClickHouse Enum");
+            }
+        }
+
+        std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override
+        {
+            const auto * enum_data_type = assert_cast *>(data_type.get());
+            EnumType enum_value = assert_cast &>(*column).getElement(row_num);
+            if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES)
+                return capnp::DynamicValue::Reader(capnp::DynamicEnum(enum_schema, enum_value));
+
+            auto enum_name = enum_data_type->getNameForValue(enum_value);
+            for (const auto enumerant : enum_schema.getEnumerants())
+            {
+                if (compareEnumNames(String(enum_name), enumerant.getProto().getName(), enum_comparing_mode))
+                    return capnp::DynamicValue::Reader(capnp::DynamicEnum(enumerant));
+            }
+
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot convert ClickHouse Enum value to CapnProto Enum");
+        }
+
+        void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+        {
+            auto enum_value = reader.as();
+            auto enumerant = *kj::_::readMaybe(enum_value.getEnumerant());
+            auto enum_type = assert_cast *>(data_type.get());
+            DataTypePtr nested_type = std::make_shared>();
+            switch (enum_comparing_mode)
+            {
+                case FormatSettings::CapnProtoEnumComparingMode::BY_VALUES:
+                {
+                    assert_cast &>(column).insertValue(static_cast(enumerant.getOrdinal()));
+                    return;
+                }
+                case FormatSettings::CapnProtoEnumComparingMode::BY_NAMES:
+                {
+                    auto value = enum_type->getValue(String(enumerant.getProto().getName()));
+                    assert_cast &>(column).insertValue(value);
+                    return;
+                }
+                case FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE:
+                {
+                    /// Find the same enum name case-insensitively.
+ String enum_name = enumerant.getProto().getName(); + for (auto & name : enum_type->getAllRegisteredNames()) + { + if (compareEnumNames(name, enum_name, enum_comparing_mode)) + { + assert_cast &>(column).insertValue(enum_type->getValue(name)); + break; + } + } + return; + } + } + } + + private: + bool compareEnumNames(const String & first, const String & second, const FormatSettings::CapnProtoEnumComparingMode mode) + { + if (mode == FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE) + return boost::algorithm::to_lower_copy(first) == boost::algorithm::to_lower_copy(second); + return first == second; + } + + DataTypePtr data_type; + capnp::EnumSchema enum_schema; + const FormatSettings::CapnProtoEnumComparingMode enum_comparing_mode; + }; + + class CapnProtoDateSerializer : public ICapnProtoSerializer + { + public: + CapnProtoDateSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isUInt16()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getUInt(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast(column).insertValue(reader.as()); + } + }; + + class CapnProtoDate32Serializer : public ICapnProtoSerializer + { + public: + CapnProtoDate32Serializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isInt32()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getInt(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast(column).insertValue(reader.as()); + } + }; + + class CapnProtoDateTimeSerializer : public ICapnProtoSerializer + { + public: + CapnProtoDateTimeSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isUInt32()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getInt(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast(column).insertValue(reader.as()); + } + }; + + class CapnProtoDateTime64Serializer : public ICapnProtoSerializer + { + public: + CapnProtoDateTime64Serializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isInt64()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getInt(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast(column).insertValue(reader.as()); + } + }; + + template + class CapnProtoDecimalSerializer : public ICapnProtoSerializer + { + public: + CapnProtoDecimalSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + auto which = WhichDataType(data_type); + if ((!capnp_type.isInt32() && which.isDecimal32()) || (!capnp_type.isInt64() && 
which.isDecimal64())) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + return capnp::DynamicValue::Reader(column->getInt(row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + assert_cast &>(column).insertValue(reader.as()); + } + }; + + template + class CapnProtoBigDecimalSerializer : public ICapnProtoSerializer + { + public: + CapnProtoBigDecimalSerializer(const DataTypePtr & data_type_, const String & column_name, const capnp::Type & capnp_type) : data_type(data_type_) + { + if (!capnp_type.isData()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + auto data = column->getDataAt(row_num); + return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto value = reader.as(); + if (value.size() != sizeof(DecimalType)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", data_type->getName(), value.size()); + + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + + private: + DataTypePtr data_type; + }; + + template + class CapnProtoStringSerializer : public ICapnProtoSerializer + { + public: + CapnProtoStringSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type_) : capnp_type(capnp_type_) + { + if (!capnp_type.isData() && !capnp_type.isText()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + auto data = column->getDataAt(row_num); + + if constexpr (is_binary) + return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); + + /// For type TEXT data must be null-terminated, but in String column we always have 0 byte at the end of each value. + return capnp::DynamicValue::Reader(capnp::Text::Reader(data.data, data.size)); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + if constexpr (is_binary) + { + auto value = reader.as(); + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + else + { + auto value = reader.as(); + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + } + + private: + capnp::Type capnp_type; + }; + + template + class CapnProtoFixedStringSerializer : public ICapnProtoSerializer + { + public: + CapnProtoFixedStringSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type_) : capnp_type(capnp_type_) + { + if (!capnp_type.isData() && !capnp_type.isText()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + { + auto data = column->getDataAt(row_num); + if constexpr (is_binary) + return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); + + if (data.data[data.size - 1] == 0) + return capnp::DynamicValue::Reader(capnp::Text::Reader(reinterpret_cast(data.data), data.size)); + + /// In TEXT type data should be null-terminated, but ClickHouse FixedString data could not be. 
+            /// To make data null-terminated we should copy it to a temporary String object and use it in capnp::Text::Reader.
+            /// Note that capnp::Text::Reader works only with a pointer to the data and its size, so we should
+            /// guarantee that the new String object's lifetime is longer than that of the capnp::Text::Reader.
+            tmp_string = data.toString();
+            return capnp::DynamicValue::Reader(capnp::Text::Reader(reinterpret_cast(tmp_string.data()), tmp_string.size()));
+        }
+
+        void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+        {
+            auto & fixed_string_column = assert_cast(column);
+            if constexpr (is_binary)
+            {
+                auto value = reader.as();
+                if (value.size() > fixed_string_column.getN())
+                    throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN());
+
+                fixed_string_column.insertData(reinterpret_cast(value.begin()), value.size());
+            }
+            else
+            {
+                auto value = reader.as();
+                if (value.size() > fixed_string_column.getN())
+                    throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN());
+
+                fixed_string_column.insertData(reinterpret_cast(value.begin()), value.size());
+            }
+        }
+
+    private:
+        String tmp_string;
+        capnp::Type capnp_type;
+    };
+
+    class CapnProtoIPv4Serializer : public ICapnProtoSerializer
+    {
+    public:
+        CapnProtoIPv4Serializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type)
+        {
+            if (!capnp_type.isUInt32())
+                throwCannotConvert(data_type, column_name, capnp_type);
+        }
+
+        std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override
+        {
+            return capnp::DynamicValue::Reader(assert_cast(*column).getElement(row_num).toUnderType());
+        }
+
+        void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+        {
+            assert_cast(column).insertValue(IPv4(reader.as()));
+        }
+    };
+
+    class CapnProtoIPv6Serializer : public ICapnProtoSerializer
+    {
+    public:
+        CapnProtoIPv6Serializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type)
+        {
+            if (!capnp_type.isData())
+                throwCannotConvert(data_type, column_name, capnp_type);
+        }
+
+        std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override
+        {
+            auto data = column->getDataAt(row_num);
+            return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size));
+        }
+
+        void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+        {
+            auto value = reader.as();
+            if (value.size() != sizeof(IPv6))
+                throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of IPv6 value: {}", value.size());
+
+            column.insertData(reinterpret_cast(value.begin()), value.size());
+        }
+    };
+
+    class CapnProtoUUIDSerializer : public ICapnProtoSerializer
+    {
+    public:
+        CapnProtoUUIDSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type)
+        {
+            if (!capnp_type.isData())
+                throwCannotConvert(data_type, column_name, capnp_type);
+        }
+
+        std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override
+        {
+            auto data = column->getDataAt(row_num);
+            return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size));
+        }
+
+        void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override
+        {
+            auto value = reader.as();
+            if (value.size() != sizeof(UUID))
+                throw
Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of UUID value: {}", value.size()); + + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + }; + + std::unique_ptr createSerializer(const DataTypePtr & type, const String & name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings); + + class CapnProtoLowCardinalitySerializer : public ICapnProtoSerializer + { + public: + CapnProtoLowCardinalitySerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) + { + nested_serializer = createSerializer(assert_cast(*data_type).getDictionaryType(), column_name, capnp_type, settings); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) override + { + const auto & low_cardinality_column = assert_cast(*column); + size_t index = low_cardinality_column.getIndexAt(row_num); + const auto & dict_column = low_cardinality_column.getDictionary().getNestedColumn(); + return nested_serializer->writeRow(dict_column, field_builder, index); + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto & low_cardinality_column = assert_cast(column); + auto tmp_column = low_cardinality_column.getDictionary().getNestedColumn()->cloneEmpty(); + nested_serializer->readRow(*tmp_column, reader); + low_cardinality_column.insertFromFullColumn(*tmp_column, 0); + } + + private: + std::unique_ptr nested_serializer; + }; + + class CapnProtoNullableSerializer : public ICapnProtoSerializer + { + public: + CapnProtoNullableSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) + { + if (!capnp_type.isStruct()) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert column \"{}\": Nullable can be represented only as a named union of type Void and nested type, got CapnProto type {}", + column_name, + getCapnProtoFullTypeName(capnp_type)); + + /// Check that struct is a named union of type VOID and one arbitrary type. + auto struct_schema = capnp_type.asStruct(); + if (!checkIfStructIsNamedUnion(struct_schema)) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert column \"{}\": Nullable can be represented only as a named union of type Void and nested type." + "Given CapnProto struct is not a named union: {}", + column_name, + getCapnProtoFullTypeName(capnp_type)); + + auto union_fields = struct_schema.getUnionFields(); + if (union_fields.size() != 2) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert column \"{}\": Nullable can be represented only as a named union of type Void and nested type." 
+ "Given CapnProto union have more than 2 fields: {}", + column_name, + getCapnProtoFullTypeName(capnp_type)); + + auto first = union_fields[0]; + auto second = union_fields[1]; + auto nested_type = assert_cast(data_type.get())->getNestedType(); + if (first.getType().isVoid()) + { + null_field = first; + nested_field = second; + nested_capnp_type = second.getType(); + if (nested_capnp_type.isStruct()) + nested_fields_size = nested_capnp_type.asStruct().getFields().size(); + nested_serializer = createSerializer(nested_type, column_name, nested_capnp_type, settings); + } + else if (second.getType().isVoid()) + { + null_field = second; + nested_field = first; + nested_capnp_type = first.getType(); + if (nested_capnp_type.isStruct()) + nested_fields_size = nested_capnp_type.asStruct().getFields().size(); + nested_serializer = createSerializer(nested_type, column_name, nested_capnp_type, settings); + } + else + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert column \"{}\": Nullable can be represented only as a named union of type Void and nested type." + "Given CapnProto union doesn't have field with type Void: {}", + column_name, + getCapnProtoFullTypeName(capnp_type)); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) override + { + assert(field_builder); + auto & struct_builder = assert_cast(*field_builder); + const auto & nullable_column = assert_cast(*column); + if (nullable_column.isNullAt(row_num)) + { + struct_builder.impl.set(null_field, capnp::Void()); + } + else + { + struct_builder.impl.clear(nested_field); + const auto & nested_column = nullable_column.getNestedColumnPtr(); + auto nested_field_builder = initStructFieldBuilderIfNeeded(nested_column, row_num, struct_builder.impl, nested_field, nested_capnp_type, nested_fields_size); + auto value = nested_serializer->writeRow(nested_column, nested_field_builder.get(), row_num); + if (value) + struct_builder.impl.set(nested_field, *value); + } + + return std::nullopt; + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto struct_reader = reader.as(); + auto & nullable_column = assert_cast(column); + auto field = *kj::_::readMaybe(struct_reader.which()); + if (field.getType().isVoid()) + nullable_column.insertDefault(); + else + { + auto & nested_column = nullable_column.getNestedColumn(); + auto nested_reader = struct_reader.get(field); + nested_serializer->readRow(nested_column, nested_reader); + nullable_column.getNullMapData().push_back(0); + } + } + + private: + std::unique_ptr nested_serializer; + capnp::StructSchema::Field null_field; + capnp::StructSchema::Field nested_field; + size_t nested_fields_size = 0; + capnp::Type nested_capnp_type; + }; + + class CapnProtoArraySerializer : public ICapnProtoSerializer + { + public: + CapnProtoArraySerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) + { + if (!capnp_type.isList()) + throwCannotConvert(data_type, column_name, capnp_type); + + auto nested_type = assert_cast(data_type.get())->getNestedType(); + element_type = capnp_type.asList().getElementType(); + if (element_type.isStruct()) + element_struct_fields = element_type.asStruct().getFields().size(); + nested_serializer = createSerializer(nested_type, column_name, capnp_type.asList().getElementType(), settings); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) 
override + { + assert(field_builder); + auto & list_builder = assert_cast(*field_builder); + const auto * array_column = assert_cast(column.get()); + const auto & nested_column = array_column->getDataPtr(); + const auto & offsets = array_column->getOffsets(); + auto offset = offsets[row_num - 1]; + size_t size = offsets[row_num] - offset; + bool need_nested_builders = list_builder.nested_builders.empty(); + for (unsigned i = 0; i != static_cast(size); ++i) + { + if (need_nested_builders) + { + /// For nested lists we need to initialize nested list builder. + if (element_type.isList()) + { + const auto & nested_offset = checkAndGetColumn(*nested_column)->getOffsets(); + size_t nested_array_size = nested_offset[offset + i] - nested_offset[offset + i - 1]; + list_builder.nested_builders.emplace_back(std::make_unique(list_builder.impl.init(i, static_cast(nested_array_size)))); + } + else if (element_type.isStruct()) + { + list_builder.nested_builders.emplace_back(std::make_unique(list_builder.impl[i], element_struct_fields)); + } + else + { + list_builder.nested_builders.emplace_back(); + } + } + + auto value = nested_serializer->writeRow(nested_column, list_builder.nested_builders[i].get(), offset + i); + if (value) + list_builder.impl.set(i, *value); + } + + return std::nullopt; + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto list_reader = reader.as(); + auto & column_array = assert_cast(column); + auto & offsets = column_array.getOffsets(); + offsets.push_back(offsets.back() + list_reader.size()); + + auto & nested_column = column_array.getData(); + for (const auto & nested_reader : list_reader) + nested_serializer->readRow(nested_column, nested_reader); + } + + private: + std::unique_ptr nested_serializer; + capnp::Type element_type; + size_t element_struct_fields; + }; + + class CapnProtoMapSerializer : public ICapnProtoSerializer + { + public: + CapnProtoMapSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) + { + /// We output/input Map type as follow CapnProto schema + /// + /// struct Map { + /// struct Entry { + /// key @0: Key; + /// value @1: Value; + /// } + /// entries @0 :List(Entry); + /// } + + if (!capnp_type.isStruct()) + throwCannotConvert(data_type, column_name, capnp_type); + + auto struct_schema = capnp_type.asStruct(); + + if (checkIfStructContainsUnnamedUnion(struct_schema)) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\" with type {} to CapnProto Struct with unnamed union {}", + column_name, + data_type->getName(), + getCapnProtoFullTypeName(capnp_type)); + + if (struct_schema.getFields().size() != 1) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\": Map type can be represented as a Struct with one list field, got struct: {}", + column_name, + getCapnProtoFullTypeName(capnp_type)); + + const auto & field_type = struct_schema.getFields()[0].getType(); + if (!field_type.isList()) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\": Map type can be represented as a Struct with one list field, got field: {}", + column_name, + getCapnProtoFullTypeName(field_type)); + + auto list_element_type = field_type.asList().getElementType(); + if (!list_element_type.isStruct()) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\": Field of struct that 
represents Map should be a list of structs, got list of {}", + column_name, + getCapnProtoFullTypeName(list_element_type)); + + auto key_value_struct = list_element_type.asStruct(); + if (checkIfStructContainsUnnamedUnion(key_value_struct)) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\": struct that represents Map entries is unnamed union: {}", + column_name, + getCapnProtoFullTypeName(list_element_type)); + + if (key_value_struct.getFields().size() != 2) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\": struct that represents Map entries should contain only 2 fields, got struct {}", + column_name, + getCapnProtoFullTypeName(list_element_type)); + + const auto & map_type = assert_cast(*data_type); + DataTypes types = {map_type.getKeyType(), map_type.getValueType()}; + Names names = {"key", "value"}; + auto entries_type = std::make_shared(std::make_shared(types, names)); + entries_field = struct_schema.getFields()[0]; + entries_capnp_type = entries_field.getType(); + nested_serializer = createSerializer(entries_type, column_name, field_type, settings); + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) override + { + assert(field_builder); + auto & struct_builder = assert_cast(*field_builder); + const auto & entries_column = assert_cast(column.get())->getNestedColumnPtr(); + auto entries_builder = initStructFieldBuilderIfNeeded(entries_column, row_num, struct_builder.impl, entries_field, entries_capnp_type, 0); + nested_serializer->writeRow(entries_column, entries_builder.get(), row_num); + return std::nullopt; + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto struct_reader = reader.as(); + auto & entries_column = assert_cast(column).getNestedColumn(); + nested_serializer->readRow(entries_column, struct_reader.get(entries_field)); + } + + private: + std::unique_ptr nested_serializer; + capnp::StructSchema::Field entries_field; + capnp::Type entries_capnp_type; + }; + + class CapnProtoStructureSerializer : public ICapnProtoSerializer + { + public: + CapnProtoStructureSerializer(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) + { + if (checkIfStructIsNamedUnion(schema) || checkIfStructContainsUnnamedUnion(schema)) + throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Root CapnProto Struct cannot be named union/struct with unnamed union"); + + initialize(data_types, names, schema, settings); + } + + CapnProtoStructureSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) + { + if (!capnp_type.isStruct()) + throwCannotConvert(data_type, column_name, capnp_type); + + auto struct_schema = capnp_type.asStruct(); + + if (checkIfStructIsNamedUnion(struct_schema) || checkIfStructContainsUnnamedUnion(struct_schema)) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\" with type {} to CapnProto named union/struct with unnamed union {}", + column_name, + data_type->getName(), + getCapnProtoFullTypeName(capnp_type)); + + const auto * tuple_data_type = assert_cast(data_type.get()); + auto nested_types = tuple_data_type->getElements(); + Names nested_names; + bool have_explicit_names = tuple_data_type->haveExplicitNames(); + auto structure_fields = struct_schema.getFields(); + if 
(!have_explicit_names) + { + if (nested_types.size() != structure_fields.size()) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "Cannot convert ClickHouse column \"{}\" with type {} to CapnProto type {}: Tuple and Struct have different sizes {} != {}", + column_name, + data_type->getName(), + getCapnProtoFullTypeName(capnp_type), + nested_types.size(), + structure_fields.size()); + nested_names.reserve(structure_fields.size()); + for (auto field : structure_fields) + nested_names.push_back(field.getProto().getName()); + } + else + { + nested_names = tuple_data_type->getElementNames(); + } + + try + { + initialize(nested_types, nested_names, struct_schema, settings); + } + catch (Exception & e) + { + e.addMessage("(while converting column {})", column_name); + throw e; + } + } + + std::optional writeRow(const ColumnPtr & column, FieldBuilder * builder, size_t row_num) override + { + assert(builder); + auto & struct_builder = assert_cast(*builder); + if (auto tuple_column = typeid_cast(column.get())) + writeRow(tuple_column->getColumnsCopy(), struct_builder, row_num); + else + writeRow(Columns{column}, struct_builder, row_num); + return std::nullopt; + } + + void writeRow(const Columns & columns, StructBuilder & struct_builder, size_t row_num) + { + for (size_t i = 0; i != columns.size(); ++i) + { + const auto & field = fields[i]; + size_t field_index = field.getIndex(); + if (likely(!struct_builder.field_builders[field_index])) + struct_builder.field_builders[field_index] = initStructFieldBuilderIfNeeded( + columns[i], row_num, struct_builder.impl, field, fields_types[i], nested_field_sizes[i]); + + auto value = field_serializers[i]->writeRow(columns[i], struct_builder.field_builders[field_index].get(), row_num); + if (value) + struct_builder.impl.set(field, *value); + } + } + + void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + { + auto struct_reader = reader.as(); + if (auto * tuple_column = typeid_cast(&column)) + { + for (size_t i = 0; i != tuple_column->tupleSize(); ++i) + field_serializers[i]->readRow(tuple_column->getColumn(i), struct_reader.get(fields[i])); + } + else + field_serializers[0]->readRow(column, struct_reader.get(fields[0])); + } + + void readRow(MutableColumns & columns, const capnp::DynamicStruct::Reader & reader) + { + for (size_t i = 0; i != columns.size(); ++i) + field_serializers[i]->readRow(*columns[i], reader.get(fields[i])); + } + + private: + void initialize(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) + { + field_serializers.reserve(data_types.size()); + fields.reserve(data_types.size()); + fields_types.reserve(data_types.size()); + nested_field_sizes.reserve(data_types.size()); + for (size_t i = 0; i != data_types.size(); ++i) + { + auto [field_name, _] = splitFieldName(names[i]); + auto field = findFieldByName(schema, field_name); + if (!field) + throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Capnproto schema doesn't contain field with name {}", field_name); + + fields.push_back(*field); + auto capnp_type = field->getType(); + fields_types.push_back(capnp_type); + nested_field_sizes.push_back(capnp_type.isStruct() ? 
capnp_type.asStruct().getFields().size() : 0); + field_serializers.push_back(createSerializer(data_types[i], names[i], capnp_type, settings)); + } + } + + std::vector> field_serializers; + std::vector fields; + std::vector nested_field_sizes; + std::vector fields_types; + }; + + std::unique_ptr createSerializer(const DataTypePtr & type, const String & name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) + { + auto [field_name, nested_name] = splitFieldName(name); + if (!nested_name.empty() && !capnp_type.isList()) + { + if (!capnp_type.isStruct()) + throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Field {} is not a struct", field_name); + + return std::make_unique(DataTypes{type}, Names{nested_name}, capnp_type.asStruct(), settings); + } + + switch (type->getTypeId()) + { + case TypeIndex::Int8: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::UInt8: + if (isBool(type)) + return createIntegerSerializer(type, name, capnp_type); + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::Int16: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::UInt16: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::Int32: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::UInt32: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::Int64: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::UInt64: + return createIntegerSerializer(type, name, capnp_type); + case TypeIndex::Int128: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::UInt128: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Int256: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::UInt256: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Float32: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Float64: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Date: + return std::make_unique(type, name, capnp_type); + case TypeIndex::Date32: + return std::make_unique(type, name, capnp_type); + case TypeIndex::DateTime: + return std::make_unique(type, name, capnp_type); + case TypeIndex::DateTime64: + return std::make_unique(type, name, capnp_type); + case TypeIndex::Decimal32: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Decimal64: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Decimal128: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::Decimal256: + return std::make_unique>(type, name, capnp_type); + case TypeIndex::IPv4: + return std::make_unique(type, name, capnp_type); + case TypeIndex::IPv6: + return std::make_unique(type, name, capnp_type); + case TypeIndex::UUID: + return std::make_unique(type, name, capnp_type); + case TypeIndex::Enum8: + return std::make_unique>(type, name, capnp_type, settings.enum_comparing_mode); + case TypeIndex::Enum16: + return std::make_unique>(type, name, capnp_type, settings.enum_comparing_mode); + case TypeIndex::String: + if (capnp_type.isData()) + return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); + case TypeIndex::FixedString: + if (capnp_type.isData()) + return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); + case TypeIndex::LowCardinality: + return std::make_unique(type, name, capnp_type, settings); + case TypeIndex::Nullable: + 
return std::make_unique(type, name, capnp_type, settings); + case TypeIndex::Array: + return std::make_unique(type, name, capnp_type, settings); + case TypeIndex::Map: + return std::make_unique(type, name, capnp_type, settings); + case TypeIndex::Tuple: + return std::make_unique(type, name, capnp_type, settings); + default: + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Type {} is not supported in CapnProto format", type->getName()); + } + } +} + +class CapnProtoSerializer::Impl +{ +public: + Impl(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) + : struct_serializer(std::make_unique(data_types, names, schema, settings)) + , fields_size(schema.getFields().size()) + { + } + + void writeRow(const Columns & columns, capnp::DynamicStruct::Builder builder, size_t row_num) + { + StructBuilder struct_builder(std::move(builder), fields_size); + struct_serializer->writeRow(columns, struct_builder, row_num); + } + + void readRow(MutableColumns & columns, capnp::DynamicStruct::Reader & reader) + { + struct_serializer->readRow(columns, reader); + } + +private: + std::unique_ptr struct_serializer; + size_t fields_size; +}; + +CapnProtoSerializer::CapnProtoSerializer(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) + : serializer_impl(std::make_unique(data_types, names, schema, settings)) +{ +} + +void CapnProtoSerializer::writeRow(const Columns & columns, capnp::DynamicStruct::Builder builder, size_t row_num) +{ + serializer_impl->writeRow(columns, std::move(builder), row_num); +} + +void CapnProtoSerializer::readRow(MutableColumns & columns, capnp::DynamicStruct::Reader & reader) +{ + serializer_impl->readRow(columns, reader); +} + +CapnProtoSerializer::~CapnProtoSerializer() = default; + +} diff --git a/src/Formats/CapnProtoSerializer.h b/src/Formats/CapnProtoSerializer.h new file mode 100644 index 00000000000..efae797875b --- /dev/null +++ b/src/Formats/CapnProtoSerializer.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class CapnProtoSerializer +{ +public: + CapnProtoSerializer(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings); + + void writeRow(const Columns & columns, capnp::DynamicStruct::Builder builder, size_t row_num); + + void readRow(MutableColumns & columns, capnp::DynamicStruct::Reader & reader); + + ~CapnProtoSerializer(); + +private: + class Impl; + std::unique_ptr serializer_impl; +}; + +} diff --git a/src/Formats/CapnProtoUtils.cpp b/src/Formats/CapnProtoUtils.cpp deleted file mode 100644 index d6c032408bb..00000000000 --- a/src/Formats/CapnProtoUtils.cpp +++ /dev/null @@ -1,734 +0,0 @@ -#include - -#if USE_CAPNP - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int CANNOT_PARSE_CAPN_PROTO_SCHEMA; - extern const int THERE_IS_NO_COLUMN; - extern const int BAD_TYPE_OF_FIELD; - extern const int CAPN_PROTO_BAD_CAST; - extern const int FILE_DOESNT_EXIST; - extern const int UNKNOWN_EXCEPTION; - extern const int INCORRECT_DATA; - extern const int CAPN_PROTO_BAD_TYPE; - extern const int BAD_ARGUMENTS; -} - -std::pair splitCapnProtoFieldName(const String & name) -{ - const auto * begin = name.data(); - const auto * end = name.data() + name.size(); - const auto * it = 
find_first_symbols<'_', '.'>(begin, end); - String first = String(begin, it); - String second = it == end ? "" : String(it + 1, end); - return {first, second}; -} - -capnp::StructSchema CapnProtoSchemaParser::getMessageSchema(const FormatSchemaInfo & schema_info) -{ - capnp::ParsedSchema schema; - try - { - int fd; - KJ_SYSCALL(fd = open(schema_info.schemaDirectory().data(), O_RDONLY)); // NOLINT(bugprone-suspicious-semicolon) - auto schema_dir = kj::newDiskDirectory(kj::OsFileHandle(fd)); - schema = impl.parseFromDirectory(*schema_dir, kj::Path::parse(schema_info.schemaPath()), {}); - } - catch (const kj::Exception & e) - { - /// That's not good to determine the type of error by its description, but - /// this is the only way to do it here, because kj doesn't specify the type of error. - auto description = std::string_view(e.getDescription().cStr()); - if (description.find("No such file or directory") != String::npos || description.find("no such directory") != String::npos) - throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot open CapnProto schema, file {} doesn't exists", schema_info.absoluteSchemaPath()); - - if (description.find("Parse error") != String::npos) - throw Exception(ErrorCodes::CANNOT_PARSE_CAPN_PROTO_SCHEMA, "Cannot parse CapnProto schema {}:{}", schema_info.schemaPath(), e.getLine()); - - throw Exception(ErrorCodes::UNKNOWN_EXCEPTION, - "Unknown exception while parsing CapnProto schema: {}, schema dir and file: {}, {}", - description, schema_info.schemaDirectory(), schema_info.schemaPath()); - } - - auto message_maybe = schema.findNested(schema_info.messageName()); - auto * message_schema = kj::_::readMaybe(message_maybe); - if (!message_schema) - throw Exception(ErrorCodes::CANNOT_PARSE_CAPN_PROTO_SCHEMA, - "CapnProto schema doesn't contain message with name {}", schema_info.messageName()); - return message_schema->asStruct(); -} - -bool compareEnumNames(const String & first, const String & second, FormatSettings::EnumComparingMode mode) -{ - if (mode == FormatSettings::EnumComparingMode::BY_NAMES_CASE_INSENSITIVE) - return boost::algorithm::to_lower_copy(first) == boost::algorithm::to_lower_copy(second); - return first == second; -} - -static const std::map capnp_simple_type_names = -{ - {capnp::schema::Type::Which::BOOL, "Bool"}, - {capnp::schema::Type::Which::VOID, "Void"}, - {capnp::schema::Type::Which::INT8, "Int8"}, - {capnp::schema::Type::Which::INT16, "Int16"}, - {capnp::schema::Type::Which::INT32, "Int32"}, - {capnp::schema::Type::Which::INT64, "Int64"}, - {capnp::schema::Type::Which::UINT8, "UInt8"}, - {capnp::schema::Type::Which::UINT16, "UInt16"}, - {capnp::schema::Type::Which::UINT32, "UInt32"}, - {capnp::schema::Type::Which::UINT64, "UInt64"}, - {capnp::schema::Type::Which::FLOAT32, "Float32"}, - {capnp::schema::Type::Which::FLOAT64, "Float64"}, - {capnp::schema::Type::Which::TEXT, "Text"}, - {capnp::schema::Type::Which::DATA, "Data"}, - {capnp::schema::Type::Which::INTERFACE, "Interface"}, - {capnp::schema::Type::Which::ANY_POINTER, "AnyPointer"}, -}; - -static bool checkIfStructContainsUnnamedUnion(const capnp::StructSchema & struct_schema) -{ - return struct_schema.getFields().size() != struct_schema.getNonUnionFields().size(); -} - -static bool checkIfStructIsNamedUnion(const capnp::StructSchema & struct_schema) -{ - return struct_schema.getFields().size() == struct_schema.getUnionFields().size(); -} - -/// Get full name of type for better exception messages. 
-static String getCapnProtoFullTypeName(const capnp::Type & type) -{ - switch (type.which()) - { - case capnp::schema::Type::Which::STRUCT: - { - auto struct_schema = type.asStruct(); - - auto non_union_fields = struct_schema.getNonUnionFields(); - std::vector non_union_field_names; - for (auto nested_field : non_union_fields) - non_union_field_names.push_back(String(nested_field.getProto().getName()) + " " + getCapnProtoFullTypeName(nested_field.getType())); - - auto union_fields = struct_schema.getUnionFields(); - std::vector union_field_names; - for (auto nested_field : union_fields) - union_field_names.push_back(String(nested_field.getProto().getName()) + " " + getCapnProtoFullTypeName(nested_field.getType())); - - String union_name = "Union(" + boost::algorithm::join(union_field_names, ", ") + ")"; - /// Check if the struct is a named union. - if (non_union_field_names.empty()) - return union_name; - - String type_name = "Struct(" + boost::algorithm::join(non_union_field_names, ", "); - /// Check if the struct contains unnamed union. - if (!union_field_names.empty()) - type_name += ", " + union_name; - type_name += ")"; - return type_name; - } - case capnp::schema::Type::Which::LIST: - return "List(" + getCapnProtoFullTypeName(type.asList().getElementType()) + ")"; - case capnp::schema::Type::Which::ENUM: - { - auto enum_schema = type.asEnum(); - String enum_name = "Enum("; - auto enumerants = enum_schema.getEnumerants(); - for (unsigned i = 0; i != enumerants.size(); ++i) - { - enum_name += String(enumerants[i].getProto().getName()) + " = " + std::to_string(enumerants[i].getOrdinal()); - if (i + 1 != enumerants.size()) - enum_name += ", "; - } - enum_name += ")"; - return enum_name; - } - default: - auto it = capnp_simple_type_names.find(type.which()); - if (it == capnp_simple_type_names.end()) - throw Exception(ErrorCodes::BAD_TYPE_OF_FIELD, "Unknown CapnProto type"); - return it->second; - } -} - -template -static bool checkEnums(const capnp::Type & capnp_type, const DataTypePtr column_type, FormatSettings::EnumComparingMode mode, UInt64 max_value, String & error_message) -{ - if (!capnp_type.isEnum()) - return false; - - auto enum_schema = capnp_type.asEnum(); - bool to_lower = mode == FormatSettings::EnumComparingMode::BY_NAMES_CASE_INSENSITIVE; - const auto * enum_type = assert_cast *>(column_type.get()); - const auto & enum_values = dynamic_cast &>(*enum_type); - - auto enumerants = enum_schema.getEnumerants(); - if (mode == FormatSettings::EnumComparingMode::BY_VALUES) - { - /// In CapnProto Enum fields are numbered sequentially starting from zero. - if (enumerants.size() > max_value) - { - error_message += "Enum from CapnProto schema contains values that is out of range for Clickhouse Enum"; - return false; - } - - auto values = enum_values.getSetOfAllValues(); - std::unordered_set capn_enum_values; - for (auto enumerant : enumerants) - capn_enum_values.insert(Type(enumerant.getOrdinal())); - auto result = values == capn_enum_values; - if (!result) - error_message += "The set of values in Enum from CapnProto schema is different from the set of values in ClickHouse Enum"; - return result; - } - - auto names = enum_values.getSetOfAllNames(to_lower); - std::unordered_set capn_enum_names; - - for (auto enumerant : enumerants) - { - String name = enumerant.getProto().getName(); - capn_enum_names.insert(to_lower ? 
boost::algorithm::to_lower_copy(name) : name); - } - - auto result = names == capn_enum_names; - if (!result) - error_message += "The set of names in Enum from CapnProto schema is different from the set of names in ClickHouse Enum"; - return result; -} - -static bool checkCapnProtoType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message, const String & column_name); - -static bool checkNullableType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message, const String & column_name) -{ - if (!capnp_type.isStruct()) - return false; - - /// Check that struct is a named union of type VOID and one arbitrary type. - auto struct_schema = capnp_type.asStruct(); - if (!checkIfStructIsNamedUnion(struct_schema)) - return false; - - auto union_fields = struct_schema.getUnionFields(); - if (union_fields.size() != 2) - return false; - - auto first = union_fields[0]; - auto second = union_fields[1]; - - auto nested_type = assert_cast(data_type.get())->getNestedType(); - if (first.getType().isVoid()) - return checkCapnProtoType(second.getType(), nested_type, mode, error_message, column_name); - if (second.getType().isVoid()) - return checkCapnProtoType(first.getType(), nested_type, mode, error_message, column_name); - return false; -} - -static bool checkTupleType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message) -{ - if (!capnp_type.isStruct()) - return false; - auto struct_schema = capnp_type.asStruct(); - - if (checkIfStructIsNamedUnion(struct_schema)) - return false; - - if (checkIfStructContainsUnnamedUnion(struct_schema)) - { - error_message += "CapnProto struct contains unnamed union"; - return false; - } - - const auto * tuple_data_type = assert_cast(data_type.get()); - auto nested_types = tuple_data_type->getElements(); - if (nested_types.size() != struct_schema.getFields().size()) - { - error_message += "Tuple and Struct types have different sizes"; - return false; - } - - bool have_explicit_names = tuple_data_type->haveExplicitNames(); - const auto & nested_names = tuple_data_type->getElementNames(); - for (uint32_t i = 0; i != nested_names.size(); ++i) - { - if (have_explicit_names) - { - KJ_IF_MAYBE (field, struct_schema.findFieldByName(nested_names[i])) - { - if (!checkCapnProtoType(field->getType(), nested_types[tuple_data_type->getPositionByName(nested_names[i])], mode, error_message, nested_names[i])) - return false; - } - else - { - error_message += "CapnProto struct doesn't contain a field with name " + nested_names[i]; - return false; - } - } - else if (!checkCapnProtoType(struct_schema.getFields()[i].getType(), nested_types[tuple_data_type->getPositionByName(nested_names[i])], mode, error_message, nested_names[i])) - return false; - } - - return true; -} - -static bool checkArrayType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message, const String & column_name) -{ - if (!capnp_type.isList()) - return false; - auto list_schema = capnp_type.asList(); - auto nested_type = assert_cast(data_type.get())->getNestedType(); - - auto [field_name, nested_name] = splitCapnProtoFieldName(column_name); - if (!nested_name.empty() && list_schema.getElementType().isStruct()) - { - auto struct_schema = list_schema.getElementType().asStruct(); - KJ_IF_MAYBE(field, struct_schema.findFieldByName(nested_name)) - 
return checkCapnProtoType(field->getType(), nested_type, mode, error_message, nested_name); - - error_message += "Element type of List {} doesn't contain field with name " + nested_name; - return false; - } - - return checkCapnProtoType(list_schema.getElementType(), nested_type, mode, error_message, column_name); -} - -static bool checkMapType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message) -{ - /// We output/input Map type as follow CapnProto schema - /// - /// struct Map { - /// struct Entry { - /// key @0: Key; - /// value @1: Value; - /// } - /// entries @0 :List(Entry); - /// } - - if (!capnp_type.isStruct()) - return false; - auto struct_schema = capnp_type.asStruct(); - - if (checkIfStructContainsUnnamedUnion(struct_schema)) - { - error_message += "CapnProto struct contains unnamed union"; - return false; - } - - if (struct_schema.getFields().size() != 1) - { - error_message += "CapnProto struct that represents Map type can contain only one field"; - return false; - } - - const auto & field_type = struct_schema.getFields()[0].getType(); - if (!field_type.isList()) - { - error_message += "Field of CapnProto struct that represents Map is not a list"; - return false; - } - - auto list_element_type = field_type.asList().getElementType(); - if (!list_element_type.isStruct()) - { - error_message += "Field of CapnProto struct that represents Map is not a list of structs"; - return false; - } - - auto key_value_struct = list_element_type.asStruct(); - if (checkIfStructContainsUnnamedUnion(key_value_struct)) - { - error_message += "CapnProto struct contains unnamed union"; - return false; - } - - if (key_value_struct.getFields().size() != 2) - { - error_message += "Key-value structure for Map struct should have exactly 2 fields"; - return false; - } - - const auto & map_type = assert_cast(*data_type); - DataTypes types = {map_type.getKeyType(), map_type.getValueType()}; - Names names = {"key", "value"}; - - for (size_t i = 0; i != types.size(); ++i) - { - KJ_IF_MAYBE(field, key_value_struct.findFieldByName(names[i])) - { - if (!checkCapnProtoType(field->getType(), types[i], mode, error_message, names[i])) - return false; - } - else - { - error_message += R"(Key-value structure for Map struct should have exactly 2 fields with names "key" and "value")"; - return false; - } - } - - return true; -} - -static bool isCapnInteger(const capnp::Type & capnp_type) -{ - return capnp_type.isInt8() || capnp_type.isUInt8() || capnp_type.isInt16() || capnp_type.isUInt16() || capnp_type.isInt32() - || capnp_type.isUInt32() || capnp_type.isInt64() || capnp_type.isUInt64(); -} - -static bool checkCapnProtoType(const capnp::Type & capnp_type, const DataTypePtr & data_type, FormatSettings::EnumComparingMode mode, String & error_message, const String & column_name) -{ - switch (data_type->getTypeId()) - { - case TypeIndex::UInt8: - return capnp_type.isBool() || isCapnInteger(capnp_type); - case TypeIndex::Int8: [[fallthrough]]; - case TypeIndex::Int16: [[fallthrough]]; - case TypeIndex::UInt16: [[fallthrough]]; - case TypeIndex::Int32: [[fallthrough]]; - case TypeIndex::UInt32: [[fallthrough]]; - case TypeIndex::Int64: [[fallthrough]]; - case TypeIndex::UInt64: - /// Allow integer conversions durin input/output. 
- return isCapnInteger(capnp_type); - case TypeIndex::Date: - return capnp_type.isUInt16(); - case TypeIndex::DateTime: [[fallthrough]]; - case TypeIndex::IPv4: - return capnp_type.isUInt32(); - case TypeIndex::Date32: [[fallthrough]]; - case TypeIndex::Decimal32: - return capnp_type.isInt32() || capnp_type.isUInt32(); - case TypeIndex::DateTime64: [[fallthrough]]; - case TypeIndex::Decimal64: - return capnp_type.isInt64() || capnp_type.isUInt64(); - case TypeIndex::Float32:[[fallthrough]]; - case TypeIndex::Float64: - /// Allow converting between Float32 and isFloat64 - return capnp_type.isFloat32() || capnp_type.isFloat64(); - case TypeIndex::Enum8: - return checkEnums(capnp_type, data_type, mode, INT8_MAX, error_message); - case TypeIndex::Enum16: - return checkEnums(capnp_type, data_type, mode, INT16_MAX, error_message); - case TypeIndex::Int128: [[fallthrough]]; - case TypeIndex::UInt128: [[fallthrough]]; - case TypeIndex::Int256: [[fallthrough]]; - case TypeIndex::UInt256: [[fallthrough]]; - case TypeIndex::Decimal128: [[fallthrough]]; - case TypeIndex::Decimal256: - return capnp_type.isData(); - case TypeIndex::Tuple: - return checkTupleType(capnp_type, data_type, mode, error_message); - case TypeIndex::Nullable: - { - auto result = checkNullableType(capnp_type, data_type, mode, error_message, column_name); - if (!result) - error_message += "Nullable can be represented only as a named union of type Void and nested type"; - return result; - } - case TypeIndex::Array: - return checkArrayType(capnp_type, data_type, mode, error_message, column_name); - case TypeIndex::LowCardinality: - return checkCapnProtoType(capnp_type, assert_cast(data_type.get())->getDictionaryType(), mode, error_message, column_name); - case TypeIndex::FixedString: [[fallthrough]]; - case TypeIndex::IPv6: [[fallthrough]]; - case TypeIndex::String: - return capnp_type.isText() || capnp_type.isData(); - case TypeIndex::Map: - return checkMapType(capnp_type, data_type, mode, error_message); - default: - return false; - } -} - -capnp::DynamicValue::Reader getReaderByColumnName(const capnp::DynamicStruct::Reader & struct_reader, const String & name) -{ - auto [field_name, nested_name] = splitCapnProtoFieldName(name); - KJ_IF_MAYBE(field, struct_reader.getSchema().findFieldByName(field_name)) - { - capnp::DynamicValue::Reader field_reader; - try - { - field_reader = struct_reader.get(*field); - } - catch (const kj::Exception & e) - { - throw Exception(ErrorCodes::INCORRECT_DATA, - "Cannot extract field value from struct by provided schema, error: " - "{} Perhaps the data was generated by another schema", String(e.getDescription().cStr())); - } - - if (nested_name.empty()) - return field_reader; - - /// Support reading Nested as List of Structs. 
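// (Editor's illustrative aside, not part of the original change.) "Nested as
// List of Structs" refers to the way a flattened ClickHouse Nested column is
// assumed to map onto a CapnProto list-of-struct field. The names below are
// hypothetical and only sketch that correspondence:
//
//     ClickHouse:  n Nested(a UInt64, b String)   -- flattened into "n.a", "n.b"
//
//     CapnProto:   struct Elem { a @0 :UInt64; b @1 :Text; }
//                  struct Message { n @0 :List(Elem); }
//
// Reading the column "n.a" therefore first resolves the field "n" and then
// checks that the element struct of that list declares a field named "a".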
- if (field_reader.getType() == capnp::DynamicValue::LIST) - { - auto list_schema = field->getType().asList(); - if (!list_schema.getElementType().isStruct()) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Element type of List {} is not a struct", field_name); - - auto struct_schema = list_schema.getElementType().asStruct(); - KJ_IF_MAYBE(nested_field, struct_schema.findFieldByName(nested_name)) - return field_reader; - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Element type of List {} doesn't contain field with name \"{}\"", field_name, nested_name); - } - - if (field_reader.getType() != capnp::DynamicValue::STRUCT) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Field {} is not a struct", field_name); - - return getReaderByColumnName(field_reader.as(), nested_name); - } - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Capnproto struct doesn't contain field with name {}", field_name); -} - -std::pair getStructBuilderAndFieldByColumnName(capnp::DynamicStruct::Builder struct_builder, const String & name) -{ - auto [field_name, nested_name] = splitCapnProtoFieldName(name); - KJ_IF_MAYBE(field, struct_builder.getSchema().findFieldByName(field_name)) - { - if (nested_name.empty()) - return {struct_builder, *field}; - - auto field_builder = struct_builder.get(*field); - - /// Support reading Nested as List of Structs. - if (field_builder.getType() == capnp::DynamicValue::LIST) - { - auto list_schema = field->getType().asList(); - if (!list_schema.getElementType().isStruct()) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Element type of List {} is not a struct", field_name); - - auto struct_schema = list_schema.getElementType().asStruct(); - KJ_IF_MAYBE(nested_field, struct_schema.findFieldByName(nested_name)) - return {struct_builder, *field}; - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Element type of List {} doesn't contain field with name \"{}\"", field_name, nested_name); - } - - if (field_builder.getType() != capnp::DynamicValue::STRUCT) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Field {} is not a struct", field_name); - - return getStructBuilderAndFieldByColumnName(field_builder.as(), nested_name); - } - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Capnproto struct doesn't contain field with name {}", field_name); -} - -static std::pair getFieldByName(const capnp::StructSchema & schema, const String & name) -{ - auto [field_name, nested_name] = splitCapnProtoFieldName(name); - KJ_IF_MAYBE(field, schema.findFieldByName(field_name)) - { - if (nested_name.empty()) - return {*field, name}; - - /// Support reading Nested as List of Structs. 
- if (field->getType().isList()) - { - auto list_schema = field->getType().asList(); - if (!list_schema.getElementType().isStruct()) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Element type of List {} is not a struct", field_name); - - auto struct_schema = list_schema.getElementType().asStruct(); - KJ_IF_MAYBE(nested_field, struct_schema.findFieldByName(nested_name)) - return {*field, name}; - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Element type of List {} doesn't contain field with name \"{}\"", field_name, nested_name); - } - - if (!field->getType().isStruct()) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Field {} is not a struct", field_name); - - return getFieldByName(field->getType().asStruct(), nested_name); - } - - throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Capnproto schema doesn't contain field with name {}", field_name); -} - -void checkCapnProtoSchemaStructure(const capnp::StructSchema & schema, const Block & header, FormatSettings::EnumComparingMode mode) -{ - /// Firstly check that struct doesn't contain unnamed union, because we don't support it. - if (checkIfStructContainsUnnamedUnion(schema)) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Schema contains unnamed union that is not supported"); - auto names_and_types = header.getNamesAndTypesList(); - String additional_error_message; - for (auto & [name, type] : names_and_types) - { - auto [field, field_name] = getFieldByName(schema, name); - if (!checkCapnProtoType(field.getType(), type, mode, additional_error_message, field_name)) - { - auto e = Exception( - ErrorCodes::CAPN_PROTO_BAD_CAST, - "Cannot convert ClickHouse type {} to CapnProto type {}", - type->getName(), - getCapnProtoFullTypeName(field.getType())); - if (!additional_error_message.empty()) - e.addMessage(additional_error_message); - throw std::move(e); - } - } -} - -template -static DataTypePtr getEnumDataTypeFromEnumerants(const capnp::EnumSchema::EnumerantList & enumerants) -{ - std::vector> values; - for (auto enumerant : enumerants) - values.emplace_back(enumerant.getProto().getName(), ValueType(enumerant.getOrdinal())); - return std::make_shared>(std::move(values)); -} - -static DataTypePtr getEnumDataTypeFromEnumSchema(const capnp::EnumSchema & enum_schema) -{ - auto enumerants = enum_schema.getEnumerants(); - if (enumerants.size() < 128) - return getEnumDataTypeFromEnumerants(enumerants); - if (enumerants.size() < 32768) - return getEnumDataTypeFromEnumerants(enumerants); - - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "ClickHouse supports only 8 and 16-bit Enums"); -} - -static DataTypePtr getDataTypeFromCapnProtoType(const capnp::Type & capnp_type, bool skip_unsupported_fields) -{ - switch (capnp_type.which()) - { - case capnp::schema::Type::INT8: - return std::make_shared(); - case capnp::schema::Type::INT16: - return std::make_shared(); - case capnp::schema::Type::INT32: - return std::make_shared(); - case capnp::schema::Type::INT64: - return std::make_shared(); - case capnp::schema::Type::BOOL: [[fallthrough]]; - case capnp::schema::Type::UINT8: - return std::make_shared(); - case capnp::schema::Type::UINT16: - return std::make_shared(); - case capnp::schema::Type::UINT32: - return std::make_shared(); - case capnp::schema::Type::UINT64: - return std::make_shared(); - case capnp::schema::Type::FLOAT32: - return std::make_shared(); - case capnp::schema::Type::FLOAT64: - return std::make_shared(); - case capnp::schema::Type::DATA: [[fallthrough]]; - case capnp::schema::Type::TEXT: - return std::make_shared(); - 
case capnp::schema::Type::ENUM: - return getEnumDataTypeFromEnumSchema(capnp_type.asEnum()); - case capnp::schema::Type::LIST: - { - auto list_schema = capnp_type.asList(); - auto nested_type = getDataTypeFromCapnProtoType(list_schema.getElementType(), skip_unsupported_fields); - if (!nested_type) - return nullptr; - return std::make_shared(nested_type); - } - case capnp::schema::Type::STRUCT: - { - auto struct_schema = capnp_type.asStruct(); - - - if (struct_schema.getFields().size() == 0) - { - if (skip_unsupported_fields) - return nullptr; - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Empty messages are not supported"); - } - - /// Check if it can be Nullable. - if (checkIfStructIsNamedUnion(struct_schema)) - { - auto fields = struct_schema.getUnionFields(); - if (fields.size() != 2 || (!fields[0].getType().isVoid() && !fields[1].getType().isVoid())) - { - if (skip_unsupported_fields) - return nullptr; - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unions are not supported"); - } - auto value_type = fields[0].getType().isVoid() ? fields[1].getType() : fields[0].getType(); - if (value_type.isStruct() || value_type.isList()) - { - if (skip_unsupported_fields) - return nullptr; - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Tuples and Lists cannot be inside Nullable"); - } - - auto nested_type = getDataTypeFromCapnProtoType(value_type, skip_unsupported_fields); - if (!nested_type) - return nullptr; - return std::make_shared(nested_type); - } - - if (checkIfStructContainsUnnamedUnion(struct_schema)) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unnamed union is not supported"); - - /// Treat Struct as Tuple. - DataTypes nested_types; - Names nested_names; - for (auto field : struct_schema.getNonUnionFields()) - { - auto nested_type = getDataTypeFromCapnProtoType(field.getType(), skip_unsupported_fields); - if (!nested_type) - continue; - nested_names.push_back(field.getProto().getName()); - nested_types.push_back(nested_type); - } - if (nested_types.empty()) - return nullptr; - return std::make_shared(std::move(nested_types), std::move(nested_names)); - } - default: - { - if (skip_unsupported_fields) - return nullptr; - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unsupported CapnProtoType: {}", getCapnProtoFullTypeName(capnp_type)); - } - } -} - -NamesAndTypesList capnProtoSchemaToCHSchema(const capnp::StructSchema & schema, bool skip_unsupported_fields) -{ - if (checkIfStructContainsUnnamedUnion(schema)) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "Unnamed union is not supported"); - - NamesAndTypesList names_and_types; - for (auto field : schema.getNonUnionFields()) - { - auto name = field.getProto().getName(); - auto type = getDataTypeFromCapnProtoType(field.getType(), skip_unsupported_fields); - if (type) - names_and_types.emplace_back(name, type); - } - if (names_and_types.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Cannot convert CapnProto schema to ClickHouse table schema, all fields have unsupported types"); - - return names_and_types; -} - -} - -#endif diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index c88af650671..475d08e0fe3 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -325,16 +325,16 @@ struct FormatSettings /// For capnProto format we should determine how to /// compare ClickHouse Enum and Enum from schema. - enum class EnumComparingMode + enum class CapnProtoEnumComparingMode { BY_NAMES, // Names in enums should be the same, values can be different. 
BY_NAMES_CASE_INSENSITIVE, // Case-insensitive name comparison. BY_VALUES, // Values should be the same, names can be different. }; - struct + struct CapnProto { - EnumComparingMode enum_comparing_mode = EnumComparingMode::BY_VALUES; + CapnProtoEnumComparingMode enum_comparing_mode = CapnProtoEnumComparingMode::BY_VALUES; bool skip_fields_with_unsupported_types_in_schema_inference = false; } capn_proto; diff --git a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp index 2f84e9bde3c..e686ae86997 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp @@ -9,23 +9,6 @@ #include #include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - namespace DB { @@ -35,16 +18,14 @@ namespace ErrorCodes extern const int INCORRECT_DATA; } -CapnProtoRowInputFormat::CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params_, const FormatSchemaInfo & info, const FormatSettings & format_settings_) - : IRowInputFormat(std::move(header), in_, std::move(params_)) +CapnProtoRowInputFormat::CapnProtoRowInputFormat(ReadBuffer & in_, Block header_, Params params_, const FormatSchemaInfo & info, const FormatSettings & format_settings) + : IRowInputFormat(std::move(header_), in_, std::move(params_)) , parser(std::make_shared()) - , format_settings(format_settings_) - , column_types(getPort().getHeader().getDataTypes()) - , column_names(getPort().getHeader().getNames()) { // Parse the schema and fetch the root object - root = parser->getMessageSchema(info); - checkCapnProtoSchemaStructure(root, getPort().getHeader(), format_settings.capn_proto.enum_comparing_mode); + schema = parser->getMessageSchema(info); + const auto & header = getPort().getHeader(); + serializer = std::make_unique(header.getDataTypes(), header.getNames(), schema, format_settings.capn_proto); } kj::Array CapnProtoRowInputFormat::readMessage() @@ -82,213 +63,6 @@ kj::Array CapnProtoRowInputFormat::readMessage() return msg; } -static void insertInteger(IColumn & column, const DataTypePtr & column_type, UInt64 value) -{ - switch (column_type->getTypeId()) - { - case TypeIndex::Int8: - assert_cast(column).insertValue(value); - break; - case TypeIndex::UInt8: - assert_cast(column).insertValue(value); - break; - case TypeIndex::Int16: - assert_cast(column).insertValue(value); - break; - case TypeIndex::Date: [[fallthrough]]; - case TypeIndex::UInt16: - assert_cast(column).insertValue(value); - break; - case TypeIndex::Int32: - assert_cast(column).insertValue(static_cast(value)); - break; - case TypeIndex::DateTime: [[fallthrough]]; - case TypeIndex::UInt32: - assert_cast(column).insertValue(static_cast(value)); - break; - case TypeIndex::IPv4: - assert_cast(column).insertValue(IPv4(static_cast(value))); - break; - case TypeIndex::Int64: - assert_cast(column).insertValue(value); - break; - case TypeIndex::UInt64: - assert_cast(column).insertValue(value); - break; - case TypeIndex::DateTime64: - assert_cast &>(column).insertValue(value); - break; - case TypeIndex::Decimal32: - assert_cast &>(column).insertValue(static_cast(value)); - break; - case TypeIndex::Decimal64: - assert_cast &>(column).insertValue(value); - break; - default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Column type {} cannot be parsed from integer", column_type->getName()); - } -} - -static void insertFloat(IColumn & column, const 
DataTypePtr & column_type, Float64 value) -{ - switch (column_type->getTypeId()) - { - case TypeIndex::Float32: - assert_cast(column).insertValue(static_cast(value)); - break; - case TypeIndex::Float64: - assert_cast(column).insertValue(value); - break; - default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Column type is not a float."); - } -} - -template -static void insertData(IColumn & column, const DataTypePtr & column_type, Value value) -{ - if (column_type->haveMaximumSizeOfValue() && value.size() != column_type->getSizeOfValueInMemory()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", column_type->getName(), value.size()); - - column.insertData(reinterpret_cast(value.begin()), value.size()); -} - -template -static void insertEnum(IColumn & column, const DataTypePtr & column_type, const capnp::DynamicEnum & enum_value, FormatSettings::EnumComparingMode enum_comparing_mode) -{ - auto enumerant = *kj::_::readMaybe(enum_value.getEnumerant()); - auto enum_type = assert_cast *>(column_type.get()); - DataTypePtr nested_type = std::make_shared>(); - switch (enum_comparing_mode) - { - case FormatSettings::EnumComparingMode::BY_VALUES: - insertInteger(column, nested_type, Int64(enumerant.getOrdinal())); - return; - case FormatSettings::EnumComparingMode::BY_NAMES: - insertInteger(column, nested_type, Int64(enum_type->getValue(String(enumerant.getProto().getName())))); - return; - case FormatSettings::EnumComparingMode::BY_NAMES_CASE_INSENSITIVE: - { - /// Find the same enum name case insensitive. - String enum_name = enumerant.getProto().getName(); - for (auto & name : enum_type->getAllRegisteredNames()) - { - if (compareEnumNames(name, enum_name, enum_comparing_mode)) - { - insertInteger(column, nested_type, Int64(enum_type->getValue(name))); - break; - } - } - } - } -} - -static void insertValue(IColumn & column, const DataTypePtr & column_type, const String & column_name, const capnp::DynamicValue::Reader & value, FormatSettings::EnumComparingMode enum_comparing_mode) -{ - if (column_type->lowCardinality()) - { - auto & lc_column = assert_cast(column); - auto tmp_column = lc_column.getDictionary().getNestedColumn()->cloneEmpty(); - auto dict_type = assert_cast(column_type.get())->getDictionaryType(); - insertValue(*tmp_column, dict_type, column_name, value, enum_comparing_mode); - lc_column.insertFromFullColumn(*tmp_column, 0); - return; - } - - switch (value.getType()) - { - case capnp::DynamicValue::Type::INT: - insertInteger(column, column_type, value.as()); - break; - case capnp::DynamicValue::Type::UINT: - insertInteger(column, column_type, value.as()); - break; - case capnp::DynamicValue::Type::FLOAT: - insertFloat(column, column_type, value.as()); - break; - case capnp::DynamicValue::Type::BOOL: - insertInteger(column, column_type, UInt64(value.as())); - break; - case capnp::DynamicValue::Type::DATA: - insertData(column, column_type, value.as()); - break; - case capnp::DynamicValue::Type::TEXT: - insertData(column, column_type, value.as()); - break; - case capnp::DynamicValue::Type::ENUM: - if (column_type->getTypeId() == TypeIndex::Enum8) - insertEnum(column, column_type, value.as(), enum_comparing_mode); - else - insertEnum(column, column_type, value.as(), enum_comparing_mode); - break; - case capnp::DynamicValue::LIST: - { - auto list_value = value.as(); - auto & column_array = assert_cast(column); - auto & offsets = column_array.getOffsets(); - offsets.push_back(offsets.back() + list_value.size()); - - auto & nested_column = 
column_array.getData(); - auto nested_type = assert_cast(column_type.get())->getNestedType(); - for (const auto & nested_value : list_value) - insertValue(nested_column, nested_type, column_name, nested_value, enum_comparing_mode); - break; - } - case capnp::DynamicValue::Type::STRUCT: - { - auto struct_value = value.as(); - if (column_type->isNullable()) - { - auto & nullable_column = assert_cast(column); - auto field = *kj::_::readMaybe(struct_value.which()); - if (field.getType().isVoid()) - nullable_column.insertDefault(); - else - { - auto & nested_column = nullable_column.getNestedColumn(); - auto nested_type = assert_cast(column_type.get())->getNestedType(); - auto nested_value = struct_value.get(field); - insertValue(nested_column, nested_type, column_name, nested_value, enum_comparing_mode); - nullable_column.getNullMapData().push_back(0); - } - } - else if (isTuple(column_type)) - { - auto & tuple_column = assert_cast(column); - const auto * tuple_type = assert_cast(column_type.get()); - bool have_explicit_names = tuple_type->haveExplicitNames(); - auto struct_schema = struct_value.getSchema(); - for (uint32_t i = 0; i != tuple_column.tupleSize(); ++i) - insertValue( - tuple_column.getColumn(i), - tuple_type->getElements()[i], - tuple_type->getElementNames()[i], - struct_value.get(have_explicit_names ? struct_schema.getFieldByName(tuple_type->getElementNames()[i]) : struct_schema.getFields()[i]), - enum_comparing_mode); - } - else if (isMap(column_type)) - { - const auto & map_type = assert_cast(*column_type); - DataTypes key_value_types = {map_type.getKeyType(), map_type.getValueType()}; - Names key_value_names = {"key", "value"}; - auto entries_type = std::make_shared(std::make_shared(key_value_types, key_value_names)); - auto & entries_column = assert_cast(column).getNestedColumn(); - auto entries_field = struct_value.getSchema().getFields()[0]; - insertValue(entries_column, entries_type, column_name, struct_value.get(entries_field), enum_comparing_mode); - } - else - { - /// It can be nested column from Nested type. 
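// (Editor's sketch, assuming the behaviour of splitCapnProtoFieldName rather
// than quoting it.) A flattened Nested column name such as "nested.field1" is
// assumed to be split on the first dot into the struct field name ("nested")
// and the remainder ("field1"), roughly like the self-contained helper below.
#include <string>
#include <string_view>
#include <utility>

static std::pair<std::string, std::string> splitOnFirstDotSketch(std::string_view name)
{
    auto pos = name.find('.');
    if (pos == std::string_view::npos)
        return {std::string(name), std::string()};   // plain column, no nested part
    return {std::string(name.substr(0, pos)), std::string(name.substr(pos + 1))};
}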
- auto [field_name, nested_name] = splitCapnProtoFieldName(column_name); - insertValue(column, column_type, nested_name, struct_value.get(nested_name), enum_comparing_mode); - } - break; - } - default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected CapnProto value type."); - } -} - bool CapnProtoRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &) { if (in->eof()) @@ -298,12 +72,8 @@ bool CapnProtoRowInputFormat::readRow(MutableColumns & columns, RowReadExtension { auto array = readMessage(); capnp::FlatArrayMessageReader msg(array); - auto root_reader = msg.getRoot(root); - for (size_t i = 0; i != columns.size(); ++i) - { - auto value = getReaderByColumnName(root_reader, column_names[i]); - insertValue(*columns[i], column_types[i], column_names[i], value, format_settings.capn_proto.enum_comparing_mode); - } + auto root_reader = msg.getRoot(schema); + serializer->readRow(columns, root_reader); } catch (const kj::Exception & e) { @@ -343,7 +113,14 @@ void registerInputFormatCapnProto(FormatFactory & factory) factory.markFormatSupportsSubsetOfColumns("CapnProto"); factory.registerFileExtension("capnp", "CapnProto"); factory.registerAdditionalInfoForSchemaCacheGetter( - "CapnProto", [](const FormatSettings & settings) { return fmt::format("format_schema={}", settings.schema.format_schema); }); + "CapnProto", + [](const FormatSettings & settings) + { + return fmt::format( + "format_schema={}, skip_fields_with_unsupported_types_in_schema_inference={}", + settings.schema.format_schema, + settings.capn_proto.skip_fields_with_unsupported_types_in_schema_inference); + }); } void registerCapnProtoSchemaReader(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h index cf23f22b643..06e94da123f 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h +++ b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h @@ -4,7 +4,8 @@ #if USE_CAPNP #include -#include +#include +#include #include #include @@ -33,10 +34,8 @@ private: kj::Array readMessage(); std::shared_ptr parser; - capnp::StructSchema root; - const FormatSettings format_settings; - DataTypes column_types; - Names column_names; + capnp::StructSchema schema; + std::unique_ptr serializer; }; class CapnProtoSchemaReader : public IExternalSchemaReader diff --git a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp index 0225680b396..7dd18be27f4 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp @@ -1,28 +1,13 @@ #include #if USE_CAPNP -#include +#include #include +#include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - namespace DB { @@ -45,252 +30,25 @@ CapnProtoRowOutputFormat::CapnProtoRowOutputFormat( WriteBuffer & out_, const Block & header_, const FormatSchemaInfo & info, - const FormatSettings & format_settings_) - : IRowOutputFormat(header_, out_), column_names(header_.getNames()), column_types(header_.getDataTypes()), output_stream(std::make_unique(out_)), format_settings(format_settings_) + const FormatSettings & format_settings) + : IRowOutputFormat(header_, out_) + , column_names(header_.getNames()) + , column_types(header_.getDataTypes()) + , output_stream(std::make_unique(out_)) { schema = schema_parser.getMessageSchema(info); - 
checkCapnProtoSchemaStructure(schema, getPort(PortKind::Main).getHeader(), format_settings.capn_proto.enum_comparing_mode); -} - -template -static capnp::DynamicEnum getDynamicEnum( - const ColumnPtr & column, - const DataTypePtr & data_type, - size_t row_num, - const capnp::EnumSchema & enum_schema, - FormatSettings::EnumComparingMode mode) -{ - const auto * enum_data_type = assert_cast *>(data_type.get()); - EnumValue enum_value = column->getInt(row_num); - if (mode == FormatSettings::EnumComparingMode::BY_VALUES) - return capnp::DynamicEnum(enum_schema, enum_value); - - auto enum_name = enum_data_type->getNameForValue(enum_value); - for (const auto enumerant : enum_schema.getEnumerants()) - { - if (compareEnumNames(String(enum_name), enumerant.getProto().getName(), mode)) - return capnp::DynamicEnum(enumerant); - } - - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot convert CLickHouse Enum value to CapnProto Enum"); -} - -static capnp::DynamicValue::Builder initStructFieldBuilder(const ColumnPtr & column, size_t row_num, capnp::DynamicStruct::Builder & struct_builder, capnp::StructSchema::Field field) -{ - if (const auto * array_column = checkAndGetColumn(*column)) - { - size_t size = array_column->getOffsets()[row_num] - array_column->getOffsets()[row_num - 1]; - return struct_builder.init(field, static_cast(size)); - } - - if (field.getType().isStruct()) - return struct_builder.init(field); - - return struct_builder.get(field); -} - -static std::optional convertToDynamicValue( - const ColumnPtr & column, - const DataTypePtr & data_type, - size_t row_num, - const String & column_name, - capnp::DynamicValue::Builder builder, - FormatSettings::EnumComparingMode enum_comparing_mode, - std::vector> & temporary_text_data_storage) -{ - /// Here we don't do any types validation, because we did it in CapnProtoRowOutputFormat constructor. - - if (data_type->lowCardinality()) - { - const auto * lc_column = assert_cast(column.get()); - const auto & dict_type = assert_cast(data_type.get())->getDictionaryType(); - size_t index = lc_column->getIndexAt(row_num); - return convertToDynamicValue(lc_column->getDictionary().getNestedColumn(), dict_type, index, column_name, builder, enum_comparing_mode, temporary_text_data_storage); - } - - switch (builder.getType()) - { - case capnp::DynamicValue::Type::INT: - return capnp::DynamicValue::Reader(column->getInt(row_num)); - case capnp::DynamicValue::Type::UINT: - { - /// IPv4 column doesn't support getUInt method. 
- if (isIPv4(data_type)) - return capnp::DynamicValue::Reader(assert_cast(column.get())->getElement(row_num)); - return capnp::DynamicValue::Reader(column->getUInt(row_num)); - } - case capnp::DynamicValue::Type::BOOL: - return capnp::DynamicValue::Reader(column->getBool(row_num)); - case capnp::DynamicValue::Type::FLOAT: - return capnp::DynamicValue::Reader(column->getFloat64(row_num)); - case capnp::DynamicValue::Type::ENUM: - { - auto enum_schema = builder.as().getSchema(); - if (data_type->getTypeId() == TypeIndex::Enum8) - return capnp::DynamicValue::Reader( - getDynamicEnum(column, data_type, row_num, enum_schema, enum_comparing_mode)); - return capnp::DynamicValue::Reader( - getDynamicEnum(column, data_type, row_num, enum_schema, enum_comparing_mode)); - } - case capnp::DynamicValue::Type::DATA: - { - auto data = column->getDataAt(row_num); - return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); - } - case capnp::DynamicValue::Type::TEXT: - { - /// In TEXT type data should be null-terminated, but ClickHouse String data could not be. - /// To make data null-terminated we should copy it to temporary String object, but - /// capnp::Text::Reader works only with pointer to the data and it's size, so we should - /// guarantee that new String object life time is longer than capnp::Text::Reader life time. - /// To do this we store new String object in a temporary storage, passed in this function - /// by reference. We use unique_ptr instead of just String to avoid pointers - /// invalidation on vector reallocation. - temporary_text_data_storage.push_back(std::make_unique(column->getDataAt(row_num))); - auto & data = temporary_text_data_storage.back(); - return capnp::DynamicValue::Reader(capnp::Text::Reader(data->data(), data->size())); - } - case capnp::DynamicValue::Type::STRUCT: - { - auto struct_builder = builder.as(); - auto nested_struct_schema = struct_builder.getSchema(); - /// Struct can represent Tuple, Nullable (named union with two fields) or single column when it contains one nested column. - if (data_type->isNullable()) - { - const auto * nullable_type = assert_cast(data_type.get()); - const auto * nullable_column = assert_cast(column.get()); - auto fields = nested_struct_schema.getUnionFields(); - if (nullable_column->isNullAt(row_num)) - { - auto null_field = fields[0].getType().isVoid() ? fields[0] : fields[1]; - struct_builder.set(null_field, capnp::Void()); - } - else - { - auto value_field = fields[0].getType().isVoid() ? fields[1] : fields[0]; - struct_builder.clear(value_field); - const auto & nested_column = nullable_column->getNestedColumnPtr(); - auto value_builder = initStructFieldBuilder(nested_column, row_num, struct_builder, value_field); - auto value = convertToDynamicValue(nested_column, nullable_type->getNestedType(), row_num, column_name, value_builder, enum_comparing_mode, temporary_text_data_storage); - if (value) - struct_builder.set(value_field, *value); - } - } - else if (isTuple(data_type)) - { - const auto * tuple_data_type = assert_cast(data_type.get()); - const auto & nested_types = tuple_data_type->getElements(); - const auto & nested_names = tuple_data_type->getElementNames(); - const auto & nested_columns = assert_cast(column.get())->getColumns(); - bool have_explicit_names = tuple_data_type->haveExplicitNames(); - for (uint32_t i = 0; i != nested_names.size(); ++i) - { - capnp::StructSchema::Field nested_field = have_explicit_names ? 
nested_struct_schema.getFieldByName(nested_names[i]) : nested_struct_schema.getFields()[i]; - auto field_builder = initStructFieldBuilder(nested_columns[i], row_num, struct_builder, nested_field); - auto value = convertToDynamicValue(nested_columns[i], nested_types[i], row_num, nested_names[i], field_builder, enum_comparing_mode, temporary_text_data_storage); - if (value) - struct_builder.set(nested_field, *value); - } - } - else if (isMap(data_type)) - { - /// We output Map type as follow CapnProto schema - /// - /// struct Map { - /// struct Entry { - /// key @0: Key; - /// value @1: Value; - /// } - /// entries @0 :List(Entry); - /// } - /// - /// And we don't need to check that struct have this form here because we checked it before. - const auto & map_type = assert_cast(*data_type); - DataTypes key_value_types = {map_type.getKeyType(), map_type.getValueType()}; - Names key_value_names = {"key", "value"}; - auto entries_type = std::make_shared(std::make_shared(key_value_types, key_value_names)); - - /// Nested column in Map is actually Array(Tuple), so we can output it according to "entries" field schema. - const auto & entries_column = assert_cast(column.get())->getNestedColumnPtr(); - - auto entries_field = nested_struct_schema.getFields()[0]; - auto field_builder = initStructFieldBuilder(entries_column, row_num, struct_builder, entries_field); - auto entries_value = convertToDynamicValue(entries_column, entries_type, row_num, column_name, field_builder, enum_comparing_mode, temporary_text_data_storage); - if (entries_value) - struct_builder.set(entries_field, *entries_value); - } - else - { - /// It can be nested column from Nested type. - auto [field_name, nested_name] = splitCapnProtoFieldName(column_name); - auto nested_field = nested_struct_schema.getFieldByName(nested_name); - auto field_builder = initStructFieldBuilder(column, row_num, struct_builder, nested_field); - auto value = convertToDynamicValue(column, data_type, row_num, nested_name, field_builder, enum_comparing_mode, temporary_text_data_storage); - if (value) - struct_builder.set(nested_field, *value); - } - return std::nullopt; - } - case capnp::DynamicValue::Type::LIST: - { - auto list_builder = builder.as(); - const auto * array_column = assert_cast(column.get()); - const auto & nested_column = array_column->getDataPtr(); - const auto & nested_type = assert_cast(data_type.get())->getNestedType(); - const auto & offsets = array_column->getOffsets(); - auto offset = offsets[row_num - 1]; - size_t size = offsets[row_num] - offset; - - const auto * nested_array_column = checkAndGetColumn(*nested_column); - for (unsigned i = 0; i != static_cast(size); ++i) - { - capnp::DynamicValue::Builder value_builder; - /// For nested arrays we need to initialize nested list builder. 
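// (Editor's sketch of the offsets arithmetic relied on here; names are
// illustrative, not taken from the patch.) A ClickHouse Array column keeps a
// flat nested column plus cumulative offsets, so the slice belonging to a row,
// and hence the size passed to init() for a CapnProto list builder, is
// recovered as the half-open range [offsets[row - 1], offsets[row]).
#include <cstddef>
#include <utility>
#include <vector>

static std::pair<size_t, size_t> rowRangeSketch(const std::vector<size_t> & offsets, size_t row)
{
    const size_t begin = row == 0 ? 0 : offsets[row - 1];   // offsets[-1] is treated as 0
    const size_t end = offsets[row];
    return {begin, end};                                     // the row's array size is end - begin
}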
- if (nested_array_column) - { - const auto & nested_offset = nested_array_column->getOffsets(); - size_t nested_array_size = nested_offset[offset + i] - nested_offset[offset + i - 1]; - value_builder = list_builder.init(i, static_cast(nested_array_size)); - } - else - value_builder = list_builder[i]; - - auto value = convertToDynamicValue(nested_column, nested_type, offset + i, column_name, value_builder, enum_comparing_mode, temporary_text_data_storage); - if (value) - list_builder.set(i, *value); - } - return std::nullopt; - } - default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected CapnProto type."); - } + const auto & header = getPort(PortKind::Main).getHeader(); + serializer = std::make_unique(header.getDataTypes(), header.getNames(), schema, format_settings.capn_proto); + capnp::MallocMessageBuilder message; } void CapnProtoRowOutputFormat::write(const Columns & columns, size_t row_num) { capnp::MallocMessageBuilder message; - /// Temporary storage for data that will be outputted in fields with CapnProto type TEXT. - /// See comment in convertToDynamicValue() for more details. - std::vector> temporary_text_data_storage; capnp::DynamicStruct::Builder root = message.initRoot(schema); - - /// Some columns can share same field builder. For example when we have - /// column with Nested type that was flattened into several columns. - std::unordered_map field_builders; - for (size_t i = 0; i != columns.size(); ++i) - { - auto [struct_builder, field] = getStructBuilderAndFieldByColumnName(root, column_names[i]); - if (!field_builders.contains(field.getIndex())) - { - auto field_builder = initStructFieldBuilder(columns[i], row_num, struct_builder, field); - field_builders[field.getIndex()] = field_builder; - } - auto value = convertToDynamicValue(columns[i], column_types[i], row_num, column_names[i], field_builders[field.getIndex()], format_settings.capn_proto.enum_comparing_mode, temporary_text_data_storage); - if (value) - struct_builder.set(field, *value); - } - + serializer->writeRow(columns, std::move(root), row_num); capnp::writeMessage(*output_stream, message); + } void registerOutputFormatCapnProto(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h index 5cc7099d4c7..dd9dcc6b340 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h +++ b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h @@ -3,15 +3,17 @@ #include "config.h" #if USE_CAPNP -#include -#include -#include -#include -#include -#include +# include +# include +# include +# include +# include +# include +# include namespace DB { + class CapnProtoOutputStream : public kj::OutputStream { public: @@ -43,8 +45,9 @@ private: DataTypes column_types; capnp::StructSchema schema; std::unique_ptr output_stream; - const FormatSettings format_settings; CapnProtoSchemaParser schema_parser; + std::unique_ptr serializer; + }; } diff --git a/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp b/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp index 9777f2361a2..6098923a195 100644 --- a/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp +++ b/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp @@ -88,7 +88,14 @@ void registerInputFormatProtobufList(FormatFactory & factory) }); factory.markFormatSupportsSubsetOfColumns("ProtobufList"); factory.registerAdditionalInfoForSchemaCacheGetter( - "ProtobufList", [](const FormatSettings & settings) { return fmt::format("format_schema={}", 
settings.schema.format_schema); }); + "ProtobufList", + [](const FormatSettings & settings) + { + return fmt::format( + "format_schema={}, skip_fields_with_unsupported_types_in_schema_inference={}", + settings.schema.format_schema, + settings.protobuf.skip_fields_with_unsupported_types_in_schema_inference); + }); } void registerProtobufListSchemaReader(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp b/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp index ee60501dba5..126f3673571 100644 --- a/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp @@ -128,7 +128,14 @@ void registerProtobufSchemaReader(FormatFactory & factory) for (const auto & name : {"Protobuf", "ProtobufSingle"}) factory.registerAdditionalInfoForSchemaCacheGetter( - name, [](const FormatSettings & settings) { return fmt::format("format_schema={}", settings.schema.format_schema); }); + name, + [](const FormatSettings & settings) + { + return fmt::format( + "format_schema={}, skip_fields_with_unsupported_types_in_schema_inference={}", + settings.schema.format_schema, + settings.protobuf.skip_fields_with_unsupported_types_in_schema_inference); + }); } } diff --git a/tests/queries/0_stateless/02030_capnp_format.sh b/tests/queries/0_stateless/02030_capnp_format.sh index c15d6fe442e..625104fb590 100755 --- a/tests/queries/0_stateless/02030_capnp_format.sh +++ b/tests/queries/0_stateless/02030_capnp_format.sh @@ -96,8 +96,8 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a_b U $CLICKHOUSE_CLIENT --query="SELECT number AS a_b, number + 1 AS a_c_d, number + 2 AS a_c_e_f FROM numbers(5) FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nested_tuples:Message'" > $CAPN_PROTO_FILE $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a Tuple(b UInt64, c Tuple(d UInt64, e Tuple(f UInt64)))') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_nested_tuples:Message'" -$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a Tuple(bb UInt64, c Tuple(d UInt64, e Tuple(f UInt64)))') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_nested_tuples:Message'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; -$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a Tuple(b UInt64, c Tuple(d UInt64, e Tuple(ff UInt64)))') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_nested_tuples:Message'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; +$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a Tuple(bb UInt64, c Tuple(d UInt64, e Tuple(f UInt64)))') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_nested_tuples:Message'" 2>&1 | grep -F -q "THERE_IS_NO_COLUMN" && echo 'OK' || echo 'FAIL'; +$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'a Tuple(b UInt64, c Tuple(d UInt64, e Tuple(ff UInt64)))') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_nested_tuples:Message'" 2>&1 | grep -F -q "THERE_IS_NO_COLUMN" && echo 'OK' || echo 'FAIL'; $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'string String') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_simple_types:Message'" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL'; diff --git a/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.reference 
b/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.reference new file mode 100644 index 00000000000..f34c857e2f6 --- /dev/null +++ b/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.reference @@ -0,0 +1 @@ +42 (42,42) diff --git a/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.sh b/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.sh new file mode 100755 index 00000000000..c3835948437 --- /dev/null +++ b/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel, no-replicated-database + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +SCHEMADIR=$CURDIR/format_schemas +$CLICKHOUSE_LOCAL -q "select 42 as Field1, (42, 42)::Tuple(Field1 UInt32, Field2 UInt32) as Nested format CapnProto settings format_schema='$SCHEMADIR/02735_case_insensitive_names_matching:Message'" | $CLICKHOUSE_LOCAL --input-format CapnProto --structure "Field1 UInt32, Nested Tuple(Field1 UInt32, Field2 UInt32)" -q "select * from table" --format_schema="$SCHEMADIR/02735_case_insensitive_names_matching:Message" + diff --git a/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.reference b/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.reference new file mode 100644 index 00000000000..b6e6d485929 --- /dev/null +++ b/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.reference @@ -0,0 +1,3 @@ +(42,(42,42),[(42,42),(24,24)]) [(42,(42,42),[(42,42),(24,24)]),(24,(24,24),[(24,24),(42,42)])] +42 42 42 +[42,24] [42,24] [42,24] [[42,24],[24,42]] [[42,24],[24,42]] diff --git a/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.sh b/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.sh new file mode 100755 index 00000000000..c669be2ed33 --- /dev/null +++ b/tests/queries/0_stateless/02736_reading_and_writing_structure_fields.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel, no-replicated-database + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +SCHEMADIR=$CURDIR/format_schemas +DATA_FILE=02736_$CLICKHOUSE_TEST_UNIQUE_NAME.bin + +$CLICKHOUSE_LOCAL -q "select tuple(42, tuple(42, 42), [tuple(42, 42), tuple(24, 24)]) as nested, [tuple(42, tuple(42, 42), [tuple(42, 42), tuple(24, 24)]), tuple(24, tuple(24, 24), [tuple(24, 24), tuple(42, 42)])] as nestedList format CapnProto settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" > $DATA_FILE + +$CLICKHOUSE_LOCAL -q "select * from file($DATA_FILE, CapnProto) settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" + +$CLICKHOUSE_LOCAL -q "select 42 as nested_field1, 42 as nested_nested_field1, 42 as nested_nested_field2 format CapnProto settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" > $DATA_FILE + +$CLICKHOUSE_LOCAL -q "select * from file($DATA_FILE, CapnProto, 'nested_field1 UInt32, nested_nested_field1 UInt32, nested_nested_field2 UInt32') settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" + +$CLICKHOUSE_LOCAL -q "select [42, 24] as nestedList_field1, [42, 24] as nestedList_nested_field1, [42, 24] as nestedList_nested_field2, [[42, 24], [24, 42]] as nestedList_nestedList_field1, [[42, 24], [24, 42]] as nestedList_nestedList_field2 format CapnProto settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" > $DATA_FILE + +$CLICKHOUSE_LOCAL -q "select * from file($DATA_FILE, CapnProto, 'nestedList_field1 Array(UInt32), nestedList_nested_field1 Array(UInt32), nestedList_nested_field2 Array(UInt32), nestedList_nestedList_field1 Array(Array(UInt32)), nestedList_nestedList_field2 Array(Array(UInt32))') settings format_schema='$SCHEMADIR/02736_nested_structures:Message'" + +rm $DATA_FILE + diff --git a/tests/queries/0_stateless/format_schemas/02735_case_insensitive_names_matching.capnp b/tests/queries/0_stateless/format_schemas/02735_case_insensitive_names_matching.capnp new file mode 100644 index 00000000000..6b12aab081a --- /dev/null +++ b/tests/queries/0_stateless/format_schemas/02735_case_insensitive_names_matching.capnp @@ -0,0 +1,13 @@ +@0x9ef128e10a8010b8; + +struct Nested +{ + field1 @0 : UInt32; + field2 @1 : UInt32; +} + +struct Message +{ + field1 @0 : UInt32; + nested @1 : Nested; +} diff --git a/tests/queries/0_stateless/format_schemas/02736_nested_structures.capnp b/tests/queries/0_stateless/format_schemas/02736_nested_structures.capnp new file mode 100644 index 00000000000..a03eb27f383 --- /dev/null +++ b/tests/queries/0_stateless/format_schemas/02736_nested_structures.capnp @@ -0,0 +1,21 @@ +@0x9ef128e10a8010b8; + +struct Nested2 +{ + field1 @0 : UInt32; + field2 @1 : UInt32; +} + +struct Nested +{ + field1 @0 : UInt32; + nested @1 : Nested2; + nestedList @2 : List(Nested2); +} + +struct Message +{ + nested @0 : Nested; + nestedList @1 : List(Nested); +} + From c2eada7ba7bd385281c140dbf225be7eee4f1ff2 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 10 May 2023 21:07:56 +0200 Subject: [PATCH 126/722] Fix style --- src/Formats/CapnProtoSerializer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index e0c8ae2a79a..c31623286d0 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -26,7 +26,7 @@ namespace DB namespace ErrorCodes { extern const int THERE_IS_NO_COLUMN; - extern const int BAD_TYPE_OF_FIELD; + extern const int LOGICAL_ERROR; extern const int CAPN_PROTO_BAD_CAST; extern const 
int INCORRECT_DATA; extern const int ILLEGAL_COLUMN; @@ -293,7 +293,7 @@ namespace return capnp::DynamicValue::Reader(capnp::DynamicEnum(enumerant)); } - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot convert CLickHouse Enum value to CapnProto Enum"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot convert ClickHouse Enum value to CapnProto Enum"); } void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override From cc7cfa050f5723fa4bfeca994a04784732950968 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 10 May 2023 21:08:12 +0200 Subject: [PATCH 127/722] Fix style --- src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp index e686ae86997..c056ee2b4a4 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp @@ -14,7 +14,6 @@ namespace DB namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int INCORRECT_DATA; } From 1347dc4ede100dbfc7240fa7ead23b13c924d202 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 10 May 2023 21:08:31 +0200 Subject: [PATCH 128/722] Fix style --- src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp index 7dd18be27f4..66a7160dd89 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp @@ -11,12 +11,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - - CapnProtoOutputStream::CapnProtoOutputStream(WriteBuffer & out_) : out(out_) { } From a89a8b8d50f8ff5c05ebbcdb83f19dcac6739dbf Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 11 May 2023 12:08:50 +0000 Subject: [PATCH 129/722] Fix build --- src/Formats/CapnProtoSerializer.cpp | 6 ++++++ src/Formats/CapnProtoSerializer.h | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index c31623286d0..00ccfc7717d 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -1,3 +1,7 @@ +#include "config.h" + +#if USE_CAPNP + #include #include #include @@ -1216,3 +1220,5 @@ void CapnProtoSerializer::readRow(MutableColumns & columns, capnp::DynamicStruct CapnProtoSerializer::~CapnProtoSerializer() = default; } + +#endif diff --git a/src/Formats/CapnProtoSerializer.h b/src/Formats/CapnProtoSerializer.h index efae797875b..692f5e5301f 100644 --- a/src/Formats/CapnProtoSerializer.h +++ b/src/Formats/CapnProtoSerializer.h @@ -1,5 +1,7 @@ #pragma once +#if USE_CAPNP + #include #include @@ -23,3 +25,5 @@ private: }; } + +#endif From 5f1ca61d090b70ecee8f70d8e3656195e13f0ee9 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 12 May 2023 16:12:01 +0200 Subject: [PATCH 130/722] Fix special builds --- src/Formats/CapnProtoSerializer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index 00ccfc7717d..091e70da656 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -1007,7 +1007,7 @@ namespace 
catch (Exception & e) { e.addMessage("(while converting column {})", column_name); - throw e; + throw std::move(e); } } @@ -1015,7 +1015,7 @@ namespace { assert(builder); auto & struct_builder = assert_cast(*builder); - if (auto tuple_column = typeid_cast(column.get())) + if (auto * tuple_column = typeid_cast(column.get())) writeRow(tuple_column->getColumnsCopy(), struct_builder, row_num); else writeRow(Columns{column}, struct_builder, row_num); From 94ef08977ae88a48f95343a7b27abed6471efbe6 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 12 May 2023 18:53:51 +0200 Subject: [PATCH 131/722] Fix special build --- src/Formats/CapnProtoSchema.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Formats/CapnProtoSchema.cpp b/src/Formats/CapnProtoSchema.cpp index 22518d5061a..f9ab88d39ed 100644 --- a/src/Formats/CapnProtoSchema.cpp +++ b/src/Formats/CapnProtoSchema.cpp @@ -151,7 +151,7 @@ namespace { template - static DataTypePtr getEnumDataTypeFromEnumerants(const capnp::EnumSchema::EnumerantList & enumerants) + DataTypePtr getEnumDataTypeFromEnumerants(const capnp::EnumSchema::EnumerantList & enumerants) { std::vector> values; for (auto enumerant : enumerants) @@ -159,7 +159,7 @@ namespace return std::make_shared>(std::move(values)); } - static DataTypePtr getEnumDataTypeFromEnumSchema(const capnp::EnumSchema & enum_schema) + DataTypePtr getEnumDataTypeFromEnumSchema(const capnp::EnumSchema & enum_schema) { auto enumerants = enum_schema.getEnumerants(); if (enumerants.size() < 128) @@ -170,7 +170,7 @@ namespace throw Exception(ErrorCodes::CAPN_PROTO_BAD_TYPE, "ClickHouse supports only 8 and 16-bit Enums"); } - static DataTypePtr getDataTypeFromCapnProtoType(const capnp::Type & capnp_type, bool skip_unsupported_fields) + DataTypePtr getDataTypeFromCapnProtoType(const capnp::Type & capnp_type, bool skip_unsupported_fields) { switch (capnp_type.which()) { From f76fc5e06682fb7931fc067bcbc38960e91dea7b Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 12 May 2023 18:54:38 +0200 Subject: [PATCH 132/722] Fix special build --- src/Formats/CapnProtoSerializer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index 091e70da656..ff3880976c7 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -158,7 +158,7 @@ namespace }; template - static std::unique_ptr createIntegerSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + std::unique_ptr createIntegerSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) { switch (capnp_type.which()) { @@ -1015,7 +1015,7 @@ namespace { assert(builder); auto & struct_builder = assert_cast(*builder); - if (auto * tuple_column = typeid_cast(column.get())) + if (const auto * tuple_column = typeid_cast(column.get())) writeRow(tuple_column->getColumnsCopy(), struct_builder, row_num); else writeRow(Columns{column}, struct_builder, row_num); From f48845fa0d56dc81a44fd6314342849325d78b0b Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Thu, 25 May 2023 12:23:35 +0000 Subject: [PATCH 133/722] Fix test once again --- .../integration/runner/compose/docker_compose_mongo_secure.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/integration/runner/compose/docker_compose_mongo_secure.yml 
b/docker/test/integration/runner/compose/docker_compose_mongo_secure.yml index f5b0ffed130..193e5d26568 100644 --- a/docker/test/integration/runner/compose/docker_compose_mongo_secure.yml +++ b/docker/test/integration/runner/compose/docker_compose_mongo_secure.yml @@ -1,7 +1,7 @@ version: '2.3' services: mongo1: - image: mongo:3.5 + image: mongo:3.6 restart: always environment: MONGO_INITDB_ROOT_USERNAME: root From ea395e9554e29f5eaa73d9fcd632f87aa4371d42 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 25 May 2023 15:24:02 +0000 Subject: [PATCH 134/722] Make better --- src/Formats/CapnProtoSerializer.cpp | 1152 ++++++++++++++++++--------- src/Formats/CapnProtoSerializer.h | 1 + 2 files changed, 757 insertions(+), 396 deletions(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index ff3880976c7..91e207a1846 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -77,7 +76,7 @@ namespace struct ListBuilder : public FieldBuilder { - explicit ListBuilder(capnp::DynamicValue::Builder builder) : impl(builder.as()) + explicit ListBuilder(capnp::DynamicValue::Builder builder, UInt32 elements_size) : impl(builder.as()), nested_builders(elements_size) { } @@ -87,10 +86,6 @@ namespace struct StructBuilder : public FieldBuilder { - explicit StructBuilder(capnp::DynamicValue::Builder builder, size_t fields_size) : impl(builder.as()), field_builders(fields_size) - { - } - explicit StructBuilder(capnp::DynamicStruct::Builder struct_builder, size_t fields_size) : impl(std::move(struct_builder)), field_builders(fields_size) { } @@ -99,136 +94,144 @@ namespace std::vector> field_builders; }; - std::unique_ptr initStructFieldBuilderIfNeeded(const ColumnPtr & column, size_t row_num, capnp::DynamicStruct::Builder & struct_builder, const capnp::StructSchema::Field & field, const capnp::Type & capnp_type, size_t nested_fields_size) - { - switch (capnp_type.which()) - { - case capnp::schema::Type::LIST: - { - const auto * array_column = assert_cast(column.get()); - size_t size = array_column->getOffsets()[row_num] - array_column->getOffsets()[row_num - 1]; - return std::make_unique(struct_builder.init(field, static_cast(size))); - } - case capnp::schema::Type::STRUCT: - { - return std::make_unique(struct_builder.init(field), nested_fields_size); - } - default: - return nullptr; - } - } - class ICapnProtoSerializer { public: - virtual std::optional writeRow(const ColumnPtr & column, FieldBuilder * builder, size_t row_num) = 0; - virtual void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) = 0; + virtual void writeRow( + const ColumnPtr & column, + std::unique_ptr & builder, + capnp::DynamicStruct::Builder & parent_struct_builder, + UInt32 slot_offset, + size_t row_num) = 0; + + virtual void writeRow( + const ColumnPtr & column, + std::unique_ptr & builder, + capnp::DynamicList::Builder & parent_list_builder, + UInt32 array_index, + size_t row_num) = 0; + + virtual void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) = 0; + + virtual void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) = 0; virtual ~ICapnProtoSerializer() = default; }; - template + template class CapnProtoIntegerSerializer : public ICapnProtoSerializer { public: - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void 
writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::INT) - return capnp::DynamicValue::Reader(column->getInt(row_num)); - if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::UINT) - return capnp::DynamicValue::Reader(column->getUInt(row_num)); - return capnp::DynamicValue::Reader(column->getBool(row_num)); + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + CapnProtoNumericType value = static_cast(assert_cast &>(*column).getElement(row_num)); + builder_impl.setDataField(slot_offset, value); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - NumericType value; - if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::INT) - value = static_cast(reader.as()); - else if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::UINT) - value = static_cast(reader.as()); - else if constexpr (capnp_dynamic_type == capnp::DynamicValue::Type::BOOL) - value = static_cast(reader.as()); + auto & builder_impl = parent_list_builder.getBuilderImpl(); + CapnProtoNumericType value = static_cast(assert_cast &>(*column).getElement(row_num)); + builder_impl.setDataElement(array_index, value); + } - if constexpr (is_bool_data_type) + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + CapnProtoNumericType value = reader_impl.getDataField(slot_offset); + if constexpr (convert_to_bool_on_read) assert_cast(column).insertValue(static_cast(value)); else - assert_cast &>(column).insertValue(value); + assert_cast &>(column).insertValue(static_cast(value)); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + CapnProtoNumericType value = reader_impl.getDataElement(array_index); + if constexpr (convert_to_bool_on_read) + assert_cast(column).insertValue(static_cast(value)); + else + assert_cast &>(column).insertValue(static_cast(value)); } }; - template + template std::unique_ptr createIntegerSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) { switch (capnp_type.which()) { - case capnp::schema::Type::INT8: [[fallthrough]]; - case capnp::schema::Type::INT16: [[fallthrough]]; - case capnp::schema::Type::INT32: [[fallthrough]]; + case capnp::schema::Type::INT8: + return std::make_unique>(); + case capnp::schema::Type::INT16: + return std::make_unique>(); + case capnp::schema::Type::INT32: + return std::make_unique>(); case capnp::schema::Type::INT64: - return std::make_unique>(); - case capnp::schema::Type::UINT8: [[fallthrough]]; - case capnp::schema::Type::UINT16: [[fallthrough]]; - case capnp::schema::Type::UINT32: [[fallthrough]]; + return std::make_unique>(); + case capnp::schema::Type::UINT8: + return std::make_unique>(); + case capnp::schema::Type::UINT16: + return std::make_unique>(); + case capnp::schema::Type::UINT32: + return std::make_unique>(); case capnp::schema::Type::UINT64: - return std::make_unique>(); + return std::make_unique>(); case capnp::schema::Type::BOOL: - return 
std::make_unique>(); + return std::make_unique>(); default: throwCannotConvert(data_type, column_name, capnp_type); } } - template - class CapnProtoBigIntegerSerializer : public ICapnProtoSerializer - { - public: - CapnProtoBigIntegerSerializer(const DataTypePtr & data_type_, const String & column_name, const capnp::Type & capnp_type) : data_type(data_type_) - { - if (!capnp_type.isData()) - throwCannotConvert(data_type, column_name, capnp_type); - } - - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override - { - auto data = column->getDataAt(row_num); - return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); - } - - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override - { - auto value = reader.as(); - if (value.size() != sizeof(NumericType)) - throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", data_type->getName(), value.size()); - - column.insertData(reinterpret_cast(value.begin()), value.size()); - } - - private: - DataTypePtr data_type; - }; - - template + template class CapnProtoFloatSerializer : public ICapnProtoSerializer { public: - CapnProtoFloatSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - if (!capnp_type.isFloat32() && !capnp_type.isFloat64()) - throwCannotConvert(data_type, column_name, capnp_type); + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + CapnProtoFloatType value = static_cast(assert_cast &>(*column).getElement(row_num)); + builder_impl.setDataField(slot_offset, value); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - return capnp::DynamicValue::Reader(column->getFloat64(row_num)); + auto & builder_impl = parent_list_builder.getBuilderImpl(); + CapnProtoFloatType value = static_cast(assert_cast &>(*column).getElement(row_num)); + builder_impl.setDataElement(array_index, value); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - assert_cast &>(column).insertValue(reader.as()); + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + CapnProtoFloatType value = reader_impl.getDataField(slot_offset); + assert_cast &>(column).insertValue(static_cast(value)); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + CapnProtoFloatType value = reader_impl.getDataElement(array_index); + assert_cast &>(column).insertValue(static_cast(value)); } }; + template + std::unique_ptr createFloatSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + switch (capnp_type.which()) + { + case capnp::schema::Type::FLOAT32: + return std::make_unique>(); + case capnp::schema::Type::FLOAT64: + return std::make_unique>(); + default: + throwCannotConvert(data_type, column_name, capnp_type); + } + } + template class CapnProtoEnumSerializer : public ICapnProtoSerializer { @@ 
-267,86 +270,90 @@ namespace } else { - auto names = enum_values.getSetOfAllNames(to_lower); - std::unordered_set capn_enum_names; - - for (auto enumerant : enumerants) - { - String name = enumerant.getProto().getName(); - capn_enum_names.insert(to_lower ? boost::algorithm::to_lower_copy(name) : name); - } - - if (names != capn_enum_names) + auto all_values = enum_values.getValues(); + if (all_values.size() != enumerants.size()) throw Exception( ErrorCodes::CAPN_PROTO_BAD_CAST, "The set of names in Enum from CapnProto schema is different from the set of names in ClickHouse Enum"); + + std::unordered_map ch_name_to_value; + for (auto & [name, value] : all_values) + ch_name_to_value[to_lower ? boost::algorithm::to_lower_copy(name) : name] = value; + + for (auto enumerant : enumerants) + { + String capnp_name = enumerant.getProto().getName(); + UInt16 capnp_value = enumerant.getOrdinal(); + auto it = ch_name_to_value.find(to_lower ? boost::algorithm::to_lower_copy(capnp_name) : capnp_name); + if (it == ch_name_to_value.end()) + throw Exception( + ErrorCodes::CAPN_PROTO_BAD_CAST, + "The set of names in Enum from CapnProto schema is different from the set of names in ClickHouse Enum"); + + ch_to_capnp_values[it->second] = capnp_value; + capnp_to_ch_values[capnp_value] = it->second; + } } } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - const auto * enum_data_type = assert_cast *>(data_type.get()); + auto & builder_impl = parent_struct_builder.getBuilderImpl(); EnumType enum_value = assert_cast &>(*column).getElement(row_num); + UInt16 capnp_value; if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) - return capnp::DynamicValue::Reader(capnp::DynamicEnum(enum_schema, enum_value)); + capnp_value = static_cast(enum_value); + else + capnp_value = ch_to_capnp_values[enum_value]; - auto enum_name = enum_data_type->getNameForValue(enum_value); - for (const auto enumerant : enum_schema.getEnumerants()) - { - if (compareEnumNames(String(enum_name), enumerant.getProto().getName(), enum_comparing_mode)) - return capnp::DynamicValue::Reader(capnp::DynamicEnum(enumerant)); - } - - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot convert ClickHouse Enum value to CapnProto Enum"); + builder_impl.setDataField(slot_offset, capnp_value); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto enum_value = reader.as(); - auto enumerant = *kj::_::readMaybe(enum_value.getEnumerant()); - auto enum_type = assert_cast *>(data_type.get()); - DataTypePtr nested_type = std::make_shared>(); - switch (enum_comparing_mode) - { - case FormatSettings::CapnProtoEnumComparingMode::BY_VALUES: - { - assert_cast &>(column).insertValue(static_cast(enumerant.getOrdinal())); - return; - } - case FormatSettings::CapnProtoEnumComparingMode::BY_NAMES: - { - auto value = enum_type->getValue(String(enumerant.getProto().getName())); - assert_cast &>(column).insertValue(value); - return; - } - case FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE: - { - /// Find the same enum name case insensitive. 
- String enum_name = enumerant.getProto().getName(); - for (auto & name : enum_type->getAllRegisteredNames()) - { - if (compareEnumNames(name, enum_name, enum_comparing_mode)) - { - assert_cast &>(column).insertValue(enum_type->getValue(name)); - break; - } - } - return; - } - } + auto & builder_impl = parent_list_builder.getBuilderImpl(); + EnumType enum_value = assert_cast &>(*column).getElement(row_num); + UInt16 capnp_value; + if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) + capnp_value = static_cast(enum_value); + else + capnp_value = ch_to_capnp_values[enum_value]; + + builder_impl.setDataElement(array_index, capnp_value); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + UInt16 capnp_value = reader_impl.getDataField(slot_offset); + EnumType value; + if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) + value = static_cast(capnp_value); + else + value = capnp_to_ch_values[capnp_value]; + + assert_cast &>(column).insertValue(value); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + UInt16 capnp_value = reader_impl.getDataElement(array_index); + EnumType value; + if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) + value = static_cast(capnp_value); + else + value = capnp_to_ch_values[capnp_value]; + + assert_cast &>(column).insertValue(value); } private: - bool compareEnumNames(const String & first, const String & second, const FormatSettings::CapnProtoEnumComparingMode mode) - { - if (mode == FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE) - return boost::algorithm::to_lower_copy(first) == boost::algorithm::to_lower_copy(second); - return first == second; - } - DataTypePtr data_type; capnp::EnumSchema enum_schema; const FormatSettings::CapnProtoEnumComparingMode enum_comparing_mode; + std::unordered_map ch_to_capnp_values; + std::unordered_map capnp_to_ch_values; }; class CapnProtoDateSerializer : public ICapnProtoSerializer @@ -358,14 +365,32 @@ namespace throwCannotConvert(data_type, column_name, capnp_type); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - return capnp::DynamicValue::Reader(column->getUInt(row_num)); + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + UInt16 value = assert_cast(*column).getElement(row_num); + builder_impl.setDataField(slot_offset, value); + } + + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override + { + auto & builder_impl = parent_list_builder.getBuilderImpl(); + UInt16 value = assert_cast(*column).getElement(row_num); + builder_impl.setDataElement(array_index, value); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - assert_cast(column).insertValue(reader.as()); + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + UInt16 value = 
reader_impl.getDataField(slot_offset); + assert_cast(column).insertValue(value); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + UInt16 value = reader_impl.getDataElement(array_index); + assert_cast(column).insertValue(value); } }; @@ -378,14 +403,32 @@ namespace throwCannotConvert(data_type, column_name, capnp_type); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - return capnp::DynamicValue::Reader(column->getInt(row_num)); + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + Int32 value = assert_cast(*column).getElement(row_num); + builder_impl.setDataField(slot_offset, value); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - assert_cast(column).insertValue(reader.as()); + auto & builder_impl = parent_list_builder.getBuilderImpl(); + Int32 value = assert_cast(*column).getElement(row_num); + builder_impl.setDataElement(array_index, value); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + Int32 value = reader_impl.getDataField(slot_offset); + assert_cast(column).insertValue(value); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + Int32 value = reader_impl.getDataElement(array_index); + assert_cast(column).insertValue(value); } }; @@ -398,14 +441,32 @@ namespace throwCannotConvert(data_type, column_name, capnp_type); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - return capnp::DynamicValue::Reader(column->getInt(row_num)); + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + UInt32 value = assert_cast(*column).getElement(row_num); + builder_impl.setDataField(slot_offset, value); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - assert_cast(column).insertValue(reader.as()); + auto & builder_impl = parent_list_builder.getBuilderImpl(); + UInt32 value = assert_cast(*column).getElement(row_num); + builder_impl.setDataElement(array_index, value); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + UInt32 value = reader_impl.getDataField(slot_offset); + assert_cast(column).insertValue(value); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + 
UInt32 value = reader_impl.getDataElement(array_index); + assert_cast(column).insertValue(value); } }; @@ -418,14 +479,32 @@ namespace throwCannotConvert(data_type, column_name, capnp_type); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - return capnp::DynamicValue::Reader(column->getInt(row_num)); + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + Int64 value = assert_cast(*column).getElement(row_num); + builder_impl.setDataField(slot_offset, value); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - assert_cast(column).insertValue(reader.as()); + auto & builder_impl = parent_list_builder.getBuilderImpl(); + Int64 value = assert_cast(*column).getElement(row_num); + builder_impl.setDataElement(array_index, value); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + Int64 value = reader_impl.getDataField(slot_offset); + assert_cast(column).insertValue(value); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + Int64 value = reader_impl.getDataElement(array_index); + assert_cast(column).insertValue(value); } }; @@ -433,6 +512,8 @@ namespace class CapnProtoDecimalSerializer : public ICapnProtoSerializer { public: + using NativeType = typename DecimalType::NativeType; + CapnProtoDecimalSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) { auto which = WhichDataType(data_type); @@ -440,37 +521,79 @@ namespace throwCannotConvert(data_type, column_name, capnp_type); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - return capnp::DynamicValue::Reader(column->getInt(row_num)); + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + DecimalType value = assert_cast &>(*column).getElement(row_num); + builder_impl.setDataField(slot_offset, value); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - assert_cast &>(column).insertValue(reader.as()); + auto & builder_impl = parent_list_builder.getBuilderImpl(); + DecimalType value = assert_cast &>(*column).getElement(row_num); + builder_impl.setDataElement(array_index, value); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + NativeType value = reader_impl.getDataField(slot_offset); + assert_cast &>(column).insertValue(value); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { 
+ const auto & reader_impl = parent_list_reader.getReaderImpl(); + NativeType value = reader_impl.getDataElement(array_index); + assert_cast &>(column).insertValue(value); } }; - template - class CapnProtoBigDecimalSerializer : public ICapnProtoSerializer + template + class CapnProtoFixedSizeRawDataSerializer : public ICapnProtoSerializer { + private: + static constexpr size_t value_size = sizeof(T); + public: - CapnProtoBigDecimalSerializer(const DataTypePtr & data_type_, const String & column_name, const capnp::Type & capnp_type) : data_type(data_type_) + CapnProtoFixedSizeRawDataSerializer(const DataTypePtr & data_type_, const String & column_name, const capnp::Type & capnp_type) : data_type(data_type_) { if (!capnp_type.isData()) throwCannotConvert(data_type, column_name, capnp_type); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { + auto & builder_impl = parent_struct_builder.getBuilderImpl(); auto data = column->getDataAt(row_num); - return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); + capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); + builder_impl.getPointerField(slot_offset).template setBlob(value); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_struct_builder, UInt32 array_index, size_t row_num) override { - auto value = reader.as(); - if (value.size() != sizeof(DecimalType)) + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + auto data = column->getDataAt(row_num); + capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); + builder_impl.getPointerElement(array_index).setBlob(value); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + capnp::Data::Reader value = reader_impl.getPointerField(slot_offset).template getBlob(nullptr, 0); + if (value.size() != value_size) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", data_type->getName(), value.size()); + + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + capnp::Data::Reader value = reader_impl.getPointerElement(array_index).getBlob(nullptr, 0); + if (value.size() != value_size) throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", data_type->getName(), value.size()); column.insertData(reinterpret_cast(value.begin()), value.size()); @@ -484,39 +607,73 @@ namespace class CapnProtoStringSerializer : public ICapnProtoSerializer { public: - CapnProtoStringSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type_) : capnp_type(capnp_type_) + CapnProtoStringSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) { if (!capnp_type.isData() && !capnp_type.isText()) throwCannotConvert(data_type, column_name, capnp_type); } - std::optional writeRow(const ColumnPtr & column, 
FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { + auto & builder_impl = parent_struct_builder.getBuilderImpl(); auto data = column->getDataAt(row_num); - - if constexpr (is_binary) - return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); - - /// For type TEXT data must be null-terminated, but in String column we always have 0 byte at the end of each value. - return capnp::DynamicValue::Reader(capnp::Text::Reader(data.data, data.size)); - } - - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override - { if constexpr (is_binary) { - auto value = reader.as(); + capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); + builder_impl.getPointerField(slot_offset).setBlob(value); + } + else + { + capnp::Text::Reader value = capnp::Text::Reader(data.data, data.size); + builder_impl.getPointerField(slot_offset).setBlob(value); + } + } + + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_struct_builder, UInt32 array_index, size_t row_num) override + { + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + auto data = column->getDataAt(row_num); + if constexpr (is_binary) + { + capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); + builder_impl.getPointerElement(array_index).setBlob(value); + } + else + { + capnp::Text::Reader value = capnp::Text::Reader(data.data, data.size); + builder_impl.getPointerElement(array_index).setBlob(value); + } + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + if constexpr (is_binary) + { + capnp::Data::Reader value = reader_impl.getPointerField(slot_offset).getBlob(nullptr, 0); column.insertData(reinterpret_cast(value.begin()), value.size()); } else { - auto value = reader.as(); + capnp::Text::Reader value = reader_impl.getPointerField(slot_offset).getBlob(nullptr, 0); column.insertData(reinterpret_cast(value.begin()), value.size()); } } - private: - capnp::Type capnp_type; + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + if constexpr (is_binary) + { + capnp::Data::Reader value = reader_impl.getPointerElement(array_index).getBlob(nullptr, 0); + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + else + { + capnp::Text::Reader value = reader_impl.getPointerElement(array_index).getBlob(nullptr, 0); + column.insertData(reinterpret_cast(value.begin()), value.size()); + } + } }; template @@ -529,29 +686,71 @@ namespace throwCannotConvert(data_type, column_name, capnp_type); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { + auto & builder_impl = parent_struct_builder.getBuilderImpl(); auto data = column->getDataAt(row_num); if constexpr (is_binary) - return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); - - if (data.data[data.size - 1] == 0) - return 
capnp::DynamicValue::Reader(capnp::Text::Reader(reinterpret_cast(data.data), data.size)); - - /// In TEXT type data should be null-terminated, but ClickHouse FixedString data could not be. - /// To make data null-terminated we should copy it to temporary String object and use it in capnp::Text::Reader. - /// Note that capnp::Text::Reader works only with pointer to the data and it's size, so we should - /// guarantee that new String object life time is longer than capnp::Text::Reader life time. - tmp_string = data.toString(); - return capnp::DynamicValue::Reader(capnp::Text::Reader(reinterpret_cast(tmp_string.data()), tmp_string.size())); + { + capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); + builder_impl.getPointerField(slot_offset).setBlob(value); + } + else + { + if (data.data[data.size - 1] == 0) + { + capnp::Text::Reader value = capnp::Text::Reader(data.data, data.size); + builder_impl.getPointerField(slot_offset).setBlob(value); + } + else + { + /// In TEXT type data should be null-terminated, but ClickHouse FixedString data could not be. + /// To make data null-terminated we should copy it to temporary String object and use it in capnp::Text::Reader. + /// Note that capnp::Text::Reader works only with pointer to the data and it's size, so we should + /// guarantee that new String object life time is longer than capnp::Text::Reader life time. + tmp_string = data.toString(); + capnp::Text::Reader value = capnp::Text::Reader(tmp_string.data(), tmp_string.size()); + builder_impl.getPointerField(slot_offset).setBlob(value); + } + } } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_struct_builder, UInt32 array_index, size_t row_num) override { + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + auto data = column->getDataAt(row_num); + if constexpr (is_binary) + { + capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); + builder_impl.getPointerElement(array_index).setBlob(value); + } + else + { + if (data.data[data.size - 1] == 0) + { + capnp::Text::Reader value = capnp::Text::Reader(data.data, data.size); + builder_impl.getPointerElement(array_index).setBlob(value); + } + else + { + /// In TEXT type data should be null-terminated, but ClickHouse FixedString data could not be. + /// To make data null-terminated we should copy it to temporary String object and use it in capnp::Text::Reader. + /// Note that capnp::Text::Reader works only with pointer to the data and it's size, so we should + /// guarantee that new String object life time is longer than capnp::Text::Reader life time. 
+ tmp_string = data.toString(); + capnp::Text::Reader value = capnp::Text::Reader(tmp_string.data(), tmp_string.size()); + builder_impl.getPointerElement(array_index).setBlob(value); + } + } + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + const auto & reader_impl = parent_struct_reader.getReaderImpl(); auto & fixed_string_column = assert_cast(column); if constexpr (is_binary) { - auto value = reader.as(); + capnp::Data::Reader value = reader_impl.getPointerField(slot_offset).getBlob(nullptr, 0); if (value.size() > fixed_string_column.getN()) throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN()); @@ -559,7 +758,29 @@ namespace } else { - auto value = reader.as(); + capnp::Text::Reader value = reader_impl.getPointerField(slot_offset).getBlob(nullptr, 0); + if (value.size() > fixed_string_column.getN()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN()); + + fixed_string_column.insertData(reinterpret_cast(value.begin()), value.size()); + } + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + auto & fixed_string_column = assert_cast(column); + if constexpr (is_binary) + { + capnp::Data::Reader value = reader_impl.getPointerElement(array_index).getBlob(nullptr, 0); + if (value.size() > fixed_string_column.getN()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN()); + + fixed_string_column.insertData(reinterpret_cast(value.begin()), value.size()); + } + else + { + capnp::Text::Reader value = reader_impl.getPointerElement(array_index).getBlob(nullptr, 0); if (value.size() > fixed_string_column.getN()) throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN()); @@ -581,64 +802,32 @@ namespace throwCannotConvert(data_type, column_name, capnp_type); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - return capnp::DynamicValue::Reader(assert_cast(*column).getElement(row_num).toUnderType()); + auto & builder_impl = parent_struct_builder.getBuilderImpl(); + UInt32 value = assert_cast(*column).getElement(row_num); + builder_impl.setDataField(slot_offset, value); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - assert_cast(column).insertValue(IPv4(reader.as())); - } - }; - - class CapnProtoIPv6Serializer : public ICapnProtoSerializer - { - public: - CapnProtoIPv6Serializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) - { - if (!capnp_type.isData()) - throwCannotConvert(data_type, column_name, capnp_type); + auto & builder_impl = parent_list_builder.getBuilderImpl(); + UInt32 value = assert_cast(*column).getElement(row_num); + builder_impl.setDataElement(array_index, 
value); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - auto data = column->getDataAt(row_num); - return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + UInt32 value = reader_impl.getDataField(slot_offset); + assert_cast(column).insertValue(IPv4(value)); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - auto value = reader.as(); - if (value.size() != sizeof(IPv6)) - throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of IPv6 value: {}", value.size()); - - column.insertData(reinterpret_cast(value.begin()), value.size()); - } - }; - - class CapnProtoUUIDSerializer : public ICapnProtoSerializer - { - public: - CapnProtoUUIDSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) - { - if (!capnp_type.isData()) - throwCannotConvert(data_type, column_name, capnp_type); - } - - std::optional writeRow(const ColumnPtr & column, FieldBuilder *, size_t row_num) override - { - auto data = column->getDataAt(row_num); - return capnp::DynamicValue::Reader(capnp::Data::Reader(reinterpret_cast(data.data), data.size)); - } - - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override - { - auto value = reader.as(); - if (value.size() != sizeof(UUID)) - throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of UUID value: {}", value.size()); - - column.insertData(reinterpret_cast(value.begin()), value.size()); + const auto & reader_impl = parent_list_reader.getReaderImpl(); + UInt32 value = reader_impl.getDataElement(array_index); + assert_cast(column).insertValue(IPv4(value)); } }; @@ -652,19 +841,35 @@ namespace nested_serializer = createSerializer(assert_cast(*data_type).getDictionaryType(), column_name, capnp_type, settings); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { const auto & low_cardinality_column = assert_cast(*column); size_t index = low_cardinality_column.getIndexAt(row_num); const auto & dict_column = low_cardinality_column.getDictionary().getNestedColumn(); - return nested_serializer->writeRow(dict_column, field_builder, index); + nested_serializer->writeRow(dict_column, field_builder, parent_struct_builder, slot_offset, index); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override + { + const auto & low_cardinality_column = assert_cast(*column); + size_t index = low_cardinality_column.getIndexAt(row_num); + const auto & dict_column = low_cardinality_column.getDictionary().getNestedColumn(); + nested_serializer->writeRow(dict_column, field_builder, parent_list_builder, array_index, index); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { auto & 
low_cardinality_column = assert_cast(column); auto tmp_column = low_cardinality_column.getDictionary().getNestedColumn()->cloneEmpty(); - nested_serializer->readRow(*tmp_column, reader); + nested_serializer->readRow(*tmp_column, parent_struct_reader, slot_offset); + low_cardinality_column.insertFromFullColumn(*tmp_column, 0); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + auto & low_cardinality_column = assert_cast(column); + auto tmp_column = low_cardinality_column.getDictionary().getNestedColumn()->cloneEmpty(); + nested_serializer->readRow(*tmp_column, parent_list_reader, array_index); low_cardinality_column.insertFromFullColumn(*tmp_column, 0); } @@ -685,7 +890,10 @@ namespace getCapnProtoFullTypeName(capnp_type)); /// Check that struct is a named union of type VOID and one arbitrary type. - auto struct_schema = capnp_type.asStruct(); + struct_schema = capnp_type.asStruct(); + auto node = struct_schema.getProto().getStruct(); + struct_size = capnp::_::StructSize(node.getDataWordCount(), node.getPointerCount()); + discriminant_offset = node.getDiscriminantOffset(); if (!checkIfStructIsNamedUnion(struct_schema)) throw Exception( ErrorCodes::CAPN_PROTO_BAD_CAST, @@ -706,23 +914,18 @@ namespace auto first = union_fields[0]; auto second = union_fields[1]; auto nested_type = assert_cast(data_type.get())->getNestedType(); + nested_slot_offset = first.getProto().getSlot().getOffset(); /// Both fields have the same offset. if (first.getType().isVoid()) { - null_field = first; - nested_field = second; - nested_capnp_type = second.getType(); - if (nested_capnp_type.isStruct()) - nested_fields_size = nested_capnp_type.asStruct().getFields().size(); - nested_serializer = createSerializer(nested_type, column_name, nested_capnp_type, settings); + nested_serializer = createSerializer(nested_type, column_name, second.getType(), settings); + null_discriminant = 0; + nested_discriminant = 1; } else if (second.getType().isVoid()) { - null_field = second; - nested_field = first; - nested_capnp_type = first.getType(); - if (nested_capnp_type.isStruct()) - nested_fields_size = nested_capnp_type.asStruct().getFields().size(); - nested_serializer = createSerializer(nested_type, column_name, nested_capnp_type, settings); + nested_serializer = createSerializer(nested_type, column_name, first.getType(), settings); + null_discriminant = 1; + nested_discriminant = 0; } else throw Exception( @@ -733,50 +936,102 @@ namespace getCapnProtoFullTypeName(capnp_type)); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - assert(field_builder); + if (!field_builder) + { + auto builder_impl = parent_struct_builder.getBuilderImpl(); + auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getPointerField(slot_offset).initStruct(struct_size)); + field_builder = std::make_unique(std::move(struct_builder_impl), 1); + } + auto & struct_builder = assert_cast(*field_builder); + const auto & nullable_column = assert_cast(*column); if (nullable_column.isNullAt(row_num)) { - struct_builder.impl.set(null_field, capnp::Void()); + auto struct_builder_impl = struct_builder.impl.getBuilderImpl(); + struct_builder_impl.setDataField(discriminant_offset, null_discriminant); + 
struct_builder_impl.setDataField(nested_slot_offset, capnp::Void()); } else { - struct_builder.impl.clear(nested_field); const auto & nested_column = nullable_column.getNestedColumnPtr(); - auto nested_field_builder = initStructFieldBuilderIfNeeded(nested_column, row_num, struct_builder.impl, nested_field, nested_capnp_type, nested_fields_size); - auto value = nested_serializer->writeRow(nested_column, nested_field_builder.get(), row_num); - if (value) - struct_builder.impl.set(nested_field, *value); + struct_builder.impl.getBuilderImpl().setDataField(discriminant_offset, nested_discriminant); + nested_serializer->writeRow(nested_column, struct_builder.field_builders[0], struct_builder.impl, nested_slot_offset, row_num); } - - return std::nullopt; } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override + { + if (!field_builder) + { + auto builder_impl = parent_list_builder.getBuilderImpl(); + auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getStructElement(array_index)); + field_builder = std::make_unique(std::move(struct_builder_impl), 1); + } + + auto & struct_builder = assert_cast(*field_builder); + + const auto & nullable_column = assert_cast(*column); + if (nullable_column.isNullAt(row_num)) + { + auto struct_builder_impl = struct_builder.impl.getBuilderImpl(); + struct_builder_impl.setDataField(discriminant_offset, null_discriminant); + struct_builder_impl.setDataField(nested_slot_offset, capnp::Void()); + } + else + { + const auto & nested_column = nullable_column.getNestedColumnPtr(); + struct_builder.impl.getBuilderImpl().setDataField(discriminant_offset, nested_discriminant); + nested_serializer->writeRow(nested_column, struct_builder.field_builders[0], struct_builder.impl, nested_slot_offset, row_num); + } + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - auto struct_reader = reader.as(); auto & nullable_column = assert_cast(column); - auto field = *kj::_::readMaybe(struct_reader.which()); - if (field.getType().isVoid()) + auto reader_impl = parent_struct_reader.getReaderImpl(); + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getPointerField(slot_offset).getStruct(nullptr)); + + auto discriminant = struct_reader.getReaderImpl().getDataField(discriminant_offset); + + if (discriminant == null_discriminant) nullable_column.insertDefault(); else { auto & nested_column = nullable_column.getNestedColumn(); - auto nested_reader = struct_reader.get(field); - nested_serializer->readRow(nested_column, nested_reader); + nested_serializer->readRow(nested_column, struct_reader, nested_slot_offset); + nullable_column.getNullMapData().push_back(0); + } + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + auto & nullable_column = assert_cast(column); + auto reader_impl = parent_list_reader.getReaderImpl(); + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getStructElement(array_index)); + + auto discriminant = struct_reader.getReaderImpl().getDataField(discriminant_offset); + + if (discriminant == null_discriminant) + nullable_column.insertDefault(); + else + { + auto & nested_column = nullable_column.getNestedColumn(); + 
nested_serializer->readRow(nested_column, struct_reader, nested_slot_offset); nullable_column.getNullMapData().push_back(0); } } private: std::unique_ptr nested_serializer; - capnp::StructSchema::Field null_field; - capnp::StructSchema::Field nested_field; - size_t nested_fields_size = 0; - capnp::Type nested_capnp_type; + capnp::StructSchema struct_schema; + capnp::_::StructSize struct_size; + UInt32 discriminant_offset; + UInt16 null_discriminant; + UInt16 nested_discriminant; + UInt32 nested_slot_offset; }; class CapnProtoArraySerializer : public ICapnProtoSerializer @@ -788,67 +1043,102 @@ namespace throwCannotConvert(data_type, column_name, capnp_type); auto nested_type = assert_cast(data_type.get())->getNestedType(); - element_type = capnp_type.asList().getElementType(); + list_schema = capnp_type.asList(); + auto element_type = list_schema.getElementType(); + element_size = capnp::elementSizeFor(element_type.which()); if (element_type.isStruct()) - element_struct_fields = element_type.asStruct().getFields().size(); + { + element_is_struct = true; + auto node = element_type.asStruct().getProto().getStruct(); + element_struct_size = capnp::_::StructSize(node.getDataWordCount(), node.getPointerCount()); + } + nested_serializer = createSerializer(nested_type, column_name, capnp_type.asList().getElementType(), settings); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - assert(field_builder); - auto & list_builder = assert_cast(*field_builder); const auto * array_column = assert_cast(column.get()); const auto & nested_column = array_column->getDataPtr(); const auto & offsets = array_column->getOffsets(); auto offset = offsets[row_num - 1]; - size_t size = offsets[row_num] - offset; - bool need_nested_builders = list_builder.nested_builders.empty(); - for (unsigned i = 0; i != static_cast(size); ++i) - { - if (need_nested_builders) - { - /// For nested lists we need to initialize nested list builder. 
- if (element_type.isList()) - { - const auto & nested_offset = checkAndGetColumn(*nested_column)->getOffsets(); - size_t nested_array_size = nested_offset[offset + i] - nested_offset[offset + i - 1]; - list_builder.nested_builders.emplace_back(std::make_unique(list_builder.impl.init(i, static_cast(nested_array_size)))); - } - else if (element_type.isStruct()) - { - list_builder.nested_builders.emplace_back(std::make_unique(list_builder.impl[i], element_struct_fields)); - } - else - { - list_builder.nested_builders.emplace_back(); - } - } + UInt32 size = static_cast(offsets[row_num] - offset); - auto value = nested_serializer->writeRow(nested_column, list_builder.nested_builders[i].get(), offset + i); - if (value) - list_builder.impl.set(i, *value); + if (!field_builder) + { + auto builder_impl = parent_struct_builder.getBuilderImpl(); + capnp::DynamicList::Builder list_builder_impl; + if (element_is_struct) + list_builder_impl = capnp::DynamicList::Builder(list_schema, builder_impl.getPointerField(slot_offset).initStructList(size, element_struct_size)); + else + list_builder_impl = capnp::DynamicList::Builder(list_schema, builder_impl.getPointerField(slot_offset).initList(element_size, size)); + field_builder = std::make_unique(std::move(list_builder_impl), size); } - return std::nullopt; + auto & list_builder = assert_cast(*field_builder); + for (UInt32 i = 0; i != size; ++i) + nested_serializer->writeRow(nested_column, list_builder.nested_builders[i], list_builder.impl, i, offset + i); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto list_reader = reader.as(); + const auto * array_column = assert_cast(column.get()); + const auto & nested_column = array_column->getDataPtr(); + const auto & offsets = array_column->getOffsets(); + auto offset = offsets[row_num - 1]; + UInt32 size = static_cast(offsets[row_num] - offset); + + if (!field_builder) + { + auto builder_impl = parent_list_builder.getBuilderImpl(); + capnp::DynamicList::Builder list_builder_impl; + if (element_is_struct) + list_builder_impl = capnp::DynamicList::Builder(list_schema, builder_impl.getPointerElement(array_index).initStructList(size, element_struct_size)); + else + list_builder_impl = capnp::DynamicList::Builder(list_schema, builder_impl.getPointerElement(array_index).initList(element_size, size)); + field_builder = std::make_unique(std::move(list_builder_impl), size); + } + + auto & list_builder = assert_cast(*field_builder); + for (UInt32 i = 0; i != size; ++i) + nested_serializer->writeRow(nested_column, list_builder.nested_builders[i], list_builder.impl, i, offset + i); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + const auto & reader_impl = parent_struct_reader.getReaderImpl(); + auto list_reader = capnp::DynamicList::Reader(list_schema, reader_impl.getPointerField(slot_offset).getList(element_size, nullptr)); + UInt32 size = list_reader.size(); auto & column_array = assert_cast(column); auto & offsets = column_array.getOffsets(); offsets.push_back(offsets.back() + list_reader.size()); auto & nested_column = column_array.getData(); - for (const auto & nested_reader : list_reader) - nested_serializer->readRow(nested_column, nested_reader); + for (UInt32 i = 0; i != size; ++i) + 
nested_serializer->readRow(nested_column, list_reader, i); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + const auto & reader_impl = parent_list_reader.getReaderImpl(); + auto list_reader = capnp::DynamicList::Reader(list_schema, reader_impl.getPointerElement(array_index).getList(element_size, nullptr)); + UInt32 size = list_reader.size(); + auto & column_array = assert_cast(column); + auto & offsets = column_array.getOffsets(); + offsets.push_back(offsets.back() + list_reader.size()); + + auto & nested_column = column_array.getData(); + for (UInt32 i = 0; i != size; ++i) + nested_serializer->readRow(nested_column, list_reader, i); } private: + capnp::ListSchema list_schema; std::unique_ptr nested_serializer; - capnp::Type element_type; - size_t element_struct_fields; + capnp::ElementSize element_size; + capnp::_::StructSize element_struct_size; + bool element_is_struct = false; + }; class CapnProtoMapSerializer : public ICapnProtoSerializer @@ -869,7 +1159,9 @@ namespace if (!capnp_type.isStruct()) throwCannotConvert(data_type, column_name, capnp_type); - auto struct_schema = capnp_type.asStruct(); + struct_schema = capnp_type.asStruct(); + auto node = struct_schema.getProto().getStruct(); + struct_size = capnp::_::StructSize(node.getDataWordCount(), node.getPointerCount()); if (checkIfStructContainsUnnamedUnion(struct_schema)) throw Exception( @@ -921,43 +1213,70 @@ namespace DataTypes types = {map_type.getKeyType(), map_type.getValueType()}; Names names = {"key", "value"}; auto entries_type = std::make_shared(std::make_shared(types, names)); - entries_field = struct_schema.getFields()[0]; - entries_capnp_type = entries_field.getType(); nested_serializer = createSerializer(entries_type, column_name, field_type, settings); + entries_slot_offset = struct_schema.getFields()[0].getProto().getSlot().getOffset(); } - std::optional writeRow(const ColumnPtr & column, FieldBuilder * field_builder, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - assert(field_builder); + if (!field_builder) + { + auto builder_impl = parent_struct_builder.getBuilderImpl(); + auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getPointerField(slot_offset).initStruct(struct_size)); + field_builder = std::make_unique(std::move(struct_builder_impl), 1); + } + auto & struct_builder = assert_cast(*field_builder); const auto & entries_column = assert_cast(column.get())->getNestedColumnPtr(); - auto entries_builder = initStructFieldBuilderIfNeeded(entries_column, row_num, struct_builder.impl, entries_field, entries_capnp_type, 0); - nested_serializer->writeRow(entries_column, entries_builder.get(), row_num); - return std::nullopt; + nested_serializer->writeRow(entries_column, struct_builder.field_builders[0], struct_builder.impl, entries_slot_offset, row_num); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto struct_reader = reader.as(); + if (!field_builder) + { + auto builder_impl = parent_list_builder.getBuilderImpl(); + auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getStructElement(array_index)); + 
field_builder = std::make_unique(std::move(struct_builder_impl), 1); + } + + auto & struct_builder = assert_cast(*field_builder); + const auto & entries_column = assert_cast(column.get())->getNestedColumnPtr(); + nested_serializer->writeRow(entries_column, struct_builder.field_builders[0], struct_builder.impl, entries_slot_offset, row_num); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + auto reader_impl = parent_struct_reader.getReaderImpl(); + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getPointerField(slot_offset).getStruct(nullptr)); auto & entries_column = assert_cast(column).getNestedColumn(); - nested_serializer->readRow(entries_column, struct_reader.get(entries_field)); + nested_serializer->readRow(entries_column, struct_reader, entries_slot_offset); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + auto reader_impl = parent_list_reader.getReaderImpl(); + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getStructElement(array_index)); + auto & entries_column = assert_cast(column).getNestedColumn(); + nested_serializer->readRow(entries_column, struct_reader, entries_slot_offset); } private: std::unique_ptr nested_serializer; - capnp::StructSchema::Field entries_field; - capnp::Type entries_capnp_type; + capnp::StructSchema struct_schema; + capnp::_::StructSize struct_size; + UInt32 entries_slot_offset; }; class CapnProtoStructureSerializer : public ICapnProtoSerializer { public: - CapnProtoStructureSerializer(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) + CapnProtoStructureSerializer(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) : struct_schema(schema) { if (checkIfStructIsNamedUnion(schema) || checkIfStructContainsUnnamedUnion(schema)) throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Root CapnProto Struct cannot be named union/struct with unnamed union"); - initialize(data_types, names, schema, settings); + initialize(data_types, names, settings); } CapnProtoStructureSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) @@ -965,7 +1284,7 @@ namespace if (!capnp_type.isStruct()) throwCannotConvert(data_type, column_name, capnp_type); - auto struct_schema = capnp_type.asStruct(); + struct_schema = capnp_type.asStruct(); if (checkIfStructIsNamedUnion(struct_schema) || checkIfStructContainsUnnamedUnion(struct_schema)) throw Exception( @@ -1002,7 +1321,7 @@ namespace try { - initialize(nested_types, nested_names, struct_schema, settings); + initialize(nested_types, nested_names, settings); } catch (Exception & e) { @@ -1011,77 +1330,118 @@ namespace } } - std::optional writeRow(const ColumnPtr & column, FieldBuilder * builder, size_t row_num) override + void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - assert(builder); - auto & struct_builder = assert_cast(*builder); + if (!field_builder) + { + auto builder_impl = parent_struct_builder.getBuilderImpl(); + auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, 
builder_impl.getPointerField(slot_offset).initStruct(struct_size)); + field_builder = std::make_unique(std::move(struct_builder_impl), fields_count); + } + + auto & struct_builder = assert_cast(*field_builder); if (const auto * tuple_column = typeid_cast(column.get())) - writeRow(tuple_column->getColumnsCopy(), struct_builder, row_num); + { + const auto & columns = tuple_column->getColumns(); + for (size_t i = 0; i != columns.size(); ++i) + fields_serializers[i]->writeRow(columns[i], struct_builder.field_builders[fields_indexes[i]], struct_builder.impl, fields_offsets[i], row_num); + } else - writeRow(Columns{column}, struct_builder, row_num); - return std::nullopt; + { + fields_serializers[0]->writeRow(column, struct_builder.field_builders[fields_indexes[0]], struct_builder.impl, fields_offsets[0], row_num); + } + } + + void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override + { + if (!field_builder) + { + auto builder_impl = parent_list_builder.getBuilderImpl(); + auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getStructElement(array_index)); + field_builder = std::make_unique(std::move(struct_builder_impl), fields_count); + } + + auto & struct_builder = assert_cast(*field_builder); + if (const auto * tuple_column = typeid_cast(column.get())) + { + const auto & columns = tuple_column->getColumns(); + for (size_t i = 0; i != columns.size(); ++i) + fields_serializers[i]->writeRow(columns[i], struct_builder.field_builders[fields_indexes[i]], struct_builder.impl, fields_offsets[i], row_num); + } + else + { + fields_serializers[0]->writeRow(column, struct_builder.field_builders[fields_indexes[0]], struct_builder.impl, fields_offsets[0], row_num); + } } void writeRow(const Columns & columns, StructBuilder & struct_builder, size_t row_num) { for (size_t i = 0; i != columns.size(); ++i) - { - const auto & field = fields[i]; - size_t field_index = field.getIndex(); - if (likely(!struct_builder.field_builders[field_index])) - struct_builder.field_builders[field_index] = initStructFieldBuilderIfNeeded( - columns[i], row_num, struct_builder.impl, field, fields_types[i], nested_field_sizes[i]); - - auto value = field_serializers[i]->writeRow(columns[i], struct_builder.field_builders[field_index].get(), row_num); - if (value) - struct_builder.impl.set(field, *value); - } + fields_serializers[i]->writeRow(columns[i], struct_builder.field_builders[fields_indexes[i]], struct_builder.impl, fields_offsets[i], row_num); } - void readRow(IColumn & column, const capnp::DynamicValue::Reader & reader) override + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - auto struct_reader = reader.as(); + auto reader_impl = parent_struct_reader.getReaderImpl(); + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getPointerField(slot_offset).getStruct(nullptr)); if (auto * tuple_column = typeid_cast(&column)) { for (size_t i = 0; i != tuple_column->tupleSize(); ++i) - field_serializers[i]->readRow(tuple_column->getColumn(i), struct_reader.get(fields[i])); + fields_serializers[i]->readRow(tuple_column->getColumn(i), struct_reader, fields_offsets[i]); } else - field_serializers[0]->readRow(column, struct_reader.get(fields[0])); + fields_serializers[0]->readRow(column, struct_reader, fields_offsets[0]); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & 
parent_list_reader, UInt32 array_index) override + { + auto reader_impl = parent_list_reader.getReaderImpl(); + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getStructElement(array_index)); + if (auto * tuple_column = typeid_cast(&column)) + { + for (size_t i = 0; i != tuple_column->tupleSize(); ++i) + fields_serializers[i]->readRow(tuple_column->getColumn(i), struct_reader, fields_offsets[i]); + } + else + fields_serializers[0]->readRow(column, struct_reader, fields_offsets[0]); } void readRow(MutableColumns & columns, const capnp::DynamicStruct::Reader & reader) { for (size_t i = 0; i != columns.size(); ++i) - field_serializers[i]->readRow(*columns[i], reader.get(fields[i])); + fields_serializers[i]->readRow(*columns[i], reader, fields_offsets[i]); } private: - void initialize(const DataTypes & data_types, const Names & names, const capnp::StructSchema & schema, const FormatSettings::CapnProto & settings) + void initialize(const DataTypes & data_types, const Names & names, const FormatSettings::CapnProto & settings) { - field_serializers.reserve(data_types.size()); - fields.reserve(data_types.size()); - fields_types.reserve(data_types.size()); - nested_field_sizes.reserve(data_types.size()); + auto node = struct_schema.getProto().getStruct(); + struct_size = capnp::_::StructSize(node.getDataWordCount(), node.getPointerCount()); + fields_count = struct_schema.getFields().size(); + fields_serializers.reserve(data_types.size()); + fields_offsets.reserve(data_types.size()); + fields_indexes.reserve(data_types.size()); for (size_t i = 0; i != data_types.size(); ++i) { auto [field_name, _] = splitFieldName(names[i]); - auto field = findFieldByName(schema, field_name); + auto field = findFieldByName(struct_schema, field_name); if (!field) throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Capnproto schema doesn't contain field with name {}", field_name); - fields.push_back(*field); auto capnp_type = field->getType(); - fields_types.push_back(capnp_type); - nested_field_sizes.push_back(capnp_type.isStruct() ? 
capnp_type.asStruct().getFields().size() : 0); - field_serializers.push_back(createSerializer(data_types[i], names[i], capnp_type, settings)); + fields_serializers.push_back(createSerializer(data_types[i], names[i], capnp_type, settings)); + fields_offsets.push_back(field->getProto().getSlot().getOffset()); + fields_indexes.push_back(field->getIndex()); } } - std::vector> field_serializers; - std::vector fields; - std::vector nested_field_sizes; - std::vector fields_types; + capnp::StructSchema struct_schema; + capnp::_::StructSize struct_size; + size_t fields_count; + std::vector> fields_serializers; + std::vector fields_offsets; + std::vector fields_indexes; + }; std::unique_ptr createSerializer(const DataTypePtr & type, const String & name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings) @@ -1116,17 +1476,17 @@ namespace case TypeIndex::UInt64: return createIntegerSerializer(type, name, capnp_type); case TypeIndex::Int128: - return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); case TypeIndex::UInt128: - return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); case TypeIndex::Int256: - return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); case TypeIndex::UInt256: - return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); case TypeIndex::Float32: - return std::make_unique>(type, name, capnp_type); + return createFloatSerializer(type, name, capnp_type); case TypeIndex::Float64: - return std::make_unique>(type, name, capnp_type); + return createFloatSerializer(type, name, capnp_type); case TypeIndex::Date: return std::make_unique(type, name, capnp_type); case TypeIndex::Date32: @@ -1140,15 +1500,15 @@ namespace case TypeIndex::Decimal64: return std::make_unique>(type, name, capnp_type); case TypeIndex::Decimal128: - return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); case TypeIndex::Decimal256: - return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); case TypeIndex::IPv4: return std::make_unique(type, name, capnp_type); case TypeIndex::IPv6: - return std::make_unique(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); case TypeIndex::UUID: - return std::make_unique(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); case TypeIndex::Enum8: return std::make_unique>(type, name, capnp_type, settings.enum_comparing_mode); case TypeIndex::Enum16: diff --git a/src/Formats/CapnProtoSerializer.h b/src/Formats/CapnProtoSerializer.h index 692f5e5301f..5bdd1a0e554 100644 --- a/src/Formats/CapnProtoSerializer.h +++ b/src/Formats/CapnProtoSerializer.h @@ -4,6 +4,7 @@ #include #include +#include namespace DB { From 36e8f13242edfd83e3650369b6b71bc9e8fc2e64 Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Thu, 25 May 2023 20:10:02 +0000 Subject: [PATCH 135/722] Added docs for feature --- docs/en/operations/settings/settings.md | 26 +++++++++++++++++++ docs/en/sql-reference/table-functions/file.md | 5 ++-- docs/ru/operations/settings/settings.md | 23 ++++++++++++++++ docs/ru/sql-reference/table-functions/file.md | 1 + 4 files changed, 53 insertions(+), 2 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 2239084a429..df9e8eb2fe2 100644 --- a/docs/en/operations/settings/settings.md +++ 
b/docs/en/operations/settings/settings.md @@ -4200,6 +4200,32 @@ Possible values: Default value: `false`. +## rename_files_after_processing + +- **Type:** String + +- **Default value:** Empty string + +This setting allows to specify renaming pattern for files processed by `file` table function. When option is set, all files read by `file` table function will be renamed according to specified pattern with placeholders, only if files processing was successful. + +### Placeholders + +- `%f` — Original filename without extension (e.g., "sample"). +- `%e` — Original file extension with dot (e.g., ".csv"). +- `%t` — Timestamp (in microseconds). +- `%%` — Percentage sign ("%"). + +### Example +- Option: `--rename_files_after_processing="processed_%f_%t%e"` + +- Query: `SELECT * FROM file('sample.csv')` + + +If reading `sample.csv` is successful, file will be renamed to `processed_sample_1683473210851438.csv` + + + + ## function_json_value_return_type_allow_complex Control whether allow to return complex type (such as: struct, array, map) for json_value function. diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index 28c2dc9f1f3..577e2e6aa1d 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -40,7 +40,7 @@ VALUES (1, 2, 3), (3, 2, 1), (1, 3, 2) As a result, the data is written into the file `test.tsv`: ```bash -# cat /var/lib/clickhouse/user_files/test.tsv +# cat /var/lib/clickhouse/user_files/test.tsv 1 2 3 3 2 1 1 3 2 @@ -163,7 +163,7 @@ Query the number of rows in all files of these two directories: SELECT count(*) FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32'); ``` -:::note +:::note If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. ::: @@ -199,3 +199,4 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 **See Also** - [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns) +- [Rename files after processing](/docs/en/operations/settings/settings.md#rename_files_after_processing) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index fa3ea582c55..7cab607bb3b 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -4064,3 +4064,26 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_ca Возможные значения:: `true`, `false` Значение по умолчанию: `false` + +## rename_files_after_processing + +- **Тип:** Строка + +- **Значение по умолчанию:** Пустая строка + +Этот параметр позволяет задать паттерн для переименования файлов, обрабатываемых табличной функцией `file`. Когда опция установлена, все файлы, прочитанные табличной функцией `file`, будут переименованы в соответствии с указанным шаблоном, если обработка и чтение файла завершились успешно. + +### Шаблон +Шаблон поддерживает следующие виды плейсхолдеров: + +- `%f` — Исходное имя файла без расширения (например "sample"). +- `%e` — Оригинальное расширение файла с точкой (например ".csv"). +- `%t` — Текущее время (в микросекундах). +- `%%` — Знак процента ("%"). + +### Пример +- Значение аргумента: `--rename_files_after_processing="processed_%f_%t%e"` + +- Запрос: `SELECT * FROM file('sample.csv')` + +Если чтение и обработка `sample.csv` прошли успешно, файл будет переименован в `processed_sample_1683473210851438.csv`. 
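The same rename pattern can also be applied per query rather than via a command-line option. A minimal sketch, assuming the option behaves like any other query-level setting (the file name and resulting timestamp reuse the example above):

```sql
-- Assumes sample.csv exists in the server's user_files directory.
-- On a successful read the file is renamed according to the pattern,
-- e.g. to processed_sample_1683473210851438.csv.
SELECT *
FROM file('sample.csv')
SETTINGS rename_files_after_processing = 'processed_%f_%t%e';
```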
diff --git a/docs/ru/sql-reference/table-functions/file.md b/docs/ru/sql-reference/table-functions/file.md index 94bc734a8fb..0983c51d954 100644 --- a/docs/ru/sql-reference/table-functions/file.md +++ b/docs/ru/sql-reference/table-functions/file.md @@ -126,3 +126,4 @@ SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, **Смотрите также** - [Виртуальные столбцы](index.md#table_engines-virtual_columns) +- [Переименование файлов после обработки](/docs/ru/operations/settings/settings.md#rename_files_after_processing) From d4cec1f0e0fa114f5e04dead6b90f51b3e292310 Mon Sep 17 00:00:00 2001 From: zvonand Date: Tue, 30 May 2023 01:30:01 +0200 Subject: [PATCH 136/722] fix client thread attachment + make better tests --- programs/client/Client.cpp | 1 + src/Client/ClientBase.cpp | 3 --- src/Client/Connection.cpp | 4 --- src/Common/ThreadStatus.h | 3 ++- src/DataTypes/DataTypeDateTime.h | 4 ++- src/Server/TCPHandler.cpp | 2 +- .../02681_timezone_setting.reference | 5 ---- .../0_stateless/02681_timezone_setting.sql | 11 -------- .../02737_timezone_setting.reference | 7 +++++ .../0_stateless/02737_timezone_setting.sql | 27 +++++++++++++++++++ 10 files changed, 41 insertions(+), 26 deletions(-) delete mode 100644 tests/queries/0_stateless/02681_timezone_setting.reference delete mode 100644 tests/queries/0_stateless/02681_timezone_setting.sql create mode 100644 tests/queries/0_stateless/02737_timezone_setting.reference create mode 100644 tests/queries/0_stateless/02737_timezone_setting.sql diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 6b34bdbc5bb..231b7fd6d61 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -326,6 +326,7 @@ try // All that just to set DB::CurrentThread::get().getGlobalContext() // which is required for client timezone (pushed from server) to work. auto thread_group = std::make_shared(); + thread_group->global_context = global_context; thread_status.attachToGroup(thread_group, false); } diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index fad9494ba4b..562c11680a1 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -448,9 +448,7 @@ void ClientBase::onData(Block & block, ASTPtr parsed_query) /// output_format, do not output it. /// Also do not output too much data if we're fuzzing. if (block.rows() == 0 || (query_fuzzer_runs != 0 && processed_rows >= 100)) - { return; - } /// If results are written INTO OUTFILE, we can avoid clearing progress to avoid flicker. 
if (need_render_progress && tty_buf && (!select_into_file || select_into_file_and_stdout)) @@ -902,7 +900,6 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa if (send_external_tables) sendExternalTables(parsed_query); - receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel); break; diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 86585d805d9..457d90c5bd4 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -355,10 +355,6 @@ void Connection::receiveHello() nonce.emplace(read_nonce); } } -// else if (packet_type == Protocol::Server::TimezoneUpdate) -// { -// // skip this packet at hello, will receive and process it later -// } else if (packet_type == Protocol::Server::Exception) receiveException()->rethrow(); else diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 600dfc56d2b..400b55c2409 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -72,7 +72,8 @@ public: /// Set up at creation, no race when reading const ContextWeakPtr query_context; - const ContextWeakPtr global_context; + /// Cannot make it const -- we need to modify it in ch-client to process timezone from server + ContextWeakPtr global_context; const FatalErrorCallback fatal_error_callback; diff --git a/src/DataTypes/DataTypeDateTime.h b/src/DataTypes/DataTypeDateTime.h index 91a09ff7cb9..a4a05917ba5 100644 --- a/src/DataTypes/DataTypeDateTime.h +++ b/src/DataTypes/DataTypeDateTime.h @@ -21,7 +21,9 @@ namespace DB * all types with different time zones are equivalent and may be used interchangingly. * Time zone only affects parsing and displaying in text formats. * - * If time zone is not specified (example: DateTime without parameter), then default time zone is used. + * If time zone is not specified (example: DateTime without parameter), + * then `session_timezone` setting value is used. + * If `session_timezone` is not set (or empty string), server default time zone is used. 
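 * For example (an illustrative note mirroring the 02737_timezone_setting test below): under
 * SETTINGS session_timezone = 'Asia/Novosibirsk', a bare toDateTime('2000-01-01 00:00:00')
 * is interpreted in Novosibirsk time, while inserting the same string literal into a column
 * declared as DateTime('UTC') still parses it with the column's UTC time zone.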
* Default time zone is server time zone, if server is doing transformations * and if client is doing transformations, unless 'use_client_time_zone' setting is passed to client; * Server time zone is the time zone specified in 'timezone' parameter in configuration file, diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index d57a1c93dd7..c41eace68ba 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1068,7 +1068,7 @@ void TCPHandler::sendTimezone() if (client_tcp_protocol_version < DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES) return; - const String & tz = query_context->getSettingsRef().session_timezone.toString(); + const String & tz = query_context->getSettingsRef().session_timezone.value; LOG_DEBUG(log, "TCPHandler::sendTimezone(): {}", tz); writeVarUInt(Protocol::Server::TimezoneUpdate, *out); diff --git a/tests/queries/0_stateless/02681_timezone_setting.reference b/tests/queries/0_stateless/02681_timezone_setting.reference deleted file mode 100644 index 8850d77ab03..00000000000 --- a/tests/queries/0_stateless/02681_timezone_setting.reference +++ /dev/null @@ -1,5 +0,0 @@ -2022-12-12 17:23:23.123 -2022-12-12 23:23:23.123 -2022-12-12 22:23:23.123 -Europe/Zurich Europe/Zurich -Pacific/Pitcairn Pacific/Pitcairn diff --git a/tests/queries/0_stateless/02681_timezone_setting.sql b/tests/queries/0_stateless/02681_timezone_setting.sql deleted file mode 100644 index f66e8d2b646..00000000000 --- a/tests/queries/0_stateless/02681_timezone_setting.sql +++ /dev/null @@ -1,11 +0,0 @@ -SET session_timezone = 'Абырвалг'; -- { serverError BAD_ARGUMENTS} - -SET session_timezone = 'Asia/Novosibirsk'; -SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Europe/Zurich'); -SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS session_timezone = 'Europe/Zurich'; - -SET session_timezone = 'Asia/Manila'; -SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Asia/Novosibirsk'); - -SELECT timezone(), timezoneOf(now()) SETTINGS session_timezone = 'Europe/Zurich' FORMAT TSV; -SELECT timezone(), timezoneOf(now()) SETTINGS session_timezone = 'Pacific/Pitcairn' FORMAT TSV; diff --git a/tests/queries/0_stateless/02737_timezone_setting.reference b/tests/queries/0_stateless/02737_timezone_setting.reference new file mode 100644 index 00000000000..578aec4e316 --- /dev/null +++ b/tests/queries/0_stateless/02737_timezone_setting.reference @@ -0,0 +1,7 @@ +Pacific/Pitcairn Pacific/Pitcairn +Asia/Novosibirsk Asia/Novosibirsk +2022-12-12 17:23:23 +2022-12-13 07:23:23.123 +2002-12-12 23:23:23 2002-12-12 23:23:23 +2002-12-12 23:23:23.123 2002-12-12 23:23:23.123 +2000-01-01 01:00:00 diff --git a/tests/queries/0_stateless/02737_timezone_setting.sql b/tests/queries/0_stateless/02737_timezone_setting.sql new file mode 100644 index 00000000000..87eeec0779b --- /dev/null +++ b/tests/queries/0_stateless/02737_timezone_setting.sql @@ -0,0 +1,27 @@ +SET session_timezone = 'Абырвалг'; -- { serverError BAD_ARGUMENTS} + +SELECT timezone(), timezoneOf(now()) SETTINGS session_timezone = 'Pacific/Pitcairn'; + +SET session_timezone = 'Asia/Novosibirsk'; +SELECT timezone(), timezoneOf(now()); + +-- test simple queries +SELECT toDateTime(toDateTime('2022-12-12 23:23:23'), 'Europe/Zurich'); +SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS session_timezone = 'America/Denver'; + +-- test proper serialization +SELECT toDateTime('2002-12-12 23:23:23'), toString(toDateTime('2002-12-12 23:23:23')) 
SETTINGS session_timezone = 'Asia/Phnom_Penh'; +SELECT toDateTime64('2002-12-12 23:23:23.123', 3), toString(toDateTime64('2002-12-12 23:23:23.123', 3)) SETTINGS session_timezone = 'Asia/Phnom_Penh'; + +-- Create a table and test that DateTimes are processed correctly on insert +SET session_timezone='Asia/Novosibirsk'; +CREATE TABLE test_tz_setting (d DateTime('UTC')) Engine=Memory AS SELECT toDateTime('2000-01-01 00:00:00'); +INSERT INTO test_tz_setting VALUES ('2000-01-01 01:00:00'); -- this is parsed using timezone from `d` column +INSERT INTO test_tz_setting VALUES (toDateTime('2000-01-02 02:00:00')); -- this is parsed using `session_timezone` + +-- Test parsing in WHERE filter, shall have the same logic as insert +SELECT d FROM test_tz_setting WHERE d == '2000-01-01 01:00:00'; -- 1 row expected +SELECT d FROM test_tz_setting WHERE d == toDateTime('2000-01-01 02:00:00'); -- 0 rows expected + +-- Cleanup table +DROP TABLE test_tz_setting SYNC; \ No newline at end of file From c25980bcf4ecae8aaec15b75421b3a187b410ab2 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Tue, 30 May 2023 12:02:44 +0000 Subject: [PATCH 137/722] Trying to fix toDateOrDefault() --- src/Functions/FunctionsConversion.h | 7 +++++++ .../01746_convert_type_with_default.reference | 4 ++++ .../0_stateless/01746_convert_type_with_default.sql | 10 ++++++++++ 3 files changed, 21 insertions(+) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 940585d6d57..d3676349318 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -2882,6 +2882,13 @@ private: return true; } + if constexpr (IsDataTypeNumber && IsDataTypeDateOrDateTime) + { + result_column = ConvertImpl::execute( + arguments, result_type, input_rows_count); + return true; + } + return false; }); diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.reference b/tests/queries/0_stateless/01746_convert_type_with_default.reference index 9ebef9c4a8d..ec2a826982f 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.reference +++ b/tests/queries/0_stateless/01746_convert_type_with_default.reference @@ -20,5 +20,9 @@ 2 -1 -2 +2023-05-30 +2023-05-30 +2023-05-30 14:38:20 +2023-05-30 14:38:20 61f0c404-5cb3-11e7-907b-a6006ad3dba0 59f0c404-5cb3-11e7-907b-a6006ad3dba0 diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index 0881e911466..9d7873081e5 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -26,5 +26,15 @@ select toUInt256OrDefault('1xx', cast(2 as UInt256)); select toInt256OrDefault('-1', cast(-2 as Int256)); select toInt256OrDefault('-1xx', cast(-2 as Int256)); +select toDateOrDefault('2023-05-30'); +select toDateOrDefault(19507); + +select toDateTimeOrDefault('2023-05-30 14:38:20'); +select toDateTimeOrDefault(1685457500); + SELECT toUUIDOrDefault('61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); SELECT toUUIDOrDefault('-----61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); + + + + From 6c9b7a710c083041b25159109e8cf786c5875ba0 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Tue, 30 May 2023 14:46:44 +0000 Subject: [PATCH 138/722] Added more tests for toDateOrDefault/toDateTimeOrDefault --- .../01746_convert_type_with_default.reference | 8 ++++++++ 
.../01746_convert_type_with_default.sql | 14 +++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.reference b/tests/queries/0_stateless/01746_convert_type_with_default.reference index ec2a826982f..235a88157c8 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.reference +++ b/tests/queries/0_stateless/01746_convert_type_with_default.reference @@ -20,8 +20,16 @@ 2 -1 -2 +1970-01-01 2023-05-30 2023-05-30 +2023-05-30 +1970-01-01 +2023-05-30 +2023-05-30 +1970-01-01 +2023-05-30 14:38:20 +2023-05-30 14:38:20 2023-05-30 14:38:20 2023-05-30 14:38:20 61f0c404-5cb3-11e7-907b-a6006ad3dba0 diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index 9d7873081e5..18b5ae60920 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -26,15 +26,19 @@ select toUInt256OrDefault('1xx', cast(2 as UInt256)); select toInt256OrDefault('-1', cast(-2 as Int256)); select toInt256OrDefault('-1xx', cast(-2 as Int256)); +select toDateOrDefault('2020-0x-02'); select toDateOrDefault('2023-05-30'); +select toDateOrDefault('2023-05-30', '2000-01-01'::Date); +select toDateOrDefault('2020-0x-02', '2023-05-30'::Date); +select toDateOrDefault(-1); select toDateOrDefault(19507); +select toDateOrDefault(19507, '2000-01-01'::Date); +select toDateOrDefault(-1, '2000-01-01'::Date); select toDateTimeOrDefault('2023-05-30 14:38:20'); +select toDateTimeOrDefault('2023-05-30 14:38:20', 'UTC'); +select toDateTimeOrDefault('s2023', 'UTC', '2023-05-30 14:38:20'::DateTime('UTC')); select toDateTimeOrDefault(1685457500); SELECT toUUIDOrDefault('61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); -SELECT toUUIDOrDefault('-----61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); - - - - +SELECT toUUIDOrDefault('-----61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); \ No newline at end of file From c8bb1f64ad21dea5ba63fa8f2ea8434d90f9e823 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 30 May 2023 18:46:49 +0200 Subject: [PATCH 139/722] fix --- src/Storages/StorageReplicatedMergeTree.cpp | 3 +++ tests/integration/test_lost_part/test.py | 12 ++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index e71f5217c2b..35f75880ced 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5469,6 +5469,7 @@ void StorageReplicatedMergeTree::alter( if (mutation_znode) { LOG_DEBUG(log, "Metadata changes applied. 
Will wait for data changes."); + merge_selecting_task->schedule(); waitMutation(*mutation_znode, query_context->getSettingsRef().alter_sync); LOG_DEBUG(log, "Data changes applied."); } @@ -6620,6 +6621,8 @@ void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, Conte throw Coordination::Exception("Unable to create a mutation znode", rc); } + merge_selecting_task->schedule(); + waitMutation(mutation_entry.znode_name, query_context->getSettingsRef().mutations_sync); } diff --git a/tests/integration/test_lost_part/test.py b/tests/integration/test_lost_part/test.py index 44cd19fd1fb..0bc24268040 100644 --- a/tests/integration/test_lost_part/test.py +++ b/tests/integration/test_lost_part/test.py @@ -42,7 +42,8 @@ def test_lost_part_same_replica(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt0 (id UInt64, date Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{node.name}') ORDER BY tuple() PARTITION BY date " - "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" + "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0," + "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000" ) node1.query("SYSTEM STOP MERGES mt0") @@ -109,7 +110,8 @@ def test_lost_part_other_replica(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt1 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', '{node.name}') ORDER BY tuple() " - "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" + "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0," + "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000" ) node1.query("SYSTEM STOP MERGES mt1") @@ -178,7 +180,8 @@ def test_lost_part_mutation(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt2 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t2', '{node.name}') ORDER BY tuple() " - "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" + "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0," + "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000" ) node1.query("SYSTEM STOP MERGES mt2") @@ -241,7 +244,8 @@ def test_lost_last_part(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt3 (id UInt64, p String) ENGINE ReplicatedMergeTree('/clickhouse/tables/t3', '{node.name}') " - "ORDER BY tuple() PARTITION BY p SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" + "ORDER BY tuple() PARTITION BY p SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0," + "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000" ) node1.query("SYSTEM STOP MERGES mt3") From daaae3f573c03be49c9e015c249642034113374d Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Tue, 30 May 2023 19:11:26 +0000 Subject: [PATCH 140/722] Add toString() to fix time zone error --- tests/queries/0_stateless/01746_convert_type_with_default.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index 
18b5ae60920..40e4798721b 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -37,7 +37,7 @@ select toDateOrDefault(-1, '2000-01-01'::Date); select toDateTimeOrDefault('2023-05-30 14:38:20'); select toDateTimeOrDefault('2023-05-30 14:38:20', 'UTC'); -select toDateTimeOrDefault('s2023', 'UTC', '2023-05-30 14:38:20'::DateTime('UTC')); +select toString(toDateTimeOrDefault('s2023', 'Asia/Istanbul', '2023-05-30 14:38:20'::DateTime('Asia/Istanbul')), 'Asia/Istanbul'); select toDateTimeOrDefault(1685457500); SELECT toUUIDOrDefault('61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); From d4efbbfbd3ca954b83391c60afffe760cc602361 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 30 May 2023 19:32:24 +0000 Subject: [PATCH 141/722] Allow to skip empty files in file/s3/url/hdfs table functions --- src/Core/Settings.h | 4 + src/Storages/HDFS/StorageHDFS.cpp | 113 +++++++++++------- src/Storages/HDFS/StorageHDFS.h | 23 +++- src/Storages/HDFS/StorageHDFSCluster.cpp | 2 +- src/Storages/StorageFile.cpp | 59 ++++++--- src/Storages/StorageS3.cpp | 12 +- src/Storages/StorageURL.cpp | 43 ++++--- src/Storages/StorageURL.h | 2 +- tests/integration/test_storage_hdfs/test.py | 50 ++++++++ tests/integration/test_storage_s3/test.py | 55 +++++++++ .../02771_skip_empty_files.reference | 7 ++ .../0_stateless/02771_skip_empty_files.sh | 24 ++++ 12 files changed, 307 insertions(+), 87 deletions(-) create mode 100644 tests/queries/0_stateless/02771_skip_empty_files.reference create mode 100755 tests/queries/0_stateless/02771_skip_empty_files.sh diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 607be1522db..534cb629aa8 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -91,6 +91,7 @@ class IColumn; M(UInt64, s3_list_object_keys_size, 1000, "Maximum number of files that could be returned in batch by ListObject request", 0) \ M(Bool, s3_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables.", 0) \ M(Bool, s3_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in s3 engine tables", 0) \ + M(Bool, s3_skip_empty_files, false, "Allow to skip empty files in s3 table engine", 0) \ M(Bool, s3_check_objects_after_upload, false, "Check each uploaded object to s3 with head request to be sure that upload was successful", 0) \ M(Bool, s3_allow_parallel_part_upload, true, "Use multiple threads for s3 multipart upload. It may lead to slightly higher memory usage", 0) \ M(Bool, s3_throw_on_zero_files_match, false, "Throw an error, when ListObjects request cannot match any files", 0) \ @@ -99,6 +100,7 @@ class IColumn; M(UInt64, hdfs_replication, 0, "The actual number of replications can be specified when the hdfs file is created.", 0) \ M(Bool, hdfs_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables", 0) \ M(Bool, hdfs_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in hdfs engine tables", 0) \ + M(Bool, hdfs_skip_empty_files, false, "Allow to skip empty files in hdfs table engine", 0) \ M(UInt64, hsts_max_age, 0, "Expired time for hsts. 0 means disable HSTS.", 0) \ M(Bool, extremes, false, "Calculate minimums and maximums of the result columns. 
They can be output in JSON-formats.", IMPORTANT) \ M(Bool, use_uncompressed_cache, false, "Whether to use the cache of uncompressed blocks.", 0) \ @@ -602,6 +604,8 @@ class IColumn; M(Bool, engine_file_empty_if_not_exists, false, "Allows to select data from a file engine table without file", 0) \ M(Bool, engine_file_truncate_on_insert, false, "Enables or disables truncate before insert in file engine tables", 0) \ M(Bool, engine_file_allow_create_multiple_files, false, "Enables or disables creating a new file on each insert in file engine tables if format has suffix.", 0) \ + M(Bool, engine_file_skip_empty_files, false, "Allows to skip empty files in file table engine", 0) \ + M(Bool, engine_url_skip_empty_files, false, "Allows to skip empty files in url table engine", 0) \ M(Bool, allow_experimental_database_replicated, false, "Allow to create databases with Replicated engine", 0) \ M(UInt64, database_replicated_initial_query_timeout_sec, 300, "How long initial DDL query should wait for Replicated database to precess previous DDL queue entries", 0) \ M(Bool, database_replicated_enforce_synchronous_settings, false, "Enforces synchronous waiting for some queries (see also database_atomic_wait_for_drop_and_detach_synchronously, mutation_sync, alter_sync). Not recommended to enable these settings.", 0) \ diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index 19c0840149b..08114ed3cba 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -66,7 +66,7 @@ namespace /* Recursive directory listing with matched paths as a result. * Have the same method in StorageFile. */ - Strings LSWithRegexpMatching(const String & path_for_ls, const HDFSFSPtr & fs, const String & for_match, std::unordered_map * last_mod_times) + std::vector LSWithRegexpMatching(const String & path_for_ls, const HDFSFSPtr & fs, const String & for_match) { const size_t first_glob = for_match.find_first_of("*?{"); @@ -88,7 +88,7 @@ namespace throw Exception( ErrorCodes::ACCESS_DENIED, "Cannot list directory {}: {}", prefix_without_globs, String(hdfsGetLastError())); } - Strings result; + std::vector result; if (!ls.file_info && ls.length > 0) throw Exception(ErrorCodes::LOGICAL_ERROR, "file_info shouldn't be null"); for (int i = 0; i < ls.length; ++i) @@ -102,17 +102,15 @@ namespace if (!is_directory && !looking_for_directory) { if (re2::RE2::FullMatch(file_name, matcher)) - { - result.push_back(String(ls.file_info[i].mName)); - if (last_mod_times) - (*last_mod_times)[result.back()] = ls.file_info[i].mLastMod; - } + result.emplace_back( + String(ls.file_info[i].mName), + StorageHDFS::PathInfo{ls.file_info[i].mLastMod, static_cast(ls.file_info[i].mSize)}); } else if (is_directory && looking_for_directory) { if (re2::RE2::FullMatch(file_name, matcher)) { - Strings result_part = LSWithRegexpMatching(fs::path(full_path) / "", fs, suffix_with_globs.substr(next_slash), last_mod_times); + std::vector result_part = LSWithRegexpMatching(fs::path(full_path) / "", fs, suffix_with_globs.substr(next_slash)); /// Recursion depth is limited by pattern. '*' works only for depth = 1, for depth = 2 pattern path is '*/*'. So we do not need additional check. 
std::move(result_part.begin(), result_part.end(), std::back_inserter(result)); } @@ -135,12 +133,20 @@ namespace throw Exception(ErrorCodes::BAD_ARGUMENTS, "Storage HDFS requires valid URL to be set"); } - std::vector getPathsList(const String & path_from_uri, const String & uri_without_path, ContextPtr context, std::unordered_map * last_mod_times = nullptr) + std::vector getPathsList(const String & path_from_uri, const String & uri_without_path, ContextPtr context) { HDFSBuilderWrapper builder = createHDFSBuilder(uri_without_path + "/", context->getGlobalContext()->getConfigRef()); HDFSFSPtr fs = createHDFSFS(builder.get()); - return LSWithRegexpMatching("/", fs, path_from_uri, last_mod_times); + return LSWithRegexpMatching("/", fs, path_from_uri); + } + + size_t getFileSize(const String & path_from_uri, const String & uri_without_path, ContextPtr context) + { + HDFSBuilderWrapper builder = createHDFSBuilder(uri_without_path + "/", context->getGlobalContext()->getConfigRef()); + HDFSFSPtr fs = createHDFSFS(builder.get()); + auto * info = hdfsGetPathInfo(fs.get(), path_from_uri.data()); + return info->mSize; } } @@ -199,9 +205,8 @@ ColumnsDescription StorageHDFS::getTableStructureFromData( ContextPtr ctx) { const auto [path_from_uri, uri_without_path] = getPathFromUriAndUriWithoutPath(uri); - std::unordered_map last_mod_time; - auto paths = getPathsList(path_from_uri, uri, ctx, &last_mod_time); - if (paths.empty() && !FormatFactory::instance().checkIfFormatHasExternalSchemaReader(format)) + auto paths_with_info = getPathsList(path_from_uri, uri, ctx); + if (paths_with_info.empty() && !FormatFactory::instance().checkIfFormatHasExternalSchemaReader(format)) throw Exception( ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "Cannot extract table structure from {} format file, because there are no files in HDFS with provided path." @@ -209,14 +214,27 @@ ColumnsDescription StorageHDFS::getTableStructureFromData( std::optional columns_from_cache; if (ctx->getSettingsRef().schema_inference_use_cache_for_hdfs) - columns_from_cache = tryGetColumnsFromCache(paths, path_from_uri, last_mod_time, format, ctx); + columns_from_cache = tryGetColumnsFromCache(paths_with_info, path_from_uri, format, ctx); - ReadBufferIterator read_buffer_iterator = [&, my_uri_without_path = uri_without_path, it = paths.begin()](ColumnsDescription &) mutable -> std::unique_ptr + ReadBufferIterator read_buffer_iterator + = [&, my_uri_without_path = uri_without_path, it = paths_with_info.begin(), first = true]( + ColumnsDescription & columns) mutable -> std::unique_ptr { - if (it == paths.end()) + if (it == paths_with_info.end()) + { + if (first) + throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, + "Cannot extract table structure from {} format file, because all files are empty. 
" + "You must specify table structure manually", format); return nullptr; - auto compression = chooseCompressionMethod(*it, compression_method); - auto impl = std::make_unique(my_uri_without_path, *it++, ctx->getGlobalContext()->getConfigRef(), ctx->getReadSettings()); + } + + auto path_with_info = *it++; + if (ctx->getSettingsRef().hdfs_skip_empty_files && path_with_info.info && path_with_info.info->size == 0) + return read_buffer_iterator(columns); + + auto compression = chooseCompressionMethod(path_with_info.path, compression_method); + auto impl = std::make_unique(my_uri_without_path, path_with_info.path, ctx->getGlobalContext()->getConfigRef(), ctx->getReadSettings()); const Int64 zstd_window_log_max = ctx->getSettingsRef().zstd_window_log_max; return wrapReadBufferWithCompressionMethod(std::move(impl), compression, static_cast(zstd_window_log_max)); }; @@ -225,10 +243,10 @@ ColumnsDescription StorageHDFS::getTableStructureFromData( if (columns_from_cache) columns = *columns_from_cache; else - columns = readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, paths.size() > 1, ctx); + columns = readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, paths_with_info.size() > 1, ctx); if (ctx->getSettingsRef().schema_inference_use_cache_for_hdfs) - addColumnsToCache(paths, path_from_uri, columns, format, ctx); + addColumnsToCache(paths_with_info, path_from_uri, columns, format, ctx); return columns; } @@ -241,11 +259,11 @@ public: const auto [path_from_uri, uri_without_path] = getPathFromUriAndUriWithoutPath(uri); uris = getPathsList(path_from_uri, uri_without_path, context_); for (auto & elem : uris) - elem = uri_without_path + elem; + elem.path = uri_without_path + elem.path; uris_iter = uris.begin(); } - String next() + StorageHDFS::PathWithInfo next() { std::lock_guard lock(mutex); if (uris_iter != uris.end()) @@ -258,8 +276,8 @@ public: } private: std::mutex mutex; - Strings uris; - Strings::iterator uris_iter; + std::vector uris; + std::vector::iterator uris_iter; }; class HDFSSource::URISIterator::Impl @@ -279,14 +297,14 @@ public: uris_iter = uris.begin(); } - String next() + StorageHDFS::PathWithInfo next() { std::lock_guard lock(mutex); if (uris_iter == uris.end()) - return ""; + return {"", {}}; auto key = *uris_iter; ++uris_iter; - return key; + return {key, {}}; } private: @@ -298,7 +316,7 @@ private: HDFSSource::DisclosedGlobIterator::DisclosedGlobIterator(ContextPtr context_, const String & uri) : pimpl(std::make_shared(context_, uri)) {} -String HDFSSource::DisclosedGlobIterator::next() +StorageHDFS::PathWithInfo HDFSSource::DisclosedGlobIterator::next() { return pimpl->next(); } @@ -308,7 +326,7 @@ HDFSSource::URISIterator::URISIterator(const std::vector & uris_, Contex { } -String HDFSSource::URISIterator::next() +StorageHDFS::PathWithInfo HDFSSource::URISIterator::next() { return pimpl->next(); } @@ -343,12 +361,21 @@ HDFSSource::HDFSSource( bool HDFSSource::initialize() { - current_path = (*file_iterator)(); - if (current_path.empty()) + auto path_with_info = (*file_iterator)(); + if (path_with_info.path.empty()) return false; + current_path = path_with_info.path; const auto [path_from_uri, uri_without_path] = getPathFromUriAndUriWithoutPath(current_path); + if (getContext()->getSettingsRef().hdfs_skip_empty_files) + { + auto file_size = path_with_info.info ? path_with_info.info->size : getFileSize(path_from_uri, uri_without_path, getContext()); + /// If file is empty and hdfs_skip_empty_files=1, skip it and go to the next file. 
+ if (file_size == 0) + return initialize(); + } + auto compression = chooseCompressionMethod(path_from_uri, storage->compression_method); auto impl = std::make_unique( uri_without_path, path_from_uri, getContext()->getGlobalContext()->getConfigRef(), getContext()->getReadSettings()); @@ -553,8 +580,8 @@ Pipe StorageHDFS::read( if (distributed_processing) { iterator_wrapper = std::make_shared( - [callback = context_->getReadTaskCallback()]() -> String { - return callback(); + [callback = context_->getReadTaskCallback()]() -> StorageHDFS::PathWithInfo { + return StorageHDFS::PathWithInfo{callback(), std::nullopt}; }); } else if (is_path_with_globs) @@ -761,24 +788,22 @@ SchemaCache & StorageHDFS::getSchemaCache(const ContextPtr & ctx) } std::optional StorageHDFS::tryGetColumnsFromCache( - const Strings & paths, + const std::vector & paths_with_info, const String & uri_without_path, - std::unordered_map & last_mod_time, const String & format_name, const ContextPtr & ctx) { auto & schema_cache = getSchemaCache(ctx); - for (const auto & path : paths) + for (const auto & path_with_info : paths_with_info) { auto get_last_mod_time = [&]() -> std::optional { - auto it = last_mod_time.find(path); - if (it == last_mod_time.end()) - return std::nullopt; - return it->second; + if (path_with_info.info) + return path_with_info.info->last_mod_time; + return std::nullopt; }; - String url = fs::path(uri_without_path) / path; + String url = fs::path(uri_without_path) / path_with_info.path; auto cache_key = getKeyForSchemaCache(url, format_name, {}, ctx); auto columns = schema_cache.tryGet(cache_key, get_last_mod_time); if (columns) @@ -789,7 +814,7 @@ std::optional StorageHDFS::tryGetColumnsFromCache( } void StorageHDFS::addColumnsToCache( - const Strings & paths, + const std::vector & paths_with_info, const String & uri_without_path, const ColumnsDescription & columns, const String & format_name, @@ -797,8 +822,8 @@ void StorageHDFS::addColumnsToCache( { auto & schema_cache = getSchemaCache(ctx); Strings sources; - sources.reserve(paths.size()); - std::transform(paths.begin(), paths.end(), std::back_inserter(sources), [&](const String & path){ return fs::path(uri_without_path) / path; }); + sources.reserve(paths_with_info.size()); + std::transform(paths_with_info.begin(), paths_with_info.end(), std::back_inserter(sources), [&](const PathWithInfo & path_with_info){ return fs::path(uri_without_path) / path_with_info.path; }); auto cache_keys = getKeysForSchemaCache(sources, format_name, {}, ctx); schema_cache.addMany(cache_keys, columns); } diff --git a/src/Storages/HDFS/StorageHDFS.h b/src/Storages/HDFS/StorageHDFS.h index b123834e981..87ad5aee6a3 100644 --- a/src/Storages/HDFS/StorageHDFS.h +++ b/src/Storages/HDFS/StorageHDFS.h @@ -18,6 +18,18 @@ namespace DB class StorageHDFS final : public IStorage, WithContext { public: + struct PathInfo + { + time_t last_mod_time; + size_t size; + }; + + struct PathWithInfo + { + String path; + std::optional info; + }; + StorageHDFS( const String & uri_, const StorageID & table_id_, @@ -72,14 +84,13 @@ protected: private: static std::optional tryGetColumnsFromCache( - const Strings & paths, + const std::vector & paths_with_info, const String & uri_without_path, - std::unordered_map & last_mod_time, const String & format_name, const ContextPtr & ctx); static void addColumnsToCache( - const Strings & paths, + const std::vector & paths, const String & uri_without_path, const ColumnsDescription & columns, const String & format_name, @@ -105,7 +116,7 @@ public: { public: 
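/// Expands the glob once on construction and then hands the matched paths out one by one,
/// each together with the metadata collected during listing (see PathWithInfo above), so that
/// HDFSSource::initialize can reuse the file size for the hdfs_skip_empty_files check instead
/// of querying HDFS again.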
DisclosedGlobIterator(ContextPtr context_, const String & uri_); - String next(); + StorageHDFS::PathWithInfo next(); private: class Impl; /// shared_ptr to have copy constructor @@ -116,14 +127,14 @@ public: { public: URISIterator(const std::vector & uris_, ContextPtr context); - String next(); + StorageHDFS::PathWithInfo next(); private: class Impl; /// shared_ptr to have copy constructor std::shared_ptr pimpl; }; - using IteratorWrapper = std::function; + using IteratorWrapper = std::function; using StorageHDFSPtr = std::shared_ptr; static Block getHeader(Block sample_block, const std::vector & requested_virtual_columns); diff --git a/src/Storages/HDFS/StorageHDFSCluster.cpp b/src/Storages/HDFS/StorageHDFSCluster.cpp index 46e67b623e2..b98459aeee3 100644 --- a/src/Storages/HDFS/StorageHDFSCluster.cpp +++ b/src/Storages/HDFS/StorageHDFSCluster.cpp @@ -79,7 +79,7 @@ void StorageHDFSCluster::addColumnsStructureToQuery(ASTPtr & query, const String RemoteQueryExecutor::Extension StorageHDFSCluster::getTaskIteratorExtension(ASTPtr, const ContextPtr & context) const { auto iterator = std::make_shared(context, uri); - auto callback = std::make_shared([iter = std::move(iterator)]() mutable -> String { return iter->next(); }); + auto callback = std::make_shared>([iter = std::move(iterator)]() mutable -> String { return iter->next().path; }); return RemoteQueryExecutor::Extension{.task_iterator = std::move(callback)}; } diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 647f9511052..a20a4e63ba6 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -257,35 +257,40 @@ std::unique_ptr selectReadBuffer( return res; } -std::unique_ptr createReadBuffer( - const String & current_path, - bool use_table_fd, - const String & storage_name, - int table_fd, - const String & compression_method, - ContextPtr context) +struct stat getFileStat(const String & current_path, bool use_table_fd, int table_fd, const String & storage_name) { - CompressionMethod method; - struct stat file_stat{}; - if (use_table_fd) { /// Check if file descriptor allows random reads (and reading it twice). if (0 != fstat(table_fd, &file_stat)) throwFromErrno("Cannot stat table file descriptor, inside " + storage_name, ErrorCodes::CANNOT_STAT); - - method = chooseCompressionMethod("", compression_method); } else { /// Check if file descriptor allows random reads (and reading it twice). if (0 != stat(current_path.c_str(), &file_stat)) throwFromErrno("Cannot stat file " + current_path, ErrorCodes::CANNOT_STAT); - - method = chooseCompressionMethod(current_path, compression_method); } + return file_stat; +} + +std::unique_ptr createReadBuffer( + const String & current_path, + const struct stat & file_stat, + bool use_table_fd, + int table_fd, + const String & compression_method, + ContextPtr context) +{ + CompressionMethod method; + + if (use_table_fd) + method = chooseCompressionMethod("", compression_method); + else + method = chooseCompressionMethod(current_path, compression_method); + std::unique_ptr nested_buffer = selectReadBuffer(current_path, use_table_fd, table_fd, file_stat, context); /// For clickhouse-local and clickhouse-client add progress callback to display progress bar. @@ -355,7 +360,8 @@ ColumnsDescription StorageFile::getTableStructureFromFileDescriptor(ContextPtr c { /// We will use PeekableReadBuffer to create a checkpoint, so we need a place /// where we can store the original read buffer. 
- read_buffer_from_fd = createReadBuffer("", true, getName(), table_fd, compression_method, context); + auto file_stat = getFileStat("", true, table_fd, getName()); + read_buffer_from_fd = createReadBuffer("", file_stat, true, table_fd, compression_method, context); auto read_buf = std::make_unique(*read_buffer_from_fd); read_buf->setCheckpoint(); return read_buf; @@ -396,12 +402,24 @@ ColumnsDescription StorageFile::getTableStructureFromFile( if (context->getSettingsRef().schema_inference_use_cache_for_file) columns_from_cache = tryGetColumnsFromCache(paths, format, format_settings, context); - ReadBufferIterator read_buffer_iterator = [&, it = paths.begin()](ColumnsDescription &) mutable -> std::unique_ptr + ReadBufferIterator read_buffer_iterator = [&, it = paths.begin(), first = true](ColumnsDescription & columns) mutable -> std::unique_ptr { if (it == paths.end()) + { + if (first) + throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, + "Cannot extract table structure from {} format file, because all files are empty. You must specify table structure manually", + format); return nullptr; + } - return createReadBuffer(*it++, false, "File", -1, compression_method, context); + auto path = *it++; + auto file_stat = getFileStat(path, false, -1, "File"); + if (context->getSettingsRef().engine_file_skip_empty_files && file_stat.st_size == 0) + return read_buffer_iterator(columns); + + first = false; + return createReadBuffer(path, file_stat, false, -1, compression_method, context); }; ColumnsDescription columns; @@ -628,7 +646,12 @@ public: } if (!read_buf) - read_buf = createReadBuffer(current_path, storage->use_table_fd, storage->getName(), storage->table_fd, storage->compression_method, context); + { + auto file_stat = getFileStat(current_path, storage->use_table_fd, storage->table_fd, storage->getName()); + if (context->getSettingsRef().engine_file_skip_empty_files && file_stat.st_size == 0) + continue; + read_buf = createReadBuffer(current_path, file_stat, storage->use_table_fd, storage->table_fd, storage->compression_method, context); + } const Settings & settings = context->getSettingsRef(); chassert(!storage->paths.empty()); diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 2a2192d9cfe..9c4791020f2 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -573,6 +573,11 @@ StorageS3Source::ReaderHolder StorageS3Source::createReader() return {}; size_t object_size = info ? info->size : S3::getObjectSize(*client, bucket, current_key, version_id, request_settings); + + /// If object is empty and s3_skip_empty_files=1, skip it and go to the next key. + if (getContext()->getSettingsRef().s3_skip_empty_files && object_size == 0) + return createReader(); + auto compression_method = chooseCompressionMethod(current_key, compression_hint); InputFormatPtr input_format; @@ -1456,7 +1461,7 @@ ColumnsDescription StorageS3::getTableStructureFromDataImpl( ReadBufferIterator read_buffer_iterator = [&, first = true](ColumnsDescription & cached_columns) mutable -> std::unique_ptr { - auto [key, _] = (*file_iterator)(); + auto [key, info] = (*file_iterator)(); if (key.empty()) { @@ -1464,11 +1469,14 @@ ColumnsDescription StorageS3::getTableStructureFromDataImpl( throw Exception( ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "Cannot extract table structure from {} format file, because there are no files with provided path " - "in S3. You must specify table structure manually", configuration.format); + "in S3 or all files are empty. 
You must specify table structure manually", configuration.format); return nullptr; } + if (ctx->getSettingsRef().s3_skip_empty_files && info->size == 0) + return read_buffer_iterator(cached_columns); + /// S3 file iterator could get new keys after new iteration, check them in schema cache. if (ctx->getSettingsRef().schema_inference_use_cache_for_s3 && read_keys.size() > prev_read_keys_size) { diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index da8f6a151b2..706ce481a24 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -49,6 +49,7 @@ namespace ErrorCodes extern const int NETWORK_ERROR; extern const int BAD_ARGUMENTS; extern const int LOGICAL_ERROR; + extern const int CANNOT_EXTRACT_TABLE_STRUCTURE; } static constexpr auto bad_arguments_error_message = "Storage URL requires 1-4 arguments: " @@ -242,15 +243,16 @@ StorageURLSource::StorageURLSource( auto headers = getHeaders(headers_); /// Lazy initialization. We should not perform requests in constructor, because we need to do it in query pipeline. - initialize = [=, this](const FailoverOptions & uri_options) + initialize = [=, this]() { - if (uri_options.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Got empty url list"); + const auto current_uri_options = (*uri_iterator)(); + if (current_uri_options.empty()) + return false; - auto first_option = uri_options.begin(); + auto first_option = current_uri_options.begin(); auto [actual_uri, buf_factory] = getFirstAvailableURIAndReadBuffer( first_option, - uri_options.end(), + current_uri_options.end(), context, params, http_method, @@ -259,7 +261,11 @@ StorageURLSource::StorageURLSource( credentials, headers, glob_url, - uri_options.size() == 1); + current_uri_options.size() == 1); + + /// If file is empty and engine_url_skip_empty_files=1, skip it and go to the next file. + if (context->getSettingsRef().engine_url_skip_empty_files && buf_factory->getFileSize() == 0) + return initialize(); curr_uri = actual_uri; @@ -292,6 +298,7 @@ StorageURLSource::StorageURLSource( pipeline = std::make_unique(QueryPipelineBuilder::getPipeline(std::move(builder))); reader = std::make_unique(*pipeline); + return true; }; } @@ -306,14 +313,8 @@ Chunk StorageURLSource::generate() break; } - if (!reader) - { - auto current_uri = (*uri_iterator)(); - if (current_uri.empty()) - return {}; - - initialize(current_uri); - } + if (!reader && !initialize()) + return {}; Chunk chunk; if (reader->pull(chunk)) @@ -592,10 +593,16 @@ ColumnsDescription IStorageURLBase::getTableStructureFromData( if (context->getSettingsRef().schema_inference_use_cache_for_url) columns_from_cache = tryGetColumnsFromCache(urls_to_check, headers, credentials, format, format_settings, context); - ReadBufferIterator read_buffer_iterator = [&, it = urls_to_check.cbegin()](ColumnsDescription &) mutable -> std::unique_ptr + ReadBufferIterator read_buffer_iterator = [&, it = urls_to_check.cbegin(), first = true](ColumnsDescription & columns) mutable -> std::unique_ptr { if (it == urls_to_check.cend()) + { + if (first) + throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, + "Cannot extract table structure from {} format file, because all files are empty. 
" + "You must specify table structure manually", format); return nullptr; + } auto [_, buf_factory] = StorageURLSource::getFirstAvailableURIAndReadBuffer( it, @@ -609,7 +616,13 @@ ColumnsDescription IStorageURLBase::getTableStructureFromData( headers, false, false); + ++it; + + if (context->getSettingsRef().engine_url_skip_empty_files && buf_factory->getFileSize() == 0) + return read_buffer_iterator(columns); + + first = false; return wrapReadBufferWithCompressionMethod( buf_factory->getReader(), compression_method, diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index d53b72105e4..4cd4b66e69a 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -197,7 +197,7 @@ public: bool delay_initialization); private: - using InitializeFunc = std::function; + using InitializeFunc = std::function; InitializeFunc initialize; String name; diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index edf5344e887..5ac1d3bea6f 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -816,6 +816,56 @@ def test_hdfsCluster_unset_skip_unavailable_shards(started_cluster): ) +def test_skip_empty_files(started_cluster): + node = started_cluster.instances["node1"] + + node.query( + f"insert into function hdfs('hdfs://hdfs1:9000/skip_empty_files1.parquet', TSVRaw) select * from numbers(0) settings hdfs_truncate_on_insert=1" + ) + + node.query( + f"insert into function hdfs('hdfs://hdfs1:9000/skip_empty_files2.parquet') select * from numbers(1) settings hdfs_truncate_on_insert=1" + ) + + node.query_and_get_error( + f"select * from hdfs('hdfs://hdfs1:9000/skip_empty_files1.parquet') settings hdfs_skip_empty_files=0" + ) + + node.query_and_get_error( + f"select * from hdfs('hdfs://hdfs1:9000/skip_empty_files1.parquet', auto, 'number UINt64') settings hdfs_skip_empty_files=0" + ) + + node.query_and_get_error( + f"select * from hdfs('hdfs://hdfs1:9000/skip_empty_files1.parquet') settings hdfs_skip_empty_files=1" + ) + + res = node.query( + f"select * from hdfs('hdfs://hdfs1:9000/skip_empty_files1.parquet', auto, 'number UInt64') settings hdfs_skip_empty_files=1" + ) + + assert len(res) == 0 + + node.query_and_get_error( + f"select * from hdfs('hdfs://hdfs1:9000/skip_empty_files*.parquet') settings hdfs_skip_empty_files=0" + ) + + node.query_and_get_error( + f"select * from hdfs('hdfs://hdfs1:9000/skip_empty_files*.parquet', auto, 'number UInt64') settings hdfs_skip_empty_files=0" + ) + + res = node.query( + f"select * from hdfs('hdfs://hdfs1:9000/skip_empty_files*.parquet') settings hdfs_skip_empty_files=1" + ) + + assert int(res) == 0 + + res = node.query( + f"select * from hdfs('hdfs://hdfs1:9000/skip_empty_files*.parquet', auto, 'number UInt64') settings hdfs_skip_empty_files=1" + ) + + assert int(res) == 0 + + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index d9ac70f51ad..516c8ed152a 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -1713,3 +1713,58 @@ def test_s3_list_objects_failure(started_cluster): assert ei.value.returncode == 243 assert "Could not list objects" in ei.value.stderr + + +def test_skip_empty_files(started_cluster): + bucket = started_cluster.minio_bucket + instance = started_cluster.instances["dummy"] + + + instance.query( + f"insert into function 
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files1.parquet', TSVRaw) select * from numbers(0) settings s3_truncate_on_insert=1" + ) + + instance.query( + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files2.parquet') select * from numbers(1) settings s3_truncate_on_insert=1" + ) + def test(engine, setting): + instance.query_and_get_error( + f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files1.parquet') settings {setting}=0" + ) + + instance.query_and_get_error( + f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files1.parquet', auto, 'number UINt64') settings {setting}=0" + ) + + instance.query_and_get_error( + f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files1.parquet') settings {setting}=1" + ) + + res = instance.query( + f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files1.parquet', auto, 'number UInt64') settings {setting}=1" + ) + + assert len(res) == 0 + + instance.query_and_get_error( + f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files{{1,2}}.parquet') settings {setting}=0" + ) + + instance.query_and_get_error( + f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files{{1,2}}.parquet', auto, 'number UInt64') settings {setting}=0" + ) + + res = instance.query( + f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files{{1,2}}.parquet') settings {setting}=1" + ) + + assert int(res) == 0 + + res = instance.query( + f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files{{1,2}}.parquet', auto, 'number UInt64') settings {setting}=1" + ) + + assert int(res) == 0 + + test("s3", "s3_skip_empty_files") + test("url", "engine_url_skip_empty_files") diff --git a/tests/queries/0_stateless/02771_skip_empty_files.reference b/tests/queries/0_stateless/02771_skip_empty_files.reference new file mode 100644 index 00000000000..83f2e99acd0 --- /dev/null +++ b/tests/queries/0_stateless/02771_skip_empty_files.reference @@ -0,0 +1,7 @@ +1 +1 +1 +1 +1 +0 +0 diff --git a/tests/queries/0_stateless/02771_skip_empty_files.sh b/tests/queries/0_stateless/02771_skip_empty_files.sh new file mode 100755 index 00000000000..99f43d7868a --- /dev/null +++ b/tests/queries/0_stateless/02771_skip_empty_files.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +FILE_PREFIX=$CLICKHOUSE_TEST_UNIQUE_NAME +touch $FILE_PREFIX-1.parquet +$CLICKHOUSE_LOCAL -q "select * from numbers(1) format Parquet" > $FILE_PREFIX-2.parquet +$CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-1.parquet') settings engine_file_skip_empty_files=0" 2>&1 | grep -c "CANNOT_EXTRACT_TABLE_STRUCTURE" +$CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-1.parquet', auto, 'number UInt64') settings engine_file_skip_empty_files=0" 2>&1 | grep -c "Exception" +$CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-1.parquet') settings engine_file_skip_empty_files=1" 2>&1 | grep -c "Exception" +$CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-1.parquet', auto, 'number UInt64') settings engine_file_skip_empty_files=1" +$CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-*.parquet') settings engine_file_skip_empty_files=0" 2>&1 | grep -c "Exception" +$CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-*.parquet', auto, 'number UInt64') settings engine_file_skip_empty_files=0" 2>&1 | grep -c "Exception" +$CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-*.parquet') settings engine_file_skip_empty_files=1" +$CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-*.parquet', auto, 'number UInt64') settings engine_file_skip_empty_files=1" + + + + + + +rm $FILE_PREFIX-* From 38634cc5c5221a6ec646fc11dff34deda7c6b7d2 Mon Sep 17 00:00:00 2001 From: tpanetti Date: Wed, 24 May 2023 13:49:18 +0100 Subject: [PATCH 142/722] Convert Clickhouse Types to MySQL types in Compatibility mode This changes MySQL compatibility mode to display MySQL compatible types --- src/DataTypes/DataTypeAggregateFunction.h | 1 + src/DataTypes/DataTypeArray.h | 4 + src/DataTypes/DataTypeDate.h | 1 + src/DataTypes/DataTypeDate32.h | 1 + src/DataTypes/DataTypeDateTime.h | 1 + src/DataTypes/DataTypeDateTime64.h | 1 + src/DataTypes/DataTypeEnum.cpp | 24 ++++ src/DataTypes/DataTypeEnum.h | 3 + src/DataTypes/DataTypeFixedString.h | 1 + src/DataTypes/DataTypeFunction.h | 1 + src/DataTypes/DataTypeIPv4andIPv6.h | 4 + src/DataTypes/DataTypeInterval.h | 1 + src/DataTypes/DataTypeLowCardinality.h | 2 + src/DataTypes/DataTypeMap.h | 1 + src/DataTypes/DataTypeNothing.h | 2 + src/DataTypes/DataTypeNullable.h | 1 + src/DataTypes/DataTypeNumberBase.cpp | 17 +++ src/DataTypes/DataTypeNumberBase.h | 3 + src/DataTypes/DataTypeObject.h | 1 + src/DataTypes/DataTypeSet.h | 2 + src/DataTypes/DataTypeString.h | 3 + src/DataTypes/DataTypeTuple.h | 1 + src/DataTypes/DataTypeUUID.h | 2 + src/DataTypes/DataTypesDecimal.h | 3 + src/DataTypes/IDataType.h | 10 ++ src/Storages/System/StorageSystemColumns.cpp | 15 ++- .../02740_show_columns_mysql_compatibility.sh | 116 ++++++++++++++++++ 27 files changed, 221 insertions(+), 1 deletion(-) create mode 100755 tests/queries/0_stateless/02740_show_columns_mysql_compatibility.sh diff --git a/src/DataTypes/DataTypeAggregateFunction.h b/src/DataTypes/DataTypeAggregateFunction.h index 2d712d9c686..697be13652c 100644 --- a/src/DataTypes/DataTypeAggregateFunction.h +++ b/src/DataTypes/DataTypeAggregateFunction.h @@ -45,6 +45,7 @@ public: String doGetName() const override; String getNameWithoutVersion() const; const char * getFamilyName() const override { return "AggregateFunction"; } + const char * getMySQLName() const override { return "text"; } TypeIndex getTypeId() const override { return TypeIndex::AggregateFunction; } Array getParameters() const { return parameters; } diff --git a/src/DataTypes/DataTypeArray.h b/src/DataTypes/DataTypeArray.h index 
033a657c845..35462df9a4e 100644 --- a/src/DataTypes/DataTypeArray.h +++ b/src/DataTypes/DataTypeArray.h @@ -30,6 +30,10 @@ public: { return "Array"; } + const char * getMySQLName() const override + { + return "string"; + } bool canBeInsideNullable() const override { diff --git a/src/DataTypes/DataTypeDate.h b/src/DataTypes/DataTypeDate.h index 2f17207cc07..33bcb6123ff 100644 --- a/src/DataTypes/DataTypeDate.h +++ b/src/DataTypes/DataTypeDate.h @@ -13,6 +13,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Date; } const char * getFamilyName() const override { return family_name; } + const char * getMySQLName() const override { return "date"; } bool canBeUsedAsVersion() const override { return true; } bool canBeInsideNullable() const override { return true; } diff --git a/src/DataTypes/DataTypeDate32.h b/src/DataTypes/DataTypeDate32.h index 9160b62dc15..56315f46e8c 100644 --- a/src/DataTypes/DataTypeDate32.h +++ b/src/DataTypes/DataTypeDate32.h @@ -13,6 +13,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Date32; } const char * getFamilyName() const override { return family_name; } + const char * getMySQLName() const override { return "date"; } Field getDefault() const override { diff --git a/src/DataTypes/DataTypeDateTime.h b/src/DataTypes/DataTypeDateTime.h index 91a09ff7cb9..c868f92c311 100644 --- a/src/DataTypes/DataTypeDateTime.h +++ b/src/DataTypes/DataTypeDateTime.h @@ -36,6 +36,7 @@ public: static constexpr auto family_name = "DateTime"; const char * getFamilyName() const override { return family_name; } + const char * getMySQLName() const override { return "datetime"; } String doGetName() const override; TypeIndex getTypeId() const override { return TypeIndex::DateTime; } diff --git a/src/DataTypes/DataTypeDateTime64.h b/src/DataTypes/DataTypeDateTime64.h index aaa99485040..8d317bb9430 100644 --- a/src/DataTypes/DataTypeDateTime64.h +++ b/src/DataTypes/DataTypeDateTime64.h @@ -28,6 +28,7 @@ public: DataTypeDateTime64(UInt32 scale_, const TimezoneMixin & time_zone_info); const char * getFamilyName() const override { return family_name; } + const char * getMySQLName() const override { return "datetime"; } std::string doGetName() const override; TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypeEnum.cpp b/src/DataTypes/DataTypeEnum.cpp index 3c3ac2ae4e2..bfed4d4d5a2 100644 --- a/src/DataTypes/DataTypeEnum.cpp +++ b/src/DataTypes/DataTypeEnum.cpp @@ -36,6 +36,29 @@ const char * DataTypeEnum::getFamilyName() const return EnumName::value; } +template +std::string DataTypeEnum::generateMySQLName(const Values & values) +{ + WriteBufferFromOwnString out; + + writeString("enum", out); + writeChar('(', out); + + auto first = true; + for (const auto & name_and_value : values) + { + if (!first) + writeString(", ", out); + + first = false; + + writeQuotedString(name_and_value.first, out); + } + + writeChar(')', out); + + return out.str(); +} template std::string DataTypeEnum::generateName(const Values & values) @@ -67,6 +90,7 @@ template DataTypeEnum::DataTypeEnum(const Values & values_) : EnumValues(values_) , type_name(generateName(this->getValues())) + , my_sql_type_name(generateMySQLName(this->getValues())) { } diff --git a/src/DataTypes/DataTypeEnum.h b/src/DataTypes/DataTypeEnum.h index 2f607fc2aa6..c6e523adf96 100644 --- a/src/DataTypes/DataTypeEnum.h +++ b/src/DataTypes/DataTypeEnum.h @@ -45,13 +45,16 @@ public: private: std::string type_name; + std::string my_sql_type_name; static std::string 
generateName(const Values & values); + static std::string generateMySQLName(const Values & values); public: explicit DataTypeEnum(const Values & values_); std::string doGetName() const override { return type_name; } const char * getFamilyName() const override; + const char * getMySQLName() const override { return my_sql_type_name.c_str(); } TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypeFixedString.h b/src/DataTypes/DataTypeFixedString.h index 8d114121c1a..eb09914ec9c 100644 --- a/src/DataTypes/DataTypeFixedString.h +++ b/src/DataTypes/DataTypeFixedString.h @@ -42,6 +42,7 @@ public: TypeIndex getTypeId() const override { return type_id; } const char * getFamilyName() const override { return "FixedString"; } + const char * getMySQLName() const override { return "text"; } size_t getN() const { diff --git a/src/DataTypes/DataTypeFunction.h b/src/DataTypes/DataTypeFunction.h index 888bcb6a775..f3423796126 100644 --- a/src/DataTypes/DataTypeFunction.h +++ b/src/DataTypes/DataTypeFunction.h @@ -24,6 +24,7 @@ public: std::string doGetName() const override; const char * getFamilyName() const override { return "Function"; } + const char * getMySQLName() const override { return "text"; } TypeIndex getTypeId() const override { return TypeIndex::Function; } const DataTypes & getArgumentTypes() const diff --git a/src/DataTypes/DataTypeIPv4andIPv6.h b/src/DataTypes/DataTypeIPv4andIPv6.h index ad70bdae933..8f7fe79793b 100644 --- a/src/DataTypes/DataTypeIPv4andIPv6.h +++ b/src/DataTypes/DataTypeIPv4andIPv6.h @@ -19,6 +19,8 @@ public: static constexpr auto type_id = TypeToTypeIndex; const char * getFamilyName() const override { return TypeName.data(); } + const char * getMySQLName() const override { return "text"; } + TypeIndex getTypeId() const override { return type_id; } Field getDefault() const override { return IPv4{}; } @@ -59,6 +61,8 @@ public: static constexpr auto type_id = TypeToTypeIndex; const char * getFamilyName() const override { return TypeName.data(); } + const char * getMySQLName() const override { return "text"; } + TypeIndex getTypeId() const override { return type_id; } Field getDefault() const override { return IPv6{}; } diff --git a/src/DataTypes/DataTypeInterval.h b/src/DataTypes/DataTypeInterval.h index 05abe1d9b24..69a56e8aadd 100644 --- a/src/DataTypes/DataTypeInterval.h +++ b/src/DataTypes/DataTypeInterval.h @@ -26,6 +26,7 @@ public: std::string doGetName() const override { return fmt::format("Interval{}", kind.toString()); } const char * getFamilyName() const override { return "Interval"; } + const char * getMySQLName() const override { return "text"; } TypeIndex getTypeId() const override { return TypeIndex::Interval; } bool equals(const IDataType & rhs) const override; diff --git a/src/DataTypes/DataTypeLowCardinality.h b/src/DataTypes/DataTypeLowCardinality.h index d301a0f5443..6fd4344311c 100644 --- a/src/DataTypes/DataTypeLowCardinality.h +++ b/src/DataTypes/DataTypeLowCardinality.h @@ -22,6 +22,8 @@ public: return "LowCardinality(" + dictionary_type->getName() + ")"; } const char * getFamilyName() const override { return "LowCardinality"; } + const char * getMySQLName() const override { return "text"; } + TypeIndex getTypeId() const override { return TypeIndex::LowCardinality; } MutableColumnPtr createColumn() const override; diff --git a/src/DataTypes/DataTypeMap.h b/src/DataTypes/DataTypeMap.h index 4712f6bbdef..526dc321f44 100644 --- a/src/DataTypes/DataTypeMap.h +++ b/src/DataTypes/DataTypeMap.h @@ -30,6 +30,7 @@ public: 
TypeIndex getTypeId() const override { return TypeIndex::Map; } std::string doGetName() const override; const char * getFamilyName() const override { return "Map"; } + const char * getMySQLName() const override { return "json"; } bool canBeInsideNullable() const override { return false; } diff --git a/src/DataTypes/DataTypeNothing.h b/src/DataTypes/DataTypeNothing.h index c7d12388de9..fdef6026603 100644 --- a/src/DataTypes/DataTypeNothing.h +++ b/src/DataTypes/DataTypeNothing.h @@ -16,6 +16,8 @@ public: static constexpr bool is_parametric = false; const char * getFamilyName() const override { return "Nothing"; } + const char * getMySQLName() const override { return "text"; } + TypeIndex getTypeId() const override { return TypeIndex::Nothing; } MutableColumnPtr createColumn() const override; diff --git a/src/DataTypes/DataTypeNullable.h b/src/DataTypes/DataTypeNullable.h index 06d46fb15ed..64b201d32b2 100644 --- a/src/DataTypes/DataTypeNullable.h +++ b/src/DataTypes/DataTypeNullable.h @@ -16,6 +16,7 @@ public: explicit DataTypeNullable(const DataTypePtr & nested_data_type_); std::string doGetName() const override { return "Nullable(" + nested_data_type->getName() + ")"; } const char * getFamilyName() const override { return "Nullable"; } + const char * getMySQLName() const override { return nested_data_type->getMySQLName(); } TypeIndex getTypeId() const override { return TypeIndex::Nullable; } MutableColumnPtr createColumn() const override; diff --git a/src/DataTypes/DataTypeNumberBase.cpp b/src/DataTypes/DataTypeNumberBase.cpp index f668a4c522e..cd5e73ac4a1 100644 --- a/src/DataTypes/DataTypeNumberBase.cpp +++ b/src/DataTypes/DataTypeNumberBase.cpp @@ -30,6 +30,23 @@ bool DataTypeNumberBase::isValueRepresentedByUnsignedInteger() const return is_integer && is_unsigned_v; } +template +const std::map DataTypeNumberBase::mysqlTypeMap = { + {"UInt8", "tinyint unsigned"}, + {"UInt16", "smallint unsigned"}, + {"UInt32", "mediumint unsigned"}, + {"UInt64", "bigint unsigned"}, + {"UInt128", "bigint unsigned"}, + {"UInt256", "bigint unsigned"}, + {"Int8", "tinyint"}, + {"Int16", "smallint"}, + {"Int32", "int"}, + {"Int64", "bigint"}, + {"Int128", "bigint"}, + {"Int256", "bigint"}, + {"Float32", "float"}, + {"Float64", "double"}, +}; /// Explicit template instantiations - to avoid code bloat in headers. template class DataTypeNumberBase; diff --git a/src/DataTypes/DataTypeNumberBase.h b/src/DataTypes/DataTypeNumberBase.h index 3a5b11c5124..b5c963cf245 100644 --- a/src/DataTypes/DataTypeNumberBase.h +++ b/src/DataTypes/DataTypeNumberBase.h @@ -20,11 +20,14 @@ public: static constexpr bool is_parametric = false; static constexpr auto family_name = TypeName; static constexpr auto type_id = TypeToTypeIndex; + // Create a map from the name of the type to the name of the type in MySQL. 
+ static const std::map mysqlTypeMap; using FieldType = T; using ColumnType = ColumnVector; const char * getFamilyName() const override { return TypeName.data(); } + const char * getMySQLName() const override { return mysqlTypeMap.at(TypeName.data()).c_str(); } TypeIndex getTypeId() const override { return TypeToTypeIndex; } Field getDefault() const override; diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 937a9091371..8a2c36abcd7 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -23,6 +23,7 @@ public: DataTypeObject(const String & schema_format_, bool is_nullable_); const char * getFamilyName() const override { return "Object"; } + const char * getMySQLName() const override { return "json"; } String doGetName() const override; TypeIndex getTypeId() const override { return TypeIndex::Object; } diff --git a/src/DataTypes/DataTypeSet.h b/src/DataTypes/DataTypeSet.h index 7ddfeb9fe30..bdad638b5d5 100644 --- a/src/DataTypes/DataTypeSet.h +++ b/src/DataTypes/DataTypeSet.h @@ -15,6 +15,8 @@ class DataTypeSet final : public IDataTypeDummy public: static constexpr bool is_parametric = true; const char * getFamilyName() const override { return "Set"; } + const char * getMySQLName() const override { return "text"; } + TypeIndex getTypeId() const override { return TypeIndex::Set; } bool equals(const IDataType & rhs) const override { return typeid(rhs) == typeid(*this); } bool isParametric() const override { return true; } diff --git a/src/DataTypes/DataTypeString.h b/src/DataTypes/DataTypeString.h index 5f3bde43a13..3ac739fe68c 100644 --- a/src/DataTypes/DataTypeString.h +++ b/src/DataTypes/DataTypeString.h @@ -21,6 +21,9 @@ public: return "String"; } + // FIXME: string can contain arbitrary bytes, not only UTF-8 sequences + const char * getMySQLName() const override { return "text"; } + TypeIndex getTypeId() const override { return type_id; } MutableColumnPtr createColumn() const override; diff --git a/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h index 152f21015f5..d264cc97f60 100644 --- a/src/DataTypes/DataTypeTuple.h +++ b/src/DataTypes/DataTypeTuple.h @@ -33,6 +33,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Tuple; } std::string doGetName() const override; const char * getFamilyName() const override { return "Tuple"; } + const char * getMySQLName() const override { return "json"; } bool canBeInsideNullable() const override { return false; } bool supportsSparseSerialization() const override { return true; } diff --git a/src/DataTypes/DataTypeUUID.h b/src/DataTypes/DataTypeUUID.h index af9f1f35ca5..4d54db42b45 100644 --- a/src/DataTypes/DataTypeUUID.h +++ b/src/DataTypes/DataTypeUUID.h @@ -18,6 +18,8 @@ public: static constexpr auto type_id = TypeIndex::UUID; const char * getFamilyName() const override { return "UUID"; } + const char * getMySQLName() const override { return "char"; } + TypeIndex getTypeId() const override { return type_id; } Field getDefault() const override; diff --git a/src/DataTypes/DataTypesDecimal.h b/src/DataTypes/DataTypesDecimal.h index 583f7ea804a..5c9405cb060 100644 --- a/src/DataTypes/DataTypesDecimal.h +++ b/src/DataTypes/DataTypesDecimal.h @@ -37,8 +37,11 @@ public: using Base::Base; static constexpr auto family_name = "Decimal"; + static constexpr auto mysql_name = "decimal"; const char * getFamilyName() const override { return family_name; } + const char * getMySQLName() const override { return mysql_name; } + std::string doGetName() const override; 
TypeIndex getTypeId() const override { return TypeToTypeIndex; } bool canBePromoted() const override { return true; } diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 7cc18fea00c..2bed18897ce 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -71,10 +71,19 @@ public: return doGetName(); } + /// MySQL equivalent Name of data type (examples: UInt64, Array(String)). + String getMySQLTypeName() const + { + if (custom_name) + return custom_name->getName(); + else + return doGetMySQLName(); + } DataTypePtr getPtr() const { return shared_from_this(); } /// Name of data type family (example: FixedString, Array). virtual const char * getFamilyName() const = 0; + virtual const char * getMySQLName() const = 0; /// Data type id. It's used for runtime type checks. virtual TypeIndex getTypeId() const = 0; @@ -126,6 +135,7 @@ public: protected: virtual String doGetName() const { return getFamilyName(); } + virtual String doGetMySQLName() const { return getMySQLName(); } virtual SerializationPtr doGetDefaultSerialization() const = 0; public: diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index 18e7d269795..f391a392dbb 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -74,6 +74,7 @@ public: : ISource(header_) , columns_mask(std::move(columns_mask_)), max_block_size(max_block_size_) , databases(std::move(databases_)), tables(std::move(tables_)), storages(std::move(storages_)) + , clientInfo(context->getClientInfo()) , total_tables(tables->size()), access(context->getAccess()) , query_id(context->getCurrentQueryId()), lock_acquire_timeout(context->getSettingsRef().lock_acquire_timeout) { @@ -129,6 +130,17 @@ protected: bool check_access_for_columns = check_access_for_tables && !access->isGranted(AccessType::SHOW_COLUMNS, database_name, table_name); + auto get_type_name = [this](const IDataType& type) -> std::string + { + if (clientInfo.interface == DB::ClientInfo::Interface::MYSQL) + { + return type.getMySQLTypeName(); + } + else + { + return type.getName(); + } + }; size_t position = 0; for (const auto & column : columns) { @@ -146,7 +158,7 @@ protected: if (columns_mask[src_index++]) res_columns[res_index++]->insert(column.name); if (columns_mask[src_index++]) - res_columns[res_index++]->insert(column.type->getName()); + res_columns[res_index++]->insert(get_type_name(*column.type)); if (columns_mask[src_index++]) res_columns[res_index++]->insert(position); @@ -281,6 +293,7 @@ private: ColumnPtr databases; ColumnPtr tables; Storages storages; + ClientInfo clientInfo; size_t db_table_num = 0; size_t total_tables; std::shared_ptr access; diff --git a/tests/queries/0_stateless/02740_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02740_show_columns_mysql_compatibility.sh new file mode 100755 index 00000000000..7f828d35679 --- /dev/null +++ b/tests/queries/0_stateless/02740_show_columns_mysql_compatibility.sh @@ -0,0 +1,116 @@ +#!/bin/bash + +# This script tests the MySQL compatibility of the SHOW COLUMNS command in ClickHouse +USER="default" +PASSWORD="" +HOST="127.0.0.1" +PORT=9004 + +# First run the clickhouse test to create the ClickHouse Tables + +echo "Drop tables if they exist" +${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS tab" +${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde" +${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde.tab" + +echo "Create tab table " 
+${CLICKHOUSE_LOCAL} --query " + CREATE TABLE tab + ( + uint64 UInt64, + int32 Nullable(Int32), + float32 Float32, + float64 Float64, + decimal_value Decimal(10, 2), + boolean_value UInt8, -- Use 0 for false, 1 for true + string_value String, + fixed_string_value FixedString(10), + date_value Date, + date32_value Date32, + datetime_value DateTime, + datetime64_value DateTime64(3), + json_value String, -- Store JSON as a string + uuid_value UUID, + enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), + low_cardinality LowCardinality(String), + array_value Array(Int32), + map_value Map(String, Int32), + tuple_value Tuple(Int32, String), + nullable_value Nullable(Int32), + ipv4_value IPv4, + ipv6_value IPv6, + nested Nested + ( + nested_int Int32, + nested_string String + ) + ) ENGINE = MergeTree + ORDER BY uint64; + " + + +echo "Create pseudo-random database name" +${CLICKHOUSE_LOCAL} --query "CREATE DATABASE database_123456789abcde;" + +echo "Create tab duplicate table" +${CLICKHOUSE_LOCAL} --query " + CREATE TABLE database_123456789abcde.tab + ( + uint64 UInt64, + int32 Nullable(Int32), + float32 Float32, + float64 Float64, + decimal_value Decimal(10, 2), + boolean_value UInt8, -- Use 0 for false, 1 for true + string_value String, + fixed_string_value FixedString(10), + date_value Date, + date32_value Date32, + datetime_value DateTime, + datetime64_value DateTime64(3), + json_value String, -- Store JSON as a string + uuid_value UUID, + enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), + low_cardinality LowCardinality(String), + array_value Array(Int32), + map_value Map(String, Int32), + tuple_value Tuple(Int32, String), + nullable_value Nullable(Int32), + ipv4_value IPv4, + ipv6_value IPv6, + nested Nested + ( + nested_int Int32, + nested_string String + ) + ) ENGINE = MergeTree + ORDER BY uint64; + " + +# Write sql to temp file +TEMP_FILE=$(mktemp) + +cat < $TEMP_FILE +SHOW COLUMNS FROM tab; +SHOW EXTENDED COLUMNS FROM tab; +SHOW FULL COLUMNS FROM tab; +SHOW COLUMNS FROM tab LIKE '%int%'; +SHOW COLUMNS FROM tab NOT LIKE '%int%'; +SHOW COLUMNS FROM tab ILIKE '%INT%'; +SHOW COLUMNS FROM tab NOT ILIKE '%INT%'; +SHOW COLUMNS FROM tab WHERE field LIKE '%int%'; +SHOW COLUMNS FROM tab LIMIT 1; +SHOW COLUMNS FROM tab; +SHOW COLUMNS FROM tab FROM database_123456789abcde; +SHOW COLUMNS FROM database_123456789abcde.tab; +DROP DATABASE database_123456789abcde; +DROP TABLE tab; +EOT + +# Now run the MySQL test script on the ClickHouse DB +echo "Run MySQL test" +mysql --user="$USER" --password="$PASSWORD" --host="$HOST" --port="$PORT" < $TEMP_FILE + +# Clean up the temp file +rm $TEMP_FILE + From bd5a1ae2b97b66361e5b958811a6055f8f5cd2ae Mon Sep 17 00:00:00 2001 From: tpanetti Date: Tue, 30 May 2023 13:32:33 -0700 Subject: [PATCH 143/722] Revert "Change SHOW COLUMNS query to display MySQL types in MySQL Compatibility mode" This reverts commit ddbad79c5e67518acebbacaad5be0cad3967ac67. 
--- .../InterpreterShowColumnsQuery.cpp | 76 +------ .../InterpreterShowColumnsQuery.h | 1 - ...show_columns_mysql_compatibility.reference | 213 ------------------ .../02726_show_columns_mysql_compatibility.sh | 115 ---------- 4 files changed, 3 insertions(+), 402 deletions(-) delete mode 100644 tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference delete mode 100755 tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh diff --git a/src/Interpreters/InterpreterShowColumnsQuery.cpp b/src/Interpreters/InterpreterShowColumnsQuery.cpp index 0ad93e37b58..c86d3c753c4 100644 --- a/src/Interpreters/InterpreterShowColumnsQuery.cpp +++ b/src/Interpreters/InterpreterShowColumnsQuery.cpp @@ -42,11 +42,9 @@ SELECT if (default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, '' AS extra )"; - rewritten_query += getMySQLQuery(); - } - else { - rewritten_query += "SELECT name AS field, type AS type, startsWith(type, 'Nullable') AS null, trim(concatWithSeparator(' ', if(is_in_primary_key, 'PRI', ''), if (is_in_sorting_key, 'SOR', ''))) AS key, if(default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, '' AS extra "; - } + // TODO Interpret query.extended. It is supposed to show internal/virtual columns. Need to fetch virtual column names, see + // IStorage::getVirtuals(). We can't easily do that via SQL. + if (query.full) { /// "Full" mode is mostly for MySQL compat @@ -90,74 +88,6 @@ WHERE return rewritten_query; } -String InterpreterShowColumnsQuery::getMySQLQuery() -{ - String mysql_specific_query; - - mysql_specific_query = R"(SELECT name AS field, - CASE - WHEN startsWith(type, 'Nullable') THEN - CASE - WHEN substring(type, 10, length(type) - 10) IN ('UInt8', 'Int8') THEN 'tinyint' - WHEN substring(type, 10, length(type) - 10) IN ('UInt16', 'Int16') THEN 'smallint' - WHEN substring(type, 10, length(type) - 10) IN ('UInt32', 'Int32') THEN 'int' - WHEN substring(type, 10, length(type) - 10) IN ('UInt64', 'Int64', 'UInt128', 'Int128', 'UInt256', 'Int256') THEN 'bigint' - WHEN substring(type, 10, length(type) - 10) = 'Float32' THEN 'float' - WHEN substring(type, 10, length(type) - 10) = 'Float64' THEN 'double' - WHEN substring(type, 10, length(type) - 10) LIKE 'Decimal%' THEN 'decimal' - WHEN substring(type, 10, length(type) - 10) = 'Boolean' THEN 'tinyint' - WHEN substring(type, 10, length(type) - 10) = 'String' THEN 'text' - WHEN substring(type, 10, length(type) - 10) LIKE 'FixedString%' THEN 'text' - WHEN substring(type, 10, length(type) - 10) LIKE 'Date%' THEN 'date' - WHEN substring(type, 10, length(type) - 10) LIKE 'DateTime%' THEN 'datetime' - WHEN substring(type, 10, length(type) - 10) = 'JSON' THEN 'json' - WHEN substring(type, 10, length(type) - 10) = 'UUID' THEN 'binary' - WHEN substring(type, 10, length(type) - 10) LIKE 'Enum%' THEN 'enum' - WHEN substring(type, 10, length(type) - 10) LIKE 'LowCardinality%' THEN 'text' - WHEN substring(type, 10, length(type) - 10) LIKE 'Array%' THEN 'json' - WHEN substring(type, 10, length(type) - 10) LIKE 'Map%' THEN 'json' - WHEN substring(type, 10, length(type) - 10) IN ('SimpleAggregateFunction', 'AggregateFunction') THEN 'text' - WHEN substring(type, 10, length(type) - 10) = 'Nested' THEN 'json' - WHEN substring(type, 10, length(type) - 10) LIKE 'Tuple%' THEN 'json' - WHEN substring(type, 10, length(type) - 10) LIKE 'IPv%' THEN 'text' - WHEN substring(type, 10, length(type) - 10) IN ('Expression', 'Set', 'Nothing', 'Interval') THEN 'text' - ELSE 
substring(type, 10, length(type) - 10) - END - ELSE - CASE - WHEN type IN ('UInt8', 'Int8') THEN 'tinyint' - WHEN type IN ('UInt16', 'Int16') THEN 'smallint' - WHEN type IN ('UInt32', 'Int32') THEN 'int' - WHEN type IN ('UInt64', 'Int64', 'UInt128', 'Int128', 'UInt256', 'Int256') THEN 'bigint' - WHEN type = 'Float32' THEN 'float' - WHEN type = 'Float64' THEN 'double' - WHEN type LIKE 'Decimal%' THEN 'decimal' - WHEN type = 'Boolean' THEN 'tinyint' - WHEN type = 'String' THEN 'text' - WHEN type LIKE 'FixedString%' THEN 'text' - WHEN type LIKE 'Date%' THEN 'date' - WHEN type LIKE 'DateTime%' THEN 'datetime' - WHEN type = 'JSON' THEN 'json' - WHEN type = 'UUID' THEN 'binary' - WHEN type LIKE 'Enum%' THEN 'enum' - WHEN type LIKE 'LowCardinality%' THEN 'text' - WHEN type LIKE 'Array%' THEN 'json' - WHEN type LIKE 'Map%' THEN 'json' - WHEN type IN ('SimpleAggregateFunction', 'AggregateFunction') THEN 'text' - WHEN type = 'Nested' THEN 'json' - WHEN type LIKE 'Tuple%' THEN 'json' - WHEN type LIKE 'IPv%' THEN 'text' - WHEN type IN ('Expression', 'Set', 'Nothing', 'Interval') THEN 'text' - ELSE type - END - END AS type, - startsWith(type, 'Nullable') AS null, - trim(concatWithSeparator(' ', if(is_in_primary_key, 'PRI', ''), if (is_in_sorting_key, 'SOR', ''))) AS key, - if(default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, - '' AS extra )"; - - return mysql_specific_query.str(); -} BlockIO InterpreterShowColumnsQuery::execute() { diff --git a/src/Interpreters/InterpreterShowColumnsQuery.h b/src/Interpreters/InterpreterShowColumnsQuery.h index b843a163978..ee6dcabd97b 100644 --- a/src/Interpreters/InterpreterShowColumnsQuery.h +++ b/src/Interpreters/InterpreterShowColumnsQuery.h @@ -26,7 +26,6 @@ private: ASTPtr query_ptr; String getRewrittenQuery(); - String getMySQLQuery(); }; diff --git a/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference b/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference deleted file mode 100644 index c9ad94a34c4..00000000000 --- a/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.reference +++ /dev/null @@ -1,213 +0,0 @@ -Drop tables if they exist -Create tab table -Create pseudo-random database name -Create tab duplicate table -Run MySQL test -field type null key default extra -array_value json 0 NULL -boolean_value tinyint 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value date 0 NULL -datetime_value date 0 NULL -decimal_value decimal 0 NULL -enum_value enum 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -int32 int 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value text 0 NULL -low_cardinality text 0 NULL -map_value json 0 NULL -nested.nested_int json 0 NULL -nested.nested_string json 0 NULL -nullable_value int 0 NULL -string_value text 0 NULL -tuple_value json 0 NULL -uint64 bigint 0 PRI SOR NULL -uuid_value binary 0 NULL -field type null key default extra -array_value json 0 NULL -boolean_value tinyint 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value date 0 NULL -datetime_value date 0 NULL -decimal_value decimal 0 NULL -enum_value enum 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -int32 int 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value text 0 NULL -low_cardinality text 0 NULL -map_value json 0 NULL -nested.nested_int json 0 NULL -nested.nested_string json 0 NULL -nullable_value int 0 NULL -string_value text 0 
NULL -tuple_value json 0 NULL -uint64 bigint 0 PRI SOR NULL -uuid_value binary 0 NULL -field type null key default extra collation comment privileges -array_value json 0 NULL NULL -boolean_value tinyint 0 NULL NULL -date32_value date 0 NULL NULL -date_value date 0 NULL NULL -datetime64_value date 0 NULL NULL -datetime_value date 0 NULL NULL -decimal_value decimal 0 NULL NULL -enum_value enum 0 NULL NULL -fixed_string_value text 0 NULL NULL -float32 float 0 NULL NULL -float64 double 0 NULL NULL -int32 int 0 NULL NULL -ipv4_value text 0 NULL NULL -ipv6_value text 0 NULL NULL -json_value text 0 NULL NULL -low_cardinality text 0 NULL NULL -map_value json 0 NULL NULL -nested.nested_int json 0 NULL NULL -nested.nested_string json 0 NULL NULL -nullable_value int 0 NULL NULL -string_value text 0 NULL NULL -tuple_value json 0 NULL NULL -uint64 bigint 0 PRI SOR NULL NULL -uuid_value binary 0 NULL NULL -field type null key default extra -int32 int 0 NULL -nested.nested_int json 0 NULL -uint64 bigint 0 PRI SOR NULL -field type null key default extra -array_value json 0 NULL -boolean_value tinyint 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value date 0 NULL -datetime_value date 0 NULL -decimal_value decimal 0 NULL -enum_value enum 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value text 0 NULL -low_cardinality text 0 NULL -map_value json 0 NULL -nested.nested_string json 0 NULL -nullable_value int 0 NULL -string_value text 0 NULL -tuple_value json 0 NULL -uuid_value binary 0 NULL -field type null key default extra -int32 int 0 NULL -nested.nested_int json 0 NULL -uint64 bigint 0 PRI SOR NULL -field type null key default extra -array_value json 0 NULL -boolean_value tinyint 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value date 0 NULL -datetime_value date 0 NULL -decimal_value decimal 0 NULL -enum_value enum 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value text 0 NULL -low_cardinality text 0 NULL -map_value json 0 NULL -nested.nested_string json 0 NULL -nullable_value int 0 NULL -string_value text 0 NULL -tuple_value json 0 NULL -uuid_value binary 0 NULL -field type null key default extra -int32 int 0 NULL -nested.nested_int json 0 NULL -uint64 bigint 0 PRI SOR NULL -field type null key default extra -array_value json 0 NULL -field type null key default extra -array_value json 0 NULL -boolean_value tinyint 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value date 0 NULL -datetime_value date 0 NULL -decimal_value decimal 0 NULL -enum_value enum 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -int32 int 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value text 0 NULL -low_cardinality text 0 NULL -map_value json 0 NULL -nested.nested_int json 0 NULL -nested.nested_string json 0 NULL -nullable_value int 0 NULL -string_value text 0 NULL -tuple_value json 0 NULL -uint64 bigint 0 PRI SOR NULL -uuid_value binary 0 NULL -field type null key default extra -array_value json 0 NULL -boolean_value tinyint 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value date 0 NULL -datetime_value date 0 NULL -decimal_value decimal 0 NULL -enum_value enum 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -int32 int 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL 
-json_value text 0 NULL -low_cardinality text 0 NULL -map_value json 0 NULL -nested.nested_int json 0 NULL -nested.nested_string json 0 NULL -nullable_value int 0 NULL -string_value text 0 NULL -tuple_value json 0 NULL -uint64 bigint 0 PRI SOR NULL -uuid_value binary 0 NULL -field type null key default extra -array_value json 0 NULL -boolean_value tinyint 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value date 0 NULL -datetime_value date 0 NULL -decimal_value decimal 0 NULL -enum_value enum 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -int32 int 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value text 0 NULL -low_cardinality text 0 NULL -map_value json 0 NULL -nested.nested_int json 0 NULL -nested.nested_string json 0 NULL -nullable_value int 0 NULL -string_value text 0 NULL -tuple_value json 0 NULL -uint64 bigint 0 PRI SOR NULL -uuid_value binary 0 NULL diff --git a/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh deleted file mode 100755 index 5324496edd3..00000000000 --- a/tests/queries/0_stateless/02726_show_columns_mysql_compatibility.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/bash - -# This script tests the MySQL compatibility of the SHOW COLUMNS command in ClickHouse -USER="default" -PASSWORD="" -HOST="127.0.0.1" -PORT=9004 - -# First run the clickhouse test to create the ClickHouse Tables - -echo "Drop tables if they exist" -${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS tab" -${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde" -${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde.tab" - -echo "Create tab table " -${CLICKHOUSE_LOCAL} --query " - CREATE TABLE tab - ( - uint64 UInt64, - int32 Nullable(Int32), - float32 Float32, - float64 Float64, - decimal_value Decimal(10, 2), - boolean_value UInt8, -- Use 0 for false, 1 for true - string_value String, - fixed_string_value FixedString(10), - date_value Date, - date32_value Date32, - datetime_value DateTime, - datetime64_value DateTime64(3), - json_value String, -- Store JSON as a string - uuid_value UUID, - enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), - low_cardinality LowCardinality(String), - array_value Array(Int32), - map_value Map(String, Int32), - tuple_value Tuple(Int32, String), - nullable_value Nullable(Int32), - ipv4_value IPv4, - ipv6_value IPv6, - nested Nested - ( - nested_int Int32, - nested_string String - ) - ) ENGINE = MergeTree - ORDER BY uint64; - " - - -echo "Create pseudo-random database name" -${CLICKHOUSE_LOCAL} --query "CREATE DATABASE database_123456789abcde;" - -echo "Create tab duplicate table" -${CLICKHOUSE_LOCAL} --query " - CREATE TABLE database_123456789abcde.tab - ( - uint64 UInt64, - int32 Nullable(Int32), - float32 Float32, - float64 Float64, - decimal_value Decimal(10, 2), - boolean_value UInt8, -- Use 0 for false, 1 for true - string_value String, - fixed_string_value FixedString(10), - date_value Date, - date32_value Date32, - datetime_value DateTime, - datetime64_value DateTime64(3), - json_value String, -- Store JSON as a string - uuid_value UUID, - enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), - low_cardinality LowCardinality(String), - array_value Array(Int32), - map_value Map(String, Int32), - tuple_value Tuple(Int32, String), - nullable_value Nullable(Int32), - ipv4_value IPv4, - ipv6_value IPv6, - nested Nested - ( - nested_int Int32, - nested_string 
String - ) - ) ENGINE = MergeTree - ORDER BY uint64; - " - -# Write sql to temp file -TEMP_FILE=$(mktemp) - -cat < $TEMP_FILE -SHOW COLUMNS FROM tab; -SHOW EXTENDED COLUMNS FROM tab; -SHOW FULL COLUMNS FROM tab; -SHOW COLUMNS FROM tab LIKE '%int%'; -SHOW COLUMNS FROM tab NOT LIKE '%int%'; -SHOW COLUMNS FROM tab ILIKE '%INT%'; -SHOW COLUMNS FROM tab NOT ILIKE '%INT%'; -SHOW COLUMNS FROM tab WHERE field LIKE '%int%'; -SHOW COLUMNS FROM tab LIMIT 1; -SHOW COLUMNS FROM tab; -SHOW COLUMNS FROM tab FROM database_123456789abcde; -SHOW COLUMNS FROM database_123456789abcde.tab; -DROP DATABASE database_123456789abcde; -DROP TABLE tab; -EOT - -# Now run the MySQL test script on the ClickHouse DB -echo "Run MySQL test" -mysql --user="$USER" --password="$PASSWORD" --host="$HOST" --port="$PORT" < $TEMP_FILE - -# Clean up the temp file -rm $TEMP_FILE From e6e420da5517e61f2940cc9b349a62ed192e9822 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 31 May 2023 13:00:55 +0200 Subject: [PATCH 144/722] Add no-fasttest tag --- tests/queries/0_stateless/02771_skip_empty_files.sh | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/02771_skip_empty_files.sh b/tests/queries/0_stateless/02771_skip_empty_files.sh index 99f43d7868a..2d1dc205dcd 100755 --- a/tests/queries/0_stateless/02771_skip_empty_files.sh +++ b/tests/queries/0_stateless/02771_skip_empty_files.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -16,9 +17,4 @@ $CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-*.parquet', auto, 'number $CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-*.parquet') settings engine_file_skip_empty_files=1" $CLICKHOUSE_LOCAL -q "select * from file('$FILE_PREFIX-*.parquet', auto, 'number UInt64') settings engine_file_skip_empty_files=1" - - - - - rm $FILE_PREFIX-* From 444ce60aeb4cda9ac2e5c61bbc0ae03cc76cd2b2 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Wed, 31 May 2023 11:50:25 +0000 Subject: [PATCH 145/722] Add tests with explicit cast --- .../01746_convert_type_with_default.reference | 47 +++++++++++++----- .../01746_convert_type_with_default.sql | 48 ++++++++++++++++--- 2 files changed, 77 insertions(+), 18 deletions(-) diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.reference b/tests/queries/0_stateless/01746_convert_type_with_default.reference index 235a88157c8..892a12434b9 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.reference +++ b/tests/queries/0_stateless/01746_convert_type_with_default.reference @@ -20,17 +20,40 @@ 2 -1 -2 -1970-01-01 -2023-05-30 -2023-05-30 -2023-05-30 -1970-01-01 -2023-05-30 -2023-05-30 -1970-01-01 -2023-05-30 14:38:20 -2023-05-30 14:38:20 -2023-05-30 14:38:20 -2023-05-30 14:38:20 61f0c404-5cb3-11e7-907b-a6006ad3dba0 59f0c404-5cb3-11e7-907b-a6006ad3dba0 +1970-01-01 +2023-05-30 +2023-05-30 +2023-05-30 +1970-01-01 +1970-01-20 +1970-01-20 +1970-01-20 +1970-01-20 +1970-01-20 +1970-01-20 +1970-01-20 +1970-01-20 +1970-01-20 +1970-01-20 +1970-01-20 +1970-01-20 +2023-05-30 +1970-01-01 +2023-05-30 14:38:20 +2023-05-30 14:38:20 +2023-05-30 14:38:20 +2023-05-30 14:38:20 +1970-01-01 00:00:19 +1970-01-01 00:00:19 +1970-01-01 00:00:19 +1970-01-01 00:00:19 +1970-01-01 00:00:19 +1970-01-01 00:00:19 +1970-01-01 00:00:19 +1970-01-01 00:00:19 +1970-01-01 00:00:19 +1970-01-01 00:00:19 +1970-01-01 00:00:19 +1970-01-01 00:00:19 diff --git 
a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index 40e4798721b..9fdd92491a7 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -26,19 +26,55 @@ select toUInt256OrDefault('1xx', cast(2 as UInt256)); select toInt256OrDefault('-1', cast(-2 as Int256)); select toInt256OrDefault('-1xx', cast(-2 as Int256)); -select toDateOrDefault('2020-0x-02'); +SELECT toUUIDOrDefault('61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); +SELECT toUUIDOrDefault('-----61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); + +select toDateOrDefault('1xxx'); select toDateOrDefault('2023-05-30'); select toDateOrDefault('2023-05-30', '2000-01-01'::Date); -select toDateOrDefault('2020-0x-02', '2023-05-30'::Date); +select toDateOrDefault('1xx', '2023-05-30'::Date); select toDateOrDefault(-1); -select toDateOrDefault(19507); + +select toDateOrDefault(cast(19 as Int8)); +select toDateOrDefault(cast(19 as UInt8)); + +select toDateOrDefault(cast(19 as Int16)); +select toDateOrDefault(cast(19 as UInt16)); + +select toDateOrDefault(cast(19 as Int32)); +select toDateOrDefault(cast(19 as UInt32)); + +select toDateOrDefault(cast(19 as Int64)); +select toDateOrDefault(cast(19 as UInt64)); + +select toDateOrDefault(cast(19 as Int128)); +select toDateOrDefault(cast(19 as UInt128)); + +select toDateOrDefault(cast(19 as Int256)); +select toDateOrDefault(cast(19 as UInt256)); + select toDateOrDefault(19507, '2000-01-01'::Date); select toDateOrDefault(-1, '2000-01-01'::Date); select toDateTimeOrDefault('2023-05-30 14:38:20'); select toDateTimeOrDefault('2023-05-30 14:38:20', 'UTC'); -select toString(toDateTimeOrDefault('s2023', 'Asia/Istanbul', '2023-05-30 14:38:20'::DateTime('Asia/Istanbul')), 'Asia/Istanbul'); +select toString(toDateTimeOrDefault('1xxx', 'Asia/Istanbul', '2023-05-30 14:38:20'::DateTime('Asia/Istanbul')), 'Asia/Istanbul'); select toDateTimeOrDefault(1685457500); -SELECT toUUIDOrDefault('61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); -SELECT toUUIDOrDefault('-----61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); \ No newline at end of file +select toDateTimeOrDefault(cast(19 as Int8)); +select toDateTimeOrDefault(cast(19 as UInt8)); + +select toDateTimeOrDefault(cast(19 as Int16)); +select toDateTimeOrDefault(cast(19 as UInt16)); + +select toDateTimeOrDefault(cast(19 as Int32)); +select toDateTimeOrDefault(cast(19 as UInt32)); + +select toDateTimeOrDefault(cast(19 as Int64)); +select toDateTimeOrDefault(cast(19 as UInt64)); + +select toDateTimeOrDefault(cast(19 as Int128)); +select toDateTimeOrDefault(cast(19 as UInt128)); + +select toDateTimeOrDefault(cast(19 as Int256)); +select toDateTimeOrDefault(cast(19 as UInt256)); \ No newline at end of file From 0d4ed32baca8eb2d897bcfd66eed0d04781af166 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Wed, 31 May 2023 11:25:33 -0400 Subject: [PATCH 146/722] better exception message --- src/Functions/FunctionsCodingIP.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Functions/FunctionsCodingIP.h b/src/Functions/FunctionsCodingIP.h index bd53fa7e043..9d090abb736 100644 --- a/src/Functions/FunctionsCodingIP.h +++ b/src/Functions/FunctionsCodingIP.h @@ 
-341,7 +341,11 @@ ColumnPtr convertIPv6ToIPv4(ColumnPtr column, const PaddedPODArray * null { if constexpr (exception_mode == IPStringToNumExceptionMode::Throw) { - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 in column {} is not in IPv4 mapping block", column->getName()); + char addr[IPV6_MAX_TEXT_LENGTH + 1] {}; + char * paddr = addr; + formatIPv6(src, paddr); + + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 {} in column {} is not in IPv4 mapping block", addr, column->getName()); } else if constexpr (exception_mode == IPStringToNumExceptionMode::Default) { From 3af7e0a6fa21d570f78fcf9366c299e3199d2b77 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Wed, 31 May 2023 11:26:58 -0400 Subject: [PATCH 147/722] better exception message --- src/Functions/FunctionsConversion.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 4d4efc84df1..6d22fb661c3 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -310,7 +310,13 @@ struct ConvertImpl const uint8_t ip4_cidr[] {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}; const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); if (!matchIPv6Subnet(src, ip4_cidr, 96)) - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 in column {} is not in IPv4 mapping block", named_from.column->getName()); + { + char addr[IPV6_MAX_TEXT_LENGTH + 1] {}; + char * paddr = addr; + formatIPv6(src, paddr); + + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 {} in column {} is not in IPv4 mapping block", addr, named_from.column->getName()); + } uint8_t * dst = reinterpret_cast(&vec_to[i].toUnderType()); if constexpr (std::endian::native == std::endian::little) From 3936c4dc52f24b477cfb5bf4821bb17626bd69df Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Wed, 31 May 2023 16:41:26 +0000 Subject: [PATCH 148/722] Try to fix fast tests (add timezone) --- .../01746_convert_type_with_default.sql | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index 9fdd92491a7..75e1510f330 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -58,23 +58,23 @@ select toDateOrDefault(-1, '2000-01-01'::Date); select toDateTimeOrDefault('2023-05-30 14:38:20'); select toDateTimeOrDefault('2023-05-30 14:38:20', 'UTC'); -select toString(toDateTimeOrDefault('1xxx', 'Asia/Istanbul', '2023-05-30 14:38:20'::DateTime('Asia/Istanbul')), 'Asia/Istanbul'); -select toDateTimeOrDefault(1685457500); +select toDateTimeOrDefault('1xxx', 'UTC', '2023-05-30 14:38:20'::DateTime('UTC')); +select toDateTimeOrDefault(1685457500, 'UTC'); -select toDateTimeOrDefault(cast(19 as Int8)); -select toDateTimeOrDefault(cast(19 as UInt8)); +select toDateTimeOrDefault(cast(19 as Int8), 'UTC'); +select toDateTimeOrDefault(cast(19 as UInt8), 'UTC'); -select toDateTimeOrDefault(cast(19 as Int16)); -select toDateTimeOrDefault(cast(19 as UInt16)); +select toDateTimeOrDefault(cast(19 as Int16), 'UTC'); +select toDateTimeOrDefault(cast(19 as UInt16), 'UTC'); -select toDateTimeOrDefault(cast(19 as Int32)); -select toDateTimeOrDefault(cast(19 as UInt32)); +select toDateTimeOrDefault(cast(19 as Int32), 
'UTC');
+select toDateTimeOrDefault(cast(19 as UInt32), 'UTC');

-select toDateTimeOrDefault(cast(19 as Int64));
-select toDateTimeOrDefault(cast(19 as UInt64));
+select toDateTimeOrDefault(cast(19 as Int64), 'UTC');
+select toDateTimeOrDefault(cast(19 as UInt64), 'UTC');

-select toDateTimeOrDefault(cast(19 as Int128));
-select toDateTimeOrDefault(cast(19 as UInt128));
+select toDateTimeOrDefault(cast(19 as Int128), 'UTC');
+select toDateTimeOrDefault(cast(19 as UInt128), 'UTC');

-select toDateTimeOrDefault(cast(19 as Int256));
-select toDateTimeOrDefault(cast(19 as UInt256)); \ No newline at end of file
+select toDateTimeOrDefault(cast(19 as Int256), 'UTC');
+select toDateTimeOrDefault(cast(19 as UInt256), 'UTC'); \ No newline at end of file
From 0b62be649f9974a4433897b39fb8a59e9e7f30f2 Mon Sep 17 00:00:00 2001
From: avogar
Date: Wed, 31 May 2023 17:52:29 +0000
Subject: [PATCH 149/722] Add docs, fix style

---
 .../table-engines/integrations/hdfs.md        |  6 ++
 .../engines/table-engines/integrations/s3.md  |  6 ++
 docs/en/engines/table-engines/special/file.md |  8 ++
 docs/en/engines/table-engines/special/url.md  |  4 +
 docs/en/operations/settings/settings.md       | 92 ++++++++++++++-----
 docs/en/sql-reference/table-functions/file.md | 12 ++-
 docs/en/sql-reference/table-functions/hdfs.md |  6 ++
 docs/en/sql-reference/table-functions/s3.md   |  6 ++
 docs/en/sql-reference/table-functions/url.md  |  4 +
 src/Storages/StorageURL.cpp                   |  4 -
 tests/integration/test_storage_s3/test.py     | 20 ++--
 11 files changed, 130 insertions(+), 38 deletions(-)

diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md
index b9db0fae68f..b37ccb00ba6 100644
--- a/docs/en/engines/table-engines/integrations/hdfs.md
+++ b/docs/en/engines/table-engines/integrations/hdfs.md
@@ -233,6 +233,12 @@ libhdfs3 support HDFS namenode HA.
 - `_path` — Path to the file.
 - `_file` — Name of the file.
 
+## Storage Settings {#storage-settings}
+
+- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
+- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+
 **See Also**
 
 - [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md
index 595bc0c344f..e8d0ab6255d 100644
--- a/docs/en/engines/table-engines/integrations/s3.md
+++ b/docs/en/engines/table-engines/integrations/s3.md
@@ -127,6 +127,12 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
 ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/*', 'CSV');
 ```
 
+## Storage Settings {#storage-settings}
+
+- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
+- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+
 ## S3-related Settings {#settings}
 
 The following settings can be set before query execution or placed into configuration file.
diff --git a/docs/en/engines/table-engines/special/file.md b/docs/en/engines/table-engines/special/file.md
index 9c4e87487b4..cf325961b6a 100644
--- a/docs/en/engines/table-engines/special/file.md
+++ b/docs/en/engines/table-engines/special/file.md
@@ -92,3 +92,11 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
 `PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
 
 For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
+
+## Settings {#settings}
+
+- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
+- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+- [storage_file_read_method](/docs/en/operations/settings/settings.md#storage_file_read_method) - method of reading data from storage file, one of: read, pread, mmap (only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
diff --git a/docs/en/engines/table-engines/special/url.md b/docs/en/engines/table-engines/special/url.md
index a4530767e11..26d4975954f 100644
--- a/docs/en/engines/table-engines/special/url.md
+++ b/docs/en/engines/table-engines/special/url.md
@@ -102,3 +102,7 @@ SELECT * FROM url_engine_table
 `PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
 
 For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
+ +## Storage Settings {#storage-settings} + +- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 4f3b4e43358..ac3e624387e 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -3195,27 +3195,7 @@ Possible values: Default value: `0`. -## s3_truncate_on_insert - -Enables or disables truncate before inserts in s3 engine tables. If disabled, an exception will be thrown on insert attempts if an S3 object already exists. - -Possible values: -- 0 — `INSERT` query appends new data to the end of the file. -- 1 — `INSERT` query replaces existing content of the file with the new data. - -Default value: `0`. - -## hdfs_truncate_on_insert - -Enables or disables truncation before an insert in hdfs engine tables. If disabled, an exception will be thrown on an attempt to insert if a file in HDFS already exists. - -Possible values: -- 0 — `INSERT` query appends new data to the end of the file. -- 1 — `INSERT` query replaces existing content of the file with the new data. - -Default value: `0`. - -## engine_file_allow_create_multiple_files +## engine_file_allow_create_multiple_files {#engine_file_allow_create_multiple_files} Enables or disables creating a new file on each insert in file engine tables if the format has the suffix (`JSON`, `ORC`, `Parquet`, etc.). If enabled, on each insert a new file will be created with a name following this pattern: @@ -3227,7 +3207,33 @@ Possible values: Default value: `0`. -## s3_create_new_file_on_insert +## engine_file_skip_empty_files {#engine_file_skip_empty_files} + +Enables or disables skipping empty files in [File](../../engines/table-engines/special/file.md) engine tables. + +Possible values: +- 0 — `SELECT` throws an exception if empty file is not compatible with requested format. +- 1 — `SELECT` returns empty result for empty file. + +Default value: `0`. + +## storage_file_read_method {#storage_file_read_method} + +Method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). + +Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local. + +## s3_truncate_on_insert {#s3_truncate_on_insert} + +Enables or disables truncate before inserts in s3 engine tables. If disabled, an exception will be thrown on insert attempts if an S3 object already exists. + +Possible values: +- 0 — `INSERT` query appends new data to the end of the file. +- 1 — `INSERT` query replaces existing content of the file with the new data. + +Default value: `0`. + +## s3_create_new_file_on_insert {#s3_create_new_file_on_insert} Enables or disables creating a new file on each insert in s3 engine tables. If enabled, on each insert a new S3 object will be created with the key, similar to this pattern: @@ -3239,7 +3245,27 @@ Possible values: Default value: `0`. -## hdfs_create_new_file_on_insert +## s3_skip_empty_files {#s3_skip_empty_files} + +Enables or disables skipping empty files in [S3](../../engines/table-engines/special/s3.md) engine tables. + +Possible values: +- 0 — `SELECT` throws an exception if empty file is not compatible with requested format. +- 1 — `SELECT` returns empty result for empty file. + +Default value: `0`. 
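+
+For example (an illustrative query; the bucket URL and object names are placeholders, not taken from this patch), with `s3_skip_empty_files = 1` an empty object matched by the glob is skipped instead of failing format parsing:
+
+``` sql
+SELECT count() FROM s3('https://my-bucket.s3.amazonaws.com/data/*.parquet', 'Parquet')
+SETTINGS s3_skip_empty_files = 1;
+```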
+
+## hdfs_truncate_on_insert {#hdfs_truncate_on_insert}
+
+Enables or disables truncation before an insert in hdfs engine tables. If disabled, an exception will be thrown on an attempt to insert if a file in HDFS already exists.
+
+Possible values:
+- 0 — `INSERT` query appends new data to the end of the file.
+- 1 — `INSERT` query replaces existing content of the file with the new data.
+
+Default value: `0`.
+
+## hdfs_create_new_file_on_insert {#hdfs_create_new_file_on_insert}
 
 Enables or disables creating a new file on each insert in HDFS engine tables. If enabled, on each insert a new HDFS file will be created with the name, similar to this pattern:
 
@@ -3251,6 +3277,26 @@ Possible values:
 
 Default value: `0`.
 
+## hdfs_skip_empty_files {#hdfs_skip_empty_files}
+
+Enables or disables skipping empty files in [HDFS](../../engines/table-engines/integrations/hdfs.md) engine tables.
+
+Possible values:
+- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
+- 1 — `SELECT` returns empty result for empty file.
+
+Default value: `0`.
+
+## engine_url_skip_empty_files {#engine_url_skip_empty_files}
+
+Enables or disables skipping empty files in [URL](../../engines/table-engines/special/url.md) engine tables.
+
+Possible values:
+- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
+- 1 — `SELECT` returns empty result for empty file.
+
+Default value: `0`.
+
 ## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
 
 Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.
diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md
index 28c2dc9f1f3..b1903c990b1 100644
--- a/docs/en/sql-reference/table-functions/file.md
+++ b/docs/en/sql-reference/table-functions/file.md
@@ -18,7 +18,7 @@ file(path [,format] [,structure] [,compression])
 
 **Parameters**
 
-- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Path to file support following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc', 'def'` — strings.
+- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-user_files_path). Path to file support following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc', 'def'` — strings.
 - `format` — The [format](/docs/en/interfaces/formats.md#formats) of the file.
 - `structure` — Structure of the table. Format: `'column1_name column1_type, column2_name column2_type, ...'`.
 - `compression` — The existing compression type when used in a `SELECT` query, or the desired compression type when used in an `INSERT` query. The supported compression types are `gz`, `br`, `xz`, `zst`, `lz4`, and `bz2`.
@@ -196,6 +196,16 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3
 - `_path` — Path to the file.
 - `_file` — Name of the file.
 
+## Settings
+
+- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
+- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+- [storage_file_read_method](/docs/en/operations/settings/settings.md#storage_file_read_method) - method of reading data from storage file, one of: read, pread, mmap (only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
+
+
+
 **See Also**
 
 - [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns)
diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md
index 6ba24211131..1b52e786de4 100644
--- a/docs/en/sql-reference/table-functions/hdfs.md
+++ b/docs/en/sql-reference/table-functions/hdfs.md
@@ -97,6 +97,12 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin
 - `_path` — Path to the file.
 - `_file` — Name of the file.
 
+## Storage Settings {#storage-settings}
+
+- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
+- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+
 **See Also**
 
 - [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns)
diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md
index a9ddc286ec5..7068c208022 100644
--- a/docs/en/sql-reference/table-functions/s3.md
+++ b/docs/en/sql-reference/table-functions/s3.md
@@ -202,6 +202,12 @@ FROM s3(
 LIMIT 5;
 ```
 
+## Storage Settings {#storage-settings}
+
+- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
+- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+
 **See Also**
 
 - [S3 engine](../../engines/table-engines/integrations/s3.md)
diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md
index f157a850a12..ac4162c15de 100644
--- a/docs/en/sql-reference/table-functions/url.md
+++ b/docs/en/sql-reference/table-functions/url.md
@@ -53,6 +53,10 @@ Character `|` inside patterns is used to specify failover addresses. They are it
 - `_path` — Path to the `URL`.
 - `_file` — Resource name of the `URL`.
 
+## Storage Settings {#storage-settings}
+
+- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
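+
+For example (an illustrative query; the URL and column names are placeholders, not taken from this patch), with `engine_url_skip_empty_files = 1` an empty response body yields an empty result instead of a parsing error:
+
+``` sql
+SELECT * FROM url('https://example.com/data.csv', 'CSV', 'c1 UInt32, c2 String')
+SETTINGS engine_url_skip_empty_files = 1;
+```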
+ **See Also** - [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 706ce481a24..e882138bf0d 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -14,10 +14,8 @@ #include #include -#include #include #include -#include #include #include @@ -29,7 +27,6 @@ #include #include #include -#include #include #include @@ -48,7 +45,6 @@ namespace ErrorCodes extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int NETWORK_ERROR; extern const int BAD_ARGUMENTS; - extern const int LOGICAL_ERROR; extern const int CANNOT_EXTRACT_TABLE_STRUCTURE; } diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 516c8ed152a..174cbad1de4 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -1719,7 +1719,6 @@ def test_skip_empty_files(started_cluster): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] - instance.query( f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files1.parquet', TSVRaw) select * from numbers(0) settings s3_truncate_on_insert=1" ) @@ -1727,43 +1726,44 @@ def test_skip_empty_files(started_cluster): instance.query( f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files2.parquet') select * from numbers(1) settings s3_truncate_on_insert=1" ) + def test(engine, setting): instance.query_and_get_error( f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files1.parquet') settings {setting}=0" ) - + instance.query_and_get_error( f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files1.parquet', auto, 'number UINt64') settings {setting}=0" ) - + instance.query_and_get_error( f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files1.parquet') settings {setting}=1" ) - + res = instance.query( f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files1.parquet', auto, 'number UInt64') settings {setting}=1" ) - + assert len(res) == 0 instance.query_and_get_error( f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files{{1,2}}.parquet') settings {setting}=0" ) - + instance.query_and_get_error( f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files{{1,2}}.parquet', auto, 'number UInt64') settings {setting}=0" ) - + res = instance.query( f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files{{1,2}}.parquet') settings {setting}=1" ) - + assert int(res) == 0 - + res = instance.query( f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files{{1,2}}.parquet', auto, 'number UInt64') settings {setting}=1" ) - + assert int(res) == 0 test("s3", "s3_skip_empty_files") From c9626314f7e7b128eccfd5fca7e04c9ff1fc45c5 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 31 May 2023 19:22:44 +0000 Subject: [PATCH 150/722] Better --- contrib/capnproto | 2 +- src/Formats/CapnProtoSerializer.cpp | 1028 ++++++++--------- 
...lumnsStructureToQueryWithClusterEngine.cpp | 52 + ...ColumnsStructureToQueryWithClusterEngine.h | 14 + 4 files changed, 529 insertions(+), 567 deletions(-) create mode 100644 src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp create mode 100644 src/Storages/addColumnsStructureToQueryWithClusterEngine.h diff --git a/contrib/capnproto b/contrib/capnproto index dc8b50b9997..976209a6d18 160000 --- a/contrib/capnproto +++ b/contrib/capnproto @@ -1 +1 @@ -Subproject commit dc8b50b999777bcb23c89bb5907c785c3f654441 +Subproject commit 976209a6d18074804f60d18ef99b6a809d27dadf diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index 91e207a1846..e36f5fa4947 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -94,9 +94,21 @@ namespace std::vector> field_builders; }; + template + std::unique_ptr initStructBuilder(ParentBuilder & parent_builder, UInt32 offset_or_index, const capnp::_::StructSize & struct_size, size_t elements, const capnp::StructSchema & schema) + { + capnp::DynamicStruct::Builder builder_impl; + if constexpr (std::is_same_v) + builder_impl = capnp::DynamicStruct::Builder(schema, parent_builder.getBuilderImpl().getPointerField(offset_or_index).initStruct(struct_size)); + else + builder_impl = capnp::DynamicStruct::Builder(schema, parent_builder.getBuilderImpl().getStructElement(offset_or_index)); + return std::make_unique(std::move(builder_impl), elements); + } + class ICapnProtoSerializer { public: + /// Write row as struct field. virtual void writeRow( const ColumnPtr & column, std::unique_ptr & builder, @@ -104,6 +116,7 @@ namespace UInt32 slot_offset, size_t row_num) = 0; + /// Write row as list element. virtual void writeRow( const ColumnPtr & column, std::unique_ptr & builder, @@ -111,8 +124,10 @@ namespace UInt32 array_index, size_t row_num) = 0; + /// Read row from struct field at slot_offset. virtual void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) = 0; + /// Read row from list element at array_index. 
virtual void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) = 0; virtual ~ICapnProtoSerializer() = default; @@ -124,32 +139,32 @@ namespace public: void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - CapnProtoNumericType value = static_cast(assert_cast &>(*column).getElement(row_num)); - builder_impl.setDataField(slot_offset, value); + parent_struct_builder.getBuilderImpl().setDataField(slot_offset, getValue(column, row_num)); } void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto & builder_impl = parent_list_builder.getBuilderImpl(); - CapnProtoNumericType value = static_cast(assert_cast &>(*column).getElement(row_num)); - builder_impl.setDataElement(array_index, value); + parent_list_builder.getBuilderImpl().setDataElement(array_index, getValue(column, row_num)); } void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - CapnProtoNumericType value = reader_impl.getDataField(slot_offset); - if constexpr (convert_to_bool_on_read) - assert_cast(column).insertValue(static_cast(value)); - else - assert_cast &>(column).insertValue(static_cast(value)); + insertValue(column, parent_struct_reader.getReaderImpl().getDataField(slot_offset)); } void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - CapnProtoNumericType value = reader_impl.getDataElement(array_index); + insertValue(column, parent_list_reader.getReaderImpl().getDataElement(array_index)); + } + + private: + CapnProtoNumericType getValue(const ColumnPtr & column, size_t row_num) + { + return static_cast(assert_cast &>(*column).getElement(row_num)); + } + + void insertValue(IColumn & column, CapnProtoNumericType value) + { if constexpr (convert_to_bool_on_read) assert_cast(column).insertValue(static_cast(value)); else @@ -191,29 +206,32 @@ namespace public: void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - CapnProtoFloatType value = static_cast(assert_cast &>(*column).getElement(row_num)); - builder_impl.setDataField(slot_offset, value); + parent_struct_builder.getBuilderImpl().setDataField(slot_offset, getValue(column, row_num)); } void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto & builder_impl = parent_list_builder.getBuilderImpl(); - CapnProtoFloatType value = static_cast(assert_cast &>(*column).getElement(row_num)); - builder_impl.setDataElement(array_index, value); + parent_list_builder.getBuilderImpl().setDataElement(array_index, getValue(column, row_num)); } void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - CapnProtoFloatType value = reader_impl.getDataField(slot_offset); - assert_cast 
&>(column).insertValue(static_cast(value)); + insertValue(column, parent_struct_reader.getReaderImpl().getDataField(slot_offset)); } void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - CapnProtoFloatType value = reader_impl.getDataElement(array_index); + insertValue(column, parent_list_reader.getReaderImpl().getDataElement(array_index)); + } + + private: + CapnProtoFloatType getValue(const ColumnPtr & column, size_t row_num) + { + return static_cast(assert_cast &>(*column).getElement(row_num)); + } + + void insertValue(IColumn & column, CapnProtoFloatType value) + { assert_cast &>(column).insertValue(static_cast(value)); } }; @@ -298,57 +316,41 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - EnumType enum_value = assert_cast &>(*column).getElement(row_num); - UInt16 capnp_value; - if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) - capnp_value = static_cast(enum_value); - else - capnp_value = ch_to_capnp_values[enum_value]; - - builder_impl.setDataField(slot_offset, capnp_value); + parent_struct_builder.getBuilderImpl().setDataField(slot_offset, getValue(column, row_num)); } void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto & builder_impl = parent_list_builder.getBuilderImpl(); - EnumType enum_value = assert_cast &>(*column).getElement(row_num); - UInt16 capnp_value; - if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) - capnp_value = static_cast(enum_value); - else - capnp_value = ch_to_capnp_values[enum_value]; - - builder_impl.setDataElement(array_index, capnp_value); + parent_list_builder.getBuilderImpl().setDataElement(array_index, getValue(column, row_num)); } void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - UInt16 capnp_value = reader_impl.getDataField(slot_offset); - EnumType value; - if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) - value = static_cast(capnp_value); - else - value = capnp_to_ch_values[capnp_value]; - - assert_cast &>(column).insertValue(value); + insertValue(column, parent_struct_reader.getReaderImpl().getDataField(slot_offset)); } void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - UInt16 capnp_value = reader_impl.getDataElement(array_index); - EnumType value; - if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) - value = static_cast(capnp_value); - else - value = capnp_to_ch_values[capnp_value]; - - assert_cast &>(column).insertValue(value); + insertValue(column, parent_list_reader.getReaderImpl().getDataElement(array_index)); } private: + UInt16 getValue(const ColumnPtr & column, size_t row_num) + { + EnumType enum_value = assert_cast &>(*column).getElement(row_num); + if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) + return static_cast(enum_value); + return ch_to_capnp_values[enum_value]; + } + + void 
insertValue(IColumn & column, UInt16 capnp_enum_value) + { + if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) + assert_cast &>(column).insertValue(static_cast(capnp_enum_value)); + else + assert_cast &>(column).insertValue(capnp_to_ch_values[capnp_enum_value]); + } + DataTypePtr data_type; capnp::EnumSchema enum_schema; const FormatSettings::CapnProtoEnumComparingMode enum_comparing_mode; @@ -367,29 +369,32 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - UInt16 value = assert_cast(*column).getElement(row_num); - builder_impl.setDataField(slot_offset, value); + parent_struct_builder.getBuilderImpl().setDataField(slot_offset, getValue(column, row_num)); } void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto & builder_impl = parent_list_builder.getBuilderImpl(); - UInt16 value = assert_cast(*column).getElement(row_num); - builder_impl.setDataElement(array_index, value); + parent_list_builder.getBuilderImpl().setDataElement(array_index, getValue(column, row_num)); } void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - UInt16 value = reader_impl.getDataField(slot_offset); - assert_cast(column).insertValue(value); + insertValue(column, parent_struct_reader.getReaderImpl().getDataField(slot_offset)); } void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - UInt16 value = reader_impl.getDataElement(array_index); + insertValue(column, parent_list_reader.getReaderImpl().getDataElement(array_index)); + } + + private: + UInt16 getValue(const ColumnPtr & column, size_t row_num) + { + return assert_cast(*column).getElement(row_num); + } + + void insertValue(IColumn & column, UInt16 value) + { assert_cast(column).insertValue(value); } }; @@ -405,29 +410,32 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - Int32 value = assert_cast(*column).getElement(row_num); - builder_impl.setDataField(slot_offset, value); + parent_struct_builder.getBuilderImpl().setDataField(slot_offset, getValue(column, row_num)); } void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto & builder_impl = parent_list_builder.getBuilderImpl(); - Int32 value = assert_cast(*column).getElement(row_num); - builder_impl.setDataElement(array_index, value); + parent_list_builder.getBuilderImpl().setDataElement(array_index, getValue(column, row_num)); } void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - Int32 value = reader_impl.getDataField(slot_offset); - assert_cast(column).insertValue(value); + insertValue(column, parent_struct_reader.getReaderImpl().getDataField(slot_offset)); } void readRow(IColumn & column, const 
capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - Int32 value = reader_impl.getDataElement(array_index); + insertValue(column, parent_list_reader.getReaderImpl().getDataElement(array_index)); + } + + private: + Int32 getValue(const ColumnPtr & column, size_t row_num) + { + return assert_cast(*column).getElement(row_num); + } + + void insertValue(IColumn & column, Int32 value) + { assert_cast(column).insertValue(value); } }; @@ -443,29 +451,32 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - UInt32 value = assert_cast(*column).getElement(row_num); - builder_impl.setDataField(slot_offset, value); + parent_struct_builder.getBuilderImpl().setDataField(slot_offset, getValue(column, row_num)); } void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto & builder_impl = parent_list_builder.getBuilderImpl(); - UInt32 value = assert_cast(*column).getElement(row_num); - builder_impl.setDataElement(array_index, value); + parent_list_builder.getBuilderImpl().setDataElement(array_index, getValue(column, row_num)); } void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - UInt32 value = reader_impl.getDataField(slot_offset); - assert_cast(column).insertValue(value); + insertValue(column, parent_struct_reader.getReaderImpl().getDataField(slot_offset)); } void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - UInt32 value = reader_impl.getDataElement(array_index); + insertValue(column, parent_list_reader.getReaderImpl().getDataElement(array_index)); + } + + private: + UInt32 getValue(const ColumnPtr & column, size_t row_num) + { + return assert_cast(*column).getElement(row_num); + } + + void insertValue(IColumn & column, UInt32 value) + { assert_cast(column).insertValue(value); } }; @@ -481,29 +492,32 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - Int64 value = assert_cast(*column).getElement(row_num); - builder_impl.setDataField(slot_offset, value); + parent_struct_builder.getBuilderImpl().setDataField(slot_offset, getValue(column, row_num)); } void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto & builder_impl = parent_list_builder.getBuilderImpl(); - Int64 value = assert_cast(*column).getElement(row_num); - builder_impl.setDataElement(array_index, value); + parent_list_builder.getBuilderImpl().setDataElement(array_index, getValue(column, row_num)); } void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - Int64 value = reader_impl.getDataField(slot_offset); - assert_cast(column).insertValue(value); + insertValue(column, 
parent_struct_reader.getReaderImpl().getDataField(slot_offset)); } void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - Int64 value = reader_impl.getDataElement(array_index); + insertValue(column, parent_list_reader.getReaderImpl().getDataElement(array_index)); + } + + private: + Int64 getValue(const ColumnPtr & column, size_t row_num) + { + return assert_cast(*column).getElement(row_num); + } + + void insertValue(IColumn & column, Int64 value) + { assert_cast(column).insertValue(value); } }; @@ -523,275 +537,36 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - DecimalType value = assert_cast &>(*column).getElement(row_num); - builder_impl.setDataField(slot_offset, value); + parent_struct_builder.getBuilderImpl().setDataField(slot_offset, getValue(column, row_num)); } void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto & builder_impl = parent_list_builder.getBuilderImpl(); - DecimalType value = assert_cast &>(*column).getElement(row_num); - builder_impl.setDataElement(array_index, value); + parent_list_builder.getBuilderImpl().setDataElement(array_index, getValue(column, row_num)); } void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - NativeType value = reader_impl.getDataField(slot_offset); - assert_cast &>(column).insertValue(value); + insertValue(column, parent_struct_reader.getReaderImpl().getDataField(slot_offset)); } void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - NativeType value = reader_impl.getDataElement(array_index); + insertValue(column, parent_list_reader.getReaderImpl().getDataElement(array_index)); + } + + private: + NativeType getValue(const ColumnPtr & column, size_t row_num) + { + return assert_cast &>(*column).getElement(row_num); + } + + void insertValue(IColumn & column, NativeType value) + { assert_cast &>(column).insertValue(value); } }; - template - class CapnProtoFixedSizeRawDataSerializer : public ICapnProtoSerializer - { - private: - static constexpr size_t value_size = sizeof(T); - - public: - CapnProtoFixedSizeRawDataSerializer(const DataTypePtr & data_type_, const String & column_name, const capnp::Type & capnp_type) : data_type(data_type_) - { - if (!capnp_type.isData()) - throwCannotConvert(data_type, column_name, capnp_type); - } - - void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override - { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - auto data = column->getDataAt(row_num); - capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); - builder_impl.getPointerField(slot_offset).template setBlob(value); - } - - void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_struct_builder, UInt32 array_index, size_t row_num) override - { - auto & builder_impl = 
parent_struct_builder.getBuilderImpl(); - auto data = column->getDataAt(row_num); - capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); - builder_impl.getPointerElement(array_index).setBlob(value); - } - - void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override - { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - capnp::Data::Reader value = reader_impl.getPointerField(slot_offset).template getBlob(nullptr, 0); - if (value.size() != value_size) - throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", data_type->getName(), value.size()); - - column.insertData(reinterpret_cast(value.begin()), value.size()); - } - - void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override - { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - capnp::Data::Reader value = reader_impl.getPointerElement(array_index).getBlob(nullptr, 0); - if (value.size() != value_size) - throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", data_type->getName(), value.size()); - - column.insertData(reinterpret_cast(value.begin()), value.size()); - } - - private: - DataTypePtr data_type; - }; - - template - class CapnProtoStringSerializer : public ICapnProtoSerializer - { - public: - CapnProtoStringSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) - { - if (!capnp_type.isData() && !capnp_type.isText()) - throwCannotConvert(data_type, column_name, capnp_type); - } - - void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override - { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - auto data = column->getDataAt(row_num); - if constexpr (is_binary) - { - capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); - builder_impl.getPointerField(slot_offset).setBlob(value); - } - else - { - capnp::Text::Reader value = capnp::Text::Reader(data.data, data.size); - builder_impl.getPointerField(slot_offset).setBlob(value); - } - } - - void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_struct_builder, UInt32 array_index, size_t row_num) override - { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - auto data = column->getDataAt(row_num); - if constexpr (is_binary) - { - capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); - builder_impl.getPointerElement(array_index).setBlob(value); - } - else - { - capnp::Text::Reader value = capnp::Text::Reader(data.data, data.size); - builder_impl.getPointerElement(array_index).setBlob(value); - } - } - - void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override - { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - if constexpr (is_binary) - { - capnp::Data::Reader value = reader_impl.getPointerField(slot_offset).getBlob(nullptr, 0); - column.insertData(reinterpret_cast(value.begin()), value.size()); - } - else - { - capnp::Text::Reader value = reader_impl.getPointerField(slot_offset).getBlob(nullptr, 0); - column.insertData(reinterpret_cast(value.begin()), value.size()); - } - } - - void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override - { 
- const auto & reader_impl = parent_list_reader.getReaderImpl(); - if constexpr (is_binary) - { - capnp::Data::Reader value = reader_impl.getPointerElement(array_index).getBlob(nullptr, 0); - column.insertData(reinterpret_cast(value.begin()), value.size()); - } - else - { - capnp::Text::Reader value = reader_impl.getPointerElement(array_index).getBlob(nullptr, 0); - column.insertData(reinterpret_cast(value.begin()), value.size()); - } - } - }; - - template - class CapnProtoFixedStringSerializer : public ICapnProtoSerializer - { - public: - CapnProtoFixedStringSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type_) : capnp_type(capnp_type_) - { - if (!capnp_type.isData() && !capnp_type.isText()) - throwCannotConvert(data_type, column_name, capnp_type); - } - - void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override - { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - auto data = column->getDataAt(row_num); - if constexpr (is_binary) - { - capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); - builder_impl.getPointerField(slot_offset).setBlob(value); - } - else - { - if (data.data[data.size - 1] == 0) - { - capnp::Text::Reader value = capnp::Text::Reader(data.data, data.size); - builder_impl.getPointerField(slot_offset).setBlob(value); - } - else - { - /// In TEXT type data should be null-terminated, but ClickHouse FixedString data could not be. - /// To make data null-terminated we should copy it to temporary String object and use it in capnp::Text::Reader. - /// Note that capnp::Text::Reader works only with pointer to the data and it's size, so we should - /// guarantee that new String object life time is longer than capnp::Text::Reader life time. - tmp_string = data.toString(); - capnp::Text::Reader value = capnp::Text::Reader(tmp_string.data(), tmp_string.size()); - builder_impl.getPointerField(slot_offset).setBlob(value); - } - } - } - - void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_struct_builder, UInt32 array_index, size_t row_num) override - { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - auto data = column->getDataAt(row_num); - if constexpr (is_binary) - { - capnp::Data::Reader value = capnp::Data::Reader(reinterpret_cast(data.data), data.size); - builder_impl.getPointerElement(array_index).setBlob(value); - } - else - { - if (data.data[data.size - 1] == 0) - { - capnp::Text::Reader value = capnp::Text::Reader(data.data, data.size); - builder_impl.getPointerElement(array_index).setBlob(value); - } - else - { - /// In TEXT type data should be null-terminated, but ClickHouse FixedString data could not be. - /// To make data null-terminated we should copy it to temporary String object and use it in capnp::Text::Reader. - /// Note that capnp::Text::Reader works only with pointer to the data and it's size, so we should - /// guarantee that new String object life time is longer than capnp::Text::Reader life time. 
- tmp_string = data.toString(); - capnp::Text::Reader value = capnp::Text::Reader(tmp_string.data(), tmp_string.size()); - builder_impl.getPointerElement(array_index).setBlob(value); - } - } - } - - void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override - { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - auto & fixed_string_column = assert_cast(column); - if constexpr (is_binary) - { - capnp::Data::Reader value = reader_impl.getPointerField(slot_offset).getBlob(nullptr, 0); - if (value.size() > fixed_string_column.getN()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN()); - - fixed_string_column.insertData(reinterpret_cast(value.begin()), value.size()); - } - else - { - capnp::Text::Reader value = reader_impl.getPointerField(slot_offset).getBlob(nullptr, 0); - if (value.size() > fixed_string_column.getN()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN()); - - fixed_string_column.insertData(reinterpret_cast(value.begin()), value.size()); - } - } - - void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override - { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - auto & fixed_string_column = assert_cast(column); - if constexpr (is_binary) - { - capnp::Data::Reader value = reader_impl.getPointerElement(array_index).getBlob(nullptr, 0); - if (value.size() > fixed_string_column.getN()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN()); - - fixed_string_column.insertData(reinterpret_cast(value.begin()), value.size()); - } - else - { - capnp::Text::Reader value = reader_impl.getPointerElement(array_index).getBlob(nullptr, 0); - if (value.size() > fixed_string_column.getN()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", value.size(), fixed_string_column.getN()); - - fixed_string_column.insertData(reinterpret_cast(value.begin()), value.size()); - } - } - - private: - String tmp_string; - capnp::Type capnp_type; - }; class CapnProtoIPv4Serializer : public ICapnProtoSerializer { @@ -804,33 +579,204 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - auto & builder_impl = parent_struct_builder.getBuilderImpl(); - UInt32 value = assert_cast(*column).getElement(row_num); - builder_impl.setDataField(slot_offset, value); + parent_struct_builder.getBuilderImpl().setDataField(slot_offset, getValue(column, row_num)); } void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - auto & builder_impl = parent_list_builder.getBuilderImpl(); - UInt32 value = assert_cast(*column).getElement(row_num); - builder_impl.setDataElement(array_index, value); + parent_list_builder.getBuilderImpl().setDataElement(array_index, getValue(column, row_num)); } void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - UInt32 value = reader_impl.getDataField(slot_offset); - 
assert_cast(column).insertValue(IPv4(value)); + insertValue(column, parent_struct_reader.getReaderImpl().getDataField(slot_offset)); } void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - UInt32 value = reader_impl.getDataElement(array_index); + insertValue(column, parent_list_reader.getReaderImpl().getDataElement(array_index)); + } + + private: + UInt32 getValue(const ColumnPtr & column, size_t row_num) + { + return assert_cast(*column).getElement(row_num); + } + + void insertValue(IColumn & column, UInt32 value) + { assert_cast(column).insertValue(IPv4(value)); } }; + template + class CapnProtoFixedSizeRawDataSerializer : public ICapnProtoSerializer + { + private: + static constexpr size_t expected_value_size = sizeof(T); + + public: + CapnProtoFixedSizeRawDataSerializer(const DataTypePtr & data_type_, const String & column_name, const capnp::Type & capnp_type) : data_type(data_type_) + { + if (!capnp_type.isData()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override + { + parent_struct_builder.getBuilderImpl().getPointerField(slot_offset).setBlob(getData(column, row_num)); + } + + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_struct_builder, UInt32 array_index, size_t row_num) override + { + parent_struct_builder.getBuilderImpl().getPointerElement(array_index).setBlob(getData(column, row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + insertData(column, parent_struct_reader.getReaderImpl().getPointerField(slot_offset).getBlob(nullptr, 0)); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + insertData(column, parent_list_reader.getReaderImpl().getPointerElement(array_index).getBlob(nullptr, 0)); + } + + private: + capnp::Data::Reader getData(const ColumnPtr & column, size_t row_num) + { + auto data = column->getDataAt(row_num); + return capnp::Data::Reader(reinterpret_cast(data.data), data.size); + } + + void insertData(IColumn & column, capnp::Data::Reader data) + { + if (data.size() != expected_value_size) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected size of {} value: {}", data_type->getName(), data.size()); + + column.insertData(reinterpret_cast(data.begin()), data.size()); + } + + DataTypePtr data_type; + }; + + template + class CapnProtoStringSerializer : public ICapnProtoSerializer + { + public: + CapnProtoStringSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type) + { + if (!capnp_type.isData() && !capnp_type.isText()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override + { + parent_struct_builder.getBuilderImpl().getPointerField(slot_offset).setBlob(getData(column, row_num)); + } + + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_struct_builder, UInt32 array_index, size_t row_num) override + { + parent_struct_builder.getBuilderImpl().getPointerElement(array_index).setBlob(getData(column, 
row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + insertData(column, parent_struct_reader.getReaderImpl().getPointerField(slot_offset).getBlob(nullptr, 0)); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + insertData(column, parent_list_reader.getReaderImpl().getPointerElement(array_index).getBlob(nullptr, 0)); + } + + private: + using Reader = typename CapnpType::Reader; + + CapnpType::Reader getData(const ColumnPtr & column, size_t row_num) + { + auto data = column->getDataAt(row_num); + if constexpr (std::is_same_v) + return Reader(reinterpret_cast(data.data), data.size); + else + return Reader(data.data, data.size); + } + + void insertData(IColumn & column, Reader data) + { + column.insertData(reinterpret_cast(data.begin()), data.size()); + } + }; + + template + class CapnProtoFixedStringSerializer : public ICapnProtoSerializer + { + private: + + public: + CapnProtoFixedStringSerializer(const DataTypePtr & data_type, const String & column_name, const capnp::Type & capnp_type_) : capnp_type(capnp_type_) + { + if (!capnp_type.isData() && !capnp_type.isText()) + throwCannotConvert(data_type, column_name, capnp_type); + } + + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override + { + parent_struct_builder.getBuilderImpl().getPointerField(slot_offset).setBlob(getData(column, row_num)); + } + + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_struct_builder, UInt32 array_index, size_t row_num) override + { + parent_struct_builder.getBuilderImpl().getPointerElement(array_index).setBlob(getData(column, row_num)); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + insertData(column, parent_struct_reader.getReaderImpl().getPointerField(slot_offset).getBlob(nullptr, 0)); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + insertData(column, parent_list_reader.getReaderImpl().getPointerElement(array_index).getBlob(nullptr, 0)); + } + + private: + using Reader = typename CapnpType::Reader; + + CapnpType::Reader getData(const ColumnPtr & column, size_t row_num) + { + auto data = column->getDataAt(row_num); + if constexpr (std::is_same_v) + { + return Reader(reinterpret_cast(data.data), data.size); + } + else + { + if (data.data[data.size - 1] == 0) + return Reader(data.data, data.size); + + /// In TEXT type data should be null-terminated, but ClickHouse FixedString data could not be. + /// To make data null-terminated we should copy it to temporary String object and use it in capnp::Text::Reader. + /// Note that capnp::Text::Reader works only with pointer to the data and it's size, so we should + /// guarantee that new String object life time is longer than capnp::Text::Reader life time. 
+ tmp_string = data.toString(); + return Reader(tmp_string.data(), tmp_string.size()); + } + } + + void insertData(IColumn & column, Reader data) + { + auto & fixed_string_column = assert_cast(column); + if (data.size() > fixed_string_column.getN()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read data with size {} to FixedString with size {}", data.size(), fixed_string_column.getN()); + + fixed_string_column.insertData(reinterpret_cast(data.begin()), data.size()); + } + + String tmp_string; + capnp::Type capnp_type; + }; + std::unique_ptr createSerializer(const DataTypePtr & type, const String & name, const capnp::Type & capnp_type, const FormatSettings::CapnProto & settings); class CapnProtoLowCardinalitySerializer : public ICapnProtoSerializer @@ -843,37 +789,43 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - const auto & low_cardinality_column = assert_cast(*column); - size_t index = low_cardinality_column.getIndexAt(row_num); - const auto & dict_column = low_cardinality_column.getDictionary().getNestedColumn(); - nested_serializer->writeRow(dict_column, field_builder, parent_struct_builder, slot_offset, index); + writeRowImpl(column, field_builder, parent_struct_builder, slot_offset, row_num); } void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - const auto & low_cardinality_column = assert_cast(*column); - size_t index = low_cardinality_column.getIndexAt(row_num); - const auto & dict_column = low_cardinality_column.getDictionary().getNestedColumn(); - nested_serializer->writeRow(dict_column, field_builder, parent_list_builder, array_index, index); + writeRowImpl(column, field_builder, parent_list_builder, array_index, row_num); } void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - auto & low_cardinality_column = assert_cast(column); - auto tmp_column = low_cardinality_column.getDictionary().getNestedColumn()->cloneEmpty(); - nested_serializer->readRow(*tmp_column, parent_struct_reader, slot_offset); - low_cardinality_column.insertFromFullColumn(*tmp_column, 0); + readRowImpl(column, parent_struct_reader, slot_offset); } void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - auto & low_cardinality_column = assert_cast(column); - auto tmp_column = low_cardinality_column.getDictionary().getNestedColumn()->cloneEmpty(); - nested_serializer->readRow(*tmp_column, parent_list_reader, array_index); - low_cardinality_column.insertFromFullColumn(*tmp_column, 0); + readRowImpl(column, parent_list_reader, array_index); } private: + template + void writeRowImpl(const ColumnPtr & column, std::unique_ptr & field_builder, ParentBuilder & parent_builder, UInt32 offset_or_index, size_t row_num) + { + const auto & low_cardinality_column = assert_cast(*column); + size_t index = low_cardinality_column.getIndexAt(row_num); + const auto & dict_column = low_cardinality_column.getDictionary().getNestedColumn(); + nested_serializer->writeRow(dict_column, field_builder, parent_builder, offset_or_index, index); + } + + template + void readRowImpl(IColumn & column, const ParentReader & parent_reader, UInt32 offset_or_index) + { + auto & low_cardinality_column = assert_cast(column); + auto tmp_column = 
low_cardinality_column.getDictionary().getNestedColumn()->cloneEmpty(); + nested_serializer->readRow(*tmp_column, parent_reader, offset_or_index); + low_cardinality_column.insertFromFullColumn(*tmp_column, 0); + } + std::unique_ptr nested_serializer; }; @@ -938,38 +890,32 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - if (!field_builder) - { - auto builder_impl = parent_struct_builder.getBuilderImpl(); - auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getPointerField(slot_offset).initStruct(struct_size)); - field_builder = std::make_unique(std::move(struct_builder_impl), 1); - } - - auto & struct_builder = assert_cast(*field_builder); - - const auto & nullable_column = assert_cast(*column); - if (nullable_column.isNullAt(row_num)) - { - auto struct_builder_impl = struct_builder.impl.getBuilderImpl(); - struct_builder_impl.setDataField(discriminant_offset, null_discriminant); - struct_builder_impl.setDataField(nested_slot_offset, capnp::Void()); - } - else - { - const auto & nested_column = nullable_column.getNestedColumnPtr(); - struct_builder.impl.getBuilderImpl().setDataField(discriminant_offset, nested_discriminant); - nested_serializer->writeRow(nested_column, struct_builder.field_builders[0], struct_builder.impl, nested_slot_offset, row_num); - } + writeRowImpl(column, field_builder, parent_struct_builder, slot_offset, row_num); } void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override + { + writeRowImpl(column, field_builder, parent_list_builder, array_index, row_num); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, parent_struct_reader.getReaderImpl().getPointerField(slot_offset).getStruct(nullptr)); + readRowImpl(column, struct_reader); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, parent_list_reader.getReaderImpl().getStructElement(array_index)); + readRowImpl(column, struct_reader); + } + + private: + template + void writeRowImpl(const ColumnPtr & column, std::unique_ptr & field_builder, ParentBuilder & parent_builder, UInt32 offset_or_index, size_t row_num) { if (!field_builder) - { - auto builder_impl = parent_list_builder.getBuilderImpl(); - auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getStructElement(array_index)); - field_builder = std::make_unique(std::move(struct_builder_impl), 1); - } + field_builder = initStructBuilder(parent_builder, offset_or_index, struct_size, 1, struct_schema); auto & struct_builder = assert_cast(*field_builder); @@ -988,12 +934,9 @@ namespace } } - void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + void readRowImpl(IColumn & column, capnp::DynamicStruct::Reader & struct_reader) { auto & nullable_column = assert_cast(column); - auto reader_impl = parent_struct_reader.getReaderImpl(); - auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getPointerField(slot_offset).getStruct(nullptr)); - auto discriminant = 
struct_reader.getReaderImpl().getDataField(discriminant_offset); if (discriminant == null_discriminant) @@ -1006,25 +949,7 @@ namespace } } - void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override - { - auto & nullable_column = assert_cast(column); - auto reader_impl = parent_list_reader.getReaderImpl(); - auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getStructElement(array_index)); - auto discriminant = struct_reader.getReaderImpl().getDataField(discriminant_offset); - - if (discriminant == null_discriminant) - nullable_column.insertDefault(); - else - { - auto & nested_column = nullable_column.getNestedColumn(); - nested_serializer->readRow(nested_column, struct_reader, nested_slot_offset); - nullable_column.getNullMapData().push_back(0); - } - } - - private: std::unique_ptr nested_serializer; capnp::StructSchema struct_schema; capnp::_::StructSize struct_size; @@ -1058,29 +983,29 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - const auto * array_column = assert_cast(column.get()); - const auto & nested_column = array_column->getDataPtr(); - const auto & offsets = array_column->getOffsets(); - auto offset = offsets[row_num - 1]; - UInt32 size = static_cast(offsets[row_num] - offset); - - if (!field_builder) - { - auto builder_impl = parent_struct_builder.getBuilderImpl(); - capnp::DynamicList::Builder list_builder_impl; - if (element_is_struct) - list_builder_impl = capnp::DynamicList::Builder(list_schema, builder_impl.getPointerField(slot_offset).initStructList(size, element_struct_size)); - else - list_builder_impl = capnp::DynamicList::Builder(list_schema, builder_impl.getPointerField(slot_offset).initList(element_size, size)); - field_builder = std::make_unique(std::move(list_builder_impl), size); - } - - auto & list_builder = assert_cast(*field_builder); - for (UInt32 i = 0; i != size; ++i) - nested_serializer->writeRow(nested_column, list_builder.nested_builders[i], list_builder.impl, i, offset + i); + writeRowImpl(column, field_builder, parent_struct_builder, slot_offset, row_num); } void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override + { + writeRowImpl(column, field_builder, parent_list_builder, array_index, row_num); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + auto list_reader = capnp::DynamicList::Reader(list_schema, parent_struct_reader.getReaderImpl().getPointerField(slot_offset).getList(element_size, nullptr)); + readRowImpl(column, list_reader); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + auto list_reader = capnp::DynamicList::Reader(list_schema, parent_list_reader.getReaderImpl().getPointerElement(array_index).getList(element_size, nullptr)); + readRowImpl(column, list_reader); + } + + private: + template + void writeRowImpl(const ColumnPtr & column, std::unique_ptr & field_builder, ParentBuilder & parent_builder, UInt32 offset_or_index, size_t row_num) { const auto * array_column = assert_cast(column.get()); const auto & nested_column = array_column->getDataPtr(); @@ -1089,25 +1014,32 @@ namespace UInt32 size = static_cast(offsets[row_num] - offset); if 
(!field_builder) - { - auto builder_impl = parent_list_builder.getBuilderImpl(); - capnp::DynamicList::Builder list_builder_impl; - if (element_is_struct) - list_builder_impl = capnp::DynamicList::Builder(list_schema, builder_impl.getPointerElement(array_index).initStructList(size, element_struct_size)); - else - list_builder_impl = capnp::DynamicList::Builder(list_schema, builder_impl.getPointerElement(array_index).initList(element_size, size)); - field_builder = std::make_unique(std::move(list_builder_impl), size); - } + field_builder = std::make_unique(capnp::DynamicList::Builder(list_schema, initListBuilder(parent_builder, offset_or_index, size)), size); auto & list_builder = assert_cast(*field_builder); for (UInt32 i = 0; i != size; ++i) nested_serializer->writeRow(nested_column, list_builder.nested_builders[i], list_builder.impl, i, offset + i); } - void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + template + capnp::_::ListBuilder initListBuilder(ParentBuilder & parent_builder, UInt32 offset_or_index, UInt32 size) + { + if (element_is_struct) + { + if constexpr (std::is_same_v) + return parent_builder.getBuilderImpl().getPointerField(offset_or_index).initStructList(size, element_struct_size); + else + return parent_builder.getBuilderImpl().getPointerElement(offset_or_index).initStructList(size, element_struct_size); + } + + if constexpr (std::is_same_v) + return parent_builder.getBuilderImpl().getPointerField(offset_or_index).initList(element_size, size); + else + return parent_builder.getBuilderImpl().getPointerElement(offset_or_index).initList(element_size, size); + } + + void readRowImpl(IColumn & column, const capnp::DynamicList::Reader & list_reader) { - const auto & reader_impl = parent_struct_reader.getReaderImpl(); - auto list_reader = capnp::DynamicList::Reader(list_schema, reader_impl.getPointerField(slot_offset).getList(element_size, nullptr)); UInt32 size = list_reader.size(); auto & column_array = assert_cast(column); auto & offsets = column_array.getOffsets(); @@ -1118,21 +1050,6 @@ namespace nested_serializer->readRow(nested_column, list_reader, i); } - void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override - { - const auto & reader_impl = parent_list_reader.getReaderImpl(); - auto list_reader = capnp::DynamicList::Reader(list_schema, reader_impl.getPointerElement(array_index).getList(element_size, nullptr)); - UInt32 size = list_reader.size(); - auto & column_array = assert_cast(column); - auto & offsets = column_array.getOffsets(); - offsets.push_back(offsets.back() + list_reader.size()); - - auto & nested_column = column_array.getData(); - for (UInt32 i = 0; i != size; ++i) - nested_serializer->readRow(nested_column, list_reader, i); - } - - private: capnp::ListSchema list_schema; std::unique_ptr nested_serializer; capnp::ElementSize element_size; @@ -1219,49 +1136,44 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - if (!field_builder) - { - auto builder_impl = parent_struct_builder.getBuilderImpl(); - auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getPointerField(slot_offset).initStruct(struct_size)); - field_builder = std::make_unique(std::move(struct_builder_impl), 1); - } - - auto & struct_builder = assert_cast(*field_builder); - const auto & entries_column 
= assert_cast(column.get())->getNestedColumnPtr(); - nested_serializer->writeRow(entries_column, struct_builder.field_builders[0], struct_builder.impl, entries_slot_offset, row_num); + writeRowImpl(column, field_builder, parent_struct_builder, slot_offset, row_num); } void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override + { + writeRowImpl(column, field_builder, parent_list_builder, array_index, row_num); + } + + void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + { + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, parent_struct_reader.getReaderImpl().getPointerField(slot_offset).getStruct(nullptr)); + readRowImpl(column, struct_reader); + } + + void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override + { + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, parent_list_reader.getReaderImpl().getStructElement(array_index)); + readRowImpl(column, struct_reader); + } + + private: + template + void writeRowImpl(const ColumnPtr & column, std::unique_ptr & field_builder, ParentBuilder & parent_builder, UInt32 offset_or_index, size_t row_num) { if (!field_builder) - { - auto builder_impl = parent_list_builder.getBuilderImpl(); - auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getStructElement(array_index)); - field_builder = std::make_unique(std::move(struct_builder_impl), 1); - } + field_builder = initStructBuilder(parent_builder, offset_or_index, struct_size, 1, struct_schema); auto & struct_builder = assert_cast(*field_builder); const auto & entries_column = assert_cast(column.get())->getNestedColumnPtr(); nested_serializer->writeRow(entries_column, struct_builder.field_builders[0], struct_builder.impl, entries_slot_offset, row_num); } - void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override + void readRowImpl(IColumn & column, const capnp::DynamicStruct::Reader & struct_reader) { - auto reader_impl = parent_struct_reader.getReaderImpl(); - auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getPointerField(slot_offset).getStruct(nullptr)); auto & entries_column = assert_cast(column).getNestedColumn(); nested_serializer->readRow(entries_column, struct_reader, entries_slot_offset); } - void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override - { - auto reader_impl = parent_list_reader.getReaderImpl(); - auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getStructElement(array_index)); - auto & entries_column = assert_cast(column).getNestedColumn(); - nested_serializer->readRow(entries_column, struct_reader, entries_slot_offset); - } - - private: std::unique_ptr nested_serializer; capnp::StructSchema struct_schema; capnp::_::StructSize struct_size; @@ -1332,48 +1244,15 @@ namespace void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) override { - if (!field_builder) - { - auto builder_impl = parent_struct_builder.getBuilderImpl(); - auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getPointerField(slot_offset).initStruct(struct_size)); - field_builder = 
std::make_unique(std::move(struct_builder_impl), fields_count); - } - - auto & struct_builder = assert_cast(*field_builder); - if (const auto * tuple_column = typeid_cast(column.get())) - { - const auto & columns = tuple_column->getColumns(); - for (size_t i = 0; i != columns.size(); ++i) - fields_serializers[i]->writeRow(columns[i], struct_builder.field_builders[fields_indexes[i]], struct_builder.impl, fields_offsets[i], row_num); - } - else - { - fields_serializers[0]->writeRow(column, struct_builder.field_builders[fields_indexes[0]], struct_builder.impl, fields_offsets[0], row_num); - } + writeRowImpl(column, field_builder, parent_struct_builder, slot_offset, row_num); } void writeRow(const ColumnPtr & column, std::unique_ptr & field_builder, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { - if (!field_builder) - { - auto builder_impl = parent_list_builder.getBuilderImpl(); - auto struct_builder_impl = capnp::DynamicStruct::Builder(struct_schema, builder_impl.getStructElement(array_index)); - field_builder = std::make_unique(std::move(struct_builder_impl), fields_count); - } - - auto & struct_builder = assert_cast(*field_builder); - if (const auto * tuple_column = typeid_cast(column.get())) - { - const auto & columns = tuple_column->getColumns(); - for (size_t i = 0; i != columns.size(); ++i) - fields_serializers[i]->writeRow(columns[i], struct_builder.field_builders[fields_indexes[i]], struct_builder.impl, fields_offsets[i], row_num); - } - else - { - fields_serializers[0]->writeRow(column, struct_builder.field_builders[fields_indexes[0]], struct_builder.impl, fields_offsets[0], row_num); - } + writeRowImpl(column, field_builder, parent_list_builder, array_index, row_num); } + /// Method for writing root struct. 
void writeRow(const Columns & columns, StructBuilder & struct_builder, size_t row_num) { for (size_t i = 0; i != columns.size(); ++i) @@ -1382,30 +1261,17 @@ namespace void readRow(IColumn & column, const capnp::DynamicStruct::Reader & parent_struct_reader, UInt32 slot_offset) override { - auto reader_impl = parent_struct_reader.getReaderImpl(); - auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getPointerField(slot_offset).getStruct(nullptr)); - if (auto * tuple_column = typeid_cast(&column)) - { - for (size_t i = 0; i != tuple_column->tupleSize(); ++i) - fields_serializers[i]->readRow(tuple_column->getColumn(i), struct_reader, fields_offsets[i]); - } - else - fields_serializers[0]->readRow(column, struct_reader, fields_offsets[0]); + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, parent_struct_reader.getReaderImpl().getPointerField(slot_offset).getStruct(nullptr)); + readRowImpl(column, struct_reader); } void readRow(IColumn & column, const capnp::DynamicList::Reader & parent_list_reader, UInt32 array_index) override { - auto reader_impl = parent_list_reader.getReaderImpl(); - auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, reader_impl.getStructElement(array_index)); - if (auto * tuple_column = typeid_cast(&column)) - { - for (size_t i = 0; i != tuple_column->tupleSize(); ++i) - fields_serializers[i]->readRow(tuple_column->getColumn(i), struct_reader, fields_offsets[i]); - } - else - fields_serializers[0]->readRow(column, struct_reader, fields_offsets[0]); + auto struct_reader = capnp::DynamicStruct::Reader(struct_schema, parent_list_reader.getReaderImpl().getStructElement(array_index)); + readRowImpl(column, struct_reader); } + /// Method for reading from root struct. void readRow(MutableColumns & columns, const capnp::DynamicStruct::Reader & reader) { for (size_t i = 0; i != columns.size(); ++i) @@ -1435,6 +1301,36 @@ namespace } } + template + void writeRowImpl(const ColumnPtr & column, std::unique_ptr & field_builder, ParentBuilder & parent_builder, UInt32 offset_or_index, size_t row_num) + { + if (!field_builder) + field_builder = initStructBuilder(parent_builder, offset_or_index, struct_size, fields_count, struct_schema); + + auto & struct_builder = assert_cast(*field_builder); + if (const auto * tuple_column = typeid_cast(column.get())) + { + const auto & columns = tuple_column->getColumns(); + for (size_t i = 0; i != columns.size(); ++i) + fields_serializers[i]->writeRow(columns[i], struct_builder.field_builders[fields_indexes[i]], struct_builder.impl, fields_offsets[i], row_num); + } + else + { + fields_serializers[0]->writeRow(column, struct_builder.field_builders[fields_indexes[0]], struct_builder.impl, fields_offsets[0], row_num); + } + } + + void readRowImpl(IColumn & column, const capnp::DynamicStruct::Reader & struct_reader) + { + if (auto * tuple_column = typeid_cast(&column)) + { + for (size_t i = 0; i != tuple_column->tupleSize(); ++i) + fields_serializers[i]->readRow(tuple_column->getColumn(i), struct_reader, fields_offsets[i]); + } + else + fields_serializers[0]->readRow(column, struct_reader, fields_offsets[0]); + } + capnp::StructSchema struct_schema; capnp::_::StructSize struct_size; size_t fields_count; @@ -1515,12 +1411,12 @@ namespace return std::make_unique>(type, name, capnp_type, settings.enum_comparing_mode); case TypeIndex::String: if (capnp_type.isData()) - return std::make_unique>(type, name, capnp_type); - return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, 
name, capnp_type); + return std::make_unique>(type, name, capnp_type); case TypeIndex::FixedString: if (capnp_type.isData()) - return std::make_unique>(type, name, capnp_type); - return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); + return std::make_unique>(type, name, capnp_type); case TypeIndex::LowCardinality: return std::make_unique(type, name, capnp_type, settings); case TypeIndex::Nullable: diff --git a/src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp b/src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp new file mode 100644 index 00000000000..106161ae620 --- /dev/null +++ b/src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp @@ -0,0 +1,52 @@ +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +ASTExpressionList * extractTableFunctionArgumentsFromSelectQuery(ASTPtr & query) +{ + auto * select_query = query->as(); + if (!select_query || !select_query->tables()) + return nullptr; + + auto * tables = select_query->tables()->as(); + auto * table_expression = tables->children[0]->as()->table_expression->as(); + if (!table_expression->table_function) + return nullptr; + + auto * table_function = table_expression->table_function->as(); + return table_function->arguments->as(); +} + +void addColumnsStructureToQueryWithClusterEngine(ASTPtr & query, const String & structure, size_t max_arguments, const String & function_name) +{ + ASTExpressionList * expression_list = extractTableFunctionArgumentsFromSelectQuery(query); + if (!expression_list) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected SELECT query from table function {}, got '{}'", function_name, queryToString(query)); + auto structure_literal = std::make_shared(structure); + + if (expression_list->children.size() < 2 || expression_list->children.size() > max_arguments) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected 2 to {} arguments in {} table functions, got {}", + function_name, max_arguments, expression_list->children.size()); + + if (expression_list->children.size() == 2 || expression_list->children.size() == max_arguments - 1) + { + auto format_literal = std::make_shared("auto"); + expression_list->children.push_back(format_literal); + } + + expression_list->children.push_back(structure_literal); +} + +} diff --git a/src/Storages/addColumnsStructureToQueryWithClusterEngine.h b/src/Storages/addColumnsStructureToQueryWithClusterEngine.h new file mode 100644 index 00000000000..5939f3f43aa --- /dev/null +++ b/src/Storages/addColumnsStructureToQueryWithClusterEngine.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include + +namespace DB +{ + +ASTExpressionList * extractTableFunctionArgumentsFromSelectQuery(ASTPtr & query); + +/// Add structure argument for queries with s3Cluster/hdfsCluster table function. 
+void addColumnsStructureToQueryWithClusterEngine(ASTPtr & query, const String & structure, size_t max_arguments, const String & function_name); + +} From f2e076a4431e1515d46b039ef248ddd5fd33de81 Mon Sep 17 00:00:00 2001 From: ltrk2 <107155950+ltrk2@users.noreply.github.com> Date: Wed, 31 May 2023 19:33:32 +0000 Subject: [PATCH 151/722] Implement big-endian support for the deterministic reservoir sampler --- .../ReservoirSamplerDeterministic.h | 14 ++--- src/Common/TransformEndianness.hpp | 54 +++++++++++++++++++ src/IO/ReadHelpers.h | 22 +------- src/IO/WriteHelpers.h | 22 +------- 4 files changed, 65 insertions(+), 47 deletions(-) create mode 100644 src/Common/TransformEndianness.hpp diff --git a/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h index bde33260f5a..9dea821e839 100644 --- a/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ b/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -157,8 +157,8 @@ public: void read(DB::ReadBuffer & buf) { size_t size = 0; - DB::readIntBinary(size, buf); - DB::readIntBinary(total_values, buf); + readBinaryLittleEndian(size, buf); + readBinaryLittleEndian(total_values, buf); /// Compatibility with old versions. if (size > total_values) @@ -171,16 +171,16 @@ public: samples.resize(size); for (size_t i = 0; i < size; ++i) - DB::readPODBinary(samples[i], buf); + readBinaryLittleEndian(samples[i], buf); sorted = false; } void write(DB::WriteBuffer & buf) const { - size_t size = samples.size(); - DB::writeIntBinary(size, buf); - DB::writeIntBinary(total_values, buf); + const auto size = samples.size(); + writeBinaryLittleEndian(size, buf); + writeBinaryLittleEndian(total_values, buf); for (size_t i = 0; i < size; ++i) { @@ -195,7 +195,7 @@ public: memset(&elem, 0, sizeof(elem)); elem = samples[i]; - DB::writePODBinary(elem, buf); + writeBinaryLittleEndian(elem, buf); } } diff --git a/src/Common/TransformEndianness.hpp b/src/Common/TransformEndianness.hpp new file mode 100644 index 00000000000..17cf441d17f --- /dev/null +++ b/src/Common/TransformEndianness.hpp @@ -0,0 +1,54 @@ +#pragma once + +#include +#include + +#include + +namespace DB +{ +template + requires is_big_int_v +inline void transformEndianness(T & x) +{ + if constexpr (std::endian::native != endian) + { + std::ranges::transform(x.items, std::begin(x.items), [](auto& item) { return std::byteswap(item); }); + std::ranges::reverse(x.items); + } +} + +template + requires is_decimal || std::is_floating_point_v +inline void transformEndianness(T & value) +{ + if constexpr (std::endian::native != endian) + { + auto * start = reinterpret_cast(&value); + std::reverse(start, start + sizeof(T)); + } +} + +template + requires std::is_integral_v && (sizeof(T) <= 8) +inline void transformEndianness(T & value) +{ + if constexpr (endian != std::endian::native) + value = std::byteswap(value); +} + +template + requires std::is_scoped_enum_v +inline void transformEndianness(T & x) +{ + using UnderlyingType = std::underlying_type_t; + transformEndianness(reinterpret_cast(x)); +} + +template +inline void transformEndianness(std::pair & pair) +{ + transformEndianness(pair.first); + transformEndianness(pair.second); +} +} diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 32338552b66..c42e992c807 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -1098,30 +1099,11 @@ inline void readBinary(Decimal128 & x, ReadBuffer & buf) { 
readPODBinary(x, buf) inline void readBinary(Decimal256 & x, ReadBuffer & buf) { readPODBinary(x.value, buf); } inline void readBinary(LocalDate & x, ReadBuffer & buf) { readPODBinary(x, buf); } - template -requires is_arithmetic_v && (sizeof(T) <= 8) inline void readBinaryEndian(T & x, ReadBuffer & buf) { readPODBinary(x, buf); - if constexpr (std::endian::native != endian) - x = std::byteswap(x); -} - -template -requires is_big_int_v -inline void readBinaryEndian(T & x, ReadBuffer & buf) -{ - if constexpr (std::endian::native == endian) - { - for (size_t i = 0; i != std::size(x.items); ++i) - readBinaryEndian(x.items[i], buf); - } - else - { - for (size_t i = 0; i != std::size(x.items); ++i) - readBinaryEndian(x.items[std::size(x.items) - i - 1], buf); - } + transformEndianness(x); } template diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index cdbc952690c..26c999cb761 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -1172,32 +1173,13 @@ inline void writeNullTerminatedString(const String & s, WriteBuffer & buffer) buffer.write(s.c_str(), s.size() + 1); } - template -requires is_arithmetic_v && (sizeof(T) <= 8) inline void writeBinaryEndian(T x, WriteBuffer & buf) { - if constexpr (std::endian::native != endian) - x = std::byteswap(x); + transformEndianness(x); writePODBinary(x, buf); } -template -requires is_big_int_v -inline void writeBinaryEndian(const T & x, WriteBuffer & buf) -{ - if constexpr (std::endian::native == endian) - { - for (size_t i = 0; i != std::size(x.items); ++i) - writeBinaryEndian(x.items[i], buf); - } - else - { - for (size_t i = 0; i != std::size(x.items); ++i) - writeBinaryEndian(x.items[std::size(x.items) - i - 1], buf); - } -} - template inline void writeBinaryLittleEndian(T x, WriteBuffer & buf) { From 4b46486491a80ab7a0f2bd62d6c6e4fb606aa429 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 31 May 2023 21:52:58 +0200 Subject: [PATCH 152/722] Clean up --- ...lumnsStructureToQueryWithClusterEngine.cpp | 52 ------------------- 1 file changed, 52 deletions(-) delete mode 100644 src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp diff --git a/src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp b/src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp deleted file mode 100644 index 106161ae620..00000000000 --- a/src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp +++ /dev/null @@ -1,52 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - -ASTExpressionList * extractTableFunctionArgumentsFromSelectQuery(ASTPtr & query) -{ - auto * select_query = query->as(); - if (!select_query || !select_query->tables()) - return nullptr; - - auto * tables = select_query->tables()->as(); - auto * table_expression = tables->children[0]->as()->table_expression->as(); - if (!table_expression->table_function) - return nullptr; - - auto * table_function = table_expression->table_function->as(); - return table_function->arguments->as(); -} - -void addColumnsStructureToQueryWithClusterEngine(ASTPtr & query, const String & structure, size_t max_arguments, const String & function_name) -{ - ASTExpressionList * expression_list = extractTableFunctionArgumentsFromSelectQuery(query); - if (!expression_list) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected SELECT query from table function {}, 
got '{}'", function_name, queryToString(query)); - auto structure_literal = std::make_shared(structure); - - if (expression_list->children.size() < 2 || expression_list->children.size() > max_arguments) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected 2 to {} arguments in {} table functions, got {}", - function_name, max_arguments, expression_list->children.size()); - - if (expression_list->children.size() == 2 || expression_list->children.size() == max_arguments - 1) - { - auto format_literal = std::make_shared("auto"); - expression_list->children.push_back(format_literal); - } - - expression_list->children.push_back(structure_literal); -} - -} From 4987c4baaa0c10ed1114155a0d35db3953f34ab9 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 31 May 2023 21:53:21 +0200 Subject: [PATCH 153/722] Clean up --- .../addColumnsStructureToQueryWithClusterEngine.h | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 src/Storages/addColumnsStructureToQueryWithClusterEngine.h diff --git a/src/Storages/addColumnsStructureToQueryWithClusterEngine.h b/src/Storages/addColumnsStructureToQueryWithClusterEngine.h deleted file mode 100644 index 5939f3f43aa..00000000000 --- a/src/Storages/addColumnsStructureToQueryWithClusterEngine.h +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once - -#include -#include - -namespace DB -{ - -ASTExpressionList * extractTableFunctionArgumentsFromSelectQuery(ASTPtr & query); - -/// Add structure argument for queries with s3Cluster/hdfsCluster table function. -void addColumnsStructureToQueryWithClusterEngine(ASTPtr & query, const String & structure, size_t max_arguments, const String & function_name); - -} From 8b34a30455fd42928e5c89c503a25cf5d02ccff8 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 31 May 2023 22:14:57 +0200 Subject: [PATCH 154/722] Fix style --- src/Formats/CapnProtoSerializer.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index e36f5fa4947..e99db23bb5e 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -29,7 +29,6 @@ namespace DB namespace ErrorCodes { extern const int THERE_IS_NO_COLUMN; - extern const int LOGICAL_ERROR; extern const int CAPN_PROTO_BAD_CAST; extern const int INCORRECT_DATA; extern const int ILLEGAL_COLUMN; @@ -371,7 +370,7 @@ namespace { parent_struct_builder.getBuilderImpl().setDataField(slot_offset, getValue(column, row_num)); } - + void writeRow(const ColumnPtr & column, std::unique_ptr &, capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) override { parent_list_builder.getBuilderImpl().setDataElement(array_index, getValue(column, row_num)); From 4ba08a5cbc960cace3dfcf32b3497855b3ffe6fd Mon Sep 17 00:00:00 2001 From: zvonand Date: Wed, 31 May 2023 23:21:39 +0200 Subject: [PATCH 155/722] remove unused import --- src/DataTypes/Serializations/SerializationDate.h | 2 +- src/DataTypes/Serializations/SerializationDate32.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/DataTypes/Serializations/SerializationDate.h b/src/DataTypes/Serializations/SerializationDate.h index 4d6a6fa36ec..f751b06fba6 100644 --- a/src/DataTypes/Serializations/SerializationDate.h +++ b/src/DataTypes/Serializations/SerializationDate.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB { diff --git 
a/src/DataTypes/Serializations/SerializationDate32.h b/src/DataTypes/Serializations/SerializationDate32.h
index 6b6e5442240..49560fb6c7d 100644
--- a/src/DataTypes/Serializations/SerializationDate32.h
+++ b/src/DataTypes/Serializations/SerializationDate32.h
@@ -1,7 +1,7 @@
 #pragma once
 
 #include
-#include
+#include
 
 namespace DB
 {

From 4c92bc7aadf354b713d6b8f3c24728d6172f1867 Mon Sep 17 00:00:00 2001
From: tpanetti
Date: Wed, 31 May 2023 15:30:26 -0700
Subject: [PATCH 156/722] Fix incompatible ClickHouse -> MySQL types for compatibility mode

This adjusts specific incompatible ClickHouse types to a format that can be read and interpreted by MySQL (Ex: Int128 -> text)
---
 src/DataTypes/DataTypeArray.h | 2 +-
 src/DataTypes/DataTypeLowCardinality.cpp | 3 +-
 src/DataTypes/DataTypeLowCardinality.h | 4 +-
 src/DataTypes/DataTypeNumberBase.cpp | 8 +-
 src/DataTypes/DataTypeString.h | 2 +-
 ...show_columns_mysql_compatibility.reference | 229 ++++++++++++++++++
 ...02775_show_columns_mysql_compatibility.sh} | 23 +-
 7 files changed, 256 insertions(+), 15 deletions(-)
 create mode 100644 tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference
 rename tests/queries/0_stateless/{02740_show_columns_mysql_compatibility.sh => 02775_show_columns_mysql_compatibility.sh} (80%)

diff --git a/src/DataTypes/DataTypeArray.h b/src/DataTypes/DataTypeArray.h
index 35462df9a4e..b031f411975 100644
--- a/src/DataTypes/DataTypeArray.h
+++ b/src/DataTypes/DataTypeArray.h
@@ -32,7 +32,7 @@ public:
     }
     const char * getMySQLName() const override
     {
-        return "string";
+        return "text";
     }
 
     bool canBeInsideNullable() const override
diff --git a/src/DataTypes/DataTypeLowCardinality.cpp b/src/DataTypes/DataTypeLowCardinality.cpp
index 8293455cabc..b1c32317015 100644
--- a/src/DataTypes/DataTypeLowCardinality.cpp
+++ b/src/DataTypes/DataTypeLowCardinality.cpp
@@ -28,7 +28,8 @@ namespace ErrorCodes
 }
 
 DataTypeLowCardinality::DataTypeLowCardinality(DataTypePtr dictionary_type_)
-    : dictionary_type(std::move(dictionary_type_))
+    : dictionary_type(std::move(dictionary_type_)),
+    mysql_name(dictionary_type->getMySQLName())
 {
     auto inner_type = dictionary_type;
     if (dictionary_type->isNullable())
diff --git a/src/DataTypes/DataTypeLowCardinality.h b/src/DataTypes/DataTypeLowCardinality.h
index 6fd4344311c..bcc39f58ff7 100644
--- a/src/DataTypes/DataTypeLowCardinality.h
+++ b/src/DataTypes/DataTypeLowCardinality.h
@@ -11,6 +11,8 @@ class DataTypeLowCardinality : public IDataType
 {
 private:
     DataTypePtr dictionary_type;
+    std::string mysql_name;
+
 public:
     explicit DataTypeLowCardinality(DataTypePtr dictionary_type_);
 
@@ -22,7 +24,7 @@ public:
         return "LowCardinality(" + dictionary_type->getName() + ")";
     }
     const char * getFamilyName() const override { return "LowCardinality"; }
-    const char * getMySQLName() const override { return "text"; }
+    const char * getMySQLName() const override { return mysql_name.c_str(); }
 
     TypeIndex getTypeId() const override { return TypeIndex::LowCardinality; }
 
diff --git a/src/DataTypes/DataTypeNumberBase.cpp b/src/DataTypes/DataTypeNumberBase.cpp
index cd5e73ac4a1..7d200de7996 100644
--- a/src/DataTypes/DataTypeNumberBase.cpp
+++ b/src/DataTypes/DataTypeNumberBase.cpp
@@ -36,14 +36,14 @@ const std::map DataTypeNumberBase::mysqlTypeMap = {
     {"UInt16", "smallint unsigned"},
     {"UInt32", "mediumint unsigned"},
     {"UInt64", "bigint unsigned"},
-    {"UInt128", "bigint unsigned"},
-    {"UInt256", "bigint unsigned"},
+    {"UInt128", "text"},
+    {"UInt256", "text"},
     {"Int8", "tinyint"},
     {"Int16", "smallint"},
    {"Int32", "int"},
{"Int64", "bigint"}, - {"Int128", "bigint"}, - {"Int256", "bigint"}, + {"Int128", "text"}, + {"Int256", "text"}, {"Float32", "float"}, {"Float64", "double"}, }; diff --git a/src/DataTypes/DataTypeString.h b/src/DataTypes/DataTypeString.h index 3ac739fe68c..bddfb4ae287 100644 --- a/src/DataTypes/DataTypeString.h +++ b/src/DataTypes/DataTypeString.h @@ -22,7 +22,7 @@ public: } // FIXME: string can contain arbitrary bytes, not only UTF-8 sequences - const char * getMySQLName() const override { return "text"; } + const char * getMySQLName() const override { return "blob"; } TypeIndex getTypeId() const override { return type_id; } diff --git a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference new file mode 100644 index 00000000000..96e542611c6 --- /dev/null +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference @@ -0,0 +1,229 @@ +Drop tables if they exist +Create tab table +Create pseudo-random database name +Create tab duplicate table +Run MySQL test +field type null key default extra +aggregate_function text 0 NULL +array_value text 0 NULL +boolean_value tinyint unsigned 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value datetime 0 NULL +datetime_value datetime 0 NULL +decimal_value decimal 0 NULL +enum_value enum('apple', 'banana', 'orange') 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value json 0 NULL +low_cardinality blob 0 NULL +low_cardinality_date datetime 0 NULL +map_value json 0 NULL +nested.nested_int text 0 NULL +nested.nested_string text 0 NULL +nullable_value int 0 NULL +string_value blob 0 NULL +tuple_value json 0 NULL +uint64 bigint unsigned 0 PRI SOR NULL +uuid_value char 0 NULL +field type null key default extra +aggregate_function text 0 NULL +array_value text 0 NULL +boolean_value tinyint unsigned 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value datetime 0 NULL +datetime_value datetime 0 NULL +decimal_value decimal 0 NULL +enum_value enum('apple', 'banana', 'orange') 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value json 0 NULL +low_cardinality blob 0 NULL +low_cardinality_date datetime 0 NULL +map_value json 0 NULL +nested.nested_int text 0 NULL +nested.nested_string text 0 NULL +nullable_value int 0 NULL +string_value blob 0 NULL +tuple_value json 0 NULL +uint64 bigint unsigned 0 PRI SOR NULL +uuid_value char 0 NULL +field type null key default extra collation comment privileges +aggregate_function text 0 NULL NULL +array_value text 0 NULL NULL +boolean_value tinyint unsigned 0 NULL NULL +date32_value date 0 NULL NULL +date_value date 0 NULL NULL +datetime64_value datetime 0 NULL NULL +datetime_value datetime 0 NULL NULL +decimal_value decimal 0 NULL NULL +enum_value enum('apple', 'banana', 'orange') 0 NULL NULL +fixed_string_value text 0 NULL NULL +float32 float 0 NULL NULL +float64 double 0 NULL NULL +int32 int 0 NULL NULL +ipv4_value text 0 NULL NULL +ipv6_value text 0 NULL NULL +json_value json 0 NULL NULL +low_cardinality blob 0 NULL NULL +low_cardinality_date datetime 0 NULL NULL +map_value json 0 NULL NULL +nested.nested_int text 0 NULL NULL +nested.nested_string text 0 NULL NULL +nullable_value int 0 NULL NULL +string_value blob 0 NULL NULL +tuple_value json 0 NULL NULL 
+uint64 bigint unsigned 0 PRI SOR NULL NULL +uuid_value char 0 NULL NULL +field type null key default extra +int32 int 0 NULL +nested.nested_int text 0 NULL +uint64 bigint unsigned 0 PRI SOR NULL +field type null key default extra +aggregate_function text 0 NULL +array_value text 0 NULL +boolean_value tinyint unsigned 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value datetime 0 NULL +datetime_value datetime 0 NULL +decimal_value decimal 0 NULL +enum_value enum('apple', 'banana', 'orange') 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value json 0 NULL +low_cardinality blob 0 NULL +low_cardinality_date datetime 0 NULL +map_value json 0 NULL +nested.nested_string text 0 NULL +nullable_value int 0 NULL +string_value blob 0 NULL +tuple_value json 0 NULL +uuid_value char 0 NULL +field type null key default extra +int32 int 0 NULL +nested.nested_int text 0 NULL +uint64 bigint unsigned 0 PRI SOR NULL +field type null key default extra +aggregate_function text 0 NULL +array_value text 0 NULL +boolean_value tinyint unsigned 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value datetime 0 NULL +datetime_value datetime 0 NULL +decimal_value decimal 0 NULL +enum_value enum('apple', 'banana', 'orange') 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value json 0 NULL +low_cardinality blob 0 NULL +low_cardinality_date datetime 0 NULL +map_value json 0 NULL +nested.nested_string text 0 NULL +nullable_value int 0 NULL +string_value blob 0 NULL +tuple_value json 0 NULL +uuid_value char 0 NULL +field type null key default extra +int32 int 0 NULL +nested.nested_int text 0 NULL +uint64 bigint unsigned 0 PRI SOR NULL +field type null key default extra +aggregate_function text 0 NULL +field type null key default extra +aggregate_function text 0 NULL +array_value text 0 NULL +boolean_value tinyint unsigned 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value datetime 0 NULL +datetime_value datetime 0 NULL +decimal_value decimal 0 NULL +enum_value enum('apple', 'banana', 'orange') 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value json 0 NULL +low_cardinality blob 0 NULL +low_cardinality_date datetime 0 NULL +map_value json 0 NULL +nested.nested_int text 0 NULL +nested.nested_string text 0 NULL +nullable_value int 0 NULL +string_value blob 0 NULL +tuple_value json 0 NULL +uint64 bigint unsigned 0 PRI SOR NULL +uuid_value char 0 NULL +field type null key default extra +aggregate_function text 0 NULL +array_value text 0 NULL +boolean_value tinyint unsigned 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value datetime 0 NULL +datetime_value datetime 0 NULL +decimal_value decimal 0 NULL +enum_value enum('apple', 'banana', 'orange') 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value json 0 NULL +low_cardinality blob 0 NULL +low_cardinality_date datetime 0 NULL +map_value json 0 NULL +nested.nested_int text 0 NULL +nested.nested_string text 0 NULL +nullable_value int 0 NULL +string_value blob 0 NULL +tuple_value json 0 NULL +uint64 bigint unsigned 0 PRI SOR NULL +uuid_value char 0 NULL +field type null key default extra 
+aggregate_function text 0 NULL +array_value text 0 NULL +boolean_value tinyint unsigned 0 NULL +date32_value date 0 NULL +date_value date 0 NULL +datetime64_value datetime 0 NULL +datetime_value datetime 0 NULL +decimal_value decimal 0 NULL +enum_value enum('apple', 'banana', 'orange') 0 NULL +fixed_string_value text 0 NULL +float32 float 0 NULL +float64 double 0 NULL +int32 int 0 NULL +ipv4_value text 0 NULL +ipv6_value text 0 NULL +json_value json 0 NULL +low_cardinality blob 0 NULL +low_cardinality_date datetime 0 NULL +map_value json 0 NULL +nested.nested_int text 0 NULL +nested.nested_string text 0 NULL +nullable_value int 0 NULL +string_value blob 0 NULL +tuple_value json 0 NULL +uint64 bigint unsigned 0 PRI SOR NULL +uuid_value char 0 NULL diff --git a/tests/queries/0_stateless/02740_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh similarity index 80% rename from tests/queries/0_stateless/02740_show_columns_mysql_compatibility.sh rename to tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh index 7f828d35679..a446c6e817e 100755 --- a/tests/queries/0_stateless/02740_show_columns_mysql_compatibility.sh +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh @@ -13,8 +13,11 @@ ${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS tab" ${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde" ${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde.tab" +#${CLICKHOUSE_LOCAL} --query "SET allow_suspicious_low_cardinality_types = 1;" echo "Create tab table " -${CLICKHOUSE_LOCAL} --query " +${CLICKHOUSE_LOCAL} -n -q " + SET allow_suspicious_low_cardinality_types=1; + SET allow_experimental_object_type =1; CREATE TABLE tab ( uint64 UInt64, @@ -22,17 +25,19 @@ ${CLICKHOUSE_LOCAL} --query " float32 Float32, float64 Float64, decimal_value Decimal(10, 2), - boolean_value UInt8, -- Use 0 for false, 1 for true + boolean_value UInt8, string_value String, fixed_string_value FixedString(10), date_value Date, date32_value Date32, datetime_value DateTime, datetime64_value DateTime64(3), - json_value String, -- Store JSON as a string + json_value JSON, uuid_value UUID, enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), low_cardinality LowCardinality(String), + low_cardinality_date LowCardinality(DateTime), + aggregate_function AggregateFunction(sum, Int32), array_value Array(Int32), map_value Map(String, Int32), tuple_value Tuple(Int32, String), @@ -53,7 +58,9 @@ echo "Create pseudo-random database name" ${CLICKHOUSE_LOCAL} --query "CREATE DATABASE database_123456789abcde;" echo "Create tab duplicate table" -${CLICKHOUSE_LOCAL} --query " +${CLICKHOUSE_LOCAL} -n -q " + SET allow_suspicious_low_cardinality_types=1; + SET allow_experimental_object_type =1; CREATE TABLE database_123456789abcde.tab ( uint64 UInt64, @@ -61,17 +68,19 @@ ${CLICKHOUSE_LOCAL} --query " float32 Float32, float64 Float64, decimal_value Decimal(10, 2), - boolean_value UInt8, -- Use 0 for false, 1 for true + boolean_value UInt8, string_value String, fixed_string_value FixedString(10), date_value Date, date32_value Date32, datetime_value DateTime, datetime64_value DateTime64(3), - json_value String, -- Store JSON as a string + json_value JSON, uuid_value UUID, enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), low_cardinality LowCardinality(String), + low_cardinality_date LowCardinality(DateTime), + aggregate_function AggregateFunction(sum, Int32), array_value Array(Int32), map_value Map(String, 
Int32), tuple_value Tuple(Int32, String), @@ -109,7 +118,7 @@ EOT # Now run the MySQL test script on the ClickHouse DB echo "Run MySQL test" -mysql --user="$USER" --password="$PASSWORD" --host="$HOST" --port="$PORT" < $TEMP_FILE +${MYSQL_CLIENT} --user="$USER" --password="$PASSWORD" --host="$HOST" --port="$PORT" < $TEMP_FILE # Clean up the temp file rm $TEMP_FILE From 54d526c75c0934b36dd170660ff7222de24a5a13 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 1 Jun 2023 14:22:57 +0000 Subject: [PATCH 157/722] Add cast type supprt to DateTimeTransformImpl --- .../functions/type-conversion-functions.md | 85 +++++++++++++++++++ src/Functions/DateTimeTransforms.h | 36 +++++++- src/Functions/FunctionsConversion.h | 18 +++- .../0_stateless/01601_accurate_cast.reference | 4 + .../0_stateless/01601_accurate_cast.sql | 10 +++ 5 files changed, 147 insertions(+), 6 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index c7c66cc771f..a6fc6cd4dfc 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -319,6 +319,49 @@ SELECT ## toDateOrNull ## toDateOrDefault +Converts an input value to [Date](/docs/en/sql-reference/data-types/date.md) data type. +If unsuccessful, returns the lower border value supported by [Date](/docs/en/sql-reference/data-types/date.md). The default value can be specified as a second argument. +Similar to [toDate](#todate). + +**Syntax** + +``` sql +toDateOrDefault(expr [, default_value]) +``` + +**Arguments** + +- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [Int](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md). +- `default_value` — The default value. [Date](/docs/en/sql-reference/data-types/date.md) + +If `expr` is a number and looks like a UNIX timestamp (is greater than 65535), it is interpreted as a DateTime, then truncated to Date in the current timezone. If `expr` is a number and it is smaller than 65536, it is interpreted as the number of days since 1970-01-01. + +**Returned value** + +- A calendar date. [Date](/docs/en/sql-reference/data-types/date.md) + +**Example** + +Query: + +``` sql +SELECT + toDateOrDefault('2021-01-01', '2023-01-01'::Date), + toDateOrDefault('xx2021-01-01', '2023-01-01'::Date); +``` + +Result: + +```response +┌─toDateOrDefault('2021-01-01', CAST('2023-01-01', 'Date'))─┬─toDateOrDefault('xx2021-01-01', CAST('2023-01-01', 'Date'))─┐ +│ 2021-01-01 │ 2023-01-01 │ +└───────────────────────────────────────────────────────────┴─────────────────────────────────────────────────────────────┘ +``` + +**See Also** +- [toDate](#todate) +- [toDate32OrDefault](#todate32ordefault) + ## toDateTime @@ -327,6 +370,48 @@ SELECT ## toDateTimeOrNull ## toDateTimeOrDefault +Converts an input value to [DateTime](/docs/en/sql-reference/data-types/datetime.md) data type. +If unsuccessful, returns the lower border value supported by [DateTime](/docs/en/sql-reference/data-types/datetime.md). The default value can be specified as a third argument. +Similar to [toDateTime](#todatetime). + +**Syntax** + +``` sql +toDateTimeOrDefault(expr, [, time_zone [, default_value]]) +``` + +**Arguments** + +- `expr` — The value. 
[String](/docs/en/sql-reference/data-types/string.md), [Int](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md). +- `time_zone` — Time zone. +- `default_value` — The default value. [DateTime](/docs/en/sql-reference/data-types/datetime.md) + +If `expr` is a number, it is interpreted as the number of seconds since the beginning of the Unix Epoch (as Unix timestamp). + +**Returned value** + +- A date time. [DateTime](/docs/en/sql-reference/data-types/datetime.md) + +**Example** + +Query: + +``` sql +SELECT + toDateTimeOrDefault('2021-01-01', 'UTC', '2023-01-01'::DateTime('UTC')), + toDateTimeOrDefault('xx2021-01-01', 'UTC', '2023-01-01'::DateTime('UTC')); +``` + +Result: + +```response +┌─toDateTimeOrDefault('2021-01-01', 'UTC', CAST('2023-01-01', 'DateTime(\'UTC\')'))─┬─toDateTimeOrDefault('xx2021-01-01', 'UTC', CAST('2023-01-01', 'DateTime(\'UTC\')'))─┐ +│ 2021-01-01 00:00:00 │ 2023-01-01 00:00:00 │ +└───────────────────────────────────────────────────────────────────────────────────┴─────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**See Also** +- [toDateTime](#todatetime) ## toDate32 diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index f179d9fbe60..81b1ec2e356 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -21,6 +21,7 @@ namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ILLEGAL_COLUMN; + extern const int CANNOT_CONVERT_TYPE; } /** Transformations. @@ -1425,8 +1426,10 @@ struct ToDateTimeComponentsImpl using FactorTransform = ZeroTransform; }; +struct DateTimeAccurateConvertStrategyAdditions {}; +struct DateTimeAccurateOrNullConvertStrategyAdditions {}; -template +template struct Transformer { template @@ -1438,6 +1441,33 @@ struct Transformer for (size_t i = 0; i < size; ++i) { + constexpr bool transformHasExtraCheck = requires(const Transform& t) + { + t.ExtraCheck(vec_from[i], time_zone); + }; + + if constexpr (transformHasExtraCheck) + { + // if constexpr (std::is_same_v + // || std::is_same_v) + { + bool checked = transform.ExtraCheck(vec_from[i], time_zone); + if (!checked) + { + if (std::is_same_v) + { + // vec_to[i] = 0; + // (*vec_null_map_to)[i] = true; + } + else + { + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value in column {} cannot be safely converted into type {}", + TypeName, TypeName); + } + } + } + } + if constexpr (is_extended_result) vec_to[i] = static_cast(transform.executeExtendedResult(vec_from[i], time_zone)); else @@ -1446,14 +1476,14 @@ struct Transformer } }; - template struct DateTimeTransformImpl { + template static ColumnPtr execute( const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/, const Transform & transform = {}) { - using Op = Transformer; + using Op = Transformer; const ColumnPtr source_col = arguments[0].column; if (const auto * sources = checkAndGetColumn(source_col.get())) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index d3676349318..d3ccbb82721 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -400,7 +400,11 @@ template struct ToDateTransform8Or16Signed { static constexpr auto name = "toDate"; - + static NO_SANITIZE_UNDEFINED bool ExtraCheck(const FromType & from, const DateLUTImpl &) + { + return from >= 0; + } + static 
NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { if (from < 0) @@ -2884,8 +2888,16 @@ private: if constexpr (IsDataTypeNumber && IsDataTypeDateOrDateTime) { - result_column = ConvertImpl::execute( - arguments, result_type, input_rows_count); + if (wrapper_cast_type == CastType::accurate) + { + result_column = ConvertImpl::template execute( + arguments, result_type, input_rows_count); + } + else + { + result_column = ConvertImpl::template execute( + arguments, result_type, input_rows_count); + } return true; } diff --git a/tests/queries/0_stateless/01601_accurate_cast.reference b/tests/queries/0_stateless/01601_accurate_cast.reference index c1e7feffbe6..b662319d263 100644 --- a/tests/queries/0_stateless/01601_accurate_cast.reference +++ b/tests/queries/0_stateless/01601_accurate_cast.reference @@ -6,3 +6,7 @@ 5 1 12 +2023-05-30 14:38:20 +1970-01-01 00:00:19 +2023-05-30 +1970-01-20 \ No newline at end of file diff --git a/tests/queries/0_stateless/01601_accurate_cast.sql b/tests/queries/0_stateless/01601_accurate_cast.sql index b5fd4fb04a4..1ab98e26d1a 100644 --- a/tests/queries/0_stateless/01601_accurate_cast.sql +++ b/tests/queries/0_stateless/01601_accurate_cast.sql @@ -22,3 +22,13 @@ SELECT accurateCast(-10, 'Decimal32(9)'); -- { serverError 407 } SELECT accurateCast('123', 'FixedString(2)'); -- { serverError 131 } SELECT accurateCast('12', 'FixedString(2)'); + +SELECT accurateCast(-1, 'DateTime'); -- { serverError 70 } +SELECT accurateCast('1xxx', 'DateTime'); -- { serverError 41 } +SELECT accurateCast('2023-05-30 14:38:20', 'DateTime'); +SELECT accurateCast(19, 'DateTime'); + +SELECT accurateCast(-1, 'Date'); -- { serverError 70 } +SELECT accurateCast('1xxx', 'Date'); -- { serverError 70 } +SELECT accurateCast('2023-05-30', 'Date'); +SELECT accurateCast(19, 'Date'); From 883350d5c221c22bef476a62857ae7e8f692dcbf Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 1 Jun 2023 14:51:03 +0000 Subject: [PATCH 158/722] Fix tests --- src/Formats/CapnProtoSchema.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Formats/CapnProtoSchema.cpp b/src/Formats/CapnProtoSchema.cpp index f9ab88d39ed..559047a6f8d 100644 --- a/src/Formats/CapnProtoSchema.cpp +++ b/src/Formats/CapnProtoSchema.cpp @@ -43,7 +43,7 @@ capnp::StructSchema CapnProtoSchemaParser::getMessageSchema(const FormatSchemaIn /// That's not good to determine the type of error by its description, but /// this is the only way to do it here, because kj doesn't specify the type of error. 
auto description = std::string_view(e.getDescription().cStr()); - if (description.find("No such file or directory") != String::npos || description.find("no such directory") != String::npos) + if (description.find("No such file or directory") != String::npos || description.find("no such directory") != String::npos || description.find("no such file") != String::npos) throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot open CapnProto schema, file {} doesn't exists", schema_info.absoluteSchemaPath()); if (description.find("Parse error") != String::npos) From f99a7366da356f145ee3ff124c4be80b4f5e903b Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 1 Jun 2023 16:56:18 +0200 Subject: [PATCH 159/722] Fix tests --- src/Storages/HDFS/StorageHDFS.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index 08114ed3cba..79b7b65adb4 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -233,6 +233,7 @@ ColumnsDescription StorageHDFS::getTableStructureFromData( if (ctx->getSettingsRef().hdfs_skip_empty_files && path_with_info.info && path_with_info.info->size == 0) return read_buffer_iterator(columns); + first = false; auto compression = chooseCompressionMethod(path_with_info.path, compression_method); auto impl = std::make_unique(my_uri_without_path, path_with_info.path, ctx->getGlobalContext()->getConfigRef(), ctx->getReadSettings()); const Int64 zstd_window_log_max = ctx->getSettingsRef().zstd_window_log_max; From 38abcd1c44bc580217081d9fb3d72a1dcd951fa3 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 1 Jun 2023 19:25:53 +0000 Subject: [PATCH 160/722] Add nullable support to DateTimeTransformImpl --- src/Functions/DateTimeTransforms.h | 44 ++++++++++++------ src/Functions/FunctionsConversion.h | 46 ++++++++++++++++++- .../01556_accurate_cast_or_null.reference | 8 ++++ .../01556_accurate_cast_or_null.sql | 10 ++++ 4 files changed, 93 insertions(+), 15 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 81b1ec2e356..0008b36071b 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -5,6 +5,8 @@ #include #include #include +#include +#include #include #include #include @@ -1433,7 +1435,8 @@ template - static void vector(const FromTypeVector & vec_from, ToTypeVector & vec_to, const DateLUTImpl & time_zone, const Transform & transform) + static void vector(const FromTypeVector & vec_from, ToTypeVector & vec_to, const DateLUTImpl & time_zone, const Transform & transform, + ColumnUInt8::Container * vec_null_map_to [[maybe_unused]]) { using ValueType = typename ToTypeVector::value_type; size_t size = vec_from.size(); @@ -1441,28 +1444,30 @@ struct Transformer for (size_t i = 0; i < size; ++i) { - constexpr bool transformHasExtraCheck = requires(const Transform& t) + constexpr bool transformHasIsConvertible = requires(const Transform& t) { - t.ExtraCheck(vec_from[i], time_zone); + t.IsConvertible(vec_from[i], time_zone); }; - if constexpr (transformHasExtraCheck) + if constexpr (transformHasIsConvertible) { - // if constexpr (std::is_same_v - // || std::is_same_v) + if constexpr (std::is_same_v + || std::is_same_v) { - bool checked = transform.ExtraCheck(vec_from[i], time_zone); + bool checked = transform.IsConvertible(vec_from[i], time_zone); if (!checked) { - if (std::is_same_v) + if (std::is_same_v) { - // vec_to[i] = 0; - // (*vec_null_map_to)[i] = true; 
+ vec_to[i] = 0; + if (vec_null_map_to) + (*vec_null_map_to)[i] = true; + continue; } else { throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value in column {} cannot be safely converted into type {}", - TypeName, TypeName); + TypeName, TypeName); } } } @@ -1488,6 +1493,14 @@ struct DateTimeTransformImpl const ColumnPtr source_col = arguments[0].column; if (const auto * sources = checkAndGetColumn(source_col.get())) { + ColumnUInt8::MutablePtr col_null_map_to; + ColumnUInt8::Container * vec_null_map_to [[maybe_unused]] = nullptr; + if constexpr (std::is_same_v) + { + col_null_map_to = ColumnUInt8::create(sources->getData().size(), false); + vec_null_map_to = &col_null_map_to->getData(); + } + auto mutable_result_col = result_type->createColumn(); auto * col_to = assert_cast(mutable_result_col.get()); @@ -1495,7 +1508,7 @@ struct DateTimeTransformImpl if (result_data_type.isDateTime() || result_data_type.isDateTime64()) { const auto & time_zone = dynamic_cast(*result_type).getTimeZone(); - Op::vector(sources->getData(), col_to->getData(), time_zone, transform); + Op::vector(sources->getData(), col_to->getData(), time_zone, transform, vec_null_map_to); } else { @@ -1504,7 +1517,12 @@ struct DateTimeTransformImpl time_zone_argument_position = 2; const DateLUTImpl & time_zone = extractTimeZoneFromFunctionArguments(arguments, time_zone_argument_position, 0); - Op::vector(sources->getData(), col_to->getData(), time_zone, transform); + Op::vector(sources->getData(), col_to->getData(), time_zone, transform, vec_null_map_to); + } + + if (vec_null_map_to) + { + return ColumnNullable::create(std::move(mutable_result_col), std::move(col_null_map_to)); } return mutable_result_col; diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index d3ccbb82721..4b25a59ecc6 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -370,6 +370,11 @@ struct ToDateTransform32Or64 { static constexpr auto name = "toDate"; + static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) + { + return from >= 0; + } + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { // since converting to Date, no need in values outside of default LUT range. 
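(Editorial note, between hunks.) The IsConvertible guards added above are what allow the accurate cast wrappers to reject out-of-range numeric inputs instead of silently wrapping them. Below is a hedged SQL sketch of the intended user-visible behaviour; it is not part of the patch, it only mirrors the 01601_accurate_cast and 01556_accurate_cast_or_null test changes appearing later in this series, and the shown results assume a UTC server timezone.

```sql
-- Sketch of the behaviour the IsConvertible checks are meant to enforce:
-- accurateCast throws for values that cannot be represented, while
-- accurateCastOrNull returns NULL for the same inputs.
SELECT accurateCast(19, 'DateTime');              -- 1970-01-01 00:00:19
SELECT accurateCast(-1, 'DateTime');              -- throws: cannot be safely converted
SELECT accurateCastOrNull(-1, 'DateTime');        -- NULL instead of an error
SELECT accurateCastOrNull('2023-05-30', 'Date');  -- 2023-05-30
```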
@@ -384,6 +389,11 @@ struct ToDateTransform32Or64Signed { static constexpr auto name = "toDate"; + static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) + { + return from >= 0; + } + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { // TODO: decide narrow or extended range based on FromType @@ -400,7 +410,8 @@ template struct ToDateTransform8Or16Signed { static constexpr auto name = "toDate"; - static NO_SANITIZE_UNDEFINED bool ExtraCheck(const FromType & from, const DateLUTImpl &) + + static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) { return from >= 0; } @@ -423,6 +434,11 @@ struct ToDate32Transform32Or64 { static constexpr auto name = "toDate32"; + static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) + { + return from >= 0; + } + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) @@ -436,6 +452,11 @@ struct ToDate32Transform32Or64Signed { static constexpr auto name = "toDate32"; + static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) + { + return from >= 0; + } + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { static const Int32 daynum_min_offset = -static_cast(DateLUT::instance().getDayNumOffsetEpoch()); @@ -452,6 +473,11 @@ struct ToDate32Transform8Or16Signed { static constexpr auto name = "toDate32"; + static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) + { + return from >= 0; + } + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { return from; @@ -507,6 +533,11 @@ struct ToDateTimeTransform64 { static constexpr auto name = "toDateTime"; + static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) + { + return from >= 0; + } + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { return static_cast(std::min(time_t(from), time_t(0xFFFFFFFF))); @@ -518,6 +549,11 @@ struct ToDateTimeTransformSigned { static constexpr auto name = "toDateTime"; + static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) + { + return from >= 0; + } + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { if (from < 0) @@ -531,6 +567,11 @@ struct ToDateTimeTransform64Signed { static constexpr auto name = "toDateTime"; + static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) + { + return from >= 0; + } + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { if (from < 0) @@ -2886,7 +2927,8 @@ private: return true; } - if constexpr (IsDataTypeNumber && IsDataTypeDateOrDateTime) + if constexpr (IsDataTypeNumber + && (std::is_same_v || std::is_same_v)) { if (wrapper_cast_type == CastType::accurate) { diff --git a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference index b329aede01a..8429d5d0e64 100644 --- a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference @@ -30,3 +30,11 @@ \N 127 \N +\N +\N +2023-05-30 14:38:20 +1970-01-01 00:00:19 +\N +\N +2023-05-30 +1970-01-20 \ No newline at end of file diff --git 
a/tests/queries/0_stateless/01556_accurate_cast_or_null.sql b/tests/queries/0_stateless/01556_accurate_cast_or_null.sql index b45bbe35662..a9038a1d230 100644 --- a/tests/queries/0_stateless/01556_accurate_cast_or_null.sql +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.sql @@ -35,3 +35,13 @@ SELECT accurateCastOrNull(nan, 'UInt64'); SELECT accurateCastOrNull(nan, 'UInt256'); SELECT accurateCastOrNull(number + 127, 'Int8') AS x FROM numbers (2) ORDER BY x; + +SELECT accurateCastOrNull(-1, 'DateTime'); +SELECT accurateCastOrNull('1xxx', 'DateTime'); +SELECT accurateCastOrNull('2023-05-30 14:38:20', 'DateTime'); +SELECT accurateCastOrNull(19, 'DateTime'); + +SELECT accurateCastOrNull(-1, 'Date'); +SELECT accurateCastOrNull('1xxx', 'Date'); +SELECT accurateCastOrNull('2023-05-30', 'Date'); +SELECT accurateCastOrNull(19, 'Date'); From 11ead24bf9d0426e15a94d87746430f16035da20 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 1 Jun 2023 19:38:46 +0000 Subject: [PATCH 161/722] Fix tests after nullable fixing --- .../queries/0_stateless/01556_accurate_cast_or_null.reference | 2 +- tests/queries/0_stateless/01601_accurate_cast.reference | 2 +- tests/queries/0_stateless/01601_accurate_cast.sql | 2 +- .../0_stateless/01746_convert_type_with_default.reference | 3 ++- tests/queries/0_stateless/01746_convert_type_with_default.sql | 3 ++- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference index 8429d5d0e64..cbdf72e9910 100644 --- a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference @@ -37,4 +37,4 @@ \N \N 2023-05-30 -1970-01-20 \ No newline at end of file +1970-01-20 diff --git a/tests/queries/0_stateless/01601_accurate_cast.reference b/tests/queries/0_stateless/01601_accurate_cast.reference index b662319d263..3c6dceb1f16 100644 --- a/tests/queries/0_stateless/01601_accurate_cast.reference +++ b/tests/queries/0_stateless/01601_accurate_cast.reference @@ -9,4 +9,4 @@ 2023-05-30 14:38:20 1970-01-01 00:00:19 2023-05-30 -1970-01-20 \ No newline at end of file +1970-01-20 diff --git a/tests/queries/0_stateless/01601_accurate_cast.sql b/tests/queries/0_stateless/01601_accurate_cast.sql index 1ab98e26d1a..7611b1d96b9 100644 --- a/tests/queries/0_stateless/01601_accurate_cast.sql +++ b/tests/queries/0_stateless/01601_accurate_cast.sql @@ -29,6 +29,6 @@ SELECT accurateCast('2023-05-30 14:38:20', 'DateTime'); SELECT accurateCast(19, 'DateTime'); SELECT accurateCast(-1, 'Date'); -- { serverError 70 } -SELECT accurateCast('1xxx', 'Date'); -- { serverError 70 } +SELECT accurateCast('1xxx', 'Date'); -- { serverError 38 } SELECT accurateCast('2023-05-30', 'Date'); SELECT accurateCast(19, 'Date'); diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.reference b/tests/queries/0_stateless/01746_convert_type_with_default.reference index 892a12434b9..85bf2064fdc 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.reference +++ b/tests/queries/0_stateless/01746_convert_type_with_default.reference @@ -40,7 +40,8 @@ 1970-01-20 1970-01-20 2023-05-30 -1970-01-01 +2023-05-30 +2023-05-30 14:38:20 2023-05-30 14:38:20 2023-05-30 14:38:20 2023-05-30 14:38:20 diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index 75e1510f330..1065eefa94d 100644 --- 
a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -54,12 +54,13 @@ select toDateOrDefault(cast(19 as Int256)); select toDateOrDefault(cast(19 as UInt256)); select toDateOrDefault(19507, '2000-01-01'::Date); -select toDateOrDefault(-1, '2000-01-01'::Date); +select toDateOrDefault(-1, '2023-05-30'::Date); select toDateTimeOrDefault('2023-05-30 14:38:20'); select toDateTimeOrDefault('2023-05-30 14:38:20', 'UTC'); select toDateTimeOrDefault('1xxx', 'UTC', '2023-05-30 14:38:20'::DateTime('UTC')); select toDateTimeOrDefault(1685457500, 'UTC'); +select toDateTimeOrDefault(-1, 'UTC', '2023-05-30 14:38:20'::DateTime('UTC')); select toDateTimeOrDefault(cast(19 as Int8), 'UTC'); select toDateTimeOrDefault(cast(19 as UInt8), 'UTC'); From a22e80eed56ca0e6ab35ba0ad8c53c0a629a4839 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 1 Jun 2023 19:52:48 +0000 Subject: [PATCH 162/722] Remove whitespaces --- src/Functions/DateTimeTransforms.h | 6 +++--- src/Functions/FunctionsConversion.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 0008b36071b..9f8f4df2465 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -1448,7 +1448,7 @@ struct Transformer { t.IsConvertible(vec_from[i], time_zone); }; - + if constexpr (transformHasIsConvertible) { if constexpr (std::is_same_v @@ -1472,7 +1472,7 @@ struct Transformer } } } - + if constexpr (is_extended_result) vec_to[i] = static_cast(transform.executeExtendedResult(vec_from[i], time_zone)); else @@ -1500,7 +1500,7 @@ struct DateTimeTransformImpl col_null_map_to = ColumnUInt8::create(sources->getData().size(), false); vec_null_map_to = &col_null_map_to->getData(); } - + auto mutable_result_col = result_type->createColumn(); auto * col_to = assert_cast(mutable_result_col.get()); diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 4b25a59ecc6..d77090afe71 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -415,7 +415,7 @@ struct ToDateTransform8Or16Signed { return from >= 0; } - + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { if (from < 0) From 7398b22fa5fc10f015be31035a65f3e6f5bd379f Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 17 May 2023 10:42:52 +0800 Subject: [PATCH 163/722] Add redis storage --- src/CMakeLists.txt | 1 + src/Common/ErrorCodes.cpp | 1 + src/Storages/NamedCollectionsHelpers.h | 4 + src/Storages/StorageRedis.cpp | 231 ++++++++++++++++++ src/Storages/StorageRedis.h | 91 +++++++ src/TableFunctions/TableFunctionRedis.cpp | 88 +++++++ src/TableFunctions/TableFunctionRedis.h | 29 +++ src/TableFunctions/registerTableFunctions.cpp | 1 + src/TableFunctions/registerTableFunctions.h | 1 + 9 files changed, 447 insertions(+) create mode 100644 src/Storages/StorageRedis.cpp create mode 100644 src/Storages/StorageRedis.h create mode 100644 src/TableFunctions/TableFunctionRedis.cpp create mode 100644 src/TableFunctions/TableFunctionRedis.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 622e18d4ff7..6608d86b5ed 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -412,6 +412,7 @@ dbms_target_link_libraries ( boost::system clickhouse_common_io Poco::MongoDB + Poco::Redis ) if (TARGET ch::mysqlxx) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 83a7314ac7a..505cf0aac8f 
100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -580,6 +580,7 @@ M(695, ASYNC_LOAD_FAILED) \ M(696, ASYNC_LOAD_CANCELED) \ M(697, CANNOT_RESTORE_TO_NONENCRYPTED_DISK) \ + M(698, INVALID_REDIS_STORAGE_TYPE) \ \ M(999, KEEPER_EXCEPTION) \ M(1000, POCO_EXCEPTION) \ diff --git a/src/Storages/NamedCollectionsHelpers.h b/src/Storages/NamedCollectionsHelpers.h index 1473a3fbe48..d0d6a526f9b 100644 --- a/src/Storages/NamedCollectionsHelpers.h +++ b/src/Storages/NamedCollectionsHelpers.h @@ -36,6 +36,10 @@ struct MongoDBEqualKeysSet static constexpr std::array, 4> equal_keys{ std::pair{"username", "user"}, std::pair{"database", "db"}, std::pair{"hostname", "host"}, std::pair{"table", "collection"}}; }; +struct RedisEqualKeysSet +{ + static constexpr std::array, 4> equal_keys{std::pair{"hostname", "host"}}; +}; template struct NamedCollectionValidateKey { diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp new file mode 100644 index 00000000000..1daeed255ea --- /dev/null +++ b/src/Storages/StorageRedis.cpp @@ -0,0 +1,231 @@ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int INVALID_REDIS_STORAGE_TYPE; + extern const int NOT_IMPLEMENTED; +} + +StorageRedis::StorageRedis( + const StorageID & table_id_, + const Configuration & configuration_, + const ColumnsDescription & columns_, + const ConstraintsDescription & constraints_, + const String & comment_) : ta +{ + +} + + +Pipe StorageRedis::read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & /*query_info*/, + ContextPtr /*context*/, + QueryProcessingStage::Enum /*processed_stage*/, + size_t max_block_size, + size_t /*num_streams*/) +{ + connectIfNotConnected(); + + storage_snapshot->check(column_names); + + Block sample_block; + for (const String & column_name : column_names) + { + auto column_data = storage_snapshot->metadata->getColumns().getPhysical(column_name); + sample_block.insert({column_data.type, column_data.name}); + } + + return Pipe(std::make_shared( + connection, createCursor(database_name, collection_name, sample_block), sample_block, max_block_size)); +} + + +SinkToStoragePtr StorageRedis::write( + const ASTPtr & /*query*/, + const StorageMetadataPtr & /*metadata_snapshot*/, + ContextPtr /*context*/) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method write is unsupported for StorageRedis"); +} + +StorageRedis::Configuration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr context) +{ + Configuration configuration; + + if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) + { + validateNamedCollection( + *named_collection, + ValidateKeysMultiset{"host", "port", "hostname", "password", "db_id", "storage_type"}, + {}); + + configuration.host = named_collection->getAny({"host", "hostname"}); + configuration.port = static_cast(named_collection->get("port")); + configuration.password = named_collection->get("password"); + configuration.db_id = named_collection->getAny({"db_id"}); + configuration.storage_type = toStorageType(named_collection->getOrDefault("storage_type", "")); + } + else + { + for (auto & engine_arg : engine_args) + engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); + + /// 6379 is the default Redis port. 
+ auto parsed_host_port = parseAddress(checkAndGetLiteralArgument(engine_args[0], "host:port"), 6379); + + configuration.host = parsed_host_port.first; + configuration.port = parsed_host_port.second; + configuration.db_id = checkAndGetLiteralArgument(engine_args[1], "db_id"); + configuration.password = checkAndGetLiteralArgument(engine_args[2], "password"); + configuration.storage_type = toStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); + } + + context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); + + return configuration; +} + +void StorageRedis::connectIfNotConnected() +{ + +} + + +class StorageRedisSink : public SinkToStorage +{ +public: + explicit StorageRedisSink( + const std::string & collection_name_, + const std::string & db_name_, + const StorageMetadataPtr & metadata_snapshot_, + std::shared_ptr connection_) + : SinkToStorage(metadata_snapshot_->getSampleBlock()) + , collection_name(collection_name_) + , db_name(db_name_) + , metadata_snapshot{metadata_snapshot_} + , connection(connection_) + { + } + + String getName() const override { return "StorageRedisSink"; } + + void consume(Chunk chunk) override + { + Poco::MongoDB::Database db(db_name); + Poco::MongoDB::Document::Ptr index = new Poco::MongoDB::Document(); + + auto block = getHeader().cloneWithColumns(chunk.detachColumns()); + + size_t num_rows = block.rows(); + size_t num_cols = block.columns(); + + const auto columns = block.getColumns(); + const auto data_types = block.getDataTypes(); + const auto data_names = block.getNames(); + + std::vector row(num_cols); + for (const auto i : collections::range(0, num_rows)) + { + for (const auto j : collections::range(0, num_cols)) + { + WriteBufferFromOwnString ostr; + data_types[j]->getDefaultSerialization()->serializeText(*columns[j], i, ostr, FormatSettings{}); + row[j] = ostr.str(); + index->add(data_names[j], row[j]); + } + } + Poco::SharedPtr insert_request = db.createInsertRequest(collection_name); + insert_request->documents().push_back(index); + connection->sendRequest(*insert_request); + } + +private: + String collection_name; + String db_name; + StorageMetadataPtr metadata_snapshot; + std::shared_ptr connection; +}; + + +using StorageType = StorageRedis::StorageType; + +String StorageRedis::toString(StorageType storage_type) +{ + static const std::unordered_map type_to_str_map + = {{StorageType::SIMPLE, "simple"}, + {StorageType::LIST, "list"}, + {StorageType::SET, "set"}, + {StorageType::HASH, "hash"}, + {StorageType::ZSET, "zset"}}; + + auto iter = type_to_str_map.find(storage_type); + return iter->second; +} + +StorageType StorageRedis::toStorageType(const String & storage_type) +{ + static const std::unordered_map str_to_type_map + = {{"simple", StorageType::SIMPLE}, + {"list", StorageType::LIST}, + {"set", StorageType::SET}, + {"hash", StorageType::HASH}, + {"zset", StorageType::ZSET}}; + + auto iter = str_to_type_map.find(storage_type); + if (iter == str_to_type_map.end()) + { + throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "invalid redis storage type: {}", storage_type); + } + return iter->second; +} + +void registerStorageRedis(StorageFactory & factory) +{ + factory.registerStorage( + "MongoDB", + [](const StorageFactory::Arguments & args) + { + auto configuration = StorageRedis::getConfiguration(args.engine_args, args.getLocalContext()); + + return std::make_shared( + args.table_id, + configuration.host, + configuration.port, + configuration.database, + configuration.table, + 
configuration.username, + configuration.password, + configuration.options, + args.columns, + args.constraints, + args.comment); + }, + { + .source_access_type = AccessType::MONGO, + }); +} + +} diff --git a/src/Storages/StorageRedis.h b/src/Storages/StorageRedis.h new file mode 100644 index 00000000000..8ba4ec831bb --- /dev/null +++ b/src/Storages/StorageRedis.h @@ -0,0 +1,91 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ +/* Implements storage in the Redis. + * Use ENGINE = Redis(host:port, db_id, password, storage_type); + * Read only. + */ +class StorageRedis : public IStorage +{ +public: + enum class StorageType + { + SIMPLE, + LIST, + SET, + HASH, + ZSET + }; + + static String toString(StorageType storage_type); + static StorageType toStorageType(const String & storage_type); + + struct Configuration + { + String host; + uint32_t port; + String db_id; + String password; + StorageType storage_type; + }; + + using RedisArray = Poco::Redis::Array; + using RedisCommand = Poco::Redis::Command; + + using ClientPtr = std::unique_ptr; + using Pool = BorrowedObjectPool; + using PoolPtr = std::shared_ptr; + + struct Connection + { + Connection(PoolPtr pool_, ClientPtr client_); + ~Connection(); + + PoolPtr pool; + ClientPtr client; + }; + + using ConnectionPtr = std::unique_ptr; + + static Configuration getConfiguration(ASTs engine_args, ContextPtr context); + + StorageRedis( + const StorageID & table_id_, + const Configuration & configuration_, + const ColumnsDescription & columns_, + const ConstraintsDescription & constraints_, + const String & comment_); + + std::string getName() const override { return "Redis"; } + + Pipe read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + size_t num_streams) override; + + SinkToStoragePtr write( + const ASTPtr & query, + const StorageMetadataPtr & /*metadata_snapshot*/, + ContextPtr context) override; + +private: + Configuration configuration; + StorageID table_id; + ColumnsDescription columns; + ConstraintsDescription constraints; + String comment; + + std::shared_ptr connection; + void connectIfNotConnected(); +}; + +} diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp new file mode 100644 index 00000000000..9432f766aa8 --- /dev/null +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -0,0 +1,88 @@ +#include + +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + + +StoragePtr TableFunctionRedis::executeImpl( + const ASTPtr & /*ast_function*/, ContextPtr context, const String & table_name, ColumnsDescription /*cached_columns*/) const +{ + auto columns = getActualTableStructure(context); + auto storage = std::make_shared( + StorageID(configuration->db_id, table_name), configuration, columns, ConstraintsDescription(), String{});// TODO + storage->startup(); + return storage; +} + +ColumnsDescription TableFunctionRedis::getActualTableStructure(ContextPtr context) const +{ + /// generate table structure by storage type. 
+ String structure; + switch (configuration->storage_type) + { + case StorageRedis::StorageType::SIMPLE: + structure = "key String, value String"; + break; + case StorageRedis::StorageType::HASH: + structure = "key String, field, String, value String"; + break; + case StorageRedis::StorageType::LIST: + structure = "key String, value Array(String)"; + break; + case StorageRedis::StorageType::SET: + structure = "key String, value Array(String)"; + break; + case StorageRedis::StorageType::ZSET: + structure = "key String, value Array(String)"; + break; + } + return parseColumnsListFromString(structure, context); +} + +void TableFunctionRedis::parseArguments(const ASTPtr & ast_function, ContextPtr context) +{ + const auto & func_args = ast_function->as(); + if (!func_args.arguments) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table function 'redis' must have arguments."); + + ASTs & args = func_args.arguments->children; + + if (args.size() != 4) + { + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Table function 'Redis' requires from 4 parameters: " + "redis('host:port', db_id, 'password', 'storage_type')"); + } + configuration = StorageRedis::getConfiguration(args, context); +} + + +void registerTableFunctionRedis(TableFunctionFactory & factory) +{ + factory.registerFunction(); +} + +} diff --git a/src/TableFunctions/TableFunctionRedis.h b/src/TableFunctions/TableFunctionRedis.h new file mode 100644 index 00000000000..d333cd5a42f --- /dev/null +++ b/src/TableFunctions/TableFunctionRedis.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +class TableFunctionRedis : public ITableFunction +{ +public: + static constexpr auto name = "redis"; + String getName() const override { return name; } + +private: + StoragePtr executeImpl( + const ASTPtr & ast_function, ContextPtr context, + const String & table_name, ColumnsDescription cached_columns) const override; + + const char * getStorageTypeName() const override { return "Redis"; } + + ColumnsDescription getActualTableStructure(ContextPtr context) const override; + void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; + + std::optional configuration; +}; + +} diff --git a/src/TableFunctions/registerTableFunctions.cpp b/src/TableFunctions/registerTableFunctions.cpp index 4f3411df4c5..bfb83818f22 100644 --- a/src/TableFunctions/registerTableFunctions.cpp +++ b/src/TableFunctions/registerTableFunctions.cpp @@ -21,6 +21,7 @@ void registerTableFunctions() registerTableFunctionInput(factory); registerTableFunctionGenerate(factory); registerTableFunctionMongoDB(factory); + registerTableFunctionRedis(factory); registerTableFunctionMeiliSearch(factory); diff --git a/src/TableFunctions/registerTableFunctions.h b/src/TableFunctions/registerTableFunctions.h index c51522a5e99..cf0dee7f792 100644 --- a/src/TableFunctions/registerTableFunctions.h +++ b/src/TableFunctions/registerTableFunctions.h @@ -18,6 +18,7 @@ void registerTableFunctionValues(TableFunctionFactory & factory); void registerTableFunctionInput(TableFunctionFactory & factory); void registerTableFunctionGenerate(TableFunctionFactory & factory); void registerTableFunctionMongoDB(TableFunctionFactory & factory); +void registerTableFunctionRedis(TableFunctionFactory & factory); void registerTableFunctionMeiliSearch(TableFunctionFactory & factory); From e91867373cca6d91455b51af05575c48e6d1af9e Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Sat, 20 May 2023 11:48:57 +0800 Subject: [PATCH 164/722] Add table function Redis --- 
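(Editorial note, ahead of the diffstat.) The Redis storage and the redis table function introduced in this part of the series are meant to be used roughly as sketched below. This is a hypothetical usage sketch, not part of the patch: the address, password and table name are placeholders, and the argument order (address, database index, password, storage type) follows StorageRedis::getConfiguration and TableFunctionRedis::parseArguments as they stand around this commit.

```sql
-- Hypothetical usage of the Redis integration added in these patches.
-- Table function form; for the 'simple' storage type the implied structure
-- is (key String, value String).
SELECT * FROM redis('127.0.0.1:6379', 0, 'some_password', 'simple');

-- Storage engine form, per the header comment in StorageRedis.h
-- (the engine is read only at this point in the series).
CREATE TABLE redis_simple (key String, value String)
    ENGINE = Redis('127.0.0.1:6379', 0, 'some_password', 'simple');

SELECT key, value FROM redis_simple;
```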
src/Access/Common/AccessType.h | 1 + src/Dictionaries/RedisDictionarySource.cpp | 80 +---------- src/Dictionaries/RedisDictionarySource.h | 44 +----- src/Dictionaries/RedisSource.cpp | 7 +- src/Dictionaries/RedisSource.h | 19 +-- src/Storages/RedisCommon.cpp | 98 +++++++++++++ src/Storages/RedisCommon.h | 61 ++++++++ src/Storages/StorageRedis.cpp | 156 +++++---------------- src/Storages/StorageRedis.h | 55 ++------ src/Storages/registerStorages.cpp | 2 + src/TableFunctions/TableFunctionRedis.cpp | 28 ++-- src/TableFunctions/TableFunctionRedis.h | 2 +- 12 files changed, 231 insertions(+), 322 deletions(-) create mode 100644 src/Storages/RedisCommon.cpp create mode 100644 src/Storages/RedisCommon.h diff --git a/src/Access/Common/AccessType.h b/src/Access/Common/AccessType.h index 6394c0279a7..78c341cdcb5 100644 --- a/src/Access/Common/AccessType.h +++ b/src/Access/Common/AccessType.h @@ -201,6 +201,7 @@ enum class AccessType M(URL, "", GLOBAL, SOURCES) \ M(REMOTE, "", GLOBAL, SOURCES) \ M(MONGO, "", GLOBAL, SOURCES) \ + M(Redis, "", GLOBAL, SOURCES) \ M(MEILISEARCH, "", GLOBAL, SOURCES) \ M(MYSQL, "", GLOBAL, SOURCES) \ M(POSTGRES, "", GLOBAL, SOURCES) \ diff --git a/src/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp index 6e4c5d1d5d9..db27801a38e 100644 --- a/src/Dictionaries/RedisDictionarySource.cpp +++ b/src/Dictionaries/RedisDictionarySource.cpp @@ -3,10 +3,6 @@ #include "DictionaryStructure.h" #include "registerDictionaries.h" -#include -#include -#include -#include #include #include #include @@ -52,7 +48,7 @@ namespace DB auto port = config.getUInt(redis_config_prefix + ".port"); global_context->getRemoteHostFilter().checkHostAndPort(host, toString(port)); - RedisDictionarySource::Configuration configuration = + RedisConfiguration configuration = { .host = host, .port = static_cast(port), @@ -68,26 +64,13 @@ namespace DB factory.registerSource("redis", create_table_source); } - RedisDictionarySource::Connection::Connection(PoolPtr pool_, ClientPtr client_) - : pool(std::move(pool_)), client(std::move(client_)) - { - } - - RedisDictionarySource::Connection::~Connection() - { - pool->returnObject(std::move(client)); - } - - static constexpr size_t REDIS_MAX_BLOCK_SIZE = DEFAULT_BLOCK_SIZE; - static constexpr size_t REDIS_LOCK_ACQUIRE_TIMEOUT_MS = 5000; - RedisDictionarySource::RedisDictionarySource( const DictionaryStructure & dict_struct_, - const Configuration & configuration_, + const RedisConfiguration & configuration_, const Block & sample_block_) : dict_struct{dict_struct_} , configuration(configuration_) - , pool(std::make_shared(configuration.pool_size)) + , pool(std::make_shared(configuration.pool_size)) , sample_block{sample_block_} { if (dict_struct.attributes.size() != 1) @@ -139,7 +122,7 @@ namespace DB QueryPipeline RedisDictionarySource::loadAll() { - auto connection = getConnection(); + auto connection = getRedisConnection(pool, configuration); RedisCommand command_for_keys("KEYS"); command_for_keys << "*"; @@ -195,7 +178,7 @@ namespace DB QueryPipeline RedisDictionarySource::loadIds(const std::vector & ids) { - auto connection = getConnection(); + auto connection = getRedisConnection(pool, configuration); if (configuration.storage_type == RedisStorageType::HASH_MAP) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Cannot use loadIds with 'hash_map' storage type"); @@ -215,7 +198,7 @@ namespace DB QueryPipeline RedisDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { - auto connection = 
getConnection(); + auto connection = getRedisConnection(pool, configuration); if (key_columns.size() != dict_struct.key->size()) throw Exception(ErrorCodes::LOGICAL_ERROR, "The size of key_columns does not equal to the size of dictionary key"); @@ -248,55 +231,4 @@ namespace DB return "Redis: " + configuration.host + ':' + DB::toString(configuration.port); } - RedisDictionarySource::ConnectionPtr RedisDictionarySource::getConnection() const - { - ClientPtr client; - bool ok = pool->tryBorrowObject(client, - [] { return std::make_unique(); }, - REDIS_LOCK_ACQUIRE_TIMEOUT_MS); - - if (!ok) - throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, - "Could not get connection from pool, timeout exceeded {} seconds", - REDIS_LOCK_ACQUIRE_TIMEOUT_MS); - - if (!client->isConnected()) - { - try - { - client->connect(configuration.host, configuration.port); - - if (!configuration.password.empty()) - { - RedisCommand command("AUTH"); - command << configuration.password; - String reply = client->execute(command); - if (reply != "OK") - throw Exception(ErrorCodes::INTERNAL_REDIS_ERROR, - "Authentication failed with reason {}", reply); - } - - if (configuration.db_index != 0) - { - RedisCommand command("SELECT"); - command << std::to_string(configuration.db_index); - String reply = client->execute(command); - if (reply != "OK") - throw Exception(ErrorCodes::INTERNAL_REDIS_ERROR, - "Selecting database with index {} failed with reason {}", - configuration.db_index, reply); - } - } - catch (...) - { - if (client->isConnected()) - client->disconnect(); - - pool->returnObject(std::move(client)); - throw; - } - } - - return std::make_unique(pool, std::move(client)); - } } diff --git a/src/Dictionaries/RedisDictionarySource.h b/src/Dictionaries/RedisDictionarySource.h index 8fb6f93193b..c7786284dc4 100644 --- a/src/Dictionaries/RedisDictionarySource.h +++ b/src/Dictionaries/RedisDictionarySource.h @@ -5,6 +5,7 @@ #include "DictionaryStructure.h" #include "IDictionarySource.h" +#include namespace Poco { @@ -23,47 +24,12 @@ namespace DB extern const int NOT_IMPLEMENTED; } - enum class RedisStorageType - { - SIMPLE, - HASH_MAP, - UNKNOWN - }; - class RedisDictionarySource final : public IDictionarySource { public: - using RedisArray = Poco::Redis::Array; - using RedisCommand = Poco::Redis::Command; - - using ClientPtr = std::unique_ptr; - using Pool = BorrowedObjectPool; - using PoolPtr = std::shared_ptr; - - struct Configuration - { - const std::string host; - const UInt16 port; - const UInt32 db_index; - const std::string password; - const RedisStorageType storage_type; - const size_t pool_size; - }; - - struct Connection - { - Connection(PoolPtr pool_, ClientPtr client_); - ~Connection(); - - PoolPtr pool; - ClientPtr client; - }; - - using ConnectionPtr = std::unique_ptr; - RedisDictionarySource( const DictionaryStructure & dict_struct_, - const Configuration & configuration_, + const RedisConfiguration & configuration_, const Block & sample_block_); RedisDictionarySource(const RedisDictionarySource & other); @@ -92,12 +58,10 @@ namespace DB std::string toString() const override; private: - ConnectionPtr getConnection() const; - const DictionaryStructure dict_struct; - const Configuration configuration; + const RedisConfiguration configuration; - PoolPtr pool; + RedisPoolPtr pool; Block sample_block; }; } diff --git a/src/Dictionaries/RedisSource.cpp b/src/Dictionaries/RedisSource.cpp index 4622f65a1a9..9abaf7f0ac5 100644 --- a/src/Dictionaries/RedisSource.cpp +++ b/src/Dictionaries/RedisSource.cpp @@ -3,11 +3,6 @@ 
#include #include -#include -#include -#include -#include - #include #include #include @@ -30,7 +25,7 @@ namespace DB RedisSource::RedisSource( - ConnectionPtr connection_, + RedisConnectionPtr connection_, const RedisArray & keys_, const RedisStorageType & storage_type_, const DB::Block & sample_block, diff --git a/src/Dictionaries/RedisSource.h b/src/Dictionaries/RedisSource.h index 0f8cc317003..4537e496061 100644 --- a/src/Dictionaries/RedisSource.h +++ b/src/Dictionaries/RedisSource.h @@ -6,29 +6,18 @@ #include #include #include +#include #include "RedisDictionarySource.h" -namespace Poco -{ - namespace Redis - { - class Client; - } -} - namespace DB { class RedisSource final : public ISource { public: - using RedisArray = Poco::Redis::Array; - using RedisBulkString = Poco::Redis::BulkString; - using ConnectionPtr = RedisDictionarySource::ConnectionPtr; - RedisSource( - ConnectionPtr connection_, - const Poco::Redis::Array & keys_, + RedisConnectionPtr connection_, + const RedisArray & keys_, const RedisStorageType & storage_type_, const Block & sample_block, size_t max_block_size); @@ -40,7 +29,7 @@ namespace DB private: Chunk generate() override; - ConnectionPtr connection; + RedisConnectionPtr connection; Poco::Redis::Array keys; RedisStorageType storage_type; const size_t max_block_size; diff --git a/src/Storages/RedisCommon.cpp b/src/Storages/RedisCommon.cpp new file mode 100644 index 00000000000..397189e7485 --- /dev/null +++ b/src/Storages/RedisCommon.cpp @@ -0,0 +1,98 @@ +#include "RedisCommon.h" +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int INVALID_REDIS_STORAGE_TYPE; + extern const int INTERNAL_REDIS_ERROR; + extern const int TIMEOUT_EXCEEDED; +} + +RedisConnection::RedisConnection(RedisPoolPtr pool_, RedisClientPtr client_) + : pool(std::move(pool_)), client(std::move(client_)) +{ +} + +RedisConnection::~RedisConnection() +{ + pool->returnObject(std::move(client)); +} + +String toString(RedisStorageType storage_type) +{ + static const std::unordered_map type_to_str_map + = {{RedisStorageType::SIMPLE, "simple"}, {RedisStorageType::HASH_MAP, "hash_map"}}; + + auto iter = type_to_str_map.find(storage_type); + return iter->second; +} + +RedisStorageType toRedisStorageType(const String & storage_type) +{ + static const std::unordered_map str_to_type_map + = {{"simple", RedisStorageType::SIMPLE}, {"hash", RedisStorageType::HASH_MAP}}; + + auto iter = str_to_type_map.find(storage_type); + if (iter == str_to_type_map.end()) + { + throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "invalid redis storage type: {}", storage_type); + } + return iter->second; +} + +RedisConnectionPtr getRedisConnection(RedisPoolPtr pool, const RedisConfiguration & configuration) +{ + RedisClientPtr client; + bool ok = pool->tryBorrowObject(client, + [] { return std::make_unique(); }, + REDIS_LOCK_ACQUIRE_TIMEOUT_MS); + + if (!ok) + throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, + "Could not get connection from pool, timeout exceeded {} seconds", + REDIS_LOCK_ACQUIRE_TIMEOUT_MS); + + if (!client->isConnected()) + { + try + { + client->connect(configuration.host, configuration.port); + + if (!configuration.password.empty()) + { + RedisCommand command("AUTH"); + command << configuration.password; + String reply = client->execute(command); + if (reply != "OK") + throw Exception(ErrorCodes::INTERNAL_REDIS_ERROR, + "Authentication failed with reason {}", reply); + } + + if (configuration.db_index != 0) + { + RedisCommand command("SELECT"); + command << 
std::to_string(configuration.db_index); + String reply = client->execute(command); + if (reply != "OK") + throw Exception(ErrorCodes::INTERNAL_REDIS_ERROR, + "Selecting database with index {} failed with reason {}", + configuration.db_index, reply); + } + } + catch (...) + { + if (client->isConnected()) + client->disconnect(); + + pool->returnObject(std::move(client)); + throw; + } + } + + return std::make_unique(pool, std::move(client)); +} + +} diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h new file mode 100644 index 00000000000..6069d3d9a0c --- /dev/null +++ b/src/Storages/RedisCommon.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +static constexpr size_t REDIS_MAX_BLOCK_SIZE = DEFAULT_BLOCK_SIZE; +static constexpr size_t REDIS_LOCK_ACQUIRE_TIMEOUT_MS = 5000; + +enum class RedisStorageType +{ + SIMPLE, + HASH_MAP, + UNKNOWN +}; + +String toString(RedisStorageType storage_type); +RedisStorageType toRedisStorageType(const String & storage_type); + +struct RedisConfiguration +{ + String host; + uint32_t port; + uint32_t db_index; + String password; + RedisStorageType storage_type; + uint32_t pool_size; +}; + +using RedisArray = Poco::Redis::Array; +using RedisCommand = Poco::Redis::Command; +using RedisBulkString = Poco::Redis::BulkString; + +using RedisClientPtr = std::unique_ptr; +using RedisPool = BorrowedObjectPool; +using RedisPoolPtr = std::shared_ptr; + +struct RedisConnection +{ + RedisConnection(RedisPoolPtr pool_, RedisClientPtr client_); + ~RedisConnection(); + + RedisPoolPtr pool; + RedisClientPtr client; +}; + +using RedisConnectionPtr = std::unique_ptr; + +RedisConnectionPtr getRedisConnection(RedisPoolPtr pool, const RedisConfiguration & configuration) ; + +} diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 1daeed255ea..055617b6a96 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -1,6 +1,6 @@ -#include -#include #include +#include +#include #include #include @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -23,21 +22,25 @@ namespace DB namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int INVALID_REDIS_STORAGE_TYPE; extern const int NOT_IMPLEMENTED; } StorageRedis::StorageRedis( const StorageID & table_id_, - const Configuration & configuration_, + const RedisConfiguration & configuration_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, - const String & comment_) : ta + const String & comment_) + : IStorage(table_id_) + , table_id(table_id_) + , configuration(configuration_) + , columns(columns_) + , constraints(constraints_) + , comment(comment_) { - + pool = std::make_shared(configuration.pool_size); } - Pipe StorageRedis::read( const Names & column_names, const StorageSnapshotPtr & storage_snapshot, @@ -47,7 +50,7 @@ Pipe StorageRedis::read( size_t max_block_size, size_t /*num_streams*/) { - connectIfNotConnected(); + auto connection = getRedisConnection(pool, configuration); storage_snapshot->check(column_names); @@ -58,8 +61,14 @@ Pipe StorageRedis::read( sample_block.insert({column_data.type, column_data.name}); } - return Pipe(std::make_shared( - connection, createCursor(database_name, collection_name, sample_block), sample_block, max_block_size)); + RedisArray keys; + RedisCommand command_for_keys("KEYS"); + /// generate keys by table name prefix + 
command_for_keys << table_id.getTableName() + ":" + toString(configuration.storage_type) + ":*"; + + /// Get only keys for specified storage type. + auto all_keys = connection->client->execute(command_for_keys); + return Pipe(std::make_shared(std::move(connection), all_keys, configuration.storage_type, sample_block, max_block_size)); } @@ -71,22 +80,23 @@ SinkToStoragePtr StorageRedis::write( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method write is unsupported for StorageRedis"); } -StorageRedis::Configuration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr context) +RedisConfiguration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr context) { - Configuration configuration; + RedisConfiguration configuration; if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) { validateNamedCollection( *named_collection, - ValidateKeysMultiset{"host", "port", "hostname", "password", "db_id", "storage_type"}, + ValidateKeysMultiset{"host", "port", "hostname", "password", "db_index", "storage_type"}, {}); configuration.host = named_collection->getAny({"host", "hostname"}); - configuration.port = static_cast(named_collection->get("port")); + configuration.port = static_cast(named_collection->get("port")); configuration.password = named_collection->get("password"); - configuration.db_id = named_collection->getAny({"db_id"}); - configuration.storage_type = toStorageType(named_collection->getOrDefault("storage_type", "")); + configuration.db_index = static_cast(named_collection->get({"db_index"})); + configuration.storage_type = toRedisStorageType(named_collection->getOrDefault("storage_type", "")); + configuration.pool_size = 16; /// TODO } else { @@ -98,133 +108,33 @@ StorageRedis::Configuration StorageRedis::getConfiguration(ASTs engine_args, Con configuration.host = parsed_host_port.first; configuration.port = parsed_host_port.second; - configuration.db_id = checkAndGetLiteralArgument(engine_args[1], "db_id"); + configuration.db_index = static_cast(checkAndGetLiteralArgument(engine_args[1], "db_index")); configuration.password = checkAndGetLiteralArgument(engine_args[2], "password"); - configuration.storage_type = toStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); + configuration.storage_type = toRedisStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); + configuration.pool_size = 16; /// TODO } context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); - return configuration; } -void StorageRedis::connectIfNotConnected() -{ - -} - - -class StorageRedisSink : public SinkToStorage -{ -public: - explicit StorageRedisSink( - const std::string & collection_name_, - const std::string & db_name_, - const StorageMetadataPtr & metadata_snapshot_, - std::shared_ptr connection_) - : SinkToStorage(metadata_snapshot_->getSampleBlock()) - , collection_name(collection_name_) - , db_name(db_name_) - , metadata_snapshot{metadata_snapshot_} - , connection(connection_) - { - } - - String getName() const override { return "StorageRedisSink"; } - - void consume(Chunk chunk) override - { - Poco::MongoDB::Database db(db_name); - Poco::MongoDB::Document::Ptr index = new Poco::MongoDB::Document(); - - auto block = getHeader().cloneWithColumns(chunk.detachColumns()); - - size_t num_rows = block.rows(); - size_t num_cols = block.columns(); - - const auto columns = block.getColumns(); - const auto data_types = block.getDataTypes(); - const auto data_names = block.getNames(); - - 
std::vector row(num_cols); - for (const auto i : collections::range(0, num_rows)) - { - for (const auto j : collections::range(0, num_cols)) - { - WriteBufferFromOwnString ostr; - data_types[j]->getDefaultSerialization()->serializeText(*columns[j], i, ostr, FormatSettings{}); - row[j] = ostr.str(); - index->add(data_names[j], row[j]); - } - } - Poco::SharedPtr insert_request = db.createInsertRequest(collection_name); - insert_request->documents().push_back(index); - connection->sendRequest(*insert_request); - } - -private: - String collection_name; - String db_name; - StorageMetadataPtr metadata_snapshot; - std::shared_ptr connection; -}; - - -using StorageType = StorageRedis::StorageType; - -String StorageRedis::toString(StorageType storage_type) -{ - static const std::unordered_map type_to_str_map - = {{StorageType::SIMPLE, "simple"}, - {StorageType::LIST, "list"}, - {StorageType::SET, "set"}, - {StorageType::HASH, "hash"}, - {StorageType::ZSET, "zset"}}; - - auto iter = type_to_str_map.find(storage_type); - return iter->second; -} - -StorageType StorageRedis::toStorageType(const String & storage_type) -{ - static const std::unordered_map str_to_type_map - = {{"simple", StorageType::SIMPLE}, - {"list", StorageType::LIST}, - {"set", StorageType::SET}, - {"hash", StorageType::HASH}, - {"zset", StorageType::ZSET}}; - - auto iter = str_to_type_map.find(storage_type); - if (iter == str_to_type_map.end()) - { - throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "invalid redis storage type: {}", storage_type); - } - return iter->second; -} - void registerStorageRedis(StorageFactory & factory) { factory.registerStorage( - "MongoDB", + "Redis", [](const StorageFactory::Arguments & args) { auto configuration = StorageRedis::getConfiguration(args.engine_args, args.getLocalContext()); return std::make_shared( args.table_id, - configuration.host, - configuration.port, - configuration.database, - configuration.table, - configuration.username, - configuration.password, - configuration.options, + configuration, args.columns, args.constraints, args.comment); }, { - .source_access_type = AccessType::MONGO, + .source_access_type = AccessType::Redis, }); } diff --git a/src/Storages/StorageRedis.h b/src/Storages/StorageRedis.h index 8ba4ec831bb..60db75dd384 100644 --- a/src/Storages/StorageRedis.h +++ b/src/Storages/StorageRedis.h @@ -7,56 +7,15 @@ namespace DB { /* Implements storage in the Redis. - * Use ENGINE = Redis(host:port, db_id, password, storage_type); + * Use ENGINE = Redis(host:port, db_index, password, storage_type); * Read only. 
*/ class StorageRedis : public IStorage { public: - enum class StorageType - { - SIMPLE, - LIST, - SET, - HASH, - ZSET - }; - - static String toString(StorageType storage_type); - static StorageType toStorageType(const String & storage_type); - - struct Configuration - { - String host; - uint32_t port; - String db_id; - String password; - StorageType storage_type; - }; - - using RedisArray = Poco::Redis::Array; - using RedisCommand = Poco::Redis::Command; - - using ClientPtr = std::unique_ptr; - using Pool = BorrowedObjectPool; - using PoolPtr = std::shared_ptr; - - struct Connection - { - Connection(PoolPtr pool_, ClientPtr client_); - ~Connection(); - - PoolPtr pool; - ClientPtr client; - }; - - using ConnectionPtr = std::unique_ptr; - - static Configuration getConfiguration(ASTs engine_args, ContextPtr context); - StorageRedis( const StorageID & table_id_, - const Configuration & configuration_, + const RedisConfiguration & configuration_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, const String & comment_); @@ -77,15 +36,17 @@ public: const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; + static RedisConfiguration getConfiguration(ASTs engine_args, ContextPtr context); + private: - Configuration configuration; StorageID table_id; + RedisConfiguration configuration; + ColumnsDescription columns; ConstraintsDescription constraints; - String comment; - std::shared_ptr connection; - void connectIfNotConnected(); + String comment; + RedisPoolPtr pool; }; } diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index 8be176a5375..84994298b8e 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -59,6 +59,7 @@ void registerStorageMySQL(StorageFactory & factory); #endif void registerStorageMongoDB(StorageFactory & factory); +void registerStorageRedis(StorageFactory & factory); #if USE_RDKAFKA @@ -156,6 +157,7 @@ void registerStorages() #endif registerStorageMongoDB(factory); + registerStorageRedis(factory); #if USE_RDKAFKA registerStorageKafka(factory); diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp index 9432f766aa8..db612806652 100644 --- a/src/TableFunctions/TableFunctionRedis.cpp +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -1,17 +1,15 @@ #include #include +#include #include -#include #include #include -#include #include #include -#include #include #include @@ -23,6 +21,7 @@ namespace ErrorCodes { extern const int BAD_ARGUMENTS; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int INVALID_REDIS_STORAGE_TYPE; } @@ -31,7 +30,11 @@ StoragePtr TableFunctionRedis::executeImpl( { auto columns = getActualTableStructure(context); auto storage = std::make_shared( - StorageID(configuration->db_id, table_name), configuration, columns, ConstraintsDescription(), String{});// TODO + StorageID(toString(configuration->db_index), table_name), // TODO + *configuration, + columns, + ConstraintsDescription(), + String{}); storage->startup(); return storage; } @@ -42,21 +45,14 @@ ColumnsDescription TableFunctionRedis::getActualTableStructure(ContextPtr contex String structure; switch (configuration->storage_type) { - case StorageRedis::StorageType::SIMPLE: + case RedisStorageType::SIMPLE: structure = "key String, value String"; break; - case StorageRedis::StorageType::HASH: + case RedisStorageType::HASH_MAP: structure = "key String, field, String, value String"; break; - case StorageRedis::StorageType::LIST: - 
structure = "key String, value Array(String)"; - break; - case StorageRedis::StorageType::SET: - structure = "key String, value Array(String)"; - break; - case StorageRedis::StorageType::ZSET: - structure = "key String, value Array(String)"; - break; + case RedisStorageType::UNKNOWN: + throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "invalid redis storage type."); } return parseColumnsListFromString(structure, context); } @@ -74,7 +70,7 @@ void TableFunctionRedis::parseArguments(const ASTPtr & ast_function, ContextPtr throw Exception( ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Table function 'Redis' requires from 4 parameters: " - "redis('host:port', db_id, 'password', 'storage_type')"); + "redis('host:port', db_index, 'password', 'storage_type')"); } configuration = StorageRedis::getConfiguration(args, context); } diff --git a/src/TableFunctions/TableFunctionRedis.h b/src/TableFunctions/TableFunctionRedis.h index d333cd5a42f..5c6f483fda7 100644 --- a/src/TableFunctions/TableFunctionRedis.h +++ b/src/TableFunctions/TableFunctionRedis.h @@ -23,7 +23,7 @@ private: ColumnsDescription getActualTableStructure(ContextPtr context) const override; void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; - std::optional configuration; + std::optional configuration; }; } From 9a495cbf997d4c0c4ff00b9c75f958b5fa7292d6 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Tue, 23 May 2023 15:31:50 +0800 Subject: [PATCH 165/722] Push down filter into Redis --- src/Dictionaries/RedisDictionarySource.cpp | 43 +----- src/Dictionaries/RedisSource.cpp | 52 +++++++- src/Dictionaries/RedisSource.h | 10 ++ src/Storages/RedisCommon.cpp | 68 +++++++--- src/Storages/RedisCommon.h | 28 +++- src/Storages/StorageRedis.cpp | 144 ++++++++++++++++++--- src/Storages/StorageRedis.h | 11 +- src/TableFunctions/TableFunctionRedis.cpp | 10 +- 8 files changed, 271 insertions(+), 95 deletions(-) diff --git a/src/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp index db27801a38e..f96c9231827 100644 --- a/src/Dictionaries/RedisDictionarySource.cpp +++ b/src/Dictionaries/RedisDictionarySource.cpp @@ -105,21 +105,6 @@ namespace DB RedisDictionarySource::~RedisDictionarySource() = default; - static String storageTypeToKeyType(RedisStorageType type) - { - switch (type) - { - case RedisStorageType::SIMPLE: - return "string"; - case RedisStorageType::HASH_MAP: - return "hash"; - default: - return "none"; - } - - UNREACHABLE(); - } - QueryPipeline RedisDictionarySource::loadAll() { auto connection = getRedisConnection(pool, configuration); @@ -142,33 +127,7 @@ namespace DB if (configuration.storage_type == RedisStorageType::HASH_MAP) { - RedisArray hkeys; - for (const auto & key : keys) - { - RedisCommand command_for_secondary_keys("HKEYS"); - command_for_secondary_keys.addRedisType(key); - - auto secondary_keys = connection->client->execute(command_for_secondary_keys); - - RedisArray primary_with_secondary; - primary_with_secondary.addRedisType(key); - for (const auto & secondary_key : secondary_keys) - { - primary_with_secondary.addRedisType(secondary_key); - /// Do not store more than max_block_size values for one request. 
- if (primary_with_secondary.size() == REDIS_MAX_BLOCK_SIZE + 1) - { - hkeys.add(primary_with_secondary); - primary_with_secondary.clear(); - primary_with_secondary.addRedisType(key); - } - } - - if (primary_with_secondary.size() > 1) - hkeys.add(primary_with_secondary); - } - - keys = hkeys; + keys = *getRedisHashMapKeys(connection, keys); } return QueryPipeline(std::make_shared( diff --git a/src/Dictionaries/RedisSource.cpp b/src/Dictionaries/RedisSource.cpp index 9abaf7f0ac5..20e0838886c 100644 --- a/src/Dictionaries/RedisSource.cpp +++ b/src/Dictionaries/RedisSource.cpp @@ -30,11 +30,29 @@ namespace DB const RedisStorageType & storage_type_, const DB::Block & sample_block, size_t max_block_size_) + : ISource(sample_block), max_block_size(max_block_size_)// TODO + { + RedisColumnTypes columns_types_; + if (storage_type_ == RedisStorageType::HASH_MAP) + columns_types_ = REDIS_HASH_MAP_COLUMN_TYPES; + else + columns_types_ = REDIS_SIMPLE_COLUMN_TYPES; + RedisSource(std::move(connection_), keys_, storage_type_, sample_block, columns_types_, max_block_size_); + } + + RedisSource::RedisSource( + RedisConnectionPtr connection_, + const RedisArray & keys_, + const RedisStorageType & storage_type_, + const DB::Block & sample_block, + const RedisColumnTypes & columns_types_, + size_t max_block_size_) : ISource(sample_block) , connection(std::move(connection_)) , keys(keys_) , storage_type(storage_type_) , max_block_size{max_block_size_} + , columns_types(columns_types_) { description.init(sample_block); } @@ -173,15 +191,27 @@ namespace DB const auto & primary_key = keys_array.get(0); for (size_t i = 0; i < values.size(); ++i) { - const auto & secondary_key = keys_array.get(i + 1); const auto & value = values.get(i); + const auto & secondary_key = keys_array.get(i + 1); /// null string means 'no value for requested key' if (!value.isNull()) { - insert_value_by_idx(0, primary_key); - insert_value_by_idx(1, secondary_key); - insert_value_by_idx(2, value); + for (size_t idx=0; idxreturnObject(std::move(client)); } -String toString(RedisStorageType storage_type) +String storageTypeToKeyType(RedisStorageType storage_type) { - static const std::unordered_map type_to_str_map - = {{RedisStorageType::SIMPLE, "simple"}, {RedisStorageType::HASH_MAP, "hash_map"}}; - - auto iter = type_to_str_map.find(storage_type); - return iter->second; + switch (storage_type) + { + case RedisStorageType::SIMPLE: + return "string"; + case RedisStorageType::HASH_MAP: + return "hash"; + default: + return "none"; + } } -RedisStorageType toRedisStorageType(const String & storage_type) +RedisStorageType keyTypeToStorageType(const String & key_type) { - static const std::unordered_map str_to_type_map - = {{"simple", RedisStorageType::SIMPLE}, {"hash", RedisStorageType::HASH_MAP}}; - - auto iter = str_to_type_map.find(storage_type); - if (iter == str_to_type_map.end()) - { - throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "invalid redis storage type: {}", storage_type); - } - return iter->second; + if (key_type == "string") + return RedisStorageType::SIMPLE; + else if (key_type == "hash") + return RedisStorageType::HASH_MAP; + else + return RedisStorageType::UNKNOWN; } RedisConnectionPtr getRedisConnection(RedisPoolPtr pool, const RedisConfiguration & configuration) @@ -95,4 +99,36 @@ RedisConnectionPtr getRedisConnection(RedisPoolPtr pool, const RedisConfiguratio return std::make_unique(pool, std::move(client)); } + +RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisArray & keys) +{ + 
RedisArrayPtr hkeys = std::make_shared(); + for (const auto & key : keys) + { + RedisCommand command_for_secondary_keys("HKEYS"); + command_for_secondary_keys.addRedisType(key); + + auto secondary_keys = connection->client->execute(command_for_secondary_keys); + + RedisArray primary_with_secondary; + primary_with_secondary.addRedisType(key); + for (const auto & secondary_key : secondary_keys) + { + primary_with_secondary.addRedisType(secondary_key); + /// Do not store more than max_block_size values for one request. + if (primary_with_secondary.size() == REDIS_MAX_BLOCK_SIZE + 1) + { + hkeys->add(primary_with_secondary); + primary_with_secondary.clear(); + primary_with_secondary.addRedisType(key); + } + } + + if (primary_with_secondary.size() > 1) + hkeys->add(primary_with_secondary); + } + + return hkeys; +} + } diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index 6069d3d9a0c..590ea1476c4 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -24,8 +24,23 @@ enum class RedisStorageType UNKNOWN }; -String toString(RedisStorageType storage_type); -RedisStorageType toRedisStorageType(const String & storage_type); +enum class RedisColumnType +{ + /// Redis key + KEY, + /// Redis map field + FIELD, + /// Redis value + VALUE +}; + +using RedisColumnTypes = std::vector; + +extern RedisColumnTypes REDIS_HASH_MAP_COLUMN_TYPES; +extern RedisColumnTypes REDIS_SIMPLE_COLUMN_TYPES; + +String storageTypeToKeyType(RedisStorageType storage_type); +RedisStorageType keyTypeToStorageType(const String & key_type); struct RedisConfiguration { @@ -34,10 +49,13 @@ struct RedisConfiguration uint32_t db_index; String password; RedisStorageType storage_type; + /// column name of redis key + String key;// TODO remove uint32_t pool_size; }; using RedisArray = Poco::Redis::Array; +using RedisArrayPtr = std::shared_ptr; using RedisCommand = Poco::Redis::Command; using RedisBulkString = Poco::Redis::BulkString; @@ -56,6 +74,10 @@ struct RedisConnection using RedisConnectionPtr = std::unique_ptr; -RedisConnectionPtr getRedisConnection(RedisPoolPtr pool, const RedisConfiguration & configuration) ; +RedisConnectionPtr getRedisConnection(RedisPoolPtr pool, const RedisConfiguration & configuration); + +///get all redis hash key array +/// eg: keys -> [key1, key2] and get [[key1, field1, field2], [key2, field1, field2]] +RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisArray & keys); } diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 055617b6a96..45ebe0696d6 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include @@ -11,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -22,9 +24,33 @@ namespace DB namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int INVALID_REDIS_STORAGE_TYPE; extern const int NOT_IMPLEMENTED; } +namespace +{ + RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column) + { + String redis_col_key = all_columns.at(0); + if (column == redis_col_key) + return RedisColumnType::KEY; + + if (storage_type == RedisStorageType::HASH_MAP) + { + String redis_col_field = all_columns.at(1); + if (column == redis_col_field) + return RedisColumnType::FIELD; + else + return RedisColumnType::VALUE; + } + else + { + return RedisColumnType::VALUE; + } + } +} + StorageRedis::StorageRedis( const StorageID & 
table_id_, const RedisConfiguration & configuration_, @@ -34,41 +60,120 @@ StorageRedis::StorageRedis( : IStorage(table_id_) , table_id(table_id_) , configuration(configuration_) - , columns(columns_) - , constraints(constraints_) - , comment(comment_) + , log(&Poco::Logger::get("StorageRedis")) { pool = std::make_shared(configuration.pool_size); + StorageInMemoryMetadata storage_metadata; + storage_metadata.setColumns(columns_); + storage_metadata.setConstraints(constraints_); + storage_metadata.setComment(comment_); + setInMemoryMetadata(storage_metadata); } Pipe StorageRedis::read( const Names & column_names, const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & /*query_info*/, - ContextPtr /*context*/, + SelectQueryInfo & query_info, + ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - size_t /*num_streams*/) + size_t num_streams) { + LOG_INFO(log, "num_streams {}", num_streams);// TODO delete auto connection = getRedisConnection(pool, configuration); storage_snapshot->check(column_names); Block sample_block; + RedisColumnTypes redis_types; + auto all_columns = storage_snapshot->metadata->getColumns().getNamesOfPhysical(); + for (const String & column_name : column_names) { auto column_data = storage_snapshot->metadata->getColumns().getPhysical(column_name); sample_block.insert({column_data.type, column_data.name}); + redis_types.push_back(getRedisColumnType(configuration.storage_type, all_columns, column_name)); + LOG_INFO(log, "Request column: {}, Redis type: {}", column_data.name, *redis_types.crbegin()); // TODO delete } - RedisArray keys; - RedisCommand command_for_keys("KEYS"); - /// generate keys by table name prefix - command_for_keys << table_id.getTableName() + ":" + toString(configuration.storage_type) + ":*"; + FieldVectorPtr fields; + bool all_scan = false; - /// Get only keys for specified storage type. 
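As a rough sketch of what the filter push-down in this patch aims at (table name, column names and key values are hypothetical, and the key column is assumed to be declared first): a predicate on the key column, as in the first query below, can be turned into direct lookups, while the second query still has to scan all keys.

    -- keys extracted from the predicate, no full KEYS scan
    SELECT key, value FROM redis_kv WHERE key IN ('user:1', 'user:2');

    -- no usable predicate on the key column, falls back to scanning all keys
    SELECT count() FROM redis_kv WHERE value LIKE '%error%';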
- auto all_keys = connection->client->execute(command_for_keys); - return Pipe(std::make_shared(std::move(connection), all_keys, configuration.storage_type, sample_block, max_block_size)); + String primary_key = all_columns.at(0); + auto primary_key_data_type = sample_block.getByName(primary_key).type; + + std::tie(fields, all_scan) = getFilterKeys(primary_key, primary_key_data_type, query_info, context); + + /// TODO hash_map hgetall + if (all_scan) + { + RedisCommand command_for_keys("KEYS"); + /// generate keys by table name prefix + command_for_keys << table_id.getTableName() + ":" + toString(configuration.storage_type) + ":*"; + + auto all_keys = connection->client->execute(command_for_keys); + + if (all_keys.size() == 0) + return {}; + + Pipes pipes; + + size_t num_keys = all_keys.size(); + size_t num_threads = std::min(num_streams, all_keys.size()); + + assert(num_keys <= std::numeric_limits::max()); + + for (size_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) + { + size_t begin = num_keys * thread_idx / num_threads; + size_t end = num_keys * (thread_idx + 1) / num_threads; + + RedisArray keys; + for (size_t pos=begin; pos(pos)); + + if (configuration.storage_type == RedisStorageType::HASH_MAP) + { + keys = *getRedisHashMapKeys(connection, keys); + } + + /// TODO reduce keys copy + pipes.emplace_back(std::make_shared( + std::move(connection), keys, configuration.storage_type, sample_block, redis_types, max_block_size)); + } + return Pipe::unitePipes(std::move(pipes)); + } + else + { + if (fields->empty()) + return {}; + + Pipes pipes; + + size_t num_keys = fields->size(); + size_t num_threads = std::min(num_streams, fields->size()); + + assert(num_keys <= std::numeric_limits::max()); + + for (size_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) + { + size_t begin = num_keys * thread_idx / num_threads; + size_t end = num_keys * (thread_idx + 1) / num_threads; + + RedisArray keys; + for (size_t pos=begin; posat(pos).get()); + + if (configuration.storage_type == RedisStorageType::HASH_MAP) + { + keys = *getRedisHashMapKeys(connection, keys); + } + + pipes.emplace_back(std::make_shared( + std::move(connection), keys, configuration.storage_type, sample_block, redis_types, max_block_size)); + } + return Pipe::unitePipes(std::move(pipes)); + } } @@ -88,15 +193,15 @@ RedisConfiguration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr c { validateNamedCollection( *named_collection, - ValidateKeysMultiset{"host", "port", "hostname", "password", "db_index", "storage_type"}, + ValidateKeysMultiset{"host", "port", "hostname", "password", "db_index", "storage_type", "pool_size"}, {}); configuration.host = named_collection->getAny({"host", "hostname"}); configuration.port = static_cast(named_collection->get("port")); configuration.password = named_collection->get("password"); configuration.db_index = static_cast(named_collection->get({"db_index"})); - configuration.storage_type = toRedisStorageType(named_collection->getOrDefault("storage_type", "")); - configuration.pool_size = 16; /// TODO + configuration.storage_type = keyTypeToStorageType(named_collection->getOrDefault("storage_type", "")); + configuration.pool_size = static_cast(named_collection->get("pool_size")); } else { @@ -110,10 +215,13 @@ RedisConfiguration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr c configuration.port = parsed_host_port.second; configuration.db_index = static_cast(checkAndGetLiteralArgument(engine_args[1], "db_index")); configuration.password = 
checkAndGetLiteralArgument(engine_args[2], "password"); - configuration.storage_type = toRedisStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); - configuration.pool_size = 16; /// TODO + configuration.storage_type = keyTypeToStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); + configuration.pool_size = static_cast(checkAndGetLiteralArgument(engine_args[4], "pool_size")); } + if (configuration.storage_type == RedisStorageType::UNKNOWN) + throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "Invalid Redis storage type"); + context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); return configuration; } diff --git a/src/Storages/StorageRedis.h b/src/Storages/StorageRedis.h index 60db75dd384..1bffc6a64bf 100644 --- a/src/Storages/StorageRedis.h +++ b/src/Storages/StorageRedis.h @@ -7,8 +7,12 @@ namespace DB { /* Implements storage in the Redis. - * Use ENGINE = Redis(host:port, db_index, password, storage_type); + * Use ENGINE = Redis(host:port, db_index, password, storage_type, conn_pool_size); * Read only. + * + * Note If storage_type is + * simple: there should be 2 columns and the first one is key in Redis, the second one is value. + * hash_map: there should be 3 columns and the first one is key in Redis and the second is the field of Redis Map. */ class StorageRedis : public IStorage { @@ -42,10 +46,7 @@ private: StorageID table_id; RedisConfiguration configuration; - ColumnsDescription columns; - ConstraintsDescription constraints; - - String comment; + Poco::Logger * log; RedisPoolPtr pool; }; diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp index db612806652..e410ad799a6 100644 --- a/src/TableFunctions/TableFunctionRedis.cpp +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -49,10 +49,10 @@ ColumnsDescription TableFunctionRedis::getActualTableStructure(ContextPtr contex structure = "key String, value String"; break; case RedisStorageType::HASH_MAP: - structure = "key String, field, String, value String"; + structure = "key String, field String, value String"; break; case RedisStorageType::UNKNOWN: - throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "invalid redis storage type."); + throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "Invalid Redis storage type."); } return parseColumnsListFromString(structure, context); } @@ -65,12 +65,12 @@ void TableFunctionRedis::parseArguments(const ASTPtr & ast_function, ContextPtr ASTs & args = func_args.arguments->children; - if (args.size() != 4) + if (args.size() != 5) { throw Exception( ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Table function 'Redis' requires from 4 parameters: " - "redis('host:port', db_index, 'password', 'storage_type')"); + "Table function 'Redis' requires from 5 parameters: " + "redis('host:port', db_index, 'password', 'storage_type', 'pool_size')"); } configuration = StorageRedis::getConfiguration(args, context); } From 23d6c835d831b2fa9650c693370219ee1d1f9727 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Tue, 23 May 2023 16:42:46 +0800 Subject: [PATCH 166/722] fix poco redis array NPE --- src/Storages/RedisCommon.cpp | 22 ++++++++++++++++++++++ src/Storages/RedisCommon.h | 7 ++++++- src/Storages/StorageRedis.cpp | 35 ++--------------------------------- src/Storages/StorageRedis.h | 4 ++-- 4 files changed, 32 insertions(+), 36 deletions(-) diff --git a/src/Storages/RedisCommon.cpp b/src/Storages/RedisCommon.cpp index b910759fe52..0a13e40b1ec 100644 --- 
a/src/Storages/RedisCommon.cpp +++ b/src/Storages/RedisCommon.cpp @@ -109,6 +109,8 @@ RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisAr command_for_secondary_keys.addRedisType(key); auto secondary_keys = connection->client->execute(command_for_secondary_keys); + if (secondary_keys.isNull()) + continue; RedisArray primary_with_secondary; primary_with_secondary.addRedisType(key); @@ -131,4 +133,24 @@ RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisAr return hkeys; } +RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column) +{ + String redis_col_key = all_columns.at(0); + if (column == redis_col_key) + return RedisColumnType::KEY; + + if (storage_type == RedisStorageType::HASH_MAP) + { + String redis_col_field = all_columns.at(1); + if (column == redis_col_field) + return RedisColumnType::FIELD; + else + return RedisColumnType::VALUE; + } + else + { + return RedisColumnType::VALUE; + } +} + } diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index 590ea1476c4..384a02d76e4 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -6,6 +6,7 @@ #include #include +#include namespace DB { @@ -28,7 +29,7 @@ enum class RedisColumnType { /// Redis key KEY, - /// Redis map field + /// Redis hash field FIELD, /// Redis value VALUE @@ -80,4 +81,8 @@ RedisConnectionPtr getRedisConnection(RedisPoolPtr pool, const RedisConfiguratio /// eg: keys -> [key1, key2] and get [[key1, field1, field2], [key2, field1, field2]] RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisArray & keys); +/// Get RedisColumnType of a column, If storage_type is +/// SIMPLE: all_columns must have 2 iterm and the first one is Redis key the second one is value +/// HASH_MAP: all_columns must have 2 iterm and the first one is Redis key the second is field, the third is value. 
+RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column); } diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 45ebe0696d6..0e1b3a24e4f 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -5,15 +5,11 @@ #include #include -#include -#include -#include #include #include #include #include #include -#include #include #include #include @@ -28,29 +24,6 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } -namespace -{ - RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column) - { - String redis_col_key = all_columns.at(0); - if (column == redis_col_key) - return RedisColumnType::KEY; - - if (storage_type == RedisStorageType::HASH_MAP) - { - String redis_col_field = all_columns.at(1); - if (column == redis_col_field) - return RedisColumnType::FIELD; - else - return RedisColumnType::VALUE; - } - else - { - return RedisColumnType::VALUE; - } - } -} - StorageRedis::StorageRedis( const StorageID & table_id_, const RedisConfiguration & configuration_, @@ -79,9 +52,7 @@ Pipe StorageRedis::read( size_t max_block_size, size_t num_streams) { - LOG_INFO(log, "num_streams {}", num_streams);// TODO delete auto connection = getRedisConnection(pool, configuration); - storage_snapshot->check(column_names); Block sample_block; @@ -93,7 +64,6 @@ Pipe StorageRedis::read( auto column_data = storage_snapshot->metadata->getColumns().getPhysical(column_name); sample_block.insert({column_data.type, column_data.name}); redis_types.push_back(getRedisColumnType(configuration.storage_type, all_columns, column_name)); - LOG_INFO(log, "Request column: {}, Redis type: {}", column_data.name, *redis_types.crbegin()); // TODO delete } FieldVectorPtr fields; @@ -104,16 +74,15 @@ Pipe StorageRedis::read( std::tie(fields, all_scan) = getFilterKeys(primary_key, primary_key_data_type, query_info, context); - /// TODO hash_map hgetall if (all_scan) { RedisCommand command_for_keys("KEYS"); /// generate keys by table name prefix - command_for_keys << table_id.getTableName() + ":" + toString(configuration.storage_type) + ":*"; + command_for_keys << table_id.getTableName() + ":" + storageTypeToKeyType(configuration.storage_type) + ":*"; auto all_keys = connection->client->execute(command_for_keys); - if (all_keys.size() == 0) + if (all_keys.isNull() || all_keys.size() == 0) return {}; Pipes pipes; diff --git a/src/Storages/StorageRedis.h b/src/Storages/StorageRedis.h index 1bffc6a64bf..2c6c6193982 100644 --- a/src/Storages/StorageRedis.h +++ b/src/Storages/StorageRedis.h @@ -11,8 +11,8 @@ namespace DB * Read only. * * Note If storage_type is - * simple: there should be 2 columns and the first one is key in Redis, the second one is value. - * hash_map: there should be 3 columns and the first one is key in Redis and the second is the field of Redis Map. + * SIMPLE: there should be 2 columns and the first one is key in Redis, the second one is value. + * HASH_MAP: there should be 3 columns and the first one is key in Redis and the second is the field of Redis Map. 
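To make this layout concrete, a hypothetical pair of table definitions following the described column rules; host, port, database index, password and pool size are placeholders, and the storage type literals shown ('simple', 'hash_map') are the ones accepted after the later "unify storage type" patch, while the intermediate revisions in this series expect 'string' and 'hash':

    CREATE TABLE redis_kv (key String, value String)
        ENGINE = Redis('localhost:6379', 0, '', 'simple', 10);

    CREATE TABLE redis_hash (key String, field String, value String)
        ENGINE = Redis('localhost:6379', 0, '', 'hash_map', 10);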
*/ class StorageRedis : public IStorage { From ce203b5ce6cb2329a7a26bcb4999e040ecbdbf7d Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Tue, 23 May 2023 20:54:26 +0800 Subject: [PATCH 167/722] Check redis table structure --- src/Common/ErrorCodes.cpp | 1 + src/Dictionaries/RedisSource.cpp | 5 +---- src/Dictionaries/RedisSource.h | 1 - src/Storages/RedisCommon.cpp | 13 +++++++++++++ src/Storages/RedisCommon.h | 7 +++++-- src/Storages/StorageRedis.cpp | 20 ++++++++++++++++---- src/TableFunctions/TableFunctionRedis.cpp | 6 ++++-- 7 files changed, 40 insertions(+), 13 deletions(-) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 505cf0aac8f..4c08d762df2 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -581,6 +581,7 @@ M(696, ASYNC_LOAD_CANCELED) \ M(697, CANNOT_RESTORE_TO_NONENCRYPTED_DISK) \ M(698, INVALID_REDIS_STORAGE_TYPE) \ + M(699, INVALID_REDIS_TABLE_STRUCTURE) \ \ M(999, KEEPER_EXCEPTION) \ M(1000, POCO_EXCEPTION) \ diff --git a/src/Dictionaries/RedisSource.cpp b/src/Dictionaries/RedisSource.cpp index 20e0838886c..27125077c10 100644 --- a/src/Dictionaries/RedisSource.cpp +++ b/src/Dictionaries/RedisSource.cpp @@ -1,15 +1,12 @@ #include "RedisSource.h" -#include #include - #include #include #include #include #include - -#include "DictionaryStructure.h" +#include namespace DB diff --git a/src/Dictionaries/RedisSource.h b/src/Dictionaries/RedisSource.h index fe5a973d57c..e8e78db67bc 100644 --- a/src/Dictionaries/RedisSource.h +++ b/src/Dictionaries/RedisSource.h @@ -7,7 +7,6 @@ #include #include #include -#include "RedisDictionarySource.h" namespace DB diff --git a/src/Storages/RedisCommon.cpp b/src/Storages/RedisCommon.cpp index 0a13e40b1ec..916ac3b69bc 100644 --- a/src/Storages/RedisCommon.cpp +++ b/src/Storages/RedisCommon.cpp @@ -7,6 +7,7 @@ namespace DB namespace ErrorCodes { extern const int INVALID_REDIS_STORAGE_TYPE; + extern const int INVALID_REDIS_TABLE_STRUCTURE; extern const int INTERNAL_REDIS_ERROR; extern const int TIMEOUT_EXCEEDED; } @@ -153,4 +154,16 @@ RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & } } +void checkRedisTableStructure(const ColumnsDescription & columns, const RedisConfiguration & configuration) +{ + /// TODO check data type + if (configuration.storage_type == RedisStorageType::HASH_MAP && columns.size() != 3) + throw Exception(ErrorCodes::INVALID_REDIS_TABLE_STRUCTURE, + "Redis hash table must have 3 columns, but found {}", columns.size()); + + if (configuration.storage_type == RedisStorageType::SIMPLE && columns.size() != 2) + throw Exception(ErrorCodes::INVALID_REDIS_TABLE_STRUCTURE, + "Redis string table must have 2 columns, but found {}", columns.size()); +} + } diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index 384a02d76e4..e663faa5fab 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -7,6 +7,7 @@ #include #include #include +#include namespace DB { @@ -50,8 +51,6 @@ struct RedisConfiguration uint32_t db_index; String password; RedisStorageType storage_type; - /// column name of redis key - String key;// TODO remove uint32_t pool_size; }; @@ -85,4 +84,8 @@ RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisAr /// SIMPLE: all_columns must have 2 iterm and the first one is Redis key the second one is value /// HASH_MAP: all_columns must have 2 iterm and the first one is Redis key the second is field, the third is value. 
RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column); + +/// checking Redis table/table-function when creating +void checkRedisTableStructure(const ColumnsDescription & columns, const RedisConfiguration & configuration); + } diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 0e1b3a24e4f..7721665e9dd 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -78,7 +78,8 @@ Pipe StorageRedis::read( { RedisCommand command_for_keys("KEYS"); /// generate keys by table name prefix - command_for_keys << table_id.getTableName() + ":" + storageTypeToKeyType(configuration.storage_type) + ":*"; +// command_for_keys << table_id.getTableName() + ":" + storageTypeToKeyType(configuration.storage_type) + ":*"; + command_for_keys << "*"; auto all_keys = connection->client->execute(command_for_keys); @@ -90,6 +91,7 @@ Pipe StorageRedis::read( size_t num_keys = all_keys.size(); size_t num_threads = std::min(num_streams, all_keys.size()); + num_threads = std::min(num_threads, configuration.pool_size); assert(num_keys <= std::numeric_limits::max()); for (size_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) @@ -106,9 +108,12 @@ Pipe StorageRedis::read( keys = *getRedisHashMapKeys(connection, keys); } + delete connection.release(); + /// TODO reduce keys copy pipes.emplace_back(std::make_shared( - std::move(connection), keys, configuration.storage_type, sample_block, redis_types, max_block_size)); + getRedisConnection(pool, configuration), keys, + configuration.storage_type, sample_block, redis_types, max_block_size)); } return Pipe::unitePipes(std::move(pipes)); } @@ -122,6 +127,7 @@ Pipe StorageRedis::read( size_t num_keys = fields->size(); size_t num_threads = std::min(num_streams, fields->size()); + num_threads = std::min(num_threads, configuration.pool_size); assert(num_keys <= std::numeric_limits::max()); for (size_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) @@ -138,8 +144,11 @@ Pipe StorageRedis::read( keys = *getRedisHashMapKeys(connection, keys); } + delete connection.release(); + pipes.emplace_back(std::make_shared( - std::move(connection), keys, configuration.storage_type, sample_block, redis_types, max_block_size)); + getRedisConnection(pool, configuration), keys, + configuration.storage_type, sample_block, redis_types, max_block_size)); } return Pipe::unitePipes(std::move(pipes)); } @@ -151,9 +160,10 @@ SinkToStoragePtr StorageRedis::write( const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr /*context*/) { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method write is unsupported for StorageRedis"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Write is unsupported for StorageRedis"); } +/// TODO make "password", "db_index", "storage_type", "pool_size" optional RedisConfiguration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr context) { RedisConfiguration configuration; @@ -203,6 +213,8 @@ void registerStorageRedis(StorageFactory & factory) { auto configuration = StorageRedis::getConfiguration(args.engine_args, args.getLocalContext()); + checkRedisTableStructure(args.columns, configuration); + return std::make_shared( args.table_id, configuration, diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp index e410ad799a6..9e4a39b1b85 100644 --- a/src/TableFunctions/TableFunctionRedis.cpp +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -1,7 +1,6 @@ #include #include -#include #include @@ -29,8 
+28,10 @@ StoragePtr TableFunctionRedis::executeImpl( const ASTPtr & /*ast_function*/, ContextPtr context, const String & table_name, ColumnsDescription /*cached_columns*/) const { auto columns = getActualTableStructure(context); + checkRedisTableStructure(columns, *configuration); + auto storage = std::make_shared( - StorageID(toString(configuration->db_index), table_name), // TODO + StorageID(toString(configuration->db_index), table_name), *configuration, columns, ConstraintsDescription(), @@ -39,6 +40,7 @@ StoragePtr TableFunctionRedis::executeImpl( return storage; } +/// TODO support user customized table structure ColumnsDescription TableFunctionRedis::getActualTableStructure(ContextPtr context) const { /// generate table structure by storage type. From 40cc8d210792cd7a3e7f80f9ed9bf95fe938e9d8 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 24 May 2023 10:34:37 +0800 Subject: [PATCH 168/722] fix code style --- src/Dictionaries/RedisDictionarySource.cpp | 2 -- src/Storages/RedisCommon.cpp | 1 - src/Storages/RedisCommon.h | 5 ----- src/Storages/StorageRedis.cpp | 1 - 4 files changed, 9 deletions(-) diff --git a/src/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp index f96c9231827..1056383bc84 100644 --- a/src/Dictionaries/RedisDictionarySource.cpp +++ b/src/Dictionaries/RedisDictionarySource.cpp @@ -17,9 +17,7 @@ namespace DB { extern const int UNSUPPORTED_METHOD; extern const int INVALID_CONFIG_PARAMETER; - extern const int INTERNAL_REDIS_ERROR; extern const int LOGICAL_ERROR; - extern const int TIMEOUT_EXCEEDED; } static RedisStorageType parseStorageType(const String & storage_type_str) diff --git a/src/Storages/RedisCommon.cpp b/src/Storages/RedisCommon.cpp index 916ac3b69bc..63a8d911bf0 100644 --- a/src/Storages/RedisCommon.cpp +++ b/src/Storages/RedisCommon.cpp @@ -6,7 +6,6 @@ namespace DB namespace ErrorCodes { - extern const int INVALID_REDIS_STORAGE_TYPE; extern const int INVALID_REDIS_TABLE_STRUCTURE; extern const int INTERNAL_REDIS_ERROR; extern const int TIMEOUT_EXCEEDED; diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index e663faa5fab..2668311125f 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -11,11 +11,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - static constexpr size_t REDIS_MAX_BLOCK_SIZE = DEFAULT_BLOCK_SIZE; static constexpr size_t REDIS_LOCK_ACQUIRE_TIMEOUT_MS = 5000; diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 7721665e9dd..3d7721bdc0e 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -19,7 +19,6 @@ namespace DB namespace ErrorCodes { - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int INVALID_REDIS_STORAGE_TYPE; extern const int NOT_IMPLEMENTED; } From 70cfd7a222342bfd1d9a3ddb59eaa2391d014d28 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 24 May 2023 10:45:21 +0800 Subject: [PATCH 169/722] fix typos --- src/Storages/RedisCommon.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index 2668311125f..348c2494632 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -76,8 +76,8 @@ RedisConnectionPtr getRedisConnection(RedisPoolPtr pool, const RedisConfiguratio RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisArray & keys); /// Get RedisColumnType of a column, If storage_type is -/// SIMPLE: all_columns must have 2 iterm and the 
first one is Redis key the second one is value -/// HASH_MAP: all_columns must have 2 iterm and the first one is Redis key the second is field, the third is value. +/// SIMPLE: all_columns must have 2 items and the first one is Redis key the second one is value +/// HASH_MAP: all_columns must have 2 items and the first one is Redis key the second is field, the third is value. RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column); /// checking Redis table/table-function when creating From d594bb1c7a3d6f0fe2ef4e27bb01e132ed82a8e7 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 24 May 2023 10:53:42 +0800 Subject: [PATCH 170/722] fix fast tests --- src/Access/Common/AccessType.h | 2 +- tests/queries/0_stateless/01271_show_privileges.reference | 2 +- .../0_stateless/02117_show_create_table_system.reference | 2 +- .../02414_all_new_table_functions_must_be_documented.reference | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Access/Common/AccessType.h b/src/Access/Common/AccessType.h index 78c341cdcb5..c9cce610f2c 100644 --- a/src/Access/Common/AccessType.h +++ b/src/Access/Common/AccessType.h @@ -201,7 +201,7 @@ enum class AccessType M(URL, "", GLOBAL, SOURCES) \ M(REMOTE, "", GLOBAL, SOURCES) \ M(MONGO, "", GLOBAL, SOURCES) \ - M(Redis, "", GLOBAL, SOURCES) \ + M(REDIS, "", GLOBAL, SOURCES) \ M(MEILISEARCH, "", GLOBAL, SOURCES) \ M(MYSQL, "", GLOBAL, SOURCES) \ M(POSTGRES, "", GLOBAL, SOURCES) \ diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index ec245d8b9e0..5ada21e31f4 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -148,7 +148,7 @@ INTROSPECTION ['INTROSPECTION FUNCTIONS'] \N ALL FILE [] GLOBAL SOURCES URL [] GLOBAL SOURCES REMOTE [] GLOBAL SOURCES -MONGO [] GLOBAL SOURCES +REDIS [] GLOBAL SOURCES MEILISEARCH [] GLOBAL SOURCES MYSQL [] GLOBAL SOURCES POSTGRES [] GLOBAL SOURCES diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 09cc62dac00..724118f7bc1 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -297,7 +297,7 @@ CREATE TABLE system.grants ( `user_name` Nullable(String), `role_name` Nullable(String), - `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER 
DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'MEILISEARCH' = 151, 'MYSQL' = 152, 'POSTGRES' = 153, 'SQLITE' = 154, 'ODBC' = 155, 'JDBC' = 156, 'HDFS' = 157, 'S3' = 158, 'HIVE' = 159, 'SOURCES' = 160, 'CLUSTER' = 161, 'ALL' = 162, 'NONE' = 163), + `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER 
MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'Redis' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 
153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'SOURCES' = 161, 'CLUSTER' = 162, 'ALL' = 163, 'NONE' = 164), `database` Nullable(String), `table` Nullable(String), `column` Nullable(String), diff --git a/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference index 2277e19cf25..4f16e57d606 100644 --- a/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference @@ -9,6 +9,7 @@ jdbc meilisearch merge mongodb +redis null numbers numbers_mt From b35867d907d39b790c9f8b3feb709cb9e76a6434 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 24 May 2023 18:06:42 +0800 Subject: [PATCH 171/722] unify storage type --- src/Dictionaries/RedisDictionarySource.cpp | 10 - src/Dictionaries/RedisDictionarySource.h | 10 - src/Storages/RedisCommon.cpp | 19 +- src/Storages/RedisCommon.h | 5 +- src/Storages/StorageRedis.cpp | 8 +- .../test_storage_redis/__init__.py | 0 .../configs/named_collections.xml | 12 + .../configs_secure/config.d/ssl_conf.xml | 8 + tests/integration/test_storage_redis/test.py | 426 ++++++++++++++++++ 9 files changed, 464 insertions(+), 34 deletions(-) create mode 100644 tests/integration/test_storage_redis/__init__.py create mode 100644 tests/integration/test_storage_redis/configs/named_collections.xml create mode 100644 tests/integration/test_storage_redis/configs_secure/config.d/ssl_conf.xml create mode 100644 tests/integration/test_storage_redis/test.py diff --git a/src/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp index 1056383bc84..d28b7528d23 100644 --- a/src/Dictionaries/RedisDictionarySource.cpp +++ b/src/Dictionaries/RedisDictionarySource.cpp @@ -20,16 +20,6 @@ namespace DB extern const int LOGICAL_ERROR; } - static RedisStorageType parseStorageType(const String & storage_type_str) - { - if (storage_type_str == "hash_map") - return RedisStorageType::HASH_MAP; - else if (!storage_type_str.empty() && storage_type_str != "simple") - throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Unknown storage type {} for Redis dictionary", storage_type_str); - - return RedisStorageType::SIMPLE; - } - void registerDictionarySourceRedis(DictionarySourceFactory & factory) { auto create_table_source = [=](const DictionaryStructure & dict_struct, diff --git a/src/Dictionaries/RedisDictionarySource.h b/src/Dictionaries/RedisDictionarySource.h index c7786284dc4..a55f220321d 100644 --- a/src/Dictionaries/RedisDictionarySource.h +++ b/src/Dictionaries/RedisDictionarySource.h @@ -7,16 +7,6 @@ #include "IDictionarySource.h" #include -namespace Poco -{ - namespace Redis - { - class Client; - class Array; - class Command; - } -} - namespace DB { namespace ErrorCodes diff --git a/src/Storages/RedisCommon.cpp b/src/Storages/RedisCommon.cpp index 63a8d911bf0..8cc94c45dae 100644 --- a/src/Storages/RedisCommon.cpp +++ b/src/Storages/RedisCommon.cpp @@ -9,6 +9,7 @@ namespace ErrorCodes extern const int INVALID_REDIS_TABLE_STRUCTURE; extern const int INTERNAL_REDIS_ERROR; extern const int TIMEOUT_EXCEEDED; + extern const int INVALID_REDIS_STORAGE_TYPE; } RedisColumnTypes REDIS_HASH_MAP_COLUMN_TYPES = {RedisColumnType::KEY, RedisColumnType::FIELD, RedisColumnType::VALUE}; @@ -24,27 +25,27 @@ RedisConnection::~RedisConnection() pool->returnObject(std::move(client)); } -String 
storageTypeToKeyType(RedisStorageType storage_type) +String serializeStorageType(RedisStorageType storage_type) { switch (storage_type) { case RedisStorageType::SIMPLE: - return "string"; + return "simple"; case RedisStorageType::HASH_MAP: - return "hash"; + return "hash_map"; default: return "none"; } } -RedisStorageType keyTypeToStorageType(const String & key_type) +RedisStorageType parseStorageType(const String & storage_type_str) { - if (key_type == "string") - return RedisStorageType::SIMPLE; - else if (key_type == "hash") + if (storage_type_str == "hash_map") return RedisStorageType::HASH_MAP; - else - return RedisStorageType::UNKNOWN; + else if (!storage_type_str.empty() && storage_type_str != "simple") + throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "Unknown storage type {} for Redis dictionary", storage_type_str); + + return RedisStorageType::SIMPLE; } RedisConnectionPtr getRedisConnection(RedisPoolPtr pool, const RedisConfiguration & configuration) diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index 348c2494632..02d0b435b9d 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -36,8 +36,11 @@ using RedisColumnTypes = std::vector; extern RedisColumnTypes REDIS_HASH_MAP_COLUMN_TYPES; extern RedisColumnTypes REDIS_SIMPLE_COLUMN_TYPES; +/// storage type to Redis key type String storageTypeToKeyType(RedisStorageType storage_type); -RedisStorageType keyTypeToStorageType(const String & key_type); + +RedisStorageType parseStorageType(const String & storage_type_str); +String serializeStorageType(RedisStorageType storage_type); struct RedisConfiguration { diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 3d7721bdc0e..819ab01d733 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -77,7 +77,7 @@ Pipe StorageRedis::read( { RedisCommand command_for_keys("KEYS"); /// generate keys by table name prefix -// command_for_keys << table_id.getTableName() + ":" + storageTypeToKeyType(configuration.storage_type) + ":*"; +// command_for_keys << table_id.getTableName() + ":" + serializeStorageType(configuration.storage_type) + ":*"; command_for_keys << "*"; auto all_keys = connection->client->execute(command_for_keys); @@ -178,7 +178,7 @@ RedisConfiguration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr c configuration.port = static_cast(named_collection->get("port")); configuration.password = named_collection->get("password"); configuration.db_index = static_cast(named_collection->get({"db_index"})); - configuration.storage_type = keyTypeToStorageType(named_collection->getOrDefault("storage_type", "")); + configuration.storage_type = parseStorageType(named_collection->getOrDefault("storage_type", "")); configuration.pool_size = static_cast(named_collection->get("pool_size")); } else @@ -193,7 +193,7 @@ RedisConfiguration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr c configuration.port = parsed_host_port.second; configuration.db_index = static_cast(checkAndGetLiteralArgument(engine_args[1], "db_index")); configuration.password = checkAndGetLiteralArgument(engine_args[2], "password"); - configuration.storage_type = keyTypeToStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); + configuration.storage_type = parseStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); configuration.pool_size = static_cast(checkAndGetLiteralArgument(engine_args[4], "pool_size")); } @@ -222,7 +222,7 @@ void registerStorageRedis(StorageFactory & 
factory) args.comment); }, { - .source_access_type = AccessType::Redis, + .source_access_type = AccessType::REDIS, }); } diff --git a/tests/integration/test_storage_redis/__init__.py b/tests/integration/test_storage_redis/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_storage_redis/configs/named_collections.xml b/tests/integration/test_storage_redis/configs/named_collections.xml new file mode 100644 index 00000000000..5f7db390982 --- /dev/null +++ b/tests/integration/test_storage_redis/configs/named_collections.xml @@ -0,0 +1,12 @@ + + + + root + clickhouse + mongo1 + 27017 + test + simple_table + + + diff --git a/tests/integration/test_storage_redis/configs_secure/config.d/ssl_conf.xml b/tests/integration/test_storage_redis/configs_secure/config.d/ssl_conf.xml new file mode 100644 index 00000000000..3efe98e7045 --- /dev/null +++ b/tests/integration/test_storage_redis/configs_secure/config.d/ssl_conf.xml @@ -0,0 +1,8 @@ + + + + + none + + + diff --git a/tests/integration/test_storage_redis/test.py b/tests/integration/test_storage_redis/test.py new file mode 100644 index 00000000000..6ba5520704d --- /dev/null +++ b/tests/integration/test_storage_redis/test.py @@ -0,0 +1,426 @@ +import pymongo + +import pytest +from helpers.client import QueryRuntimeException + +from helpers.cluster import ClickHouseCluster +import datetime + + +@pytest.fixture(scope="module") +def started_cluster(request): + try: + cluster = ClickHouseCluster(__file__) + node = cluster.add_instance( + "node", + main_configs=[ + "configs_secure/config.d/ssl_conf.xml", + "configs/named_collections.xml", + ], + with_mongo=True, + with_mongo_secure=request.param, + ) + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def get_mongo_connection(started_cluster, secure=False, with_credentials=True): + connection_str = "" + if with_credentials: + connection_str = "mongodb://root:clickhouse@localhost:{}".format( + started_cluster.mongo_port + ) + else: + connection_str = "mongodb://localhost:{}".format( + started_cluster.mongo_no_cred_port + ) + if secure: + connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" + return pymongo.MongoClient(connection_str) + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_simple_select(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + node.query( + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" + ) + + assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" + assert ( + node.query("SELECT sum(key) FROM simple_mongo_table") + == str(sum(range(0, 100))) + "\n" + ) + + assert ( + node.query("SELECT data from simple_mongo_table where key = 42") + == hex(42 * 42) + "\n" + ) + node.query("DROP TABLE simple_mongo_table") + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_arrays(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + arrays_mongo_table = db["arrays_table"] + data = [] + for i in range(0, 100): + 
data.append( + { + "key": i, + "arr_int64": [-(i + 1), -(i + 2), -(i + 3)], + "arr_int32": [-(i + 1), -(i + 2), -(i + 3)], + "arr_int16": [-(i + 1), -(i + 2), -(i + 3)], + "arr_int8": [-(i + 1), -(i + 2), -(i + 3)], + "arr_uint64": [i + 1, i + 2, i + 3], + "arr_uint32": [i + 1, i + 2, i + 3], + "arr_uint16": [i + 1, i + 2, i + 3], + "arr_uint8": [i + 1, i + 2, i + 3], + "arr_float32": [i + 1.125, i + 2.5, i + 3.750], + "arr_float64": [i + 1.125, i + 2.5, i + 3.750], + "arr_date": [ + datetime.datetime(2002, 10, 27), + datetime.datetime(2024, 1, 8), + ], + "arr_datetime": [ + datetime.datetime(2023, 3, 31, 6, 3, 12), + datetime.datetime(1999, 2, 28, 12, 46, 34), + ], + "arr_string": [str(i + 1), str(i + 2), str(i + 3)], + "arr_uuid": [ + "f0e77736-91d1-48ce-8f01-15123ca1c7ed", + "93376a07-c044-4281-a76e-ad27cf6973c5", + ], + "arr_arr_bool": [ + [True, False, True], + [True], + [], + None, + [False], + [None], + ], + "arr_empty": [], + "arr_null": None, + "arr_nullable": None, + } + ) + + arrays_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + node.query( + "CREATE TABLE arrays_mongo_table(" + "key UInt64," + "arr_int64 Array(Int64)," + "arr_int32 Array(Int32)," + "arr_int16 Array(Int16)," + "arr_int8 Array(Int8)," + "arr_uint64 Array(UInt64)," + "arr_uint32 Array(UInt32)," + "arr_uint16 Array(UInt16)," + "arr_uint8 Array(UInt8)," + "arr_float32 Array(Float32)," + "arr_float64 Array(Float64)," + "arr_date Array(Date)," + "arr_datetime Array(DateTime)," + "arr_string Array(String)," + "arr_uuid Array(UUID)," + "arr_arr_bool Array(Array(Bool))," + "arr_empty Array(UInt64)," + "arr_null Array(UInt64)," + "arr_arr_null Array(Array(UInt64))," + "arr_nullable Array(Nullable(UInt64))" + ") ENGINE = MongoDB('mongo1:27017', 'test', 'arrays_table', 'root', 'clickhouse')" + ) + + assert node.query("SELECT COUNT() FROM arrays_mongo_table") == "100\n" + + for column_name in ["arr_int64", "arr_int32", "arr_int16", "arr_int8"]: + assert ( + node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42") + == "[-43,-44,-45]\n" + ) + + for column_name in ["arr_uint64", "arr_uint32", "arr_uint16", "arr_uint8"]: + assert ( + node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42") + == "[43,44,45]\n" + ) + + for column_name in ["arr_float32", "arr_float64"]: + assert ( + node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42") + == "[43.125,44.5,45.75]\n" + ) + + assert ( + node.query(f"SELECT arr_date FROM arrays_mongo_table WHERE key = 42") + == "['2002-10-27','2024-01-08']\n" + ) + + assert ( + node.query(f"SELECT arr_datetime FROM arrays_mongo_table WHERE key = 42") + == "['2023-03-31 06:03:12','1999-02-28 12:46:34']\n" + ) + + assert ( + node.query(f"SELECT arr_string FROM arrays_mongo_table WHERE key = 42") + == "['43','44','45']\n" + ) + + assert ( + node.query(f"SELECT arr_uuid FROM arrays_mongo_table WHERE key = 42") + == "['f0e77736-91d1-48ce-8f01-15123ca1c7ed','93376a07-c044-4281-a76e-ad27cf6973c5']\n" + ) + + assert ( + node.query(f"SELECT arr_arr_bool FROM arrays_mongo_table WHERE key = 42") + == "[[true,false,true],[true],[],[],[false],[false]]\n" + ) + + assert ( + node.query(f"SELECT arr_empty FROM arrays_mongo_table WHERE key = 42") == "[]\n" + ) + + assert ( + node.query(f"SELECT arr_null FROM arrays_mongo_table WHERE key = 42") == "[]\n" + ) + + assert ( + node.query(f"SELECT arr_arr_null FROM arrays_mongo_table WHERE key = 42") + == "[]\n" + ) + + assert ( + node.query(f"SELECT arr_nullable FROM arrays_mongo_table 
WHERE key = 42") + == "[]\n" + ) + + node.query("DROP TABLE arrays_mongo_table") + arrays_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_complex_data_type(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + incomplete_mongo_table = db["complex_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i), "dict": {"a": i, "b": str(i)}}) + incomplete_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + node.query( + "CREATE TABLE incomplete_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse')" + ) + + assert node.query("SELECT COUNT() FROM incomplete_mongo_table") == "100\n" + assert ( + node.query("SELECT sum(key) FROM incomplete_mongo_table") + == str(sum(range(0, 100))) + "\n" + ) + + assert ( + node.query("SELECT data from incomplete_mongo_table where key = 42") + == hex(42 * 42) + "\n" + ) + node.query("DROP TABLE incomplete_mongo_table") + incomplete_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_incorrect_data_type(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + strange_mongo_table = db["strange_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i), "aaaa": "Hello"}) + strange_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + node.query( + "CREATE TABLE strange_mongo_table(key String, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')" + ) + + with pytest.raises(QueryRuntimeException): + node.query("SELECT COUNT() FROM strange_mongo_table") + + with pytest.raises(QueryRuntimeException): + node.query("SELECT uniq(key) FROM strange_mongo_table") + + node.query( + "CREATE TABLE strange_mongo_table2(key UInt64, data String, bbbb String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')" + ) + + node.query("DROP TABLE strange_mongo_table") + node.query("DROP TABLE strange_mongo_table2") + strange_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"]) +def test_secure_connection(started_cluster): + mongo_connection = get_mongo_connection(started_cluster, secure=True) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + node.query( + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'ssl=true')" + ) + + assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" + assert ( + node.query("SELECT sum(key) FROM simple_mongo_table") + == str(sum(range(0, 100))) + "\n" + ) + + assert ( + node.query("SELECT data from simple_mongo_table where key = 42") + == hex(42 * 42) + "\n" + ) + node.query("DROP TABLE simple_mongo_table") + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_predefined_connection_configuration(started_cluster): + mongo_connection = 
get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + node.query("drop table if exists simple_mongo_table") + node.query( + "create table simple_mongo_table(key UInt64, data String) engine = MongoDB(mongo1)" + ) + assert node.query("SELECT count() FROM simple_mongo_table") == "100\n" + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_no_credentials(started_cluster): + mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) + db = mongo_connection["test"] + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + node.query( + "create table simple_mongo_table_2(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', '', '')" + ) + assert node.query("SELECT count() FROM simple_mongo_table_2") == "100\n" + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_auth_source(started_cluster): + mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) + admin_db = mongo_connection["admin"] + admin_db.add_user( + "root", + "clickhouse", + roles=[{"role": "userAdminAnyDatabase", "db": "admin"}, "readWriteAnyDatabase"], + ) + simple_mongo_table = admin_db["simple_table"] + data = [] + for i in range(0, 50): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + db = mongo_connection["test"] + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + node.query( + "create table simple_mongo_table_fail(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse')" + ) + node.query_and_get_error("SELECT count() FROM simple_mongo_table_fail") + node.query( + "create table simple_mongo_table_ok(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', 'authSource=admin')" + ) + assert node.query("SELECT count() FROM simple_mongo_table_ok") == "100\n" + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_missing_columns(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 10): + data.append({"key": i, "data": hex(i * i)}) + for i in range(0, 10): + data.append({"key": i}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + node.query("drop table if exists simple_mongo_table") + node.query( + "create table simple_mongo_table(key UInt64, data Nullable(String)) engine = MongoDB(mongo1)" + ) + result = node.query("SELECT count() FROM simple_mongo_table WHERE isNull(data)") + assert result == "10\n" + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def 
test_simple_insert_select(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] + + node = started_cluster.instances["node"] + node.query("DROP TABLE IF EXISTS simple_mongo_table") + node.query( + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" + ) + node.query("INSERT INTO simple_mongo_table SELECT 1, 'kek'") + + assert ( + node.query("SELECT data from simple_mongo_table where key = 1").strip() == "kek" + ) + node.query("INSERT INTO simple_mongo_table(key) SELECT 12") + assert int(node.query("SELECT count() from simple_mongo_table")) == 2 + assert ( + node.query("SELECT data from simple_mongo_table where key = 12").strip() == "" + ) + + node.query("DROP TABLE simple_mongo_table") + simple_mongo_table.drop() From 412d9ba259cb91243d28765326d65384a60e18cc Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 25 May 2023 12:33:07 +0800 Subject: [PATCH 172/722] add tests for redis storage --- src/Storages/RedisCommon.cpp | 15 + src/Storages/RedisCommon.h | 2 +- .../configs/named_collections.xml | 12 - tests/integration/test_storage_redis/test.py | 443 ++---------------- .../test_table_function_redis/__init__.py | 0 .../configs_secure/config.d/ssl_conf.xml | 0 .../test_table_function_redis/test.py | 276 +++++++++++ 7 files changed, 329 insertions(+), 419 deletions(-) delete mode 100644 tests/integration/test_storage_redis/configs/named_collections.xml create mode 100644 tests/integration/test_table_function_redis/__init__.py rename tests/integration/{test_storage_redis => test_table_function_redis}/configs_secure/config.d/ssl_conf.xml (100%) create mode 100644 tests/integration/test_table_function_redis/test.py diff --git a/src/Storages/RedisCommon.cpp b/src/Storages/RedisCommon.cpp index 8cc94c45dae..fc789057019 100644 --- a/src/Storages/RedisCommon.cpp +++ b/src/Storages/RedisCommon.cpp @@ -25,6 +25,21 @@ RedisConnection::~RedisConnection() pool->returnObject(std::move(client)); } +String storageTypeToKeyType(RedisStorageType type) +{ + switch (type) + { + case RedisStorageType::SIMPLE: + return "string"; + case RedisStorageType::HASH_MAP: + return "hash"; + default: + return "none"; + } + + UNREACHABLE(); +} + String serializeStorageType(RedisStorageType storage_type) { switch (storage_type) diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index 02d0b435b9d..d68f2567248 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -37,7 +37,7 @@ extern RedisColumnTypes REDIS_HASH_MAP_COLUMN_TYPES; extern RedisColumnTypes REDIS_SIMPLE_COLUMN_TYPES; /// storage type to Redis key type -String storageTypeToKeyType(RedisStorageType storage_type); +String storageTypeToKeyType(RedisStorageType type); RedisStorageType parseStorageType(const String & storage_type_str); String serializeStorageType(RedisStorageType storage_type); diff --git a/tests/integration/test_storage_redis/configs/named_collections.xml b/tests/integration/test_storage_redis/configs/named_collections.xml deleted file mode 100644 index 5f7db390982..00000000000 --- a/tests/integration/test_storage_redis/configs/named_collections.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - - root - clickhouse - mongo1 - 27017 - test - simple_table - - - diff --git a/tests/integration/test_storage_redis/test.py b/tests/integration/test_storage_redis/test.py index 6ba5520704d..4220563c229 100644 --- 
a/tests/integration/test_storage_redis/test.py +++ b/tests/integration/test_storage_redis/test.py @@ -1,426 +1,57 @@ -import pymongo - +import redis import pytest -from helpers.client import QueryRuntimeException - from helpers.cluster import ClickHouseCluster -import datetime + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance("node", with_redis=True) @pytest.fixture(scope="module") -def started_cluster(request): +def started_cluster(): try: - cluster = ClickHouseCluster(__file__) - node = cluster.add_instance( - "node", - main_configs=[ - "configs_secure/config.d/ssl_conf.xml", - "configs/named_collections.xml", - ], - with_mongo=True, - with_mongo_secure=request.param, - ) cluster.start() yield cluster finally: cluster.shutdown() -def get_mongo_connection(started_cluster, secure=False, with_credentials=True): - connection_str = "" - if with_credentials: - connection_str = "mongodb://root:clickhouse@localhost:{}".format( - started_cluster.mongo_port - ) - else: - connection_str = "mongodb://localhost:{}".format( - started_cluster.mongo_no_cred_port - ) - if secure: - connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" - return pymongo.MongoClient(connection_str) +def get_redis_connection(db_id=0): + client = redis.Redis( + host=node.name, port=started_cluster.redis_port, password="clickhouse", db=db_id + ) + return client -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_simple_select(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) +def get_address(): + return node.name + started_cluster.redis_port - node = started_cluster.instances["node"] + +@pytest.mark.parametrize("started_cluster") +def test_storage_simple_select(started_cluster): + client = get_redis_connection() + address = get_address() + + data = {} + for i in range(100): + data['key{}'.format(i)] = 'value{}'.format(i) + + client.mset(data) + + # create table node.query( - "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" + f""" + CREATE TABLE test_storage_simple_select( + k String, + v String + ) Engine=Redis('{address}', 0, '','simple', 10) + """ ) - assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" - assert ( - node.query("SELECT sum(key) FROM simple_mongo_table") - == str(sum(range(0, 100))) + "\n" - ) + select_query = "SELECT k, v from test_storage_simple_select where k='0' FORMAT Values" + assert (node.query(select_query) == "('0','0')") - assert ( - node.query("SELECT data from simple_mongo_table where key = 42") - == hex(42 * 42) + "\n" - ) - node.query("DROP TABLE simple_mongo_table") - simple_mongo_table.drop() + select_query = "SELECT * from test_storage_simple_select FORMAT Values" + assert (len(node.query(select_query)) == 100) + assert (node.query(select_query)[0] == "('0','0')") - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_arrays(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - arrays_mongo_table = db["arrays_table"] - data = [] - for i in range(0, 100): - data.append( - { - "key": i, - "arr_int64": [-(i + 1), -(i + 2), -(i + 
3)], - "arr_int32": [-(i + 1), -(i + 2), -(i + 3)], - "arr_int16": [-(i + 1), -(i + 2), -(i + 3)], - "arr_int8": [-(i + 1), -(i + 2), -(i + 3)], - "arr_uint64": [i + 1, i + 2, i + 3], - "arr_uint32": [i + 1, i + 2, i + 3], - "arr_uint16": [i + 1, i + 2, i + 3], - "arr_uint8": [i + 1, i + 2, i + 3], - "arr_float32": [i + 1.125, i + 2.5, i + 3.750], - "arr_float64": [i + 1.125, i + 2.5, i + 3.750], - "arr_date": [ - datetime.datetime(2002, 10, 27), - datetime.datetime(2024, 1, 8), - ], - "arr_datetime": [ - datetime.datetime(2023, 3, 31, 6, 3, 12), - datetime.datetime(1999, 2, 28, 12, 46, 34), - ], - "arr_string": [str(i + 1), str(i + 2), str(i + 3)], - "arr_uuid": [ - "f0e77736-91d1-48ce-8f01-15123ca1c7ed", - "93376a07-c044-4281-a76e-ad27cf6973c5", - ], - "arr_arr_bool": [ - [True, False, True], - [True], - [], - None, - [False], - [None], - ], - "arr_empty": [], - "arr_null": None, - "arr_nullable": None, - } - ) - - arrays_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE arrays_mongo_table(" - "key UInt64," - "arr_int64 Array(Int64)," - "arr_int32 Array(Int32)," - "arr_int16 Array(Int16)," - "arr_int8 Array(Int8)," - "arr_uint64 Array(UInt64)," - "arr_uint32 Array(UInt32)," - "arr_uint16 Array(UInt16)," - "arr_uint8 Array(UInt8)," - "arr_float32 Array(Float32)," - "arr_float64 Array(Float64)," - "arr_date Array(Date)," - "arr_datetime Array(DateTime)," - "arr_string Array(String)," - "arr_uuid Array(UUID)," - "arr_arr_bool Array(Array(Bool))," - "arr_empty Array(UInt64)," - "arr_null Array(UInt64)," - "arr_arr_null Array(Array(UInt64))," - "arr_nullable Array(Nullable(UInt64))" - ") ENGINE = MongoDB('mongo1:27017', 'test', 'arrays_table', 'root', 'clickhouse')" - ) - - assert node.query("SELECT COUNT() FROM arrays_mongo_table") == "100\n" - - for column_name in ["arr_int64", "arr_int32", "arr_int16", "arr_int8"]: - assert ( - node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42") - == "[-43,-44,-45]\n" - ) - - for column_name in ["arr_uint64", "arr_uint32", "arr_uint16", "arr_uint8"]: - assert ( - node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42") - == "[43,44,45]\n" - ) - - for column_name in ["arr_float32", "arr_float64"]: - assert ( - node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42") - == "[43.125,44.5,45.75]\n" - ) - - assert ( - node.query(f"SELECT arr_date FROM arrays_mongo_table WHERE key = 42") - == "['2002-10-27','2024-01-08']\n" - ) - - assert ( - node.query(f"SELECT arr_datetime FROM arrays_mongo_table WHERE key = 42") - == "['2023-03-31 06:03:12','1999-02-28 12:46:34']\n" - ) - - assert ( - node.query(f"SELECT arr_string FROM arrays_mongo_table WHERE key = 42") - == "['43','44','45']\n" - ) - - assert ( - node.query(f"SELECT arr_uuid FROM arrays_mongo_table WHERE key = 42") - == "['f0e77736-91d1-48ce-8f01-15123ca1c7ed','93376a07-c044-4281-a76e-ad27cf6973c5']\n" - ) - - assert ( - node.query(f"SELECT arr_arr_bool FROM arrays_mongo_table WHERE key = 42") - == "[[true,false,true],[true],[],[],[false],[false]]\n" - ) - - assert ( - node.query(f"SELECT arr_empty FROM arrays_mongo_table WHERE key = 42") == "[]\n" - ) - - assert ( - node.query(f"SELECT arr_null FROM arrays_mongo_table WHERE key = 42") == "[]\n" - ) - - assert ( - node.query(f"SELECT arr_arr_null FROM arrays_mongo_table WHERE key = 42") - == "[]\n" - ) - - assert ( - node.query(f"SELECT arr_nullable FROM arrays_mongo_table WHERE key = 42") - == "[]\n" - ) - - node.query("DROP TABLE 
arrays_mongo_table") - arrays_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_complex_data_type(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - incomplete_mongo_table = db["complex_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i), "dict": {"a": i, "b": str(i)}}) - incomplete_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE incomplete_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse')" - ) - - assert node.query("SELECT COUNT() FROM incomplete_mongo_table") == "100\n" - assert ( - node.query("SELECT sum(key) FROM incomplete_mongo_table") - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query("SELECT data from incomplete_mongo_table where key = 42") - == hex(42 * 42) + "\n" - ) - node.query("DROP TABLE incomplete_mongo_table") - incomplete_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_incorrect_data_type(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - strange_mongo_table = db["strange_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i), "aaaa": "Hello"}) - strange_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE strange_mongo_table(key String, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')" - ) - - with pytest.raises(QueryRuntimeException): - node.query("SELECT COUNT() FROM strange_mongo_table") - - with pytest.raises(QueryRuntimeException): - node.query("SELECT uniq(key) FROM strange_mongo_table") - - node.query( - "CREATE TABLE strange_mongo_table2(key UInt64, data String, bbbb String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')" - ) - - node.query("DROP TABLE strange_mongo_table") - node.query("DROP TABLE strange_mongo_table2") - strange_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"]) -def test_secure_connection(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, secure=True) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'ssl=true')" - ) - - assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" - assert ( - node.query("SELECT sum(key) FROM simple_mongo_table") - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query("SELECT data from simple_mongo_table where key = 42") - == hex(42 * 42) + "\n" - ) - node.query("DROP TABLE simple_mongo_table") - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_predefined_connection_configuration(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", 
"clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query("drop table if exists simple_mongo_table") - node.query( - "create table simple_mongo_table(key UInt64, data String) engine = MongoDB(mongo1)" - ) - assert node.query("SELECT count() FROM simple_mongo_table") == "100\n" - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_no_credentials(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - db = mongo_connection["test"] - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "create table simple_mongo_table_2(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', '', '')" - ) - assert node.query("SELECT count() FROM simple_mongo_table_2") == "100\n" - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_auth_source(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - admin_db = mongo_connection["admin"] - admin_db.add_user( - "root", - "clickhouse", - roles=[{"role": "userAdminAnyDatabase", "db": "admin"}, "readWriteAnyDatabase"], - ) - simple_mongo_table = admin_db["simple_table"] - data = [] - for i in range(0, 50): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - db = mongo_connection["test"] - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "create table simple_mongo_table_fail(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse')" - ) - node.query_and_get_error("SELECT count() FROM simple_mongo_table_fail") - node.query( - "create table simple_mongo_table_ok(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', 'authSource=admin')" - ) - assert node.query("SELECT count() FROM simple_mongo_table_ok") == "100\n" - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_missing_columns(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 10): - data.append({"key": i, "data": hex(i * i)}) - for i in range(0, 10): - data.append({"key": i}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query("drop table if exists simple_mongo_table") - node.query( - "create table simple_mongo_table(key UInt64, data Nullable(String)) engine = MongoDB(mongo1)" - ) - result = node.query("SELECT count() FROM simple_mongo_table WHERE isNull(data)") - assert result == "10\n" - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_simple_insert_select(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = 
mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - - node = started_cluster.instances["node"] - node.query("DROP TABLE IF EXISTS simple_mongo_table") - node.query( - "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" - ) - node.query("INSERT INTO simple_mongo_table SELECT 1, 'kek'") - - assert ( - node.query("SELECT data from simple_mongo_table where key = 1").strip() == "kek" - ) - node.query("INSERT INTO simple_mongo_table(key) SELECT 12") - assert int(node.query("SELECT count() from simple_mongo_table")) == 2 - assert ( - node.query("SELECT data from simple_mongo_table where key = 12").strip() == "" - ) - - node.query("DROP TABLE simple_mongo_table") - simple_mongo_table.drop() diff --git a/tests/integration/test_table_function_redis/__init__.py b/tests/integration/test_table_function_redis/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_storage_redis/configs_secure/config.d/ssl_conf.xml b/tests/integration/test_table_function_redis/configs_secure/config.d/ssl_conf.xml similarity index 100% rename from tests/integration/test_storage_redis/configs_secure/config.d/ssl_conf.xml rename to tests/integration/test_table_function_redis/configs_secure/config.d/ssl_conf.xml diff --git a/tests/integration/test_table_function_redis/test.py b/tests/integration/test_table_function_redis/test.py new file mode 100644 index 00000000000..e0ad71b0079 --- /dev/null +++ b/tests/integration/test_table_function_redis/test.py @@ -0,0 +1,276 @@ +import pymongo + +import pytest +from helpers.client import QueryRuntimeException + +from helpers.cluster import ClickHouseCluster + + +@pytest.fixture(scope="module") +def started_cluster(request): + try: + cluster = ClickHouseCluster(__file__) + node = cluster.add_instance( + "node", + with_mongo=True, + main_configs=[ + "configs_secure/config.d/ssl_conf.xml", + ], + with_mongo_secure=request.param, + ) + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def get_mongo_connection(started_cluster, secure=False, with_credentials=True): + connection_str = "" + if with_credentials: + connection_str = "mongodb://root:clickhouse@localhost:{}".format( + started_cluster.mongo_port + ) + else: + connection_str = "mongodb://localhost:{}".format( + started_cluster.mongo_no_cred_port + ) + if secure: + connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" + return pymongo.MongoClient(connection_str) + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_simple_select(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] + + node = started_cluster.instances["node"] + for i in range(0, 100): + node.query( + "INSERT INTO FUNCTION mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') (key, data) VALUES ({}, '{}')".format( + i, hex(i * i) + ) + ) + assert ( + node.query( + "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" + ) + == "100\n" + ) + assert ( + node.query( + "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" + ) + == str(sum(range(0, 100))) + "\n" + ) + assert ( + node.query( + 
"SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'key UInt64, data String')" + ) + == str(sum(range(0, 100))) + "\n" + ) + + assert ( + node.query( + "SELECT data from mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') where key = 42" + ) + == hex(42 * 42) + "\n" + ) + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_complex_data_type(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + incomplete_mongo_table = db["complex_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i), "dict": {"a": i, "b": str(i)}}) + incomplete_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + + assert ( + node.query( + "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')" + ) + == "100\n" + ) + assert ( + node.query( + "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')" + ) + == str(sum(range(0, 100))) + "\n" + ) + + assert ( + node.query( + "SELECT data from mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)') where key = 42" + ) + == hex(42 * 42) + "\n" + ) + incomplete_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_incorrect_data_type(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + strange_mongo_table = db["strange_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i), "aaaa": "Hello"}) + strange_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + + with pytest.raises(QueryRuntimeException): + node.query( + "SELECT aaaa FROM mongodb('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse', structure='key UInt64, data String')" + ) + + strange_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"]) +def test_secure_connection(started_cluster): + mongo_connection = get_mongo_connection(started_cluster, secure=True) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + + assert ( + node.query( + "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')" + ) + == "100\n" + ) + assert ( + node.query( + "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')" + ) + == str(sum(range(0, 100))) + "\n" + ) + assert ( + node.query( + "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'key UInt64, data String', 'ssl=true')" + ) + == str(sum(range(0, 100))) + "\n" + ) + + assert ( + node.query( + "SELECT data from mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 
structure='key UInt64, data String', options='ssl=true') where key = 42" + ) + == hex(42 * 42) + "\n" + ) + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_predefined_connection_configuration(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + assert ( + node.query( + "SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" + ) + == "100\n" + ) + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_no_credentials(started_cluster): + mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) + db = mongo_connection["test"] + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + assert ( + node.query( + "SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', '', '', structure='key UInt64, data String')" + ) + == "100\n" + ) + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_auth_source(started_cluster): + mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) + admin_db = mongo_connection["admin"] + admin_db.add_user( + "root", + "clickhouse", + roles=[{"role": "userAdminAnyDatabase", "db": "admin"}, "readWriteAnyDatabase"], + ) + simple_mongo_table = admin_db["simple_table"] + data = [] + for i in range(0, 50): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + db = mongo_connection["test"] + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i)}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + + node.query_and_get_error( + "SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" + ) + + assert ( + node.query( + "SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='authSource=admin')" + ) + == "100\n" + ) + simple_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_missing_columns(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] + data = [] + for i in range(0, 10): + data.append({"key": i, "data": hex(i * i)}) + for i in range(0, 10): + data.append({"key": i}) + simple_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + result = node.query( + "SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data Nullable(String)') WHERE isNull(data)" + ) + assert result == "10\n" + simple_mongo_table.drop() From 3281aec3357777845f3899d159c2282f07715073 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 25 May 2023 17:12:56 +0800 
Subject: [PATCH 173/722] make some redis engine args optional --- src/Storages/StorageRedis.cpp | 30 +++++++++++++++++++++--------- src/Storages/StorageRedis.h | 2 +- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 819ab01d733..0cc0e566d5c 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -166,6 +166,10 @@ SinkToStoragePtr StorageRedis::write( RedisConfiguration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr context) { RedisConfiguration configuration; + configuration.db_index = 0; + configuration.password = ""; + configuration.storage_type = RedisStorageType::SIMPLE; + configuration.pool_size = 10; if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) { @@ -175,11 +179,15 @@ RedisConfiguration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr c {}); configuration.host = named_collection->getAny({"host", "hostname"}); - configuration.port = static_cast(named_collection->get("port")); - configuration.password = named_collection->get("password"); - configuration.db_index = static_cast(named_collection->get({"db_index"})); - configuration.storage_type = parseStorageType(named_collection->getOrDefault("storage_type", "")); - configuration.pool_size = static_cast(named_collection->get("pool_size")); + configuration.port = static_cast(named_collection->getOrDefault("port", 6379)); + if (engine_args.size() > 1) + configuration.password = named_collection->get("password"); + if (engine_args.size() > 2) + configuration.db_index = static_cast(named_collection->get("db_index")); + if (engine_args.size() > 3) + configuration.storage_type = parseStorageType(named_collection->get("storage_type")); + if (engine_args.size() > 4) + configuration.pool_size = static_cast(named_collection->get("pool_size")); } else { @@ -191,10 +199,14 @@ RedisConfiguration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr c configuration.host = parsed_host_port.first; configuration.port = parsed_host_port.second; - configuration.db_index = static_cast(checkAndGetLiteralArgument(engine_args[1], "db_index")); - configuration.password = checkAndGetLiteralArgument(engine_args[2], "password"); - configuration.storage_type = parseStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); - configuration.pool_size = static_cast(checkAndGetLiteralArgument(engine_args[4], "pool_size")); + if (engine_args.size() > 1) + configuration.db_index = static_cast(checkAndGetLiteralArgument(engine_args[1], "db_index")); + if (engine_args.size() > 2) + configuration.password = checkAndGetLiteralArgument(engine_args[2], "password"); + if (engine_args.size() > 3) + configuration.storage_type = parseStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); + if (engine_args.size() > 4) + configuration.pool_size = static_cast(checkAndGetLiteralArgument(engine_args[4], "pool_size")); } if (configuration.storage_type == RedisStorageType::UNKNOWN) diff --git a/src/Storages/StorageRedis.h b/src/Storages/StorageRedis.h index 2c6c6193982..1ae90b2d1ba 100644 --- a/src/Storages/StorageRedis.h +++ b/src/Storages/StorageRedis.h @@ -7,7 +7,7 @@ namespace DB { /* Implements storage in the Redis. - * Use ENGINE = Redis(host:port, db_index, password, storage_type, conn_pool_size); + * Use ENGINE = Redis(host:port, db_index, password, storage_type, pool_size); * Read only. 
* * Note If storage_type is From 3c2b44747299b7b87d7e1e1c4219409ade1d7c34 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 25 May 2023 17:29:22 +0800 Subject: [PATCH 174/722] move get configuration to RedisCommon --- src/Storages/RedisCommon.cpp | 57 ++++++++++++++++++++++- src/Storages/RedisCommon.h | 3 ++ src/Storages/StorageRedis.cpp | 54 --------------------- src/Storages/StorageRedis.h | 2 - src/TableFunctions/TableFunctionRedis.cpp | 2 +- 5 files changed, 59 insertions(+), 59 deletions(-) diff --git a/src/Storages/RedisCommon.cpp b/src/Storages/RedisCommon.cpp index fc789057019..0d33b9c7aa3 100644 --- a/src/Storages/RedisCommon.cpp +++ b/src/Storages/RedisCommon.cpp @@ -151,13 +151,13 @@ RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisAr RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column) { - String redis_col_key = all_columns.at(0); + const String & redis_col_key = all_columns.at(0); if (column == redis_col_key) return RedisColumnType::KEY; if (storage_type == RedisStorageType::HASH_MAP) { - String redis_col_field = all_columns.at(1); + const String & redis_col_field = all_columns.at(1); if (column == redis_col_field) return RedisColumnType::FIELD; else @@ -169,6 +169,59 @@ RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & } } +RedisConfiguration getRedisConfiguration(const ASTs & engine_args, ContextPtr context) +{ + RedisConfiguration configuration; + configuration.db_index = 0; + configuration.password = ""; + configuration.storage_type = RedisStorageType::SIMPLE; + configuration.pool_size = 10; + + if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) + { + validateNamedCollection( + *named_collection, + ValidateKeysMultiset{"host", "port", "hostname", "password", "db_index", "storage_type", "pool_size"}, + {}); + + configuration.host = named_collection->getAny({"host", "hostname"}); + configuration.port = static_cast(named_collection->getOrDefault("port", 6379)); + if (engine_args.size() > 1) + configuration.password = named_collection->get("password"); + if (engine_args.size() > 2) + configuration.db_index = static_cast(named_collection->get("db_index")); + if (engine_args.size() > 3) + configuration.storage_type = parseStorageType(named_collection->get("storage_type")); + if (engine_args.size() > 4) + configuration.pool_size = static_cast(named_collection->get("pool_size")); + } + else + { + for (auto & engine_arg : engine_args) + engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); + + /// 6379 is the default Redis port. 
+ auto parsed_host_port = parseAddress(checkAndGetLiteralArgument(engine_args[0], "host:port"), 6379); + + configuration.host = parsed_host_port.first; + configuration.port = parsed_host_port.second; + if (engine_args.size() > 1) + configuration.db_index = static_cast(checkAndGetLiteralArgument(engine_args[1], "db_index")); + if (engine_args.size() > 2) + configuration.password = checkAndGetLiteralArgument(engine_args[2], "password"); + if (engine_args.size() > 3) + configuration.storage_type = parseStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); + if (engine_args.size() > 4) + configuration.pool_size = static_cast(checkAndGetLiteralArgument(engine_args[4], "pool_size")); + } + + if (configuration.storage_type == RedisStorageType::UNKNOWN) + throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "Invalid Redis storage type"); + + context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); + return configuration; +} + void checkRedisTableStructure(const ColumnsDescription & columns, const RedisConfiguration & configuration) { /// TODO check data type diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index d68f2567248..30e771d2471 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -83,6 +83,9 @@ RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisAr /// HASH_MAP: all_columns must have 2 items and the first one is Redis key the second is field, the third is value. RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column); +/// parse redis table engine/function configuration from engine_args +RedisConfiguration getRedisConfiguration(const ASTs & engine_args, ContextPtr context); + /// checking Redis table/table-function when creating void checkRedisTableStructure(const ColumnsDescription & columns, const RedisConfiguration & configuration); diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 0cc0e566d5c..5010aada8c4 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -162,60 +162,6 @@ SinkToStoragePtr StorageRedis::write( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Write is unsupported for StorageRedis"); } -/// TODO make "password", "db_index", "storage_type", "pool_size" optional -RedisConfiguration StorageRedis::getConfiguration(ASTs engine_args, ContextPtr context) -{ - RedisConfiguration configuration; - configuration.db_index = 0; - configuration.password = ""; - configuration.storage_type = RedisStorageType::SIMPLE; - configuration.pool_size = 10; - - if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) - { - validateNamedCollection( - *named_collection, - ValidateKeysMultiset{"host", "port", "hostname", "password", "db_index", "storage_type", "pool_size"}, - {}); - - configuration.host = named_collection->getAny({"host", "hostname"}); - configuration.port = static_cast(named_collection->getOrDefault("port", 6379)); - if (engine_args.size() > 1) - configuration.password = named_collection->get("password"); - if (engine_args.size() > 2) - configuration.db_index = static_cast(named_collection->get("db_index")); - if (engine_args.size() > 3) - configuration.storage_type = parseStorageType(named_collection->get("storage_type")); - if (engine_args.size() > 4) - configuration.pool_size = static_cast(named_collection->get("pool_size")); - } - else - { - for (auto & engine_arg : engine_args) - engine_arg = 
evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); - - /// 6379 is the default Redis port. - auto parsed_host_port = parseAddress(checkAndGetLiteralArgument(engine_args[0], "host:port"), 6379); - - configuration.host = parsed_host_port.first; - configuration.port = parsed_host_port.second; - if (engine_args.size() > 1) - configuration.db_index = static_cast(checkAndGetLiteralArgument(engine_args[1], "db_index")); - if (engine_args.size() > 2) - configuration.password = checkAndGetLiteralArgument(engine_args[2], "password"); - if (engine_args.size() > 3) - configuration.storage_type = parseStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); - if (engine_args.size() > 4) - configuration.pool_size = static_cast(checkAndGetLiteralArgument(engine_args[4], "pool_size")); - } - - if (configuration.storage_type == RedisStorageType::UNKNOWN) - throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "Invalid Redis storage type"); - - context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); - return configuration; -} - void registerStorageRedis(StorageFactory & factory) { factory.registerStorage( diff --git a/src/Storages/StorageRedis.h b/src/Storages/StorageRedis.h index 1ae90b2d1ba..619a83f3851 100644 --- a/src/Storages/StorageRedis.h +++ b/src/Storages/StorageRedis.h @@ -40,8 +40,6 @@ public: const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; - static RedisConfiguration getConfiguration(ASTs engine_args, ContextPtr context); - private: StorageID table_id; RedisConfiguration configuration; diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp index 9e4a39b1b85..cd08837aae2 100644 --- a/src/TableFunctions/TableFunctionRedis.cpp +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -74,7 +74,7 @@ void TableFunctionRedis::parseArguments(const ASTPtr & ast_function, ContextPtr "Table function 'Redis' requires from 5 parameters: " "redis('host:port', db_index, 'password', 'storage_type', 'pool_size')"); } - configuration = StorageRedis::getConfiguration(args, context); + configuration = getRedisConfiguration(args, context); } From ff961834d674a8ce099da43811eb2eedf3b0d011 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 25 May 2023 17:29:49 +0800 Subject: [PATCH 175/722] add tests to redis engine --- tests/integration/test_storage_redis/test.py | 151 +++++++++++++++++-- 1 file changed, 138 insertions(+), 13 deletions(-) diff --git a/tests/integration/test_storage_redis/test.py b/tests/integration/test_storage_redis/test.py index 4220563c229..af01c2e9ff1 100644 --- a/tests/integration/test_storage_redis/test.py +++ b/tests/integration/test_storage_redis/test.py @@ -1,6 +1,11 @@ +import time + import redis import pytest + +from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) @@ -18,40 +23,160 @@ def started_cluster(): def get_redis_connection(db_id=0): client = redis.Redis( - host=node.name, port=started_cluster.redis_port, password="clickhouse", db=db_id + host='localhost', port=cluster.redis_port, password="clickhouse", db=db_id ) return client -def get_address(): - return node.name + started_cluster.redis_port +def get_address_for_ch(): + return cluster.redis_host + ':6379' + + +def drop_table(table): + node.query(f"DROP TABLE IF EXISTS {table} SYNC"); -@pytest.mark.parametrize("started_cluster") def 
test_storage_simple_select(started_cluster): client = get_redis_connection() - address = get_address() + address = get_address_for_ch() + + # clean all + client.flushall() + drop_table('test_storage_simple_select') data = {} for i in range(100): - data['key{}'.format(i)] = 'value{}'.format(i) + data[str(i)] = str(i) client.mset(data) + client.close() # create table node.query( f""" CREATE TABLE test_storage_simple_select( k String, - v String - ) Engine=Redis('{address}', 0, '','simple', 10) + v UInt32 + ) Engine=Redis('{address}', 0, 'clickhouse') """ ) - select_query = "SELECT k, v from test_storage_simple_select where k='0' FORMAT Values" - assert (node.query(select_query) == "('0','0')") + response = TSV.toMat(node.query("SELECT k, v from test_storage_simple_select where k='0' FORMAT TSV")) + assert (len(response) == 1) + assert (response[0] == ['0', '0']) - select_query = "SELECT * from test_storage_simple_select FORMAT Values" - assert (len(node.query(select_query)) == 100) - assert (node.query(select_query)[0] == "('0','0')") + response = TSV.toMat(node.query("SELECT * from test_storage_simple_select order by k FORMAT TSV")) + assert (len(response) == 100) + assert (response[0] == ['0', '0']) + + +def test_storage_hash_map_select(started_cluster): + client = get_redis_connection() + address = get_address_for_ch() + + # clean all + client.flushall() + drop_table('test_storage_hash_map_select') + + key = 'k' + data = {} + for i in range(100): + data[str(i)] = str(i) + + client.hset(key, mapping=data) + client.close() + + # create table + node.query( + f""" + CREATE TABLE test_storage_hash_map_select( + k String, + f String, + v UInt32 + ) Engine=Redis('{address}', 0, 'clickhouse','hash_map') + """ + ) + + response = TSV.toMat(node.query("SELECT k, f, v from test_storage_hash_map_select where f='0' FORMAT TSV")) + assert (len(response) == 1) + assert (response[0] == ['k', '0', '0']) + + response = TSV.toMat(node.query("SELECT * from test_storage_hash_map_select FORMAT TSV")) + assert (len(response) == 100) + assert (response[0] == ['k', '0', '0']) + + +def test_create_table(started_cluster): + address = get_address_for_ch() + + # simple creation + drop_table('test_create_table') + node.query( + f""" + CREATE TABLE test_create_table( + k String, + v UInt32 + ) Engine=Redis('{address}') + """ + ) + + # simple creation with full engine args + drop_table('test_create_table') + node.query( + f""" + CREATE TABLE test_create_table( + k String, + v UInt32 + ) Engine=Redis('{address}', 0, 'clickhouse','simple', 10) + """ + ) + + drop_table('test_create_table') + node.query( + f""" + CREATE TABLE test_create_table( + k String, + f String, + v UInt32 + ) Engine=Redis('{address}', 0, 'clickhouse','hash_map', 10) + """ + ) + + # illegal columns + drop_table('test_create_table') + with pytest.raises(QueryRuntimeException): + node.query( + f""" + CREATE TABLE test_create_table( + k String, + f String, + v UInt32 + ) Engine=Redis('{address}', 0, 'clickhouse','simple', 10) + """ + ) + + drop_table('test_create_table') + with pytest.raises(QueryRuntimeException): + node.query( + f""" + CREATE TABLE test_create_table( + k String, + f String, + v UInt32, + n UInt32 + ) Engine=Redis('{address}', 0, 'clickhouse','hash_map', 10) + """ + ) + + # illegal storage type + drop_table('test_create_table') + with pytest.raises(QueryRuntimeException): + node.query( + f""" + CREATE TABLE test_create_table( + k String, + v UInt32 + ) Engine=Redis('{address}', 0, 'clickhouse','not_exist', 10) + """ + ) From 
8c822a7edfb919844a94a46485dfeebbf9caad57 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Fri, 26 May 2023 10:34:37 +0800 Subject: [PATCH 176/722] add tests to redis engine --- src/Storages/RedisCommon.cpp | 6 +- src/Storages/RedisCommon.h | 2 +- src/Storages/StorageRedis.cpp | 2 +- src/TableFunctions/TableFunctionRedis.cpp | 40 +- src/TableFunctions/TableFunctionRedis.h | 1 + tests/integration/test_storage_redis/test.py | 9 +- .../configs_secure/config.d/ssl_conf.xml | 8 - .../test_table_function_redis/test.py | 385 ++++++------------ 8 files changed, 166 insertions(+), 287 deletions(-) delete mode 100644 tests/integration/test_table_function_redis/configs_secure/config.d/ssl_conf.xml diff --git a/src/Storages/RedisCommon.cpp b/src/Storages/RedisCommon.cpp index 0d33b9c7aa3..86312f49f41 100644 --- a/src/Storages/RedisCommon.cpp +++ b/src/Storages/RedisCommon.cpp @@ -1,5 +1,9 @@ #include "RedisCommon.h" #include +#include +#include +#include +#include namespace DB { @@ -169,7 +173,7 @@ RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & } } -RedisConfiguration getRedisConfiguration(const ASTs & engine_args, ContextPtr context) +RedisConfiguration getRedisConfiguration(ASTs & engine_args, ContextPtr context) { RedisConfiguration configuration; configuration.db_index = 0; diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index 30e771d2471..c378006f7a5 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -84,7 +84,7 @@ RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisAr RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column); /// parse redis table engine/function configuration from engine_args -RedisConfiguration getRedisConfiguration(const ASTs & engine_args, ContextPtr context); +RedisConfiguration getRedisConfiguration(ASTs & engine_args, ContextPtr context); /// checking Redis table/table-function when creating void checkRedisTableStructure(const ColumnsDescription & columns, const RedisConfiguration & configuration); diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 5010aada8c4..cd1cd06b4c4 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -168,7 +168,7 @@ void registerStorageRedis(StorageFactory & factory) "Redis", [](const StorageFactory::Arguments & args) { - auto configuration = StorageRedis::getConfiguration(args.engine_args, args.getLocalContext()); + auto configuration = getRedisConfiguration(args.engine_args, args.getLocalContext()); checkRedisTableStructure(args.columns, configuration); diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp index cd08837aae2..f90a30af8a1 100644 --- a/src/TableFunctions/TableFunctionRedis.cpp +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -11,6 +11,7 @@ #include #include #include +#include namespace DB @@ -43,19 +44,6 @@ StoragePtr TableFunctionRedis::executeImpl( /// TODO support user customized table structure ColumnsDescription TableFunctionRedis::getActualTableStructure(ContextPtr context) const { - /// generate table structure by storage type. 
- String structure; - switch (configuration->storage_type) - { - case RedisStorageType::SIMPLE: - structure = "key String, value String"; - break; - case RedisStorageType::HASH_MAP: - structure = "key String, field String, value String"; - break; - case RedisStorageType::UNKNOWN: - throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "Invalid Redis storage type."); - } return parseColumnsListFromString(structure, context); } @@ -66,15 +54,25 @@ void TableFunctionRedis::parseArguments(const ASTPtr & ast_function, ContextPtr throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table function 'redis' must have arguments."); ASTs & args = func_args.arguments->children; - - if (args.size() != 5) - { - throw Exception( - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Table function 'Redis' requires from 5 parameters: " - "redis('host:port', db_index, 'password', 'storage_type', 'pool_size')"); - } configuration = getRedisConfiguration(args, context); + + if (args.size() > 5) + structure = checkAndGetLiteralArgument(args[5], "structure"); + + if (structure.empty()) + { + switch (configuration->storage_type) + { + case RedisStorageType::SIMPLE: + structure = "key String, value String"; + break; + case RedisStorageType::HASH_MAP: + structure = "key String, field String, value String"; + break; + case RedisStorageType::UNKNOWN: + throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "Invalid Redis storage type."); + } + } } diff --git a/src/TableFunctions/TableFunctionRedis.h b/src/TableFunctions/TableFunctionRedis.h index 5c6f483fda7..1328d54a2a6 100644 --- a/src/TableFunctions/TableFunctionRedis.h +++ b/src/TableFunctions/TableFunctionRedis.h @@ -24,6 +24,7 @@ private: void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; std::optional configuration; + String structure; }; } diff --git a/tests/integration/test_storage_redis/test.py b/tests/integration/test_storage_redis/test.py index af01c2e9ff1..d4fbdaddd7f 100644 --- a/tests/integration/test_storage_redis/test.py +++ b/tests/integration/test_storage_redis/test.py @@ -1,5 +1,6 @@ import time +## sudo -H pip install redis import redis import pytest @@ -61,11 +62,11 @@ def test_storage_simple_select(started_cluster): """ ) - response = TSV.toMat(node.query("SELECT k, v from test_storage_simple_select where k='0' FORMAT TSV")) + response = TSV.toMat(node.query("SELECT k, v FROM test_storage_simple_select WHERE k='0' FORMAT TSV")) assert (len(response) == 1) assert (response[0] == ['0', '0']) - response = TSV.toMat(node.query("SELECT * from test_storage_simple_select order by k FORMAT TSV")) + response = TSV.toMat(node.query("SELECT * FROM test_storage_simple_select ORDER BY k FORMAT TSV")) assert (len(response) == 100) assert (response[0] == ['0', '0']) @@ -97,11 +98,11 @@ def test_storage_hash_map_select(started_cluster): """ ) - response = TSV.toMat(node.query("SELECT k, f, v from test_storage_hash_map_select where f='0' FORMAT TSV")) + response = TSV.toMat(node.query("SELECT k, f, v FROM test_storage_hash_map_select WHERE f='0' FORMAT TSV")) assert (len(response) == 1) assert (response[0] == ['k', '0', '0']) - response = TSV.toMat(node.query("SELECT * from test_storage_hash_map_select FORMAT TSV")) + response = TSV.toMat(node.query("SELECT * FROM test_storage_hash_map_select ORDER BY f FORMAT TSV")) assert (len(response) == 100) assert (response[0] == ['k', '0', '0']) diff --git a/tests/integration/test_table_function_redis/configs_secure/config.d/ssl_conf.xml 
b/tests/integration/test_table_function_redis/configs_secure/config.d/ssl_conf.xml deleted file mode 100644 index 3efe98e7045..00000000000 --- a/tests/integration/test_table_function_redis/configs_secure/config.d/ssl_conf.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - none - - - diff --git a/tests/integration/test_table_function_redis/test.py b/tests/integration/test_table_function_redis/test.py index e0ad71b0079..e53022095c9 100644 --- a/tests/integration/test_table_function_redis/test.py +++ b/tests/integration/test_table_function_redis/test.py @@ -1,276 +1,159 @@ -import pymongo +import time +import redis import pytest -from helpers.client import QueryRuntimeException +from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance("node", with_redis=True) @pytest.fixture(scope="module") -def started_cluster(request): +def started_cluster(): try: - cluster = ClickHouseCluster(__file__) - node = cluster.add_instance( - "node", - with_mongo=True, - main_configs=[ - "configs_secure/config.d/ssl_conf.xml", - ], - with_mongo_secure=request.param, - ) cluster.start() yield cluster finally: cluster.shutdown() -def get_mongo_connection(started_cluster, secure=False, with_credentials=True): - connection_str = "" - if with_credentials: - connection_str = "mongodb://root:clickhouse@localhost:{}".format( - started_cluster.mongo_port - ) - else: - connection_str = "mongodb://localhost:{}".format( - started_cluster.mongo_no_cred_port - ) - if secure: - connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" - return pymongo.MongoClient(connection_str) - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_simple_select(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - - node = started_cluster.instances["node"] - for i in range(0, 100): - node.query( - "INSERT INTO FUNCTION mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') (key, data) VALUES ({}, '{}')".format( - i, hex(i * i) - ) - ) - assert ( - node.query( - "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) - == "100\n" +def get_redis_connection(db_id=0): + client = redis.Redis( + host='localhost', port=cluster.redis_port, password="clickhouse", db=db_id ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) - == str(sum(range(0, 100))) + "\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'key UInt64, data String')" - ) - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query( - "SELECT data from mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') where key = 42" - ) - == hex(42 * 42) + "\n" - ) - simple_mongo_table.drop() + return client -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_complex_data_type(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - incomplete_mongo_table = db["complex_table"] - data = 
[] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i), "dict": {"a": i, "b": str(i)}}) - incomplete_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - - assert ( - node.query( - "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')" - ) - == "100\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')" - ) - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query( - "SELECT data from mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)') where key = 42" - ) - == hex(42 * 42) + "\n" - ) - incomplete_mongo_table.drop() +def get_address_for_ch(): + return cluster.redis_host + ':6379' -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_incorrect_data_type(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - strange_mongo_table = db["strange_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i), "aaaa": "Hello"}) - strange_mongo_table.insert_many(data) +def test_storage_simple(started_cluster): + client = get_redis_connection() + address = get_address_for_ch() - node = started_cluster.instances["node"] + # clean all + client.flushall() + data = {} + for i in range(100): + data[str(i)] = str(i) + + client.mset(data) + client.close() + + response = TSV.toMat(node.query( + f""" + SELECT + key, value + FROM + redis('{address}', 0, 'clickhouse') + WHERE + key='0' + FORMAT TSV + """)) + + assert (len(response) == 1) + assert (response[0] == ['0', '0']) + + response = TSV.toMat(node.query( + f""" + SELECT + * + FROM + redis('{address}', 0, 'clickhouse') + ORDER BY + key + FORMAT TSV + """)) + + assert (len(response) == 100) + assert (response[0] == ['0', '0']) + + +def test_storage_hash_map(started_cluster): + client = get_redis_connection() + address = get_address_for_ch() + + # clean all + client.flushall() + + key = 'k' + data = {} + for i in range(100): + data[str(i)] = str(i) + + client.hset(key, mapping=data) + client.close() + + response = TSV.toMat(node.query( + f""" + SELECT + key, field, value + FROM + redis('{address}', 0, 'clickhouse','hash_map') + WHERE + field='0' + FORMAT TSV + """)) + + assert (len(response) == 1) + assert (response[0] == ['k', '0', '0']) + + response = TSV.toMat(node.query( + f""" + SELECT + * + FROM + redis('{address}', 0, 'clickhouse','hash_map') + ORDER BY + field + FORMAT TSV + """)) + + assert (len(response) == 100) + assert (response[0] == ['k', '0', '0']) + + +def test_customized_table_structure(started_cluster): + address = get_address_for_ch() + + node.query( + f""" + SELECT + * + FROM + redis('{address}', 0, 'clickhouse', "simple", 10, "k String, v UInt8") + """) + + node.query( + f""" + SELECT + * + FROM + redis('{address}', 0, 'clickhouse', "hash_map", 10, "k String, f UInt8, v String") + """) + + # illegal columns with pytest.raises(QueryRuntimeException): node.query( - "SELECT aaaa FROM mongodb('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) + f""" + SELECT + * + FROM + redis('{address}', 0, 'clickhouse', "hash_map", 10, "k String, v String") + """) - 
strange_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"]) -def test_secure_connection(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, secure=True) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - - assert ( + # illegal data type + with pytest.raises(QueryRuntimeException): node.query( - "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')" - ) - == "100\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')" - ) - == str(sum(range(0, 100))) + "\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'key UInt64, data String', 'ssl=true')" - ) - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query( - "SELECT data from mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true') where key = 42" - ) - == hex(42 * 42) + "\n" - ) - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_predefined_connection_configuration(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - assert ( - node.query( - "SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) - == "100\n" - ) - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_no_credentials(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - db = mongo_connection["test"] - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - assert ( - node.query( - "SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', '', '', structure='key UInt64, data String')" - ) - == "100\n" - ) - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_auth_source(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - admin_db = mongo_connection["admin"] - admin_db.add_user( - "root", - "clickhouse", - roles=[{"role": "userAdminAnyDatabase", "db": "admin"}, "readWriteAnyDatabase"], - ) - simple_mongo_table = admin_db["simple_table"] - data = [] - for i in range(0, 50): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - db = mongo_connection["test"] - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = 
started_cluster.instances["node"] - - node.query_and_get_error( - "SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) - - assert ( - node.query( - "SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='authSource=admin')" - ) - == "100\n" - ) - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_missing_columns(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 10): - data.append({"key": i, "data": hex(i * i)}) - for i in range(0, 10): - data.append({"key": i}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - result = node.query( - "SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data Nullable(String)') WHERE isNull(data)" - ) - assert result == "10\n" - simple_mongo_table.drop() + f""" + SELECT + * + FROM + redis('{address}', 0, 'clickhouse', "simple", 10, "k Ss, v String") + """) From 357df40c8f6af947223fc54360340e86016e4eae Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Mon, 29 May 2023 15:22:29 +0800 Subject: [PATCH 177/722] fix tests --- src/Dictionaries/RedisSource.cpp | 8 ++- src/Storages/StorageRedis.cpp | 14 ++++- tests/integration/test_storage_redis/test.py | 1 + .../test_table_function_redis/test.py | 60 +++++++++++++++++++ 4 files changed, 78 insertions(+), 5 deletions(-) diff --git a/src/Dictionaries/RedisSource.cpp b/src/Dictionaries/RedisSource.cpp index 27125077c10..261242c627f 100644 --- a/src/Dictionaries/RedisSource.cpp +++ b/src/Dictionaries/RedisSource.cpp @@ -27,14 +27,18 @@ namespace DB const RedisStorageType & storage_type_, const DB::Block & sample_block, size_t max_block_size_) - : ISource(sample_block), max_block_size(max_block_size_)// TODO + : ISource(sample_block) + , connection(std::move(connection_)) + , keys(keys_) + , storage_type(storage_type_) + , max_block_size{max_block_size_} { RedisColumnTypes columns_types_; if (storage_type_ == RedisStorageType::HASH_MAP) columns_types_ = REDIS_HASH_MAP_COLUMN_TYPES; else columns_types_ = REDIS_SIMPLE_COLUMN_TYPES; - RedisSource(std::move(connection_), keys_, storage_type_, sample_block, columns_types_, max_block_size_); + description.init(sample_block); } RedisSource::RedisSource( diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index cd1cd06b4c4..e670012d060 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -75,9 +75,8 @@ Pipe StorageRedis::read( if (all_scan) { + /// TODO use scan to avoid performance issue RedisCommand command_for_keys("KEYS"); - /// generate keys by table name prefix -// command_for_keys << table_id.getTableName() + ":" + serializeStorageType(configuration.storage_type) + ":*"; command_for_keys << "*"; auto all_keys = connection->client->execute(command_for_keys); @@ -136,7 +135,16 @@ Pipe StorageRedis::read( RedisArray keys; for (size_t pos=begin; posat(pos).get()); + { + if (WhichDataType(*primary_key_data_type).isStringOrFixedString()) + { + keys.add(fields->at(pos).get()); + } + else + { + keys.add(toString(fields->at(pos))); /// TODO redis source deserialize + } + } if (configuration.storage_type == RedisStorageType::HASH_MAP) { diff 
--git a/tests/integration/test_storage_redis/test.py b/tests/integration/test_storage_redis/test.py index d4fbdaddd7f..19e7b4e5340 100644 --- a/tests/integration/test_storage_redis/test.py +++ b/tests/integration/test_storage_redis/test.py @@ -181,3 +181,4 @@ def test_create_table(started_cluster): """ ) + diff --git a/tests/integration/test_table_function_redis/test.py b/tests/integration/test_table_function_redis/test.py index e53022095c9..7c342690027 100644 --- a/tests/integration/test_table_function_redis/test.py +++ b/tests/integration/test_table_function_redis/test.py @@ -157,3 +157,63 @@ def test_customized_table_structure(started_cluster): FROM redis('{address}', 0, 'clickhouse', "simple", 10, "k Ss, v String") """) + + +def test_data_type(started_cluster): + client = get_redis_connection() + address = get_address_for_ch() + + # string + client.flushall() + client.set('0', '0') + + response = TSV.toMat(node.query( + f""" + SELECT + * + FROM + redis('{address}', 0, 'clickhouse', 'simple', 10, "k String, v UInt8") + WHERE + k='0' + FORMAT TSV + """)) + + assert (len(response) == 1) + assert (response[0] == ['0', '0']) + + # number + client.flushall() + client.set('0', '0') + + response = TSV.toMat(node.query( + f""" + SELECT + * + FROM + redis('{address}', 0, 'clickhouse', 'simple', 10, "k UInt8, v UInt8") + WHERE + k=0 + FORMAT TSV + """)) + + assert (len(response) == 1) + assert (response[0] == ['0', '0']) + + # datetime + client.flushall() + client.set('2023-06-01 00:00:00', '0') + + response = TSV.toMat(node.query( + f""" + SELECT + * + FROM + redis('{address}', 0, 'clickhouse', 'simple', 10, "k DateTime, v UInt8") + WHERE + k='2023-06-01 00:00:00' + FORMAT TSV + """)) + + # TODO open + # assert (len(response) == 1) + # assert (response[0] == ['2023-06-01 00:00:00', '0']) From f4f939162dcd3b6814a9e4a288f5e0a0538ae283 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Tue, 30 May 2023 20:31:23 +0800 Subject: [PATCH 178/722] new redis engine schema design --- src/Dictionaries/RedisDictionarySource.cpp | 4 +- src/Dictionaries/RedisSource.cpp | 58 +-- src/Dictionaries/RedisSource.h | 10 - src/Storages/KVStorageUtils.cpp | 2 +- src/Storages/RedisCommon.cpp | 91 +--- src/Storages/RedisCommon.h | 33 +- src/Storages/StorageFactory.h | 3 +- src/Storages/StorageRedis.cpp | 425 +++++++++++++----- src/Storages/StorageRedis.h | 43 +- src/TableFunctions/TableFunctionRedis.cpp | 60 ++- src/TableFunctions/TableFunctionRedis.h | 6 +- tests/integration/test_storage_redis/test.py | 101 +++-- .../test_table_function_redis/test.py | 141 +++--- 13 files changed, 535 insertions(+), 442 deletions(-) diff --git a/src/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp index d28b7528d23..c52c3425d1b 100644 --- a/src/Dictionaries/RedisDictionarySource.cpp +++ b/src/Dictionaries/RedisDictionarySource.cpp @@ -40,10 +40,10 @@ namespace DB { .host = host, .port = static_cast(port), - .db_index = config.getUInt(redis_config_prefix + ".db_index", 0), + .db_index = config.getUInt(redis_config_prefix + ".db_index", DEFAULT_REDIS_DB_INDEX), .password = config.getString(redis_config_prefix + ".password", ""), .storage_type = parseStorageType(config.getString(redis_config_prefix + ".storage_type", "")), - .pool_size = config.getUInt(redis_config_prefix + ".pool_size", 16), + .pool_size = config.getUInt(redis_config_prefix + ".pool_size", DEFAULT_REDIS_POOL_SIZE), }; return std::make_unique(dict_struct, configuration, sample_block); diff --git a/src/Dictionaries/RedisSource.cpp 
b/src/Dictionaries/RedisSource.cpp index 261242c627f..5d8a475cad4 100644 --- a/src/Dictionaries/RedisSource.cpp +++ b/src/Dictionaries/RedisSource.cpp @@ -21,7 +21,7 @@ namespace DB } - RedisSource::RedisSource( + RedisSource::RedisSource( RedisConnectionPtr connection_, const RedisArray & keys_, const RedisStorageType & storage_type_, @@ -32,28 +32,6 @@ namespace DB , keys(keys_) , storage_type(storage_type_) , max_block_size{max_block_size_} - { - RedisColumnTypes columns_types_; - if (storage_type_ == RedisStorageType::HASH_MAP) - columns_types_ = REDIS_HASH_MAP_COLUMN_TYPES; - else - columns_types_ = REDIS_SIMPLE_COLUMN_TYPES; - description.init(sample_block); - } - - RedisSource::RedisSource( - RedisConnectionPtr connection_, - const RedisArray & keys_, - const RedisStorageType & storage_type_, - const DB::Block & sample_block, - const RedisColumnTypes & columns_types_, - size_t max_block_size_) - : ISource(sample_block) - , connection(std::move(connection_)) - , keys(keys_) - , storage_type(storage_type_) - , max_block_size{max_block_size_} - , columns_types(columns_types_) { description.init(sample_block); } @@ -192,27 +170,15 @@ namespace DB const auto & primary_key = keys_array.get(0); for (size_t i = 0; i < values.size(); ++i) { - const auto & value = values.get(i); const auto & secondary_key = keys_array.get(i + 1); + const auto & value = values.get(i); /// null string means 'no value for requested key' if (!value.isNull()) { - for (size_t idx=0; idx serializeKeysToRawString(const ColumnWithTypeAndName & return result; } -/// In current implementation rocks db can have key with only one column. +/// In current implementation rocks db/redis can have key with only one column. size_t getPrimaryKeyPos(const Block & header, const Names & primary_key) { if (primary_key.size() != 1) diff --git a/src/Storages/RedisCommon.cpp b/src/Storages/RedisCommon.cpp index 86312f49f41..ba7c02fdac5 100644 --- a/src/Storages/RedisCommon.cpp +++ b/src/Storages/RedisCommon.cpp @@ -1,9 +1,7 @@ #include "RedisCommon.h" #include #include -#include #include -#include namespace DB { @@ -13,12 +11,10 @@ namespace ErrorCodes extern const int INVALID_REDIS_TABLE_STRUCTURE; extern const int INTERNAL_REDIS_ERROR; extern const int TIMEOUT_EXCEEDED; + extern const int BAD_ARGUMENTS; extern const int INVALID_REDIS_STORAGE_TYPE; } -RedisColumnTypes REDIS_HASH_MAP_COLUMN_TYPES = {RedisColumnType::KEY, RedisColumnType::FIELD, RedisColumnType::VALUE}; -RedisColumnTypes REDIS_SIMPLE_COLUMN_TYPES = {RedisColumnType::KEY, RedisColumnType::VALUE}; - RedisConnection::RedisConnection(RedisPoolPtr pool_, RedisClientPtr client_) : pool(std::move(pool_)), client(std::move(client_)) { @@ -153,89 +149,4 @@ RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisAr return hkeys; } -RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column) -{ - const String & redis_col_key = all_columns.at(0); - if (column == redis_col_key) - return RedisColumnType::KEY; - - if (storage_type == RedisStorageType::HASH_MAP) - { - const String & redis_col_field = all_columns.at(1); - if (column == redis_col_field) - return RedisColumnType::FIELD; - else - return RedisColumnType::VALUE; - } - else - { - return RedisColumnType::VALUE; - } -} - -RedisConfiguration getRedisConfiguration(ASTs & engine_args, ContextPtr context) -{ - RedisConfiguration configuration; - configuration.db_index = 0; - configuration.password = ""; - configuration.storage_type = 
RedisStorageType::SIMPLE; - configuration.pool_size = 10; - - if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) - { - validateNamedCollection( - *named_collection, - ValidateKeysMultiset{"host", "port", "hostname", "password", "db_index", "storage_type", "pool_size"}, - {}); - - configuration.host = named_collection->getAny({"host", "hostname"}); - configuration.port = static_cast(named_collection->getOrDefault("port", 6379)); - if (engine_args.size() > 1) - configuration.password = named_collection->get("password"); - if (engine_args.size() > 2) - configuration.db_index = static_cast(named_collection->get("db_index")); - if (engine_args.size() > 3) - configuration.storage_type = parseStorageType(named_collection->get("storage_type")); - if (engine_args.size() > 4) - configuration.pool_size = static_cast(named_collection->get("pool_size")); - } - else - { - for (auto & engine_arg : engine_args) - engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); - - /// 6379 is the default Redis port. - auto parsed_host_port = parseAddress(checkAndGetLiteralArgument(engine_args[0], "host:port"), 6379); - - configuration.host = parsed_host_port.first; - configuration.port = parsed_host_port.second; - if (engine_args.size() > 1) - configuration.db_index = static_cast(checkAndGetLiteralArgument(engine_args[1], "db_index")); - if (engine_args.size() > 2) - configuration.password = checkAndGetLiteralArgument(engine_args[2], "password"); - if (engine_args.size() > 3) - configuration.storage_type = parseStorageType(checkAndGetLiteralArgument(engine_args[3], "storage_type")); - if (engine_args.size() > 4) - configuration.pool_size = static_cast(checkAndGetLiteralArgument(engine_args[4], "pool_size")); - } - - if (configuration.storage_type == RedisStorageType::UNKNOWN) - throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "Invalid Redis storage type"); - - context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); - return configuration; -} - -void checkRedisTableStructure(const ColumnsDescription & columns, const RedisConfiguration & configuration) -{ - /// TODO check data type - if (configuration.storage_type == RedisStorageType::HASH_MAP && columns.size() != 3) - throw Exception(ErrorCodes::INVALID_REDIS_TABLE_STRUCTURE, - "Redis hash table must have 3 columns, but found {}", columns.size()); - - if (configuration.storage_type == RedisStorageType::SIMPLE && columns.size() != 2) - throw Exception(ErrorCodes::INVALID_REDIS_TABLE_STRUCTURE, - "Redis string table must have 2 columns, but found {}", columns.size()); -} - } diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index c378006f7a5..cb551a9a11a 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB { @@ -21,20 +22,6 @@ enum class RedisStorageType UNKNOWN }; -enum class RedisColumnType -{ - /// Redis key - KEY, - /// Redis hash field - FIELD, - /// Redis value - VALUE -}; - -using RedisColumnTypes = std::vector; - -extern RedisColumnTypes REDIS_HASH_MAP_COLUMN_TYPES; -extern RedisColumnTypes REDIS_SIMPLE_COLUMN_TYPES; /// storage type to Redis key type String storageTypeToKeyType(RedisStorageType type); @@ -52,6 +39,10 @@ struct RedisConfiguration uint32_t pool_size; }; +static uint32_t DEFAULT_REDIS_DB_INDEX = 0; +static uint32_t DEFAULT_REDIS_POOL_SIZE = 16; +static String DEFAULT_REDIS_PASSWORD; + using RedisArray = Poco::Redis::Array; using 
RedisArrayPtr = std::shared_ptr; using RedisCommand = Poco::Redis::Command; @@ -61,6 +52,9 @@ using RedisClientPtr = std::unique_ptr; using RedisPool = BorrowedObjectPool; using RedisPoolPtr = std::shared_ptr; +/// Redis scan interator +using RedisIterator = int64_t; + struct RedisConnection { RedisConnection(RedisPoolPtr pool_, RedisClientPtr client_); @@ -78,15 +72,4 @@ RedisConnectionPtr getRedisConnection(RedisPoolPtr pool, const RedisConfiguratio /// eg: keys -> [key1, key2] and get [[key1, field1, field2], [key2, field1, field2]] RedisArrayPtr getRedisHashMapKeys(const RedisConnectionPtr & connection, RedisArray & keys); -/// Get RedisColumnType of a column, If storage_type is -/// SIMPLE: all_columns must have 2 items and the first one is Redis key the second one is value -/// HASH_MAP: all_columns must have 2 items and the first one is Redis key the second is field, the third is value. -RedisColumnType getRedisColumnType(RedisStorageType storage_type, const Names & all_columns, const String & column); - -/// parse redis table engine/function configuration from engine_args -RedisConfiguration getRedisConfiguration(ASTs & engine_args, ContextPtr context); - -/// checking Redis table/table-function when creating -void checkRedisTableStructure(const ColumnsDescription & columns, const RedisConfiguration & configuration); - } diff --git a/src/Storages/StorageFactory.h b/src/Storages/StorageFactory.h index 77309541374..f1c1c237393 100644 --- a/src/Storages/StorageFactory.h +++ b/src/Storages/StorageFactory.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -14,8 +15,6 @@ namespace DB { class Context; -class ASTCreateQuery; -class ASTStorage; struct StorageID; diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index e670012d060..ceed448b4a7 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -1,11 +1,9 @@ #include #include #include -#include #include #include -#include #include #include #include @@ -13,32 +11,142 @@ #include #include #include +#include +#include +#include +#include +#include namespace DB { namespace ErrorCodes { - extern const int INVALID_REDIS_STORAGE_TYPE; extern const int NOT_IMPLEMENTED; } +class RedisDataSource : public ISource +{ +public: + RedisDataSource( + StorageRedis & storage_, + const Block & header, + FieldVectorPtr keys_, + FieldVector::const_iterator begin_, + FieldVector::const_iterator end_, + const size_t max_block_size_) + : ISource(header) + , storage(storage_) + , primary_key_pos(getPrimaryKeyPos(header, storage.getPrimaryKey())) + , keys(keys_) + , begin(begin_) + , end(end_) + , it(begin) + , max_block_size(max_block_size_) + { + } + + RedisDataSource( + StorageRedis & storage_, + const Block & header, + const size_t max_block_size_, + const String & pattern_ = "*") + : ISource(header) + , storage(storage_) + , primary_key_pos(getPrimaryKeyPos(header, storage.getPrimaryKey())) + , iterator(-1) + , pattern(pattern_) + , max_block_size(max_block_size_) + { + } + + String getName() const override { return storage.getName(); } + + Chunk generate() override + { + if (keys) + return generateWithKeys(); + return generateFullScan(); + } + + Chunk generateWithKeys() + { + const auto & sample_block = getPort().getHeader(); + if (it >= end) + { + it = {}; + return {}; + } + + const auto & key_column_type = sample_block.getByName(storage.getPrimaryKey().at(0)).type; + auto raw_keys = serializeKeysToRawString(it, end, key_column_type, max_block_size); + return 
storage.getBySerializedKeys(raw_keys, nullptr); + } + + Chunk generateFullScan() + { + /// redis scan ending + if (iterator == 0) + return {}; + + RedisArray scan_keys; + RedisIterator next_iterator; + + std::tie(next_iterator, scan_keys) = storage.scan(iterator == -1 ? 0 : iterator, pattern, max_block_size); + iterator = next_iterator; + + /// redis scan can return nothing + if (scan_keys.isNull() || scan_keys.size() == 0) + return generateFullScan(); + + const auto & sample_block = getPort().getHeader(); + MutableColumns columns = sample_block.cloneEmptyColumns(); + + RedisArray values = storage.multiGet(scan_keys); + for (size_t i = 0; i(i).isNull(); i++) + { + fillColumns(scan_keys.get(i).value(), + values.get(i).value(), + primary_key_pos, sample_block, columns + ); + } + + Block block = sample_block.cloneWithColumns(std::move(columns)); + return Chunk(block.getColumns(), block.rows()); + } + +private: + StorageRedis & storage; + + size_t primary_key_pos; + + /// For key scan + FieldVectorPtr keys = nullptr; + FieldVector::const_iterator begin; + FieldVector::const_iterator end; + FieldVector::const_iterator it; + + /// For full scan + RedisIterator iterator; + String pattern; + + const size_t max_block_size; +}; + StorageRedis::StorageRedis( const StorageID & table_id_, const RedisConfiguration & configuration_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - const String & comment_) + ContextPtr context_, + const StorageInMemoryMetadata & storage_metadata, + const String & primary_key_) : IStorage(table_id_) + , WithContext(context_->getGlobalContext()) , table_id(table_id_) , configuration(configuration_) , log(&Poco::Logger::get("StorageRedis")) + , primary_key(primary_key_) { pool = std::make_shared(configuration.pool_size); - StorageInMemoryMetadata storage_metadata; - storage_metadata.setColumns(columns_); - storage_metadata.setConstraints(constraints_); - storage_metadata.setComment(comment_); setInMemoryMetadata(storage_metadata); } @@ -46,84 +154,37 @@ Pipe StorageRedis::read( const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, - ContextPtr context, + ContextPtr context_, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, size_t num_streams) { - auto connection = getRedisConnection(pool, configuration); storage_snapshot->check(column_names); - Block sample_block; - RedisColumnTypes redis_types; - auto all_columns = storage_snapshot->metadata->getColumns().getNamesOfPhysical(); - - for (const String & column_name : column_names) - { - auto column_data = storage_snapshot->metadata->getColumns().getPhysical(column_name); - sample_block.insert({column_data.type, column_data.name}); - redis_types.push_back(getRedisColumnType(configuration.storage_type, all_columns, column_name)); - } - - FieldVectorPtr fields; + FieldVectorPtr keys; bool all_scan = false; - String primary_key = all_columns.at(0); - auto primary_key_data_type = sample_block.getByName(primary_key).type; + Block header = storage_snapshot->metadata->getSampleBlock(); + auto primary_key_data_type = header.getByName(primary_key).type; - std::tie(fields, all_scan) = getFilterKeys(primary_key, primary_key_data_type, query_info, context); + std::tie(keys, all_scan) = getFilterKeys(primary_key, primary_key_data_type, query_info, context_); if (all_scan) { - /// TODO use scan to avoid performance issue - RedisCommand command_for_keys("KEYS"); - command_for_keys << "*"; - - auto all_keys = 
connection->client->execute(command_for_keys); - - if (all_keys.isNull() || all_keys.size() == 0) - return {}; - - Pipes pipes; - - size_t num_keys = all_keys.size(); - size_t num_threads = std::min(num_streams, all_keys.size()); - - num_threads = std::min(num_threads, configuration.pool_size); - assert(num_keys <= std::numeric_limits::max()); - - for (size_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) - { - size_t begin = num_keys * thread_idx / num_threads; - size_t end = num_keys * (thread_idx + 1) / num_threads; - - RedisArray keys; - for (size_t pos=begin; pos(pos)); - - if (configuration.storage_type == RedisStorageType::HASH_MAP) - { - keys = *getRedisHashMapKeys(connection, keys); - } - - delete connection.release(); - - /// TODO reduce keys copy - pipes.emplace_back(std::make_shared( - getRedisConnection(pool, configuration), keys, - configuration.storage_type, sample_block, redis_types, max_block_size)); - } - return Pipe::unitePipes(std::move(pipes)); + return Pipe(std::make_shared(*this, header, max_block_size)); } else { - if (fields->empty()) + if (keys->empty()) return {}; Pipes pipes; - size_t num_keys = fields->size(); - size_t num_threads = std::min(num_streams, fields->size()); + ::sort(keys->begin(), keys->end()); + keys->erase(std::unique(keys->begin(), keys->end()), keys->end()); + + size_t num_keys = keys->size(); + size_t num_threads = std::min(num_streams, keys->size()); num_threads = std::min(num_threads, configuration.pool_size); assert(num_keys <= std::numeric_limits::max()); @@ -133,34 +194,191 @@ Pipe StorageRedis::read( size_t begin = num_keys * thread_idx / num_threads; size_t end = num_keys * (thread_idx + 1) / num_threads; - RedisArray keys; - for (size_t pos=begin; posat(pos).get()); - } - else - { - keys.add(toString(fields->at(pos))); /// TODO redis source deserialize - } - } - - if (configuration.storage_type == RedisStorageType::HASH_MAP) - { - keys = *getRedisHashMapKeys(connection, keys); - } - - delete connection.release(); - - pipes.emplace_back(std::make_shared( - getRedisConnection(pool, configuration), keys, - configuration.storage_type, sample_block, redis_types, max_block_size)); + pipes.emplace_back(std::make_shared( + *this, header, keys, keys->begin() + begin, keys->begin() + end, max_block_size)); } return Pipe::unitePipes(std::move(pipes)); } } +namespace +{ + // host:port, db_index, password, pool_size + RedisConfiguration getRedisConfiguration(ASTs & engine_args, ContextPtr context) + { + RedisConfiguration configuration; + + if (engine_args.size() < 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad arguments count when creating Redis table engine"); + + if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) + { + validateNamedCollection( + *named_collection, + ValidateKeysMultiset{"host", "port", "hostname", "password", "db_index", "pool_size"}, + {}); + + configuration.host = named_collection->getAny({"host", "hostname"}); + configuration.port = static_cast(named_collection->getOrDefault("port", 6379)); + configuration.password = named_collection->getOrDefault("password", DEFAULT_REDIS_PASSWORD); + configuration.db_index = static_cast(named_collection->getOrDefault("db_index", DEFAULT_REDIS_DB_INDEX)); + configuration.pool_size = static_cast(named_collection->getOrDefault("pool_size", DEFAULT_REDIS_POOL_SIZE)); + } + else + { + for (auto & engine_arg : engine_args) + engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); + + /// 6379 is the default Redis port. 
+ auto parsed_host_port = parseAddress(checkAndGetLiteralArgument(engine_args[0], "host:port"), 6379); + configuration.host = parsed_host_port.first; + configuration.port = parsed_host_port.second; + + if (engine_args.size() > 1) + configuration.db_index = static_cast(checkAndGetLiteralArgument(engine_args[1], "db_index")); + else + configuration.db_index = DEFAULT_REDIS_DB_INDEX; + if (engine_args.size() > 2) + configuration.password = checkAndGetLiteralArgument(engine_args[2], "password"); + else + configuration.password = DEFAULT_REDIS_PASSWORD; + if (engine_args.size() > 3) + configuration.pool_size = static_cast(checkAndGetLiteralArgument(engine_args[3], "pool_size")); + else + configuration.pool_size = DEFAULT_REDIS_POOL_SIZE; + } + + context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); + return configuration; + } + + StoragePtr createStorageRedis(const StorageFactory::Arguments & args) + { + auto configuration = getRedisConfiguration(args.engine_args, args.getLocalContext()); + + StorageInMemoryMetadata metadata; + metadata.setColumns(args.columns); + metadata.setConstraints(args.constraints); + metadata.setComment(args.comment); + + if (!args.storage_def->primary_key) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "StorageRedis must require one column in primary key"); + + auto primary_key_desc = KeyDescription::getKeyFromAST(args.storage_def->primary_key->ptr(), metadata.columns, args.getContext()); + auto primary_key_names = primary_key_desc.expression->getRequiredColumns(); + + if (primary_key_names.size() != 1) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "StorageRedis must require one column in primary key"); + } + + return std::make_shared( + args.table_id, + configuration, + args.getContext(), + metadata, + primary_key_names[0]); + } +} + +Chunk StorageRedis::getBySerializedKeys( + const std::vector & keys, + PaddedPODArray * null_map) const +{ + RedisArray redis_keys; + for (const auto & key : keys) + redis_keys.add(key); + return getBySerializedKeys(redis_keys, null_map); +} + +Chunk StorageRedis::getBySerializedKeys( + const RedisArray & keys, + PaddedPODArray * null_map) const +{ + Block sample_block = getInMemoryMetadataPtr()->getSampleBlock(); + + size_t primary_key_pos = getPrimaryKeyPos(sample_block, getPrimaryKey()); + MutableColumns columns = sample_block.cloneEmptyColumns(); + + RedisArray values = multiGet(keys); + if (values.isNull() || values.size() == 0) + return {}; + + if (null_map) + { + null_map->clear(); + null_map->resize_fill(keys.size(), 1); + } + + for (size_t i = 0; i < values.size(); ++i) + { + if (!values.get(i).isNull()) + { + fillColumns(keys.get(i).value(), + values.get(i).value(), + primary_key_pos, sample_block, columns + ); + } + else /// key not found + { + if (null_map) + { + (*null_map)[i] = 0; + for (size_t col_idx = 0; col_idx < sample_block.columns(); ++col_idx) + { + columns[col_idx]->insert(sample_block.getByPosition(col_idx).type->getDefault()); + } + } + } + } + + size_t num_rows = columns.at(0)->size(); + return Chunk(std::move(columns), num_rows); +} + +std::pair StorageRedis::scan(RedisIterator iterator, const String & pattern, const uint64_t max_count) +{ + auto connection = getRedisConnection(pool, configuration); + RedisCommand scan("SCAN"); + scan << toString(iterator) << "MATCH" << pattern << "COUNT" << toString(max_count); + + const auto & result = connection->client->execute(scan); + RedisIterator next = parse(result.get(0).value()); + + return {next, result.get(1)}; +} + 
+RedisArray StorageRedis::multiGet(const RedisArray & keys) const +{ + auto connection = getRedisConnection(pool, configuration); + + RedisCommand cmd_mget("MGET"); + for (size_t i = 0; i < keys.size(); ++i) + cmd_mget.add(keys.get(i)); + + return connection->client->execute(cmd_mget); +} + +Chunk StorageRedis::getByKeys( + const ColumnsWithTypeAndName & keys, + PaddedPODArray & null_map, + const Names &) const +{ + if (keys.size() != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, "StorageRedis supports only one key, got: {}", keys.size()); + + auto raw_keys = serializeKeysToRawString(keys[0]); + + if (raw_keys.size() != keys[0].column->size()) + throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Assertion failed: {} != {}", raw_keys.size(), keys[0].column->size()); + + return getBySerializedKeys(raw_keys, &null_map); +} + +Block StorageRedis::getSampleBlock(const Names &) const +{ + return getInMemoryMetadataPtr()->getSampleBlock(); +} SinkToStoragePtr StorageRedis::write( const ASTPtr & /*query*/, @@ -172,24 +390,13 @@ SinkToStoragePtr StorageRedis::write( void registerStorageRedis(StorageFactory & factory) { - factory.registerStorage( - "Redis", - [](const StorageFactory::Arguments & args) - { - auto configuration = getRedisConfiguration(args.engine_args, args.getLocalContext()); + StorageFactory::StorageFeatures features{ + .supports_sort_order = true, + .supports_parallel_insert = true, + .source_access_type = AccessType::REDIS, + }; - checkRedisTableStructure(args.columns, configuration); - - return std::make_shared( - args.table_id, - configuration, - args.columns, - args.constraints, - args.comment); - }, - { - .source_access_type = AccessType::REDIS, - }); + factory.registerStorage("Redis", createStorageRedis, features); } } diff --git a/src/Storages/StorageRedis.h b/src/Storages/StorageRedis.h index 619a83f3851..4a0418e6091 100644 --- a/src/Storages/StorageRedis.h +++ b/src/Storages/StorageRedis.h @@ -2,27 +2,24 @@ #include #include -#include +#include +#include +#include namespace DB { /* Implements storage in the Redis. - * Use ENGINE = Redis(host:port, db_index, password, storage_type, pool_size); - * Read only. - * - * Note If storage_type is - * SIMPLE: there should be 2 columns and the first one is key in Redis, the second one is value. - * HASH_MAP: there should be 3 columns and the first one is key in Redis and the second is the field of Redis Map. + * Use ENGINE = Redis(host:port[, db_index[, password[, pool_size]]]) PRIMARY KEY(key); */ -class StorageRedis : public IStorage +class StorageRedis : public IStorage, public IKeyValueEntity, WithContext { public: StorageRedis( const StorageID & table_id_, const RedisConfiguration & configuration_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - const String & comment_); + ContextPtr context_, + const StorageInMemoryMetadata & storage_metadata, + const String & primary_key_); std::string getName() const override { return "Redis"; } @@ -30,7 +27,7 @@ public: const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, - ContextPtr context, + ContextPtr context_, QueryProcessingStage::Enum processed_stage, size_t max_block_size, size_t num_streams) override; @@ -40,12 +37,34 @@ public: const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; + Names getPrimaryKey() const override { return {primary_key}; } + + /// Return chunk with data for given serialized keys. 
+ /// If out_null_map is passed, fill it with 1/0 depending on key was/wasn't found. Result chunk may contain default values. + /// If out_null_map is not passed. Not found rows excluded from result chunk. + Chunk getBySerializedKeys( + const std::vector & keys, + PaddedPODArray * out_null_map) const; + + Chunk getBySerializedKeys( + const RedisArray & keys, + PaddedPODArray * out_null_map) const; + + std::pair scan(RedisIterator iterator, const String & pattern, const uint64_t max_count); + + RedisArray multiGet(const RedisArray & keys) const; + + Chunk getByKeys(const ColumnsWithTypeAndName & keys, PaddedPODArray & null_map, const Names &) const override; + + Block getSampleBlock(const Names &) const override; private: StorageID table_id; RedisConfiguration configuration; Poco::Logger * log; RedisPoolPtr pool; + + const String primary_key; }; } diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp index f90a30af8a1..3db174fbcd8 100644 --- a/src/TableFunctions/TableFunctionRedis.cpp +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -1,6 +1,7 @@ #include #include +#include #include @@ -12,6 +13,8 @@ #include #include #include +#include + namespace DB @@ -21,7 +24,6 @@ namespace ErrorCodes { extern const int BAD_ARGUMENTS; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int INVALID_REDIS_STORAGE_TYPE; } @@ -29,19 +31,16 @@ StoragePtr TableFunctionRedis::executeImpl( const ASTPtr & /*ast_function*/, ContextPtr context, const String & table_name, ColumnsDescription /*cached_columns*/) const { auto columns = getActualTableStructure(context); - checkRedisTableStructure(columns, *configuration); + + StorageInMemoryMetadata metadata; + metadata.setColumns(columns); auto storage = std::make_shared( - StorageID(toString(configuration->db_index), table_name), - *configuration, - columns, - ConstraintsDescription(), - String{}); + StorageID(toString(configuration.db_index), table_name), configuration, context, metadata, primary_key); storage->startup(); return storage; } -/// TODO support user customized table structure ColumnsDescription TableFunctionRedis::getActualTableStructure(ContextPtr context) const { return parseColumnsListFromString(structure, context); @@ -54,25 +53,38 @@ void TableFunctionRedis::parseArguments(const ASTPtr & ast_function, ContextPtr throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table function 'redis' must have arguments."); ASTs & args = func_args.arguments->children; - configuration = getRedisConfiguration(args, context); + if (args.size() < 3) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad arguments count when creating Redis table function"); + + for (auto & arg : args) + arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context); + + auto parsed_host_port = parseAddress(checkAndGetLiteralArgument(args[0], "host:port"), 6379); + configuration.host = parsed_host_port.first; + configuration.port = parsed_host_port.second; + + primary_key = checkAndGetLiteralArgument(args[1], "key"); + structure = checkAndGetLiteralArgument(args[2], "structure"); + + if (args.size() > 3) + configuration.db_index = static_cast(checkAndGetLiteralArgument(args[3], "db_index")); + else + configuration.db_index = DEFAULT_REDIS_DB_INDEX; + if (args.size() > 4) + configuration.password = checkAndGetLiteralArgument(args[4], "password"); + else + configuration.password = DEFAULT_REDIS_PASSWORD; if (args.size() > 5) - structure = checkAndGetLiteralArgument(args[5], "structure"); + configuration.pool_size = 
static_cast(checkAndGetLiteralArgument(args[5], "pool_size")); + else + configuration.pool_size = DEFAULT_REDIS_POOL_SIZE; - if (structure.empty()) - { - switch (configuration->storage_type) - { - case RedisStorageType::SIMPLE: - structure = "key String, value String"; - break; - case RedisStorageType::HASH_MAP: - structure = "key String, field String, value String"; - break; - case RedisStorageType::UNKNOWN: - throw Exception(ErrorCodes::INVALID_REDIS_STORAGE_TYPE, "Invalid Redis storage type."); - } - } + context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); + + auto columns = parseColumnsListFromString(structure, context); + if (!columns.has(primary_key)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad arguments redis table function structure should contains key."); } diff --git a/src/TableFunctions/TableFunctionRedis.h b/src/TableFunctions/TableFunctionRedis.h index 1328d54a2a6..b985a89e3d7 100644 --- a/src/TableFunctions/TableFunctionRedis.h +++ b/src/TableFunctions/TableFunctionRedis.h @@ -7,6 +7,9 @@ namespace DB { +/* Implements Redis table function. + * Use redis(host:port, key, structure[, db_index[, password[, pool_size]]]); + */ class TableFunctionRedis : public ITableFunction { public: @@ -23,8 +26,9 @@ private: ColumnsDescription getActualTableStructure(ContextPtr context) const override; void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; - std::optional configuration; + RedisConfiguration configuration; String structure; + String primary_key; }; } diff --git a/tests/integration/test_storage_redis/test.py b/tests/integration/test_storage_redis/test.py index 19e7b4e5340..1f65a9df2f3 100644 --- a/tests/integration/test_storage_redis/test.py +++ b/tests/integration/test_storage_redis/test.py @@ -3,6 +3,8 @@ import time ## sudo -H pip install redis import redis import pytest +import struct +import sys from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster @@ -37,17 +39,50 @@ def drop_table(table): node.query(f"DROP TABLE IF EXISTS {table} SYNC"); -def test_storage_simple_select(started_cluster): +# see SerializationString.serializeBinary +def serialize_binary_for_string(x): + var_uint_max = (1 << 63) - 1 + buf = bytearray() + # write length + length = len(x) + # length = (length << 1) ^ (length >> 63) + if length > var_uint_max: + raise ValueError("Value too large for varint encoding") + for i in range(9): + byte = length & 0x7F + if length > 0x7F: + byte |= 0x80 + buf += (bytes([byte])) + length >>= 7 + if not length: + break + # write data + buf += x.encode('utf-8') + return bytes(buf) + + +# see SerializationNumber.serializeBinary +def serialize_binary_for_uint32(x): + buf = bytearray() + packed_num = struct.pack('I', x) + buf += packed_num + if sys.byteorder != 'little': + buf.reverse() + return bytes(buf) + + +def test_simple_select(started_cluster): client = get_redis_connection() address = get_address_for_ch() # clean all client.flushall() - drop_table('test_storage_simple_select') + drop_table('test_simple_select') data = {} for i in range(100): - data[str(i)] = str(i) + packed = serialize_binary_for_string(str(i)) + data[packed] = packed client.mset(data) client.close() @@ -55,56 +90,55 @@ def test_storage_simple_select(started_cluster): # create table node.query( f""" - CREATE TABLE test_storage_simple_select( + CREATE TABLE test_simple_select( k String, - v UInt32 - ) Engine=Redis('{address}', 0, 'clickhouse') + v String + ) Engine=Redis('{address}', 0, 
'clickhouse') PRIMARY KEY (k) """ ) - response = TSV.toMat(node.query("SELECT k, v FROM test_storage_simple_select WHERE k='0' FORMAT TSV")) + response = TSV.toMat(node.query("SELECT k, v FROM test_simple_select WHERE k='0' FORMAT TSV")) assert (len(response) == 1) assert (response[0] == ['0', '0']) - response = TSV.toMat(node.query("SELECT * FROM test_storage_simple_select ORDER BY k FORMAT TSV")) + response = TSV.toMat(node.query("SELECT * FROM test_simple_select ORDER BY k FORMAT TSV")) assert (len(response) == 100) assert (response[0] == ['0', '0']) -def test_storage_hash_map_select(started_cluster): +def test_select_int(started_cluster): client = get_redis_connection() address = get_address_for_ch() # clean all client.flushall() - drop_table('test_storage_hash_map_select') + drop_table('test_select_int') - key = 'k' data = {} for i in range(100): - data[str(i)] = str(i) + packed = serialize_binary_for_uint32(i) + data[packed] = packed - client.hset(key, mapping=data) + client.mset(data) client.close() # create table node.query( f""" - CREATE TABLE test_storage_hash_map_select( - k String, - f String, + CREATE TABLE test_select_int( + k UInt32, v UInt32 - ) Engine=Redis('{address}', 0, 'clickhouse','hash_map') + ) Engine=Redis('{address}', 0, 'clickhouse') PRIMARY KEY (k) """ ) - response = TSV.toMat(node.query("SELECT k, f, v FROM test_storage_hash_map_select WHERE f='0' FORMAT TSV")) + response = TSV.toMat(node.query("SELECT k, v FROM test_select_int WHERE k=0 FORMAT TSV")) assert (len(response) == 1) - assert (response[0] == ['k', '0', '0']) + assert (response[0] == ['0', '0']) - response = TSV.toMat(node.query("SELECT * FROM test_storage_hash_map_select ORDER BY f FORMAT TSV")) + response = TSV.toMat(node.query("SELECT * FROM test_select_int ORDER BY k FORMAT TSV")) assert (len(response) == 100) - assert (response[0] == ['k', '0', '0']) + assert (response[0] == ['0', '0']) def test_create_table(started_cluster): @@ -117,7 +151,7 @@ def test_create_table(started_cluster): CREATE TABLE test_create_table( k String, v UInt32 - ) Engine=Redis('{address}') + ) Engine=Redis('{address}') PRIMARY KEY (k) """ ) @@ -128,7 +162,7 @@ def test_create_table(started_cluster): CREATE TABLE test_create_table( k String, v UInt32 - ) Engine=Redis('{address}', 0, 'clickhouse','simple', 10) + ) Engine=Redis('{address}', 0, 'clickhouse', 10) PRIMARY KEY (k) """ ) @@ -139,11 +173,10 @@ def test_create_table(started_cluster): k String, f String, v UInt32 - ) Engine=Redis('{address}', 0, 'clickhouse','hash_map', 10) + ) Engine=Redis('{address}', 0, 'clickhouse', 10) PRIMARY KEY (k) """ ) - # illegal columns drop_table('test_create_table') with pytest.raises(QueryRuntimeException): node.query( @@ -152,7 +185,7 @@ def test_create_table(started_cluster): k String, f String, v UInt32 - ) Engine=Redis('{address}', 0, 'clickhouse','simple', 10) + ) Engine=Redis('{address}', 0, 'clickhouse', 10) PRIMARY KEY () """ ) @@ -163,22 +196,8 @@ def test_create_table(started_cluster): CREATE TABLE test_create_table( k String, f String, - v UInt32, - n UInt32 - ) Engine=Redis('{address}', 0, 'clickhouse','hash_map', 10) - """ - ) - - # illegal storage type - drop_table('test_create_table') - with pytest.raises(QueryRuntimeException): - node.query( - f""" - CREATE TABLE test_create_table( - k String, v UInt32 - ) Engine=Redis('{address}', 0, 'clickhouse','not_exist', 10) + ) Engine=Redis('{address}', 0, 'clickhouse', 10) """ ) - diff --git a/tests/integration/test_table_function_redis/test.py 
b/tests/integration/test_table_function_redis/test.py index 7c342690027..111276ec6dc 100644 --- a/tests/integration/test_table_function_redis/test.py +++ b/tests/integration/test_table_function_redis/test.py @@ -1,7 +1,9 @@ -import time +import datetime import redis import pytest +import sys +import struct from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster @@ -32,7 +34,39 @@ def get_address_for_ch(): return cluster.redis_host + ':6379' -def test_storage_simple(started_cluster): +# see SerializationString.serializeBinary +def serialize_binary_for_string(x): + var_uint_max = (1 << 63) - 1 + buf = bytearray() + # write length + length = len(x) + # length = (length << 1) ^ (length >> 63) + if length > var_uint_max: + raise ValueError("Value too large for varint encoding") + for i in range(9): + byte = length & 0x7F + if length > 0x7F: + byte |= 0x80 + buf += (bytes([byte])) + length >>= 7 + if not length: + break + # write data + buf += x.encode('utf-8') + return bytes(buf) + + +# see SerializationNumber.serializeBinary +def serialize_binary_for_uint32(x): + buf = bytearray() + packed_num = struct.pack('I', x) + buf += packed_num + if sys.byteorder != 'little': + buf.reverse() + return bytes(buf) + + +def test_simple_select(started_cluster): client = get_redis_connection() address = get_address_for_ch() @@ -41,7 +75,8 @@ def test_storage_simple(started_cluster): data = {} for i in range(100): - data[str(i)] = str(i) + packed = serialize_binary_for_string(str(i)) + data[packed] = packed client.mset(data) client.close() @@ -51,7 +86,7 @@ def test_storage_simple(started_cluster): SELECT key, value FROM - redis('{address}', 0, 'clickhouse') + redis('{address}', 'key', 'key String, value String', 0, 'clickhouse', 10) WHERE key='0' FORMAT TSV @@ -65,7 +100,7 @@ def test_storage_simple(started_cluster): SELECT * FROM - redis('{address}', 0, 'clickhouse') + redis('{address}', 'key', 'key String, value String', 0, 'clickhouse', 10) ORDER BY key FORMAT TSV @@ -75,79 +110,22 @@ def test_storage_simple(started_cluster): assert (response[0] == ['0', '0']) -def test_storage_hash_map(started_cluster): +def test_create_table(started_cluster): client = get_redis_connection() address = get_address_for_ch() # clean all client.flushall() - - key = 'k' - data = {} - for i in range(100): - data[str(i)] = str(i) - - client.hset(key, mapping=data) client.close() - response = TSV.toMat(node.query( - f""" - SELECT - key, field, value - FROM - redis('{address}', 0, 'clickhouse','hash_map') - WHERE - field='0' - FORMAT TSV - """)) - - assert (len(response) == 1) - assert (response[0] == ['k', '0', '0']) - - response = TSV.toMat(node.query( - f""" - SELECT - * - FROM - redis('{address}', 0, 'clickhouse','hash_map') - ORDER BY - field - FORMAT TSV - """)) - - assert (len(response) == 100) - assert (response[0] == ['k', '0', '0']) - - -def test_customized_table_structure(started_cluster): - address = get_address_for_ch() - node.query( f""" SELECT * FROM - redis('{address}', 0, 'clickhouse', "simple", 10, "k String, v UInt8") + redis('{address}', 'k', 'k String, v UInt32', 0, 'clickhouse', 10) """) - node.query( - f""" - SELECT - * - FROM - redis('{address}', 0, 'clickhouse', "hash_map", 10, "k String, f UInt8, v String") - """) - - # illegal columns - with pytest.raises(QueryRuntimeException): - node.query( - f""" - SELECT - * - FROM - redis('{address}', 0, 'clickhouse', "hash_map", 10, "k String, v String") - """) - # illegal data type with pytest.raises(QueryRuntimeException): 
node.query( @@ -155,7 +133,17 @@ def test_customized_table_structure(started_cluster): SELECT * FROM - redis('{address}', 0, 'clickhouse', "simple", 10, "k Ss, v String") + redis('{address}', 'k', 'k not_exist_type, v String', 0, 'clickhouse', 10) + """) + + # illegal key + with pytest.raises(QueryRuntimeException): + node.query( + f""" + SELECT + * + FROM + redis('{address}', 'not_exist_key', 'k not_exist_type, v String', 0, 'clickhouse', 10) """) @@ -165,14 +153,15 @@ def test_data_type(started_cluster): # string client.flushall() - client.set('0', '0') + value = serialize_binary_for_string('0') + client.set(value, value) response = TSV.toMat(node.query( f""" SELECT * FROM - redis('{address}', 0, 'clickhouse', 'simple', 10, "k String, v UInt8") + redis('{address}', 'k', 'k String, v String', 0, 'clickhouse', 10) WHERE k='0' FORMAT TSV @@ -183,14 +172,15 @@ def test_data_type(started_cluster): # number client.flushall() - client.set('0', '0') + value = serialize_binary_for_uint32(0) + client.set(value, value) response = TSV.toMat(node.query( f""" SELECT * FROM - redis('{address}', 0, 'clickhouse', 'simple', 10, "k UInt8, v UInt8") + redis('{address}', 'k', 'k UInt32, v UInt32', 0, 'clickhouse', 10) WHERE k=0 FORMAT TSV @@ -201,19 +191,22 @@ def test_data_type(started_cluster): # datetime client.flushall() - client.set('2023-06-01 00:00:00', '0') + # clickhouse store datatime as uint32 in internal + dt = datetime.datetime(2023, 6, 1, 0, 0, 0) + seconds_since_epoch = dt.timestamp() + value = serialize_binary_for_uint32(int(seconds_since_epoch)) + client.set(value, value) response = TSV.toMat(node.query( f""" SELECT * FROM - redis('{address}', 0, 'clickhouse', 'simple', 10, "k DateTime, v UInt8") + redis('{address}', 'k', 'k DateTime, v DateTime', 0, 'clickhouse', 10) WHERE k='2023-06-01 00:00:00' FORMAT TSV """)) - # TODO open - # assert (len(response) == 1) - # assert (response[0] == ['2023-06-01 00:00:00', '0']) + assert (len(response) == 1) + assert (response[0] == ['2023-06-01 00:00:00', '2023-06-01 00:00:00']) From 1df1dfc3e54b9e9b2b0f05a516ffc83ff3147c76 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 31 May 2023 14:31:06 +0800 Subject: [PATCH 179/722] add update/delete/insert to redis storage --- src/Storages/RedisCommon.h | 4 +- src/Storages/StorageRedis.cpp | 198 ++++++++++++++++++- src/Storages/StorageRedis.h | 14 +- tests/integration/test_storage_redis/test.py | 135 ++++++++++++- 4 files changed, 344 insertions(+), 7 deletions(-) diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index cb551a9a11a..49c21c3277f 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -43,10 +43,12 @@ static uint32_t DEFAULT_REDIS_DB_INDEX = 0; static uint32_t DEFAULT_REDIS_POOL_SIZE = 16; static String DEFAULT_REDIS_PASSWORD; +using RedisCommand = Poco::Redis::Command; using RedisArray = Poco::Redis::Array; using RedisArrayPtr = std::shared_ptr; -using RedisCommand = Poco::Redis::Command; using RedisBulkString = Poco::Redis::BulkString; +using RedisSimpleString = String; +using RedisInteger = Int64; using RedisClientPtr = std::unique_ptr; using RedisPool = BorrowedObjectPool; diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index ceed448b4a7..f9a25470e2d 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -13,9 +13,12 @@ #include #include #include +#include #include #include #include +#include +#include namespace DB { @@ -23,6 +26,7 @@ namespace DB namespace ErrorCodes { extern const int 
NOT_IMPLEMENTED; + extern const int INTERNAL_REDIS_ERROR; } class RedisDataSource : public ISource @@ -133,6 +137,64 @@ private: const size_t max_block_size; }; + +class RedisSink : public SinkToStorage +{ +public: + RedisSink( + StorageRedis & storage_, + const StorageMetadataPtr & metadata_snapshot_); + + void consume(Chunk chunk) override; + String getName() const override { return "RedisSink"; } + +private: + StorageRedis & storage; + StorageMetadataPtr metadata_snapshot; + size_t primary_key_pos = 0; +}; + +RedisSink::RedisSink( + StorageRedis & storage_, + const StorageMetadataPtr & metadata_snapshot_) + : SinkToStorage(metadata_snapshot_->getSampleBlock()) + , storage(storage_) + , metadata_snapshot(metadata_snapshot_) +{ + for (const auto & column : getHeader()) + { + if (column.name == storage.getPrimaryKey()[0]) + break; + ++primary_key_pos; + } +} + +void RedisSink::consume(Chunk chunk) +{ + auto rows = chunk.getNumRows(); + auto block = getHeader().cloneWithColumns(chunk.detachColumns()); + + WriteBufferFromOwnString wb_key; + WriteBufferFromOwnString wb_value; + + RedisArray data; + for (size_t i = 0; i < rows; ++i) + { + wb_key.restart(); + wb_value.restart(); + + size_t idx = 0; + for (const auto & elem : block) + { + elem.type->getDefaultSerialization()->serializeBinary(*elem.column, i, idx == primary_key_pos ? wb_key : wb_value, {}); + ++idx; + } + data.add(wb_key.str()); + data.add(wb_value.str()); + } + storage.multiSet(data); +} + StorageRedis::StorageRedis( const StorageID & table_id_, const RedisConfiguration & configuration_, @@ -336,7 +398,7 @@ Chunk StorageRedis::getBySerializedKeys( return Chunk(std::move(columns), num_rows); } -std::pair StorageRedis::scan(RedisIterator iterator, const String & pattern, const uint64_t max_count) +std::pair StorageRedis::scan(RedisIterator iterator, const String & pattern, uint64_t max_count) { auto connection = getRedisConnection(pool, configuration); RedisCommand scan("SCAN"); @@ -359,6 +421,36 @@ RedisArray StorageRedis::multiGet(const RedisArray & keys) const return connection->client->execute(cmd_mget); } +void StorageRedis::multiSet(const RedisArray & data) const +{ + auto connection = getRedisConnection(pool, configuration); + + RedisCommand cmd_mget("MSET"); + for (size_t i = 0; i < data.size(); ++i) + cmd_mget.add(data.get(i)); + + auto ret = connection->client->execute(cmd_mget); + if (ret != "OK") + throw Exception(ErrorCodes::INTERNAL_REDIS_ERROR, "Fail to write to redis table {}, for {}", + table_id.getFullNameNotQuoted(), ret); +} + +RedisInteger StorageRedis::multiDelete(const RedisArray & keys) const +{ + auto connection = getRedisConnection(pool, configuration); + + RedisCommand cmd("DEL"); + for (size_t i = 0; i < keys.size(); ++i) + cmd.add(keys.get(i)); + + auto ret = connection->client->execute(cmd); + if (ret != static_cast(keys.size())) + LOG_DEBUG(log, "Try to delete {} rows but actually deleted {} rows from redis table {}.", + keys.size(), ret, table_id.getFullNameNotQuoted()); + + return ret; +} + Chunk StorageRedis::getByKeys( const ColumnsWithTypeAndName & keys, PaddedPODArray & null_map, @@ -382,10 +474,110 @@ Block StorageRedis::getSampleBlock(const Names &) const SinkToStoragePtr StorageRedis::write( const ASTPtr & /*query*/, - const StorageMetadataPtr & /*metadata_snapshot*/, + const StorageMetadataPtr & metadata_snapshot, ContextPtr /*context*/) { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Write is unsupported for StorageRedis"); + return std::make_shared(*this, metadata_snapshot); +} + +/// 
TODO use scan to reduce latency +void StorageRedis::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) +{ + auto connection = getRedisConnection(pool, configuration); + + RedisCommand cmd_flush_db("FLUSHDB"); + cmd_flush_db << toString(configuration.db_index); + auto ret = connection->client->execute(cmd_flush_db); + + if (ret.isNull() || ret.value() != "OK") + throw Exception(ErrorCodes::INTERNAL_REDIS_ERROR, "Fail to truncate redis table {}, for {}", table_id.getFullNameNotQuoted(), ret.value()); +} + +void StorageRedis::checkMutationIsPossible(const MutationCommands & commands, const Settings & /* settings */) const +{ + if (commands.empty()) + return; + + if (commands.size() > 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Mutations cannot be combined for StorageRedis"); + + const auto command_type = commands.front().type; + if (command_type != MutationCommand::Type::UPDATE && command_type != MutationCommand::Type::DELETE) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Only DELETE and UPDATE mutation supported for StorageRedis"); +} + +void StorageRedis::mutate(const MutationCommands & commands, ContextPtr context_) +{ + if (commands.empty()) + return; + + assert(commands.size() == 1); + + auto metadata_snapshot = getInMemoryMetadataPtr(); + auto storage = getStorageID(); + auto storage_ptr = DatabaseCatalog::instance().getTable(storage, context_); + + if (commands.front().type == MutationCommand::Type::DELETE) + { + auto interpreter = std::make_unique( + storage_ptr, + metadata_snapshot, + commands, + context_, + /*can_execute_*/ true, + /*return_all_columns_*/ true, + /*return_mutated_rows*/ true); + auto pipeline = QueryPipelineBuilder::getPipeline(interpreter->execute()); + PullingPipelineExecutor executor(pipeline); + + auto sink = std::make_shared(*this, metadata_snapshot); + + auto header = interpreter->getUpdatedHeader(); + auto primary_key_pos = header.getPositionByName(primary_key); + + Block block; + while (executor.pull(block)) + { + auto & column_type_name = block.getByPosition(primary_key_pos); + + auto column = column_type_name.column; + auto size = column->size(); + + RedisArray keys; + WriteBufferFromOwnString wb_key; + for (size_t i = 0; i < size; ++i) + { + wb_key.restart(); + column_type_name.type->getDefaultSerialization()->serializeBinary(*column, i, wb_key, {}); + keys.add(wb_key.str()); + } + multiDelete(keys); + } + return; + } + + assert(commands.front().type == MutationCommand::Type::UPDATE); + if (commands.front().column_to_update_expression.contains(primary_key)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key cannot be updated (cannot update column {})", primary_key); + + auto interpreter = std::make_unique( + storage_ptr, + metadata_snapshot, + commands, + context_, + /*can_execute_*/ true, + /*return_all_columns*/ true, + /*return_mutated_rows*/ true); + auto pipeline = QueryPipelineBuilder::getPipeline(interpreter->execute()); + PullingPipelineExecutor executor(pipeline); + + auto sink = std::make_shared(*this, metadata_snapshot); + + Block block; + while (executor.pull(block)) + { + sink->consume(Chunk{block.getColumns(), block.rows()}); + } } void registerStorageRedis(StorageFactory & factory) diff --git a/src/Storages/StorageRedis.h b/src/Storages/StorageRedis.h index 4a0418e6091..a4ab9a6aa4e 100644 --- a/src/Storages/StorageRedis.h +++ b/src/Storages/StorageRedis.h @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { @@ -37,6 +38,14 @@ public: const StorageMetadataPtr & 
/*metadata_snapshot*/, ContextPtr context) override; + void truncate(const ASTPtr &, + const StorageMetadataPtr & metadata_snapshot, + ContextPtr, + TableExclusiveLockHolder &) override; + + void checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const override; + void mutate(const MutationCommands &, ContextPtr) override; + Names getPrimaryKey() const override { return {primary_key}; } /// Return chunk with data for given serialized keys. @@ -50,13 +59,16 @@ public: const RedisArray & keys, PaddedPODArray * out_null_map) const; - std::pair scan(RedisIterator iterator, const String & pattern, const uint64_t max_count); + std::pair scan(RedisIterator iterator, const String & pattern, uint64_t max_count); RedisArray multiGet(const RedisArray & keys) const; + void multiSet(const RedisArray & data) const; + RedisInteger multiDelete(const RedisArray & keys) const; Chunk getByKeys(const ColumnsWithTypeAndName & keys, PaddedPODArray & null_map, const Names &) const override; Block getSampleBlock(const Names &) const override; + private: StorageID table_id; RedisConfiguration configuration; diff --git a/tests/integration/test_storage_redis/test.py b/tests/integration/test_storage_redis/test.py index 1f65a9df2f3..e77de99c649 100644 --- a/tests/integration/test_storage_redis/test.py +++ b/tests/integration/test_storage_redis/test.py @@ -1,5 +1,3 @@ -import time - ## sudo -H pip install redis import redis import pytest @@ -201,3 +199,136 @@ def test_create_table(started_cluster): """ ) + +def test_simple_insert(started_cluster): + client = get_redis_connection() + address = get_address_for_ch() + + # clean all + client.flushall() + drop_table('test_simple_insert') + + node.query( + f""" + CREATE TABLE test_simple_insert( + k UInt32, + m DateTime, + n String + ) Engine=Redis('{address}', 0, 'clickhouse') PRIMARY KEY (k) + """ + ) + + node.query( + """ + INSERT INTO test_simple_insert Values + (1, '2023-06-01 00:00:00', 'lili'), (2, '2023-06-02 00:00:00', 'lucy') + """ + ) + + response = node.query("SELECT COUNT(*) FROM test_simple_insert FORMAT Values") + assert (response == '(2)') + + response = TSV.toMat(node.query("SELECT k, m, n FROM test_simple_insert WHERE k=1 FORMAT TSV")) + assert (len(response) == 1) + assert (response[0] == ['1', '2023-06-01 00:00:00', 'lili']) + + response = TSV.toMat(node.query("SELECT k, m, n FROM test_simple_insert WHERE m='2023-06-01 00:00:00' FORMAT TSV")) + assert (len(response) == 1) + assert (response[0] == ['1', '2023-06-01 00:00:00', 'lili']) + + response = TSV.toMat(node.query("SELECT k, m, n FROM test_simple_insert WHERE n='lili' FORMAT TSV")) + assert (len(response) == 1) + assert (response[0] == ['1', '2023-06-01 00:00:00', 'lili']) + + +def test_update(started_cluster): + client = get_redis_connection() + address = get_address_for_ch() + # clean all + client.flushall() + drop_table('test_update') + + node.query( + f""" + CREATE TABLE test_update( + k UInt32, + m DateTime, + n String + ) Engine=Redis('{address}', 0, 'clickhouse') PRIMARY KEY (k) + """ + ) + + node.query( + """ + INSERT INTO test_update Values + (1, '2023-06-01 00:00:00', 'lili'), (2, '2023-06-02 00:00:00', 'lucy') + """ + ) + + response = node.query( + """ + ALTER TABLE test_update UPDATE m='2023-06-03 00:00:00' WHERE k=1 + """ + ) + + print("update response: ", response) + + response = TSV.toMat(node.query("SELECT k, m, n FROM test_update WHERE k=1 FORMAT TSV")) + assert (len(response) == 1) + assert (response[0] == ['1', '2023-06-03 00:00:00', 'lili']) + + 
# can not update key + with pytest.raises(QueryRuntimeException): + node.query( + """ + ALTER TABLE test_update UPDATE k=2 WHERE k=1 + """ + ) + + +def test_delete(started_cluster): + client = get_redis_connection() + address = get_address_for_ch() + + # clean all + client.flushall() + drop_table('test_delete') + + node.query( + f""" + CREATE TABLE test_delete( + k UInt32, + m DateTime, + n String + ) Engine=Redis('{address}', 0, 'clickhouse') PRIMARY KEY (k) + """ + ) + + node.query( + """ + INSERT INTO test_delete Values + (1, '2023-06-01 00:00:00', 'lili'), (2, '2023-06-02 00:00:00', 'lucy') + """ + ) + + response = node.query( + """ + ALTER TABLE test_delete DELETE WHERE k=1 + """ + ) + + print("delete response: ", response) + + response = TSV.toMat(node.query("SELECT k, m, n FROM test_delete FORMAT TSV")) + assert (len(response) == 1) + assert (response[0] == ['2', '2023-06-02 00:00:00', 'lucy']) + + response = node.query( + """ + ALTER TABLE test_delete DELETE WHERE m='2023-06-02 00:00:00' + """ + ) + + response = TSV.toMat(node.query("SELECT k, m, n FROM test_delete FORMAT TSV")) + assert (len(response) == 0) + From 010670457359862da5622baa24d50f0d5bf42557 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 31 May 2023 16:31:06 +0800 Subject: [PATCH 180/722] add truncate to redis storage --- src/Storages/StorageRedis.cpp | 11 +++--- tests/integration/test_storage_redis/test.py | 37 ++++++++++++++++++++ 2 files changed, 43 insertions(+), 5 deletions(-) diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index f9a25470e2d..dd33f6e6839 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -87,6 +87,7 @@ public: return storage.getBySerializedKeys(raw_keys, nullptr); } + /// TODO scan may get duplicated keys Chunk generateFullScan() { /// redis scan ending @@ -480,17 +481,16 @@ SinkToStoragePtr StorageRedis::write( return std::make_shared(*this, metadata_snapshot); } -/// TODO use scan to reduce latency void StorageRedis::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) { auto connection = getRedisConnection(pool, configuration); RedisCommand cmd_flush_db("FLUSHDB"); - cmd_flush_db << toString(configuration.db_index); - auto ret = connection->client->execute(cmd_flush_db); + cmd_flush_db.add("ASYNC"); + auto ret = connection->client->execute(cmd_flush_db); - if (ret.isNull() || ret.value() != "OK") - throw Exception(ErrorCodes::INTERNAL_REDIS_ERROR, "Fail to truncate redis table {}, for {}", table_id.getFullNameNotQuoted(), ret.value()); + if (ret != "OK") + throw Exception(ErrorCodes::INTERNAL_REDIS_ERROR, "Fail to truncate redis table {}, for {}", table_id.getFullNameNotQuoted(), ret); } void StorageRedis::checkMutationIsPossible(const MutationCommands & commands, const Settings & /* settings */) const @@ -580,6 +580,7 @@ void StorageRedis::mutate(const MutationCommands & commands, ContextPtr context_ } } +/// TODO support ttl void registerStorageRedis(StorageFactory & factory) { StorageFactory::StorageFeatures features{ diff --git a/tests/integration/test_storage_redis/test.py b/tests/integration/test_storage_redis/test.py index e77de99c649..ad1b0ada068 100644 --- a/tests/integration/test_storage_redis/test.py +++ b/tests/integration/test_storage_redis/test.py @@ -332,3 +332,40 @@ def test_delete(started_cluster): response = TSV.toMat(node.query("SELECT k, m, n FROM test_delete FORMAT TSV")) assert (len(response) == 0) + +def test_truncate(started_cluster): + client = get_redis_connection() + 
address = get_address_for_ch() + # clean all + client.flushall() + drop_table('test_truncate') + + node.query( + f""" + CREATE TABLE test_truncate( + k UInt32, + m DateTime, + n String + ) Engine=Redis('{address}', 0, 'clickhouse') PRIMARY KEY (k) + """ + ) + + node.query( + """ + INSERT INTO test_truncate Values + (1, '2023-06-01 00:00:00', 'lili'), (2, '2023-06-02 00:00:00', 'lucy') + """ + ) + + response = node.query( + """ + TRUNCATE TABLE test_truncate + """ + ) + + print("truncate table response: ", response) + + response = TSV.toMat(node.query("SELECT COUNT(*) FROM test_truncate FORMAT TSV")) + assert (len(response) == 1) + assert (response[0] == ['0']) + From bcf22c1ec79fad247be8c48f4bada508ef0d8063 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 31 May 2023 18:15:38 +0800 Subject: [PATCH 181/722] fix code style --- src/Storages/RedisCommon.cpp | 2 - src/Storages/RedisCommon.h | 2 +- src/Storages/StorageRedis.cpp | 3 +- src/TableFunctions/TableFunctionRedis.cpp | 3 - tests/integration/test_storage_redis/test.py | 119 ++++++++++-------- .../test_table_function_redis/test.py | 34 ++--- 6 files changed, 89 insertions(+), 74 deletions(-) diff --git a/src/Storages/RedisCommon.cpp b/src/Storages/RedisCommon.cpp index ba7c02fdac5..a0534a9e23b 100644 --- a/src/Storages/RedisCommon.cpp +++ b/src/Storages/RedisCommon.cpp @@ -8,10 +8,8 @@ namespace DB namespace ErrorCodes { - extern const int INVALID_REDIS_TABLE_STRUCTURE; extern const int INTERNAL_REDIS_ERROR; extern const int TIMEOUT_EXCEEDED; - extern const int BAD_ARGUMENTS; extern const int INVALID_REDIS_STORAGE_TYPE; } diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index 49c21c3277f..cf39be20ba9 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -54,7 +54,7 @@ using RedisClientPtr = std::unique_ptr; using RedisPool = BorrowedObjectPool; using RedisPoolPtr = std::shared_ptr; -/// Redis scan interator +/// Redis scan iterator using RedisIterator = int64_t; struct RedisConnection diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index dd33f6e6839..b17528c7eae 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -25,7 +25,8 @@ namespace DB namespace ErrorCodes { - extern const int NOT_IMPLEMENTED; + extern const int BAD_ARGUMENTS; + extern const int LOGICAL_ERROR; extern const int INTERNAL_REDIS_ERROR; } diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp index 3db174fbcd8..bf147c08776 100644 --- a/src/TableFunctions/TableFunctionRedis.cpp +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -16,17 +16,14 @@ #include - namespace DB { namespace ErrorCodes { extern const int BAD_ARGUMENTS; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } - StoragePtr TableFunctionRedis::executeImpl( const ASTPtr & /*ast_function*/, ContextPtr context, const String & table_name, ColumnsDescription /*cached_columns*/) const { diff --git a/tests/integration/test_storage_redis/test.py b/tests/integration/test_storage_redis/test.py index ad1b0ada068..66d34ebc711 100644 --- a/tests/integration/test_storage_redis/test.py +++ b/tests/integration/test_storage_redis/test.py @@ -24,17 +24,17 @@ def started_cluster(): def get_redis_connection(db_id=0): client = redis.Redis( - host='localhost', port=cluster.redis_port, password="clickhouse", db=db_id + host="localhost", port=cluster.redis_port, password="clickhouse", db=db_id ) return client def get_address_for_ch(): - return cluster.redis_host + ':6379' + return 
cluster.redis_host + ":6379" def drop_table(table): - node.query(f"DROP TABLE IF EXISTS {table} SYNC"); + node.query(f"DROP TABLE IF EXISTS {table} SYNC") # see SerializationString.serializeBinary @@ -50,21 +50,21 @@ def serialize_binary_for_string(x): byte = length & 0x7F if length > 0x7F: byte |= 0x80 - buf += (bytes([byte])) + buf += bytes([byte]) length >>= 7 if not length: break # write data - buf += x.encode('utf-8') + buf += x.encode("utf-8") return bytes(buf) # see SerializationNumber.serializeBinary def serialize_binary_for_uint32(x): buf = bytearray() - packed_num = struct.pack('I', x) + packed_num = struct.pack("I", x) buf += packed_num - if sys.byteorder != 'little': + if sys.byteorder != "little": buf.reverse() return bytes(buf) @@ -75,7 +75,7 @@ def test_simple_select(started_cluster): # clean all client.flushall() - drop_table('test_simple_select') + drop_table("test_simple_select") data = {} for i in range(100): @@ -95,13 +95,17 @@ def test_simple_select(started_cluster): """ ) - response = TSV.toMat(node.query("SELECT k, v FROM test_simple_select WHERE k='0' FORMAT TSV")) - assert (len(response) == 1) - assert (response[0] == ['0', '0']) + response = TSV.toMat( + node.query("SELECT k, v FROM test_simple_select WHERE k='0' FORMAT TSV") + ) + assert len(response) == 1 + assert response[0] == ["0", "0"] - response = TSV.toMat(node.query("SELECT * FROM test_simple_select ORDER BY k FORMAT TSV")) - assert (len(response) == 100) - assert (response[0] == ['0', '0']) + response = TSV.toMat( + node.query("SELECT * FROM test_simple_select ORDER BY k FORMAT TSV") + ) + assert len(response) == 100 + assert response[0] == ["0", "0"] def test_select_int(started_cluster): @@ -110,7 +114,7 @@ def test_select_int(started_cluster): # clean all client.flushall() - drop_table('test_select_int') + drop_table("test_select_int") data = {} for i in range(100): @@ -130,20 +134,25 @@ def test_select_int(started_cluster): """ ) - response = TSV.toMat(node.query("SELECT k, v FROM test_select_int WHERE k=0 FORMAT TSV")) - assert (len(response) == 1) - assert (response[0] == ['0', '0']) + response = TSV.toMat( + node.query("SELECT k, v FROM test_select_int WHERE k=0 FORMAT TSV") + ) + assert len(response) == 1 + assert response[0] == ["0", "0"] + + response = TSV.toMat( + node.query("SELECT * FROM test_select_int ORDER BY k FORMAT TSV") + ) + assert len(response) == 100 + assert response[0] == ["0", "0"] - response = TSV.toMat(node.query("SELECT * FROM test_select_int ORDER BY k FORMAT TSV")) - assert (len(response) == 100) - assert (response[0] == ['0', '0']) def test_create_table(started_cluster): address = get_address_for_ch() # simple creation - drop_table('test_create_table') + drop_table("test_create_table") node.query( f""" CREATE TABLE test_create_table( @@ -154,7 +163,7 @@ def test_create_table(started_cluster): ) # simple creation with full engine args - drop_table('test_create_table') + drop_table("test_create_table") node.query( f""" CREATE TABLE test_create_table( @@ -164,7 +173,7 @@ def test_create_table(started_cluster): """ ) - drop_table('test_create_table') + drop_table("test_create_table") node.query( f""" CREATE TABLE test_create_table( @@ -175,7 +184,7 @@ def test_create_table(started_cluster): """ ) - drop_table('test_create_table') + drop_table("test_create_table") with pytest.raises(QueryRuntimeException): node.query( f""" @@ -187,7 +196,7 @@ def test_create_table(started_cluster): """ ) - drop_table('test_create_table') + drop_table("test_create_table") with 
pytest.raises(QueryRuntimeException): node.query( f""" @@ -226,19 +235,27 @@ def test_simple_insert(started_cluster): ) response = node.query("SELECT COUNT(*) FROM test_simple_insert FORMAT Values") - assert (response == '(2)') + assert response == "(2)" - response = TSV.toMat(node.query("SELECT k, m, n FROM test_simple_insert WHERE k=1 FORMAT TSV")) - assert (len(response) == 1) - assert (response[0] == ['1', '2023-06-01 00:00:00', 'lili']) + response = TSV.toMat( + node.query("SELECT k, m, n FROM test_simple_insert WHERE k=1 FORMAT TSV") + ) + assert len(response) == 1 + assert response[0] == ["1", "2023-06-01 00:00:00", "lili"] - response = TSV.toMat(node.query("SELECT k, m, n FROM test_simple_insert WHERE m='2023-06-01 00:00:00' FORMAT TSV")) - assert (len(response) == 1) - assert (response[0] == ['1', '2023-06-01 00:00:00', 'lili']) + response = TSV.toMat( + node.query( + "SELECT k, m, n FROM test_simple_insert WHERE m='2023-06-01 00:00:00' FORMAT TSV" + ) + ) + assert len(response) == 1 + assert response[0] == ["1", "2023-06-01 00:00:00", "lili"] - response = TSV.toMat(node.query("SELECT k, m, n FROM test_simple_insert WHERE n='lili' FORMAT TSV")) - assert (len(response) == 1) - assert (response[0] == ['1', '2023-06-01 00:00:00', 'lili']) + response = TSV.toMat( + node.query("SELECT k, m, n FROM test_simple_insert WHERE n='lili' FORMAT TSV") + ) + assert len(response) == 1 + assert response[0] == ["1", "2023-06-01 00:00:00", "lili"] def test_update(started_cluster): @@ -246,7 +263,7 @@ def test_update(started_cluster): address = get_address_for_ch() # clean all client.flushall() - drop_table('test_update') + drop_table("test_update") node.query( f""" @@ -271,11 +288,13 @@ def test_update(started_cluster): """ ) - print("update response: ", response) + print("update response: ", response) - response = TSV.toMat(node.query("SELECT k, m, n FROM test_update WHERE k=1 FORMAT TSV")) - assert (len(response) == 1) - assert (response[0] == ['1', '2023-06-03 00:00:00', 'lili']) + response = TSV.toMat( + node.query("SELECT k, m, n FROM test_update WHERE k=1 FORMAT TSV") + ) + assert len(response) == 1 + assert response[0] == ["1", "2023-06-03 00:00:00", "lili"] # can not update key with pytest.raises(QueryRuntimeException): @@ -292,7 +311,7 @@ def test_delete(started_cluster): # clean all client.flushall() - drop_table('test_delete') + drop_table("test_delete") node.query( f""" @@ -317,11 +336,11 @@ def test_delete(started_cluster): """ ) - print("delete response: ", response) + print("delete response: ", response) response = TSV.toMat(node.query("SELECT k, m, n FROM test_delete FORMAT TSV")) - assert (len(response) == 1) - assert (response[0] == ['2', '2023-06-02 00:00:00', 'lucy']) + assert len(response) == 1 + assert response[0] == ["2", "2023-06-02 00:00:00", "lucy"] response = node.query( """ @@ -330,7 +349,7 @@ def test_delete(started_cluster): ) response = TSV.toMat(node.query("SELECT k, m, n FROM test_delete FORMAT TSV")) - assert (len(response) == 0) + assert len(response) == 0 def test_truncate(started_cluster): @@ -338,7 +357,7 @@ def test_truncate(started_cluster): address = get_address_for_ch() # clean all client.flushall() - drop_table('test_truncate') + drop_table("test_truncate") node.query( f""" @@ -363,9 +382,9 @@ def test_truncate(started_cluster): """ ) - print("truncate table response: ", response) + print("truncate table response: ", response) response = TSV.toMat(node.query("SELECT COUNT(*) FROM test_truncate FORMAT TSV")) - assert (len(response) == 1) - assert 
(response[0] == ['0']) + assert len(response) == 1 + assert esponse[0] == ["0"] diff --git a/tests/integration/test_table_function_redis/test.py b/tests/integration/test_table_function_redis/test.py index 111276ec6dc..772e6d28141 100644 --- a/tests/integration/test_table_function_redis/test.py +++ b/tests/integration/test_table_function_redis/test.py @@ -25,13 +25,13 @@ def started_cluster(): def get_redis_connection(db_id=0): client = redis.Redis( - host='localhost', port=cluster.redis_port, password="clickhouse", db=db_id + host="localhost", port=cluster.redis_port, password="clickhouse", db=db_id ) return client def get_address_for_ch(): - return cluster.redis_host + ':6379' + return cluster.redis_host + ":6379" # see SerializationString.serializeBinary @@ -47,21 +47,21 @@ def serialize_binary_for_string(x): byte = length & 0x7F if length > 0x7F: byte |= 0x80 - buf += (bytes([byte])) + buf += bytes([byte]) length >>= 7 if not length: break # write data - buf += x.encode('utf-8') + buf += x.encode("utf-8") return bytes(buf) # see SerializationNumber.serializeBinary def serialize_binary_for_uint32(x): buf = bytearray() - packed_num = struct.pack('I', x) + packed_num = struct.pack("I", x) buf += packed_num - if sys.byteorder != 'little': + if sys.byteorder != "little": buf.reverse() return bytes(buf) @@ -92,8 +92,8 @@ def test_simple_select(started_cluster): FORMAT TSV """)) - assert (len(response) == 1) - assert (response[0] == ['0', '0']) + assert len(response) == 1 + assert response[0] == ["0", "0"] response = TSV.toMat(node.query( f""" @@ -106,8 +106,8 @@ def test_simple_select(started_cluster): FORMAT TSV """)) - assert (len(response) == 100) - assert (response[0] == ['0', '0']) + assert len(response) == 100 + assert response[0] == ["0", "0"] def test_create_table(started_cluster): @@ -153,7 +153,7 @@ def test_data_type(started_cluster): # string client.flushall() - value = serialize_binary_for_string('0') + value = serialize_binary_for_string("0") client.set(value, value) response = TSV.toMat(node.query( @@ -167,8 +167,8 @@ def test_data_type(started_cluster): FORMAT TSV """)) - assert (len(response) == 1) - assert (response[0] == ['0', '0']) + assert len(response) == 1 + assert response[0] == ["0", "0"] # number client.flushall() @@ -186,8 +186,8 @@ def test_data_type(started_cluster): FORMAT TSV """)) - assert (len(response) == 1) - assert (response[0] == ['0', '0']) + assert len(response) == 1 + assert response[0] == ["0", "0"] # datetime client.flushall() @@ -208,5 +208,5 @@ def test_data_type(started_cluster): FORMAT TSV """)) - assert (len(response) == 1) - assert (response[0] == ['2023-06-01 00:00:00', '2023-06-01 00:00:00']) + assert len(response) == 1 + assert response[0] == ["2023-06-01 00:00:00", "2023-06-01 00:00:00"] From 7cc37ab4b877dca8dcb76e328e39c8a62995cd0b Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 31 May 2023 19:35:17 +0800 Subject: [PATCH 182/722] add redis table engine/function docs --- docs/en/engines/table-engines/index.md | 1 + .../table-engines/integrations/index.md | 1 + .../table-engines/integrations/redis.md | 111 ++++++++++++++++++ .../en/sql-reference/table-functions/redis.md | 67 +++++++++++ 4 files changed, 180 insertions(+) create mode 100644 docs/en/engines/table-engines/integrations/redis.md create mode 100644 docs/en/sql-reference/table-functions/redis.md diff --git a/docs/en/engines/table-engines/index.md b/docs/en/engines/table-engines/index.md index d7c582164de..bd704d0e87e 100644 --- a/docs/en/engines/table-engines/index.md +++ 
b/docs/en/engines/table-engines/index.md @@ -53,6 +53,7 @@ Engines in the family: - [JDBC](../../engines/table-engines/integrations/jdbc.md) - [MySQL](../../engines/table-engines/integrations/mysql.md) - [MongoDB](../../engines/table-engines/integrations/mongodb.md) +- [Redis](../../engines/table-engines/integrations/redis.md) - [HDFS](../../engines/table-engines/integrations/hdfs.md) - [S3](../../engines/table-engines/integrations/s3.md) - [Kafka](../../engines/table-engines/integrations/kafka.md) diff --git a/docs/en/engines/table-engines/integrations/index.md b/docs/en/engines/table-engines/integrations/index.md index b321a644d32..93691a8adad 100644 --- a/docs/en/engines/table-engines/integrations/index.md +++ b/docs/en/engines/table-engines/integrations/index.md @@ -14,6 +14,7 @@ List of supported integrations: - [JDBC](../../../engines/table-engines/integrations/jdbc.md) - [MySQL](../../../engines/table-engines/integrations/mysql.md) - [MongoDB](../../../engines/table-engines/integrations/mongodb.md) +- [Redis](../../../engines/table-engines/integrations/redis.md) - [HDFS](../../../engines/table-engines/integrations/hdfs.md) - [S3](../../../engines/table-engines/integrations/s3.md) - [Kafka](../../../engines/table-engines/integrations/kafka.md) diff --git a/docs/en/engines/table-engines/integrations/redis.md b/docs/en/engines/table-engines/integrations/redis.md new file mode 100644 index 00000000000..8e5a974c459 --- /dev/null +++ b/docs/en/engines/table-engines/integrations/redis.md @@ -0,0 +1,111 @@ +--- +slug: /en/sql-reference/table-functions/redis +sidebar_position: 43 +sidebar_label: Redis +--- + +# Redis + +This engine allows integrating ClickHouse with [Redis](https://redis.io/). + +## Creating a Table {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name +( + name1 [type1], + name2 [type2], + ... +) ENGINE = Redis(host:port[, db_index[, password[, pool_size]]]) PRIMARY KEY(primary_key_name); +``` + +**Engine Parameters** + +- `host:port` — Redis server address, you can ignore port and default Redis port 6379 will be used. + +- `db_index` — Redis db index range from 0 to 15, default is 0. + +- `password` — User password, default is blank string. + +- `pool_size` — Redis max connection pool size, default is 16. + +- `primary_key_name` - any column name in the column list. + +- `primary` must be specified, it supports only one column in the primary key. The primary key will be serialized in binary as a Redis key. + +- columns other than the primary key will be serialized in binary as Redis value in corresponding order. + +- queries with key equals or in filtering will be optimized to multi keys lookup from Redis. If queries without filtering key full table scan will happen which is a heavy operation. 
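+
+For illustration, assuming the `redis_table` from the usage example below (primary key `k`), the first two queries are answered with a multi-key lookup, while the last one falls back to a full keyspace scan:
+
+```sql
+-- fast: equality / IN on the primary key becomes a multi-key lookup (MGET)
+SELECT * FROM redis_table WHERE k = '1';
+SELECT * FROM redis_table WHERE k IN ('1', '2');
+
+-- slow: filtering only on non-key columns scans the whole keyspace (SCAN)
+SELECT * FROM redis_table WHERE n = 2;
+```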
+ +## Usage Example {#usage-example} + +Create a table in ClickHouse which allows to read data from Redis: + +``` sql +CREATE TABLE redis_table +( + `k` String, + `m` String, + `n` UInt32 +) +ENGINE = Redis('redis1:6379') PRIMARY KEY(k); +``` + +Insert: + +```sql +INSERT INTO redis_table Values('1', 1, '1', 1.0), ('2', 2, '2', 2.0); +``` + +Query: + +``` sql +SELECT COUNT(*) FROM redis_table; +``` + +``` text +┌─count()─┐ +│ 2 │ +└─────────┘ +``` + +``` sql +SELECT * FROM redis_table WHERE key='1'; +``` + +```text +┌─key─┬─v1─┬─v2─┬─v3─┐ +│ 1 │ 1 │ 1 │ 1 │ +└─────┴────┴────┴────┘ +``` + +``` sql +SELECT * FROM redis_table WHERE v1=2; +``` + +```text +┌─key─┬─v1─┬─v2─┬─v3─┐ +│ 2 │ 2 │ 2 │ 2 │ +└─────┴────┴────┴────┘ +``` + +Update: + +Note that the primary key cannot be updated. + +```sql +ALTER TABLE redis_table UPDATE v1=2 WHERE key='1'; +``` + +Delete: + +```sql +ALTER TABLE redis_table DELETE WHERE key='1'; +``` + +Truncate: + +Redis engine will flush db asynchronously. +```sql +TRUNCATE TABLE redis_table; +``` diff --git a/docs/en/sql-reference/table-functions/redis.md b/docs/en/sql-reference/table-functions/redis.md new file mode 100644 index 00000000000..5b32f118fb8 --- /dev/null +++ b/docs/en/sql-reference/table-functions/redis.md @@ -0,0 +1,67 @@ +--- +slug: /en/sql-reference/table-functions/redis +sidebar_position: 10 +sidebar_label: Redis +--- + +# Redis + +This table function allows integrating ClickHouse with [Redis](https://redis.io/). + +**Syntax** + +```sql +redis(host:port, key, structure[, db_index[, password[, pool_size]]]) +``` + +**Arguments** + +- `host:port` — Redis server address, you can ignore port and default Redis port 6379 will be used. + +- `key` — any column name in the column list. + +- `structure` — The schema for the ClickHouse table returned from this function. + +- `db_index` — Redis db index range from 0 to 15, default is 0. + +- `password` — User password, default is blank string. + +- `pool_size` — Redis max connection pool size, default is 16. + +- `primary` must be specified, it supports only one column in the primary key. The primary key will be serialized in binary as a Redis key. + +- columns other than the primary key will be serialized in binary as Redis value in corresponding order. + +- queries with key equals or in filtering will be optimized to multi keys lookup from Redis. If queries without filtering key full table scan will happen which is a heavy operation. + + +**Returned Value** + +A table object with key as Redis key, other columns packaged together as Redis value. 
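+
+As a quick sketch (the address, the password `clickhouse` and the column names here are placeholders), a point lookup through the table function, which benefits from the key-filtering optimization described above, looks like this:
+
+```sql
+SELECT key, value
+FROM redis('redis1:6379', 'key', 'key String, value String', 0, 'clickhouse', 10)
+WHERE key = '0';
+```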
+ +## Usage Example {#usage-example} + +Create a table in ClickHouse which allows to read data from Redis: + +``` sql +CREATE TABLE redis_table +( + `k` String, + `m` String, + `n` UInt32 +) +ENGINE = Redis('redis1:6379') PRIMARY KEY(k); +``` + +```sql +SELECT * FROM redis( + 'redis1:6379', + 'key', + 'key String, v1 String, v2 UInt32' +) +``` + +**See Also** + +- [The `Redis` table engine](/docs/en/engines/table-engines/integrations/redis.md) +- [Using redis as a dictionary source](/docs/en/sql-reference/dictionaries/index.md#redis) From 4302ba44d46107a7e72b5f31fa948b52b0c9c40b Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 31 May 2023 20:32:34 +0800 Subject: [PATCH 183/722] fix code style --- tests/integration/test_storage_redis/test.py | 6 +- .../test_table_function_redis/test.py | 121 ++++++++++-------- 2 files changed, 71 insertions(+), 56 deletions(-) diff --git a/tests/integration/test_storage_redis/test.py b/tests/integration/test_storage_redis/test.py index 66d34ebc711..2fd97b9bebd 100644 --- a/tests/integration/test_storage_redis/test.py +++ b/tests/integration/test_storage_redis/test.py @@ -147,7 +147,6 @@ def test_select_int(started_cluster): assert response[0] == ["0", "0"] - def test_create_table(started_cluster): address = get_address_for_ch() @@ -215,7 +214,7 @@ def test_simple_insert(started_cluster): # clean all client.flushall() - drop_table('test_simple_insert') + drop_table("test_simple_insert") node.query( f""" @@ -386,5 +385,4 @@ def test_truncate(started_cluster): response = TSV.toMat(node.query("SELECT COUNT(*) FROM test_truncate FORMAT TSV")) assert len(response) == 1 - assert esponse[0] == ["0"] - + assert response[0] == ["0"] diff --git a/tests/integration/test_table_function_redis/test.py b/tests/integration/test_table_function_redis/test.py index 772e6d28141..8e9dd66d9e5 100644 --- a/tests/integration/test_table_function_redis/test.py +++ b/tests/integration/test_table_function_redis/test.py @@ -81,30 +81,36 @@ def test_simple_select(started_cluster): client.mset(data) client.close() - response = TSV.toMat(node.query( - f""" - SELECT - key, value - FROM - redis('{address}', 'key', 'key String, value String', 0, 'clickhouse', 10) - WHERE - key='0' - FORMAT TSV - """)) + response = TSV.toMat( + node.query( + f""" + SELECT + key, value + FROM + redis('{address}', 'key', 'key String, value String', 0, 'clickhouse', 10) + WHERE + key='0' + FORMAT TSV + """ + ) + ) assert len(response) == 1 assert response[0] == ["0", "0"] - response = TSV.toMat(node.query( - f""" - SELECT - * - FROM - redis('{address}', 'key', 'key String, value String', 0, 'clickhouse', 10) - ORDER BY - key - FORMAT TSV - """)) + response = TSV.toMat( + node.query( + f""" + SELECT + * + FROM + redis('{address}', 'key', 'key String, value String', 0, 'clickhouse', 10) + ORDER BY + key + FORMAT TSV + """ + ) + ) assert len(response) == 100 assert response[0] == ["0", "0"] @@ -124,7 +130,8 @@ def test_create_table(started_cluster): * FROM redis('{address}', 'k', 'k String, v UInt32', 0, 'clickhouse', 10) - """) + """ + ) # illegal data type with pytest.raises(QueryRuntimeException): @@ -134,7 +141,8 @@ def test_create_table(started_cluster): * FROM redis('{address}', 'k', 'k not_exist_type, v String', 0, 'clickhouse', 10) - """) + """ + ) # illegal key with pytest.raises(QueryRuntimeException): @@ -156,16 +164,19 @@ def test_data_type(started_cluster): value = serialize_binary_for_string("0") client.set(value, value) - response = TSV.toMat(node.query( - f""" - SELECT - * - FROM - 
redis('{address}', 'k', 'k String, v String', 0, 'clickhouse', 10) - WHERE - k='0' - FORMAT TSV - """)) + response = TSV.toMat( + node.query( + f""" + SELECT + * + FROM + redis('{address}', 'k', 'k String, v String', 0, 'clickhouse', 10) + WHERE + k='0' + FORMAT TSV + """ + ) + ) assert len(response) == 1 assert response[0] == ["0", "0"] @@ -175,16 +186,19 @@ def test_data_type(started_cluster): value = serialize_binary_for_uint32(0) client.set(value, value) - response = TSV.toMat(node.query( - f""" - SELECT - * - FROM - redis('{address}', 'k', 'k UInt32, v UInt32', 0, 'clickhouse', 10) - WHERE - k=0 - FORMAT TSV - """)) + response = TSV.toMat( + node.query( + f""" + SELECT + * + FROM + redis('{address}', 'k', 'k UInt32, v UInt32', 0, 'clickhouse', 10) + WHERE + k=0 + FORMAT TSV + """ + ) + ) assert len(response) == 1 assert response[0] == ["0", "0"] @@ -197,16 +211,19 @@ def test_data_type(started_cluster): value = serialize_binary_for_uint32(int(seconds_since_epoch)) client.set(value, value) - response = TSV.toMat(node.query( - f""" - SELECT - * - FROM - redis('{address}', 'k', 'k DateTime, v DateTime', 0, 'clickhouse', 10) - WHERE - k='2023-06-01 00:00:00' - FORMAT TSV - """)) + response = TSV.toMat( + node.query( + f""" + SELECT + * + FROM + redis('{address}', 'k', 'k DateTime, v DateTime', 0, 'clickhouse', 10) + WHERE + k='2023-06-01 00:00:00' + FORMAT TSV + """ + ) + ) assert len(response) == 1 assert response[0] == ["2023-06-01 00:00:00", "2023-06-01 00:00:00"] From 119c2200a95563e6814042618c4f3365bf7386fb Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 31 May 2023 22:23:09 +0800 Subject: [PATCH 184/722] fix merged error --- src/Storages/StorageRedis.cpp | 16 ++++++++++------ .../test_table_function_redis/test.py | 3 ++- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index b17528c7eae..973a77a5f98 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -520,14 +520,16 @@ void StorageRedis::mutate(const MutationCommands & commands, ContextPtr context_ if (commands.front().type == MutationCommand::Type::DELETE) { + MutationsInterpreter::Settings settings(true); + settings.return_all_columns = true; + settings.return_mutated_rows = true; + auto interpreter = std::make_unique( storage_ptr, metadata_snapshot, commands, context_, - /*can_execute_*/ true, - /*return_all_columns_*/ true, - /*return_mutated_rows*/ true); + settings); auto pipeline = QueryPipelineBuilder::getPipeline(interpreter->execute()); PullingPipelineExecutor executor(pipeline); @@ -561,14 +563,16 @@ void StorageRedis::mutate(const MutationCommands & commands, ContextPtr context_ if (commands.front().column_to_update_expression.contains(primary_key)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key cannot be updated (cannot update column {})", primary_key); + MutationsInterpreter::Settings settings(true); + settings.return_all_columns = true; + settings.return_mutated_rows = true; + auto interpreter = std::make_unique( storage_ptr, metadata_snapshot, commands, context_, - /*can_execute_*/ true, - /*return_all_columns*/ true, - /*return_mutated_rows*/ true); + settings); auto pipeline = QueryPipelineBuilder::getPipeline(interpreter->execute()); PullingPipelineExecutor executor(pipeline); diff --git a/tests/integration/test_table_function_redis/test.py b/tests/integration/test_table_function_redis/test.py index 8e9dd66d9e5..f4bcebe2f90 100644 --- a/tests/integration/test_table_function_redis/test.py +++ 
b/tests/integration/test_table_function_redis/test.py @@ -152,7 +152,8 @@ def test_create_table(started_cluster): * FROM redis('{address}', 'not_exist_key', 'k not_exist_type, v String', 0, 'clickhouse', 10) - """) + """ + ) def test_data_type(started_cluster): From e6d1b3c35141d7f20e17fd1b823df4c86b7f6c29 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 31 May 2023 23:55:41 +0800 Subject: [PATCH 185/722] little fix --- src/Dictionaries/RedisDictionarySource.cpp | 2 +- src/Dictionaries/RedisSource.cpp | 2 +- src/Storages/StorageRedis.cpp | 113 ++++++++------------- 3 files changed, 45 insertions(+), 72 deletions(-) diff --git a/src/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp index c52c3425d1b..1736cdff306 100644 --- a/src/Dictionaries/RedisDictionarySource.cpp +++ b/src/Dictionaries/RedisDictionarySource.cpp @@ -41,7 +41,7 @@ namespace DB .host = host, .port = static_cast(port), .db_index = config.getUInt(redis_config_prefix + ".db_index", DEFAULT_REDIS_DB_INDEX), - .password = config.getString(redis_config_prefix + ".password", ""), + .password = config.getString(redis_config_prefix + ".password", DEFAULT_REDIS_PASSWORD), .storage_type = parseStorageType(config.getString(redis_config_prefix + ".storage_type", "")), .pool_size = config.getUInt(redis_config_prefix + ".pool_size", DEFAULT_REDIS_POOL_SIZE), }; diff --git a/src/Dictionaries/RedisSource.cpp b/src/Dictionaries/RedisSource.cpp index 5d8a475cad4..719c0278707 100644 --- a/src/Dictionaries/RedisSource.cpp +++ b/src/Dictionaries/RedisSource.cpp @@ -21,7 +21,7 @@ namespace DB } - RedisSource::RedisSource( + RedisSource::RedisSource( RedisConnectionPtr connection_, const RedisArray & keys_, const RedisStorageType & storage_type_, diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 973a77a5f98..64e5c1ad5f7 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -1,24 +1,25 @@ -#include -#include -#include -#include - #include +#include +#include +#include +#include #include +#include #include #include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include namespace DB { @@ -51,11 +52,7 @@ public: { } - RedisDataSource( - StorageRedis & storage_, - const Block & header, - const size_t max_block_size_, - const String & pattern_ = "*") + RedisDataSource(StorageRedis & storage_, const Block & header, const size_t max_block_size_, const String & pattern_ = "*") : ISource(header) , storage(storage_) , primary_key_pos(getPrimaryKeyPos(header, storage.getPrimaryKey())) @@ -85,6 +82,7 @@ public: const auto & key_column_type = sample_block.getByName(storage.getPrimaryKey().at(0)).type; auto raw_keys = serializeKeysToRawString(it, end, key_column_type, max_block_size); + return storage.getBySerializedKeys(raw_keys, nullptr); } @@ -109,11 +107,11 @@ public: MutableColumns columns = sample_block.cloneEmptyColumns(); RedisArray values = storage.multiGet(scan_keys); - for (size_t i = 0; i(i).isNull(); i++) + for (size_t i = 0; i < scan_keys.size() && !values.get(i).isNull(); i++) { fillColumns(scan_keys.get(i).value(), - values.get(i).value(), - primary_key_pos, sample_block, columns + values.get(i).value(), + primary_key_pos, sample_block, columns ); } @@ -143,9 +141,7 @@ private: class RedisSink : public SinkToStorage { public: - RedisSink( - StorageRedis & storage_, - const StorageMetadataPtr & 
metadata_snapshot_); + RedisSink(StorageRedis & storage_, const StorageMetadataPtr & metadata_snapshot_); void consume(Chunk chunk) override; String getName() const override { return "RedisSink"; } @@ -156,9 +152,7 @@ private: size_t primary_key_pos = 0; }; -RedisSink::RedisSink( - StorageRedis & storage_, - const StorageMetadataPtr & metadata_snapshot_) +RedisSink::RedisSink(StorageRedis & storage_, const StorageMetadataPtr & metadata_snapshot_) : SinkToStorage(metadata_snapshot_->getSampleBlock()) , storage(storage_) , metadata_snapshot(metadata_snapshot_) @@ -194,6 +188,7 @@ void RedisSink::consume(Chunk chunk) data.add(wb_key.str()); data.add(wb_value.str()); } + storage.multiSet(data); } @@ -258,8 +253,8 @@ Pipe StorageRedis::read( size_t begin = num_keys * thread_idx / num_threads; size_t end = num_keys * (thread_idx + 1) / num_threads; - pipes.emplace_back(std::make_shared( - *this, header, keys, keys->begin() + begin, keys->begin() + end, max_block_size)); + pipes.emplace_back( + std::make_shared(*this, header, keys, keys->begin() + begin, keys->begin() + end, max_block_size)); } return Pipe::unitePipes(std::move(pipes)); } @@ -272,7 +267,7 @@ namespace { RedisConfiguration configuration; - if (engine_args.size() < 1) + if (engine_args.empty()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad arguments count when creating Redis table engine"); if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) @@ -336,18 +331,11 @@ namespace throw Exception(ErrorCodes::BAD_ARGUMENTS, "StorageRedis must require one column in primary key"); } - return std::make_shared( - args.table_id, - configuration, - args.getContext(), - metadata, - primary_key_names[0]); + return std::make_shared(args.table_id, configuration, args.getContext(), metadata, primary_key_names[0]); } } -Chunk StorageRedis::getBySerializedKeys( - const std::vector & keys, - PaddedPODArray * null_map) const +Chunk StorageRedis::getBySerializedKeys(const std::vector & keys, PaddedPODArray * null_map) const { RedisArray redis_keys; for (const auto & key : keys) @@ -355,9 +343,7 @@ Chunk StorageRedis::getBySerializedKeys( return getBySerializedKeys(redis_keys, null_map); } -Chunk StorageRedis::getBySerializedKeys( - const RedisArray & keys, - PaddedPODArray * null_map) const +Chunk StorageRedis::getBySerializedKeys(const RedisArray & keys, PaddedPODArray * null_map) const { Block sample_block = getInMemoryMetadataPtr()->getSampleBlock(); @@ -379,8 +365,8 @@ Chunk StorageRedis::getBySerializedKeys( if (!values.get(i).isNull()) { fillColumns(keys.get(i).value(), - values.get(i).value(), - primary_key_pos, sample_block, columns + values.get(i).value(), + primary_key_pos, sample_block, columns ); } else /// key not found @@ -433,8 +419,7 @@ void StorageRedis::multiSet(const RedisArray & data) const auto ret = connection->client->execute(cmd_mget); if (ret != "OK") - throw Exception(ErrorCodes::INTERNAL_REDIS_ERROR, "Fail to write to redis table {}, for {}", - table_id.getFullNameNotQuoted(), ret); + throw Exception(ErrorCodes::INTERNAL_REDIS_ERROR, "Fail to write to redis table {}, for {}", table_id.getFullNameNotQuoted(), ret); } RedisInteger StorageRedis::multiDelete(const RedisArray & keys) const @@ -447,16 +432,17 @@ RedisInteger StorageRedis::multiDelete(const RedisArray & keys) const auto ret = connection->client->execute(cmd); if (ret != static_cast(keys.size())) - LOG_DEBUG(log, "Try to delete {} rows but actually deleted {} rows from redis table {}.", - keys.size(), ret, 
table_id.getFullNameNotQuoted()); + LOG_DEBUG( + log, + "Try to delete {} rows but actually deleted {} rows from redis table {}.", + keys.size(), + ret, + table_id.getFullNameNotQuoted()); return ret; } -Chunk StorageRedis::getByKeys( - const ColumnsWithTypeAndName & keys, - PaddedPODArray & null_map, - const Names &) const +Chunk StorageRedis::getByKeys(const ColumnsWithTypeAndName & keys, PaddedPODArray & null_map, const Names &) const { if (keys.size() != 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "StorageRedis supports only one key, got: {}", keys.size()); @@ -474,10 +460,7 @@ Block StorageRedis::getSampleBlock(const Names &) const return getInMemoryMetadataPtr()->getSampleBlock(); } -SinkToStoragePtr StorageRedis::write( - const ASTPtr & /*query*/, - const StorageMetadataPtr & metadata_snapshot, - ContextPtr /*context*/) +SinkToStoragePtr StorageRedis::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr /*context*/) { return std::make_shared(*this, metadata_snapshot); } @@ -524,12 +507,7 @@ void StorageRedis::mutate(const MutationCommands & commands, ContextPtr context_ settings.return_all_columns = true; settings.return_mutated_rows = true; - auto interpreter = std::make_unique( - storage_ptr, - metadata_snapshot, - commands, - context_, - settings); + auto interpreter = std::make_unique(storage_ptr, metadata_snapshot, commands, context_, settings); auto pipeline = QueryPipelineBuilder::getPipeline(interpreter->execute()); PullingPipelineExecutor executor(pipeline); @@ -567,12 +545,7 @@ void StorageRedis::mutate(const MutationCommands & commands, ContextPtr context_ settings.return_all_columns = true; settings.return_mutated_rows = true; - auto interpreter = std::make_unique( - storage_ptr, - metadata_snapshot, - commands, - context_, - settings); + auto interpreter = std::make_unique(storage_ptr, metadata_snapshot, commands, context_, settings); auto pipeline = QueryPipelineBuilder::getPipeline(interpreter->execute()); PullingPipelineExecutor executor(pipeline); From dc6102392785f6368c13ea59a5c0f5273425567c Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 1 Jun 2023 14:36:03 +0800 Subject: [PATCH 186/722] add sync mode to redis storage truncate --- docs/en/engines/table-engines/integrations/redis.md | 5 +++-- src/Storages/StorageRedis.cpp | 10 ++++++++-- .../02117_show_create_table_system.reference | 6 +++--- ...ll_new_table_functions_must_be_documented.reference | 2 +- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/redis.md b/docs/en/engines/table-engines/integrations/redis.md index 8e5a974c459..6cfc60c836c 100644 --- a/docs/en/engines/table-engines/integrations/redis.md +++ b/docs/en/engines/table-engines/integrations/redis.md @@ -105,7 +105,8 @@ ALTER TABLE redis_table DELETE WHERE key='1'; Truncate: -Redis engine will flush db asynchronously. +Flush Redis db asynchronously. Also `Truncate` support SYNC mode. 
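+
+Roughly, the default form below maps to `FLUSHDB ASYNC`, while the `SYNC` form shown next maps to a blocking `FLUSHDB` (see `StorageRedis::truncate`):
+
+```sql
+-- non-blocking flush, issues FLUSHDB ASYNC
+TRUNCATE TABLE redis_table;
+```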
+ ```sql -TRUNCATE TABLE redis_table; +TRUNCATE TABLE redis_table SYNC; ``` diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 64e5c1ad5f7..97f1dbce6da 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -465,12 +466,17 @@ SinkToStoragePtr StorageRedis::write(const ASTPtr & /*query*/, const StorageMeta return std::make_shared(*this, metadata_snapshot); } -void StorageRedis::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) +void StorageRedis::truncate(const ASTPtr & query, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) { auto connection = getRedisConnection(pool, configuration); + auto * truncate_query = query->as(); + assert(truncate_query != nullptr); + RedisCommand cmd_flush_db("FLUSHDB"); - cmd_flush_db.add("ASYNC"); + if (!truncate_query->sync) + cmd_flush_db.add("ASYNC"); + auto ret = connection->client->execute(cmd_flush_db); if (ret != "OK") diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 724118f7bc1..10149bfc7bf 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -297,7 +297,7 @@ CREATE TABLE system.grants ( `user_name` Nullable(String), `role_name` Nullable(String), - `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS 
PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'Redis' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'SOURCES' = 161, 'CLUSTER' = 162, 'ALL' = 163, 'NONE' = 164), + `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED 
COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'REDIS' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'SOURCES' = 161, 'CLUSTER' = 162, 'ALL' = 163, 'NONE' = 164), `database` Nullable(String), `table` Nullable(String), `column` Nullable(String), @@ -581,10 +581,10 @@ ENGINE = SystemPartsColumns COMMENT 'SYSTEM TABLE is built on the fly.' 
CREATE TABLE system.privileges ( - `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE 
CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'MEILISEARCH' = 151, 'MYSQL' = 152, 'POSTGRES' = 153, 'SQLITE' = 154, 'ODBC' = 155, 'JDBC' = 156, 'HDFS' = 157, 'S3' = 158, 'HIVE' = 159, 'SOURCES' = 160, 'CLUSTER' = 161, 'ALL' = 162, 'NONE' = 163), + `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 
'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'REDIS' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'SOURCES' = 161, 'CLUSTER' = 162, 'ALL' = 163, 'NONE' = 164), `aliases` Array(String), `level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5, 'NAMED_COLLECTION' = 6)), - `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW 
USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'MEILISEARCH' = 151, 'MYSQL' = 152, 'POSTGRES' = 153, 'SQLITE' = 154, 'ODBC' = 155, 'JDBC' = 156, 'HDFS' = 157, 'S3' = 158, 'HIVE' = 159, 'SOURCES' = 160, 'CLUSTER' = 161, 'ALL' = 162, 'NONE' = 163)) + `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 
'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'REDIS' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'SOURCES' = 161, 'CLUSTER' = 162, 'ALL' = 163, 'NONE' = 164)) ) ENGINE = SystemPrivileges COMMENT 'SYSTEM TABLE is built on the fly.' 
diff --git a/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference index 4f16e57d606..bc83e626207 100644 --- a/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference @@ -9,11 +9,11 @@ jdbc meilisearch merge mongodb -redis null numbers numbers_mt odbc +redis remote remoteSecure url From ee363920f804107c6b8d96b746364cb252c6ae8e Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 1 Jun 2023 15:34:21 +0800 Subject: [PATCH 187/722] fix fast test --- tests/queries/0_stateless/01271_show_privileges.reference | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index 5ada21e31f4..2c98b8cc190 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -148,6 +148,7 @@ INTROSPECTION ['INTROSPECTION FUNCTIONS'] \N ALL FILE [] GLOBAL SOURCES URL [] GLOBAL SOURCES REMOTE [] GLOBAL SOURCES +MONGO [] GLOBAL SOURCES REDIS [] GLOBAL SOURCES MEILISEARCH [] GLOBAL SOURCES MYSQL [] GLOBAL SOURCES From ef6fde8264557a737fdc47a0486805f07b489abd Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 1 Jun 2023 19:28:13 +0800 Subject: [PATCH 188/722] fix build error for dwrwin --- src/Storages/RedisCommon.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/RedisCommon.h b/src/Storages/RedisCommon.h index cf39be20ba9..4cc358e6536 100644 --- a/src/Storages/RedisCommon.h +++ b/src/Storages/RedisCommon.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -48,7 +49,7 @@ using RedisArray = Poco::Redis::Array; using RedisArrayPtr = std::shared_ptr; using RedisBulkString = Poco::Redis::BulkString; using RedisSimpleString = String; -using RedisInteger = Int64; +using RedisInteger = Poco::Int64; using RedisClientPtr = std::unique_ptr; using RedisPool = BorrowedObjectPool; From b01db870d8335ea0bdc9f4462b42c61c389774f7 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Fri, 2 Jun 2023 10:04:16 +0800 Subject: [PATCH 189/722] normalize redis table function db name --- src/TableFunctions/TableFunctionRedis.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp index bf147c08776..ec659ae61e0 100644 --- a/src/TableFunctions/TableFunctionRedis.cpp +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -32,8 +32,9 @@ StoragePtr TableFunctionRedis::executeImpl( StorageInMemoryMetadata metadata; metadata.setColumns(columns); + String db_name = "redis" + getDatabaseName() + "_db_" + toString(configuration.db_index); auto storage = std::make_shared( - StorageID(toString(configuration.db_index), table_name), configuration, context, metadata, primary_key); + StorageID(db_name, table_name), configuration, context, metadata, primary_key); storage->startup(); return storage; } From 8a10baec7f73d1e40fbd018daed0f1786d95442a Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Fri, 2 Jun 2023 06:25:00 +0000 Subject: [PATCH 190/722] Add dateTime range check --- src/Functions/FunctionsConversion.h | 38 +++++++++---------- .../01556_accurate_cast_or_null.reference | 1 + .../01556_accurate_cast_or_null.sql | 5 ++- .../0_stateless/01601_accurate_cast.sql | 4 +- 4 files changed, 24 
insertions(+), 24 deletions(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index d77090afe71..d5b5f6ae28a 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -365,6 +365,12 @@ template struct ConvertImpl +static bool CheckDateRange(const FromType & value) +{ + return value >= 0 && value <= DATE_LUT_MAX_DAY_NUM; +} + template struct ToDateTransform32Or64 { @@ -372,7 +378,7 @@ struct ToDateTransform32Or64 static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) { - return from >= 0; + return CheckDateRange(from); } static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) @@ -391,7 +397,7 @@ struct ToDateTransform32Or64Signed static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) { - return from >= 0; + return CheckDateRange(from); } static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) @@ -413,7 +419,7 @@ struct ToDateTransform8Or16Signed static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) { - return from >= 0; + return CheckDateRange(from); } static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) @@ -434,11 +440,6 @@ struct ToDate32Transform32Or64 { static constexpr auto name = "toDate32"; - static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) - { - return from >= 0; - } - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) @@ -452,11 +453,6 @@ struct ToDate32Transform32Or64Signed { static constexpr auto name = "toDate32"; - static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) - { - return from >= 0; - } - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { static const Int32 daynum_min_offset = -static_cast(DateLUT::instance().getDayNumOffsetEpoch()); @@ -473,11 +469,6 @@ struct ToDate32Transform8Or16Signed { static constexpr auto name = "toDate32"; - static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) - { - return from >= 0; - } - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { return from; @@ -527,6 +518,11 @@ template struct ConvertImpl struct ConvertImpl : DateTimeTransformImpl> {}; +template +static bool CheckDateTimeRange(const FromType & value) +{ + return value >= 0 && value <= 0xFFFFFFFF; +} template struct ToDateTimeTransform64 @@ -535,7 +531,7 @@ struct ToDateTimeTransform64 static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) { - return from >= 0; + return CheckDateTimeRange(from); } static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) @@ -551,7 +547,7 @@ struct ToDateTimeTransformSigned static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) { - return from >= 0; + return CheckDateTimeRange(from); } static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) @@ -569,7 +565,7 @@ struct ToDateTimeTransform64Signed static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) { - return from >= 0; + return CheckDateTimeRange(from); } static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const 
DateLUTImpl &) diff --git a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference index cbdf72e9910..21faa830636 100644 --- a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference @@ -32,6 +32,7 @@ \N \N \N +\N 2023-05-30 14:38:20 1970-01-01 00:00:19 \N diff --git a/tests/queries/0_stateless/01556_accurate_cast_or_null.sql b/tests/queries/0_stateless/01556_accurate_cast_or_null.sql index a9038a1d230..3f57358576e 100644 --- a/tests/queries/0_stateless/01556_accurate_cast_or_null.sql +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.sql @@ -37,9 +37,10 @@ SELECT accurateCastOrNull(nan, 'UInt256'); SELECT accurateCastOrNull(number + 127, 'Int8') AS x FROM numbers (2) ORDER BY x; SELECT accurateCastOrNull(-1, 'DateTime'); +SELECT accurateCastOrNull(5000000000, 'DateTime'); SELECT accurateCastOrNull('1xxx', 'DateTime'); -SELECT accurateCastOrNull('2023-05-30 14:38:20', 'DateTime'); -SELECT accurateCastOrNull(19, 'DateTime'); +select toString(accurateCastOrNull('2023-05-30 14:38:20', 'DateTime'), timezone()); +SELECT toString(accurateCastOrNull(19, 'DateTime'), 'UTC'); SELECT accurateCastOrNull(-1, 'Date'); SELECT accurateCastOrNull('1xxx', 'Date'); diff --git a/tests/queries/0_stateless/01601_accurate_cast.sql b/tests/queries/0_stateless/01601_accurate_cast.sql index 7611b1d96b9..f7f4d588ccc 100644 --- a/tests/queries/0_stateless/01601_accurate_cast.sql +++ b/tests/queries/0_stateless/01601_accurate_cast.sql @@ -24,11 +24,13 @@ SELECT accurateCast('123', 'FixedString(2)'); -- { serverError 131 } SELECT accurateCast('12', 'FixedString(2)'); SELECT accurateCast(-1, 'DateTime'); -- { serverError 70 } +SELECT accurateCast(5000000000, 'DateTime'); -- { serverError 70 } SELECT accurateCast('1xxx', 'DateTime'); -- { serverError 41 } SELECT accurateCast('2023-05-30 14:38:20', 'DateTime'); -SELECT accurateCast(19, 'DateTime'); +SELECT toString(accurateCast(19, 'DateTime'), 'UTC'); SELECT accurateCast(-1, 'Date'); -- { serverError 70 } +SELECT accurateCast(999999, 'Date'); -- { serverError 70 } SELECT accurateCast('1xxx', 'Date'); -- { serverError 38 } SELECT accurateCast('2023-05-30', 'Date'); SELECT accurateCast(19, 'Date'); From c7088a8180a245311885899e702e668719159123 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Fri, 2 Jun 2023 07:26:55 +0000 Subject: [PATCH 191/722] Trying to fix build --- src/Functions/FunctionsConversion.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index d5b5f6ae28a..6aa5843ff65 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -521,7 +521,7 @@ template struct ConvertImpl static bool CheckDateTimeRange(const FromType & value) { - return value >= 0 && value <= 0xFFFFFFFF; + return value >= 0 && value <= 0xFFFFFFFFL; } template From 2f08b6738f307c7e04886a879632e1183b40b725 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Jun 2023 18:34:00 +0200 Subject: [PATCH 192/722] Support parallel replicas with the analyzer --- src/Storages/StorageReplicatedMergeTree.cpp | 15 ++++-- ...02771_parallel_replicas_analyzer.reference | 12 +++++ .../02771_parallel_replicas_analyzer.sql | 52 +++++++++++++++++++ 3 files changed, 75 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference create mode 
100644 tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 56896f88423..893e976d432 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -77,16 +77,17 @@ #include #include -#include #include +#include #include #include #include #include +#include #include +#include #include #include -#include #include @@ -4707,8 +4708,14 @@ void StorageReplicatedMergeTree::read( auto cluster = local_context->getCluster(local_context->getSettingsRef().cluster_for_parallel_replicas); - Block header = - InterpreterSelectQuery(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock(); + Block header; + + if (local_context->getSettingsRef().allow_experimental_analyzer) + header = InterpreterSelectQueryAnalyzer::getSampleBlock( + modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()); + else + header + = InterpreterSelectQuery(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock(); ClusterProxy::SelectStreamFactory select_stream_factory = ClusterProxy::SelectStreamFactory( diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference new file mode 100644 index 00000000000..4e93c530f7b --- /dev/null +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference @@ -0,0 +1,12 @@ +-8888150036649430454 +-2788931093724180887 +-75175454385331084 +368066018677693974 +821735343441964030 +2804162938822577320 +4357435422797280898 +5935810273536892891 +7885388429666205427 +8124171311239967992 +1 1 -- Simple query with analyzer and pure parallel replicas\nSELECT number\nFROM join_inner_table__fuzz_146_replicated\n SETTINGS\n allow_experimental_analyzer = 1,\n max_parallel_replicas = 2,\n cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\',\n allow_experimental_parallel_reading_from_replicas = 1,\n use_hedged_requests = 0; +0 2 SELECT `default`.`join_inner_table__fuzz_146_replicated`.`number` AS `number` FROM `default`.`join_inner_table__fuzz_146_replicated` diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql new file mode 100644 index 00000000000..35089c0cedb --- /dev/null +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql @@ -0,0 +1,52 @@ +-- Tags: zookeeper + +CREATE TABLE join_inner_table__fuzz_146_replicated +( + `id` UUID, + `key` String, + `number` Int64, + `value1` String, + `value2` String, + `time` Nullable(Int64) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/join_inner_table__fuzz_146_replicated', '{replica}') +ORDER BY (id, number, key) +SETTINGS index_granularity = 8192; + +INSERT INTO join_inner_table__fuzz_146_replicated + SELECT CAST('833c9e22-c245-4eb5-8745-117a9a1f26b1', 'UUID') AS id, CAST(rowNumberInAllBlocks(), 'String') AS key, * + FROM generateRandom('number Int64, value1 String, value2 String, time Int64', 1, 10, 2) LIMIT 10; + +-- Simple query with analyzer and pure parallel replicas +SELECT number +FROM join_inner_table__fuzz_146_replicated + SETTINGS + allow_experimental_analyzer = 1, + max_parallel_replicas = 2, + cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', + allow_experimental_parallel_reading_from_replicas = 1, + 
use_hedged_requests = 0; + +SYSTEM FLUSH LOGS; +-- There should be 2 different queries +-- The initial query +-- The query sent to each replica (which should appear 2 times as we are setting max_parallel_replicas to 2) +SELECT + is_initial_query, + count() as c, query, +FROM system.query_log +WHERE + event_date >= yesterday() + AND type = 'QueryFinish' + AND initial_query_id = + ( + SELECT query_id + FROM system.query_log + WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND type = 'QueryFinish' + AND query LIKE '-- Simple query with analyzer and pure parallel replicas%' + ) +GROUP BY is_initial_query, query +ORDER BY is_initial_query DESC, c, query; From fa5f890a7ad43fde1cd75d6c07170a6df3ec119d Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Fri, 2 Jun 2023 12:03:50 +0000 Subject: [PATCH 193/722] Added ru function descriptions (docs) --- .../functions/type-conversion-functions.md | 2 +- .../functions/type-conversion-functions.md | 86 +++++++++++++++++++ src/Functions/DateTimeTransforms.h | 7 +- 3 files changed, 92 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index a6fc6cd4dfc..eb210863c32 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -383,7 +383,7 @@ toDateTimeOrDefault(expr, [, time_zone [, default_value]]) **Arguments** - `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [Int](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md). -- `time_zone` — Time zone. +- `time_zone` — Time zone. [String](/docs/en/sql-reference/data-types/string.md). - `default_value` — The default value. [DateTime](/docs/en/sql-reference/data-types/datetime.md) If `expr` is a number, it is interpreted as the number of seconds since the beginning of the Unix Epoch (as Unix timestamp). diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 298b7bbc93e..67d1732d34e 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -173,6 +173,49 @@ Cиноним: `DATE`. ## toDateOrDefault {#todateordefault} +Конвертирует аргумент в значение [Date](/docs/ru/sql-reference/data-types/date.md) data type. +Если получен недопустимый аргумент, то возвращает значение по умолчанию (нижняя граница [Date](/docs/ru/sql-reference/data-types/date.md). Значение по умолчанию может быть указано вторым аргументом. +Похожа на [toDate](#todate). + +**Синтаксис** + +``` sql +toDateOrDefault(expr [, default_value]) +``` + +**Аргументы** + +- `expr` — Значение для преобразования. [String](/docs/ru/sql-reference/data-types/string.md), [Int](/docs/ru/sql-reference/data-types/int-uint.md), [Date](/docs/ru/sql-reference/data-types/date.md) или [DateTime](/docs/ru/sql-reference/data-types/datetime.md). +- `default_value` — Значение по умолчанию. [Date](/docs/ru/sql-reference/data-types/date.md) + +Если `expr` является числом выглядит как UNIX timestamp (больше чем 65535), оно интерпретируется как DateTime, затем обрезается до Date учитывавая текущую часовой пояс. Если `expr` является числом и меньше чем 65536, оно интерпретируется как количество дней с 1970-01-01. 
+ +**Возвращаемое значение** + +- Календарная дата. [Date](/docs/ru/sql-reference/data-types/date.md). + +**Пример** + +Запрос: + +``` sql +SELECT + toDateOrDefault('2021-01-01', '2023-01-01'::Date), + toDateOrDefault('xx2021-01-01', '2023-01-01'::Date); +``` + +Результат: + +```response +┌─toDateOrDefault('2021-01-01', CAST('2023-01-01', 'Date'))─┬─toDateOrDefault('xx2021-01-01', CAST('2023-01-01', 'Date'))─┐ +│ 2021-01-01 │ 2023-01-01 │ +└───────────────────────────────────────────────────────────┴─────────────────────────────────────────────────────────────┘ +``` + +**Смотрите также** +- [toDate](#todate) +- [toDate32OrDefault](#todate32ordefault) + ## toDateTime {#todatetime} ## toDateTimeOrZero {#todatetimeorzero} @@ -181,6 +224,49 @@ Cиноним: `DATE`. ## toDateTimeOrDefault {#todatetimeordefault} +Конвертирует аргумент в значение [DateTime](/docs/ru/sql-reference/data-types/datetime.md). +Если получен недопустимый аргумент, то возвращает значение по умолчанию (нижняя граница [DateTime](/docs/ru/sql-reference/data-types/datetime.md)). Значение по умолчанию может быть указано третьим аргументом. +Похожа на [toDateTime](#todatetime). + +**Синтаксис** + +``` sql +toDateTimeOrDefault(expr, [, time_zone [, default_value]]) +``` + +**Аргументы** + +- `expr` — Значение для преобразования. [String](/docs/ru/sql-reference/data-types/string.md), [Int](/docs/ru/sql-reference/data-types/int-uint.md), [Date](/docs/ru/sql-reference/data-types/date.md) или [DateTime](/docs/ru/sql-reference/data-types/datetime.md). +- `time_zone` — Часовой пояс. [String](/docs/ru/sql-reference/data-types/string.md). +- `default_value` — Значение по умолчанию. [DateTime](/docs/ru/sql-reference/data-types/datetime.md) + +Если `expr` является числом, оно интерпретируется как количество секунд от начала unix эпохи. + +**Возвращаемое значение** + +- Время. [DateTime](/docs/ru/sql-reference/data-types/datetime.md) + +**Пример** + +Запрос: + +``` sql +SELECT + toDateTimeOrDefault('2021-01-01', 'UTC', '2023-01-01'::DateTime('UTC')), + toDateTimeOrDefault('xx2021-01-01', 'UTC', '2023-01-01'::DateTime('UTC')); +``` + +Результат: + +```response +┌─toDateTimeOrDefault('2021-01-01', 'UTC', CAST('2023-01-01', 'DateTime(\'UTC\')'))─┬─toDateTimeOrDefault('xx2021-01-01', 'UTC', CAST('2023-01-01', 'DateTime(\'UTC\')'))─┐ +│ 2021-01-01 00:00:00 │ 2023-01-01 00:00:00 │ +└───────────────────────────────────────────────────────────────────────────────────┴─────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**Смотрите также** +- [toDateTime](#todatetime) + ## toDate32 {#todate32} Конвертирует аргумент в значение типа [Date32](../../sql-reference/data-types/date32.md). Если значение выходит за границы диапазона, возвращается пограничное значение `Date32`. Если аргумент имеет тип [Date](../../sql-reference/data-types/date.md), учитываются границы типа `Date`. 
diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 9f8f4df2465..d154dd9ffa2 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -1520,9 +1520,12 @@ struct DateTimeTransformImpl Op::vector(sources->getData(), col_to->getData(), time_zone, transform, vec_null_map_to); } - if (vec_null_map_to) + if constexpr (std::is_same_v) { - return ColumnNullable::create(std::move(mutable_result_col), std::move(col_null_map_to)); + if (vec_null_map_to) + { + return ColumnNullable::create(std::move(mutable_result_col), std::move(col_null_map_to)); + } } return mutable_result_col; From c1958c8bed68529c04c37a4b81d139088da3f2f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Jun 2023 14:24:32 +0200 Subject: [PATCH 194/722] Remove 02764_parallel_replicas_plain_merge_tree from list of broken tests --- tests/broken_tests.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/broken_tests.txt b/tests/broken_tests.txt index 02935712325..96219323700 100644 --- a/tests/broken_tests.txt +++ b/tests/broken_tests.txt @@ -135,6 +135,5 @@ 02703_row_policy_for_database 02721_url_cluster 02534_s3_cluster_insert_select_schema_inference -02764_parallel_replicas_plain_merge_tree 02765_parallel_replicas_final_modifier From 897325967841cbd24de32f3a136ceb26385b75b9 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 2 Jun 2023 15:00:24 +0000 Subject: [PATCH 195/722] Test attach gdb in stateless tests --- docker/test/stateless/run.sh | 39 ++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 5d0a7b50741..dfee7d84cde 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -85,6 +85,45 @@ fi sleep 5 +# Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog +# and clickhouse-server can do fork-exec, for example, to run some bridge. +# Do not set nostop noprint for all signals, because some it may cause gdb to hang, +# explicitly ignore non-fatal signals that are used by server. +# Number of SIGRTMIN can be determined only in runtime. +RTMIN=$(kill -l SIGRTMIN) +echo " +set follow-fork-mode parent +handle SIGHUP nostop noprint pass +handle SIGINT nostop noprint pass +handle SIGQUIT nostop noprint pass +handle SIGPIPE nostop noprint pass +handle SIGTERM nostop noprint pass +handle SIGUSR1 nostop noprint pass +handle SIGUSR2 nostop noprint pass +handle SIG$RTMIN nostop noprint pass +info signals +continue +backtrace full +thread apply all backtrace full +info registers +disassemble /s +up +disassemble /s +up +disassemble /s +p \"done\" +detach +quit +" > script.gdb + +# FIXME Hung check may work incorrectly because of attached gdb +# 1. False positives are possible +# 2. 
We cannot attach another gdb to get stacktraces if some queries hung +gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log & +sleep 5 +# gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s) +time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||: + function run_tests() { set -x From 423afec70542c266187d49cf571d5f6bb4324977 Mon Sep 17 00:00:00 2001 From: tpanetti Date: Fri, 2 Jun 2023 10:05:38 -0700 Subject: [PATCH 196/722] Change case and function name for MySQL Compatible types This changes the function name for MySQL Compatible types from getMySQLName to getSQLCompatibleName and changes the casing of the types to upper --- src/DataTypes/DataTypeAggregateFunction.h | 2 +- src/DataTypes/DataTypeArray.h | 4 +- src/DataTypes/DataTypeDate.h | 2 +- src/DataTypes/DataTypeDate32.h | 2 +- src/DataTypes/DataTypeDateTime.h | 2 +- src/DataTypes/DataTypeDateTime64.h | 2 +- src/DataTypes/DataTypeEnum.cpp | 2 +- src/DataTypes/DataTypeEnum.h | 2 +- src/DataTypes/DataTypeFixedString.h | 2 +- src/DataTypes/DataTypeFunction.h | 2 +- src/DataTypes/DataTypeIPv4andIPv6.h | 4 +- src/DataTypes/DataTypeInterval.h | 2 +- src/DataTypes/DataTypeLowCardinality.cpp | 2 +- src/DataTypes/DataTypeLowCardinality.h | 2 +- src/DataTypes/DataTypeMap.h | 2 +- src/DataTypes/DataTypeNothing.h | 2 +- src/DataTypes/DataTypeNullable.h | 2 +- src/DataTypes/DataTypeNumberBase.cpp | 28 +- src/DataTypes/DataTypeNumberBase.h | 2 +- src/DataTypes/DataTypeObject.h | 2 +- src/DataTypes/DataTypeSet.h | 2 +- src/DataTypes/DataTypeString.h | 2 +- src/DataTypes/DataTypeTuple.h | 2 +- src/DataTypes/DataTypeUUID.h | 2 +- src/DataTypes/DataTypesDecimal.h | 4 +- src/DataTypes/IDataType.h | 4 +- ...show_columns_mysql_compatibility.reference | 424 +++++++++--------- .../02775_show_columns_mysql_compatibility.sh | 6 +- 28 files changed, 260 insertions(+), 256 deletions(-) diff --git a/src/DataTypes/DataTypeAggregateFunction.h b/src/DataTypes/DataTypeAggregateFunction.h index 697be13652c..13ca3508580 100644 --- a/src/DataTypes/DataTypeAggregateFunction.h +++ b/src/DataTypes/DataTypeAggregateFunction.h @@ -45,7 +45,7 @@ public: String doGetName() const override; String getNameWithoutVersion() const; const char * getFamilyName() const override { return "AggregateFunction"; } - const char * getMySQLName() const override { return "text"; } + const char * getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return TypeIndex::AggregateFunction; } Array getParameters() const { return parameters; } diff --git a/src/DataTypes/DataTypeArray.h b/src/DataTypes/DataTypeArray.h index b031f411975..528062b60be 100644 --- a/src/DataTypes/DataTypeArray.h +++ b/src/DataTypes/DataTypeArray.h @@ -30,9 +30,9 @@ public: { return "Array"; } - const char * getMySQLName() const override + const char * getSQLCompatibleName() const override { - return "text"; + return "TEXT"; } bool canBeInsideNullable() const override diff --git a/src/DataTypes/DataTypeDate.h b/src/DataTypes/DataTypeDate.h index 33bcb6123ff..7b622ae04a3 100644 --- a/src/DataTypes/DataTypeDate.h +++ b/src/DataTypes/DataTypeDate.h @@ -13,7 +13,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Date; } const char * getFamilyName() const override { return family_name; } - const char * getMySQLName() const override { return "date"; } + const char * 
getSQLCompatibleName() const override { return "DATE"; } bool canBeUsedAsVersion() const override { return true; } bool canBeInsideNullable() const override { return true; } diff --git a/src/DataTypes/DataTypeDate32.h b/src/DataTypes/DataTypeDate32.h index 56315f46e8c..65b0ec7407e 100644 --- a/src/DataTypes/DataTypeDate32.h +++ b/src/DataTypes/DataTypeDate32.h @@ -13,7 +13,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Date32; } const char * getFamilyName() const override { return family_name; } - const char * getMySQLName() const override { return "date"; } + const char * getSQLCompatibleName() const override { return "DATE"; } Field getDefault() const override { diff --git a/src/DataTypes/DataTypeDateTime.h b/src/DataTypes/DataTypeDateTime.h index c868f92c311..2facc758f90 100644 --- a/src/DataTypes/DataTypeDateTime.h +++ b/src/DataTypes/DataTypeDateTime.h @@ -36,7 +36,7 @@ public: static constexpr auto family_name = "DateTime"; const char * getFamilyName() const override { return family_name; } - const char * getMySQLName() const override { return "datetime"; } + const char * getSQLCompatibleName() const override { return "DATETIME"; } String doGetName() const override; TypeIndex getTypeId() const override { return TypeIndex::DateTime; } diff --git a/src/DataTypes/DataTypeDateTime64.h b/src/DataTypes/DataTypeDateTime64.h index 8d317bb9430..b836b84918f 100644 --- a/src/DataTypes/DataTypeDateTime64.h +++ b/src/DataTypes/DataTypeDateTime64.h @@ -28,7 +28,7 @@ public: DataTypeDateTime64(UInt32 scale_, const TimezoneMixin & time_zone_info); const char * getFamilyName() const override { return family_name; } - const char * getMySQLName() const override { return "datetime"; } + const char * getSQLCompatibleName() const override { return "DATETIME"; } std::string doGetName() const override; TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypeEnum.cpp b/src/DataTypes/DataTypeEnum.cpp index bfed4d4d5a2..24a3976179d 100644 --- a/src/DataTypes/DataTypeEnum.cpp +++ b/src/DataTypes/DataTypeEnum.cpp @@ -41,7 +41,7 @@ std::string DataTypeEnum::generateMySQLName(const Values & values) { WriteBufferFromOwnString out; - writeString("enum", out); + writeString("ENUM", out); writeChar('(', out); auto first = true; diff --git a/src/DataTypes/DataTypeEnum.h b/src/DataTypes/DataTypeEnum.h index c6e523adf96..2cdaa2db06c 100644 --- a/src/DataTypes/DataTypeEnum.h +++ b/src/DataTypes/DataTypeEnum.h @@ -54,7 +54,7 @@ public: std::string doGetName() const override { return type_name; } const char * getFamilyName() const override; - const char * getMySQLName() const override { return my_sql_type_name.c_str(); } + const char * getSQLCompatibleName() const override { return my_sql_type_name.c_str(); } TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypeFixedString.h b/src/DataTypes/DataTypeFixedString.h index eb09914ec9c..2900efd5a34 100644 --- a/src/DataTypes/DataTypeFixedString.h +++ b/src/DataTypes/DataTypeFixedString.h @@ -42,7 +42,7 @@ public: TypeIndex getTypeId() const override { return type_id; } const char * getFamilyName() const override { return "FixedString"; } - const char * getMySQLName() const override { return "text"; } + const char * getSQLCompatibleName() const override { return "TEXT"; } size_t getN() const { diff --git a/src/DataTypes/DataTypeFunction.h b/src/DataTypes/DataTypeFunction.h index f3423796126..df59f7738b2 100644 --- a/src/DataTypes/DataTypeFunction.h +++ 
b/src/DataTypes/DataTypeFunction.h @@ -24,7 +24,7 @@ public: std::string doGetName() const override; const char * getFamilyName() const override { return "Function"; } - const char * getMySQLName() const override { return "text"; } + const char * getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return TypeIndex::Function; } const DataTypes & getArgumentTypes() const diff --git a/src/DataTypes/DataTypeIPv4andIPv6.h b/src/DataTypes/DataTypeIPv4andIPv6.h index 8f7fe79793b..be0ebb90f3c 100644 --- a/src/DataTypes/DataTypeIPv4andIPv6.h +++ b/src/DataTypes/DataTypeIPv4andIPv6.h @@ -19,7 +19,7 @@ public: static constexpr auto type_id = TypeToTypeIndex; const char * getFamilyName() const override { return TypeName.data(); } - const char * getMySQLName() const override { return "text"; } + const char * getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return type_id; } @@ -61,7 +61,7 @@ public: static constexpr auto type_id = TypeToTypeIndex; const char * getFamilyName() const override { return TypeName.data(); } - const char * getMySQLName() const override { return "text"; } + const char * getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypeInterval.h b/src/DataTypes/DataTypeInterval.h index 69a56e8aadd..ee2157431dd 100644 --- a/src/DataTypes/DataTypeInterval.h +++ b/src/DataTypes/DataTypeInterval.h @@ -26,7 +26,7 @@ public: std::string doGetName() const override { return fmt::format("Interval{}", kind.toString()); } const char * getFamilyName() const override { return "Interval"; } - const char * getMySQLName() const override { return "text"; } + const char * getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return TypeIndex::Interval; } bool equals(const IDataType & rhs) const override; diff --git a/src/DataTypes/DataTypeLowCardinality.cpp b/src/DataTypes/DataTypeLowCardinality.cpp index b1c32317015..e59613e6974 100644 --- a/src/DataTypes/DataTypeLowCardinality.cpp +++ b/src/DataTypes/DataTypeLowCardinality.cpp @@ -29,7 +29,7 @@ namespace ErrorCodes DataTypeLowCardinality::DataTypeLowCardinality(DataTypePtr dictionary_type_) : dictionary_type(std::move(dictionary_type_)), - mysql_name(dictionary_type->getMySQLName()) + mysql_name(dictionary_type->getSQLCompatibleName()) { auto inner_type = dictionary_type; if (dictionary_type->isNullable()) diff --git a/src/DataTypes/DataTypeLowCardinality.h b/src/DataTypes/DataTypeLowCardinality.h index bcc39f58ff7..4dee8565568 100644 --- a/src/DataTypes/DataTypeLowCardinality.h +++ b/src/DataTypes/DataTypeLowCardinality.h @@ -24,7 +24,7 @@ public: return "LowCardinality(" + dictionary_type->getName() + ")"; } const char * getFamilyName() const override { return "LowCardinality"; } - const char * getMySQLName() const override { return mysql_name.c_str(); } + const char * getSQLCompatibleName() const override { return mysql_name.c_str(); } TypeIndex getTypeId() const override { return TypeIndex::LowCardinality; } diff --git a/src/DataTypes/DataTypeMap.h b/src/DataTypes/DataTypeMap.h index 526dc321f44..299119f1759 100644 --- a/src/DataTypes/DataTypeMap.h +++ b/src/DataTypes/DataTypeMap.h @@ -30,7 +30,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Map; } std::string doGetName() const override; const char * getFamilyName() const override { return "Map"; } - const char * getMySQLName() const override { return 
"json"; } + const char * getSQLCompatibleName() const override { return "JSON"; } bool canBeInsideNullable() const override { return false; } diff --git a/src/DataTypes/DataTypeNothing.h b/src/DataTypes/DataTypeNothing.h index fdef6026603..b35ced5dcb3 100644 --- a/src/DataTypes/DataTypeNothing.h +++ b/src/DataTypes/DataTypeNothing.h @@ -16,7 +16,7 @@ public: static constexpr bool is_parametric = false; const char * getFamilyName() const override { return "Nothing"; } - const char * getMySQLName() const override { return "text"; } + const char * getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return TypeIndex::Nothing; } diff --git a/src/DataTypes/DataTypeNullable.h b/src/DataTypes/DataTypeNullable.h index 64b201d32b2..b5fe1bb2dd9 100644 --- a/src/DataTypes/DataTypeNullable.h +++ b/src/DataTypes/DataTypeNullable.h @@ -16,7 +16,7 @@ public: explicit DataTypeNullable(const DataTypePtr & nested_data_type_); std::string doGetName() const override { return "Nullable(" + nested_data_type->getName() + ")"; } const char * getFamilyName() const override { return "Nullable"; } - const char * getMySQLName() const override { return nested_data_type->getMySQLName(); } + const char * getSQLCompatibleName() const override { return nested_data_type->getSQLCompatibleName(); } TypeIndex getTypeId() const override { return TypeIndex::Nullable; } MutableColumnPtr createColumn() const override; diff --git a/src/DataTypes/DataTypeNumberBase.cpp b/src/DataTypes/DataTypeNumberBase.cpp index 7d200de7996..db654448e83 100644 --- a/src/DataTypes/DataTypeNumberBase.cpp +++ b/src/DataTypes/DataTypeNumberBase.cpp @@ -32,20 +32,20 @@ bool DataTypeNumberBase::isValueRepresentedByUnsignedInteger() const template const std::map DataTypeNumberBase::mysqlTypeMap = { - {"UInt8", "tinyint unsigned"}, - {"UInt16", "smallint unsigned"}, - {"UInt32", "mediumint unsigned"}, - {"UInt64", "bigint unsigned"}, - {"UInt128", "text"}, - {"UInt256", "text"}, - {"Int8", "tinyint"}, - {"Int16", "smallint"}, - {"Int32", "int"}, - {"Int64", "bigint"}, - {"Int128", "text"}, - {"Int256", "text"}, - {"Float32", "float"}, - {"Float64", "double"}, + {"UInt8", "TINYINT UNSIGNED"}, + {"UInt16", "SMALLINT UNSIGNED"}, + {"UInt32", "MEDIUMINT UNSIGNEd"}, + {"UInt64", "BIGINT UNSIGNED"}, + {"UInt128", "TEXT"}, + {"UInt256", "TEXT"}, + {"Int8", "TINYINT"}, + {"Int16", "SMALLINT"}, + {"Int32", "INT"}, + {"Int64", "BIGINT"}, + {"Int128", "TEXT"}, + {"Int256", "TEXT"}, + {"Float32", "FLOAT"}, + {"Float64", "DOUBLE"}, }; /// Explicit template instantiations - to avoid code bloat in headers. 
diff --git a/src/DataTypes/DataTypeNumberBase.h b/src/DataTypes/DataTypeNumberBase.h index b5c963cf245..1a855a974f0 100644 --- a/src/DataTypes/DataTypeNumberBase.h +++ b/src/DataTypes/DataTypeNumberBase.h @@ -27,7 +27,7 @@ public: using ColumnType = ColumnVector; const char * getFamilyName() const override { return TypeName.data(); } - const char * getMySQLName() const override { return mysqlTypeMap.at(TypeName.data()).c_str(); } + const char * getSQLCompatibleName() const override { return mysqlTypeMap.at(TypeName.data()).c_str(); } TypeIndex getTypeId() const override { return TypeToTypeIndex; } Field getDefault() const override; diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 8a2c36abcd7..618c7389758 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -23,7 +23,7 @@ public: DataTypeObject(const String & schema_format_, bool is_nullable_); const char * getFamilyName() const override { return "Object"; } - const char * getMySQLName() const override { return "json"; } + const char * getSQLCompatibleName() const override { return "JSON"; } String doGetName() const override; TypeIndex getTypeId() const override { return TypeIndex::Object; } diff --git a/src/DataTypes/DataTypeSet.h b/src/DataTypes/DataTypeSet.h index bdad638b5d5..916b4f071a5 100644 --- a/src/DataTypes/DataTypeSet.h +++ b/src/DataTypes/DataTypeSet.h @@ -15,7 +15,7 @@ class DataTypeSet final : public IDataTypeDummy public: static constexpr bool is_parametric = true; const char * getFamilyName() const override { return "Set"; } - const char * getMySQLName() const override { return "text"; } + const char * getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return TypeIndex::Set; } bool equals(const IDataType & rhs) const override { return typeid(rhs) == typeid(*this); } diff --git a/src/DataTypes/DataTypeString.h b/src/DataTypes/DataTypeString.h index bddfb4ae287..338b3846266 100644 --- a/src/DataTypes/DataTypeString.h +++ b/src/DataTypes/DataTypeString.h @@ -22,7 +22,7 @@ public: } // FIXME: string can contain arbitrary bytes, not only UTF-8 sequences - const char * getMySQLName() const override { return "blob"; } + const char * getSQLCompatibleName() const override { return "BLOB"; } TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h index d264cc97f60..93fa87b1332 100644 --- a/src/DataTypes/DataTypeTuple.h +++ b/src/DataTypes/DataTypeTuple.h @@ -33,7 +33,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Tuple; } std::string doGetName() const override; const char * getFamilyName() const override { return "Tuple"; } - const char * getMySQLName() const override { return "json"; } + const char * getSQLCompatibleName() const override { return "JSON"; } bool canBeInsideNullable() const override { return false; } bool supportsSparseSerialization() const override { return true; } diff --git a/src/DataTypes/DataTypeUUID.h b/src/DataTypes/DataTypeUUID.h index 4d54db42b45..bbf35074df3 100644 --- a/src/DataTypes/DataTypeUUID.h +++ b/src/DataTypes/DataTypeUUID.h @@ -18,7 +18,7 @@ public: static constexpr auto type_id = TypeIndex::UUID; const char * getFamilyName() const override { return "UUID"; } - const char * getMySQLName() const override { return "char"; } + const char * getSQLCompatibleName() const override { return "CHAR"; } TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypesDecimal.h 
b/src/DataTypes/DataTypesDecimal.h index 5c9405cb060..6f3bf582aeb 100644 --- a/src/DataTypes/DataTypesDecimal.h +++ b/src/DataTypes/DataTypesDecimal.h @@ -37,10 +37,10 @@ public: using Base::Base; static constexpr auto family_name = "Decimal"; - static constexpr auto mysql_name = "decimal"; + static constexpr auto mysql_name = "DECIMAL"; const char * getFamilyName() const override { return family_name; } - const char * getMySQLName() const override { return mysql_name; } + const char * getSQLCompatibleName() const override { return mysql_name; } std::string doGetName() const override; TypeIndex getTypeId() const override { return TypeToTypeIndex; } diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 2bed18897ce..93fdbab05ef 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -83,7 +83,7 @@ public: /// Name of data type family (example: FixedString, Array). virtual const char * getFamilyName() const = 0; - virtual const char * getMySQLName() const = 0; + virtual const char * getSQLCompatibleName() const = 0; /// Data type id. It's used for runtime type checks. virtual TypeIndex getTypeId() const = 0; @@ -135,7 +135,7 @@ public: protected: virtual String doGetName() const { return getFamilyName(); } - virtual String doGetMySQLName() const { return getMySQLName(); } + virtual String doGetMySQLName() const { return getSQLCompatibleName(); } virtual SerializationPtr doGetDefaultSerialization() const = 0; public: diff --git a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference index 96e542611c6..1742cd9c90c 100644 --- a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference @@ -4,226 +4,226 @@ Create pseudo-random database name Create tab duplicate table Run MySQL test field type null key default extra -aggregate_function text 0 NULL -array_value text 0 NULL -boolean_value tinyint unsigned 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value datetime 0 NULL -datetime_value datetime 0 NULL -decimal_value decimal 0 NULL -enum_value enum('apple', 'banana', 'orange') 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -int32 int 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value json 0 NULL -low_cardinality blob 0 NULL -low_cardinality_date datetime 0 NULL -map_value json 0 NULL -nested.nested_int text 0 NULL -nested.nested_string text 0 NULL -nullable_value int 0 NULL -string_value blob 0 NULL -tuple_value json 0 NULL -uint64 bigint unsigned 0 PRI SOR NULL -uuid_value char 0 NULL +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +int32 INT 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_int TEXT 0 NULL +nested.nested_string TEXT 0 NULL +nullable_value INT 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uuid_value CHAR 0 NULL field type null key default extra -aggregate_function text 0 NULL 
-array_value text 0 NULL -boolean_value tinyint unsigned 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value datetime 0 NULL -datetime_value datetime 0 NULL -decimal_value decimal 0 NULL -enum_value enum('apple', 'banana', 'orange') 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -int32 int 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value json 0 NULL -low_cardinality blob 0 NULL -low_cardinality_date datetime 0 NULL -map_value json 0 NULL -nested.nested_int text 0 NULL -nested.nested_string text 0 NULL -nullable_value int 0 NULL -string_value blob 0 NULL -tuple_value json 0 NULL -uint64 bigint unsigned 0 PRI SOR NULL -uuid_value char 0 NULL +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +int32 INT 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_int TEXT 0 NULL +nested.nested_string TEXT 0 NULL +nullable_value INT 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uuid_value CHAR 0 NULL field type null key default extra collation comment privileges -aggregate_function text 0 NULL NULL -array_value text 0 NULL NULL -boolean_value tinyint unsigned 0 NULL NULL -date32_value date 0 NULL NULL -date_value date 0 NULL NULL -datetime64_value datetime 0 NULL NULL -datetime_value datetime 0 NULL NULL -decimal_value decimal 0 NULL NULL -enum_value enum('apple', 'banana', 'orange') 0 NULL NULL -fixed_string_value text 0 NULL NULL -float32 float 0 NULL NULL -float64 double 0 NULL NULL -int32 int 0 NULL NULL -ipv4_value text 0 NULL NULL -ipv6_value text 0 NULL NULL -json_value json 0 NULL NULL -low_cardinality blob 0 NULL NULL -low_cardinality_date datetime 0 NULL NULL -map_value json 0 NULL NULL -nested.nested_int text 0 NULL NULL -nested.nested_string text 0 NULL NULL -nullable_value int 0 NULL NULL -string_value blob 0 NULL NULL -tuple_value json 0 NULL NULL -uint64 bigint unsigned 0 PRI SOR NULL NULL -uuid_value char 0 NULL NULL +aggregate_function TEXT 0 NULL NULL +array_value TEXT 0 NULL NULL +boolean_value TINYINT UNSIGNED 0 NULL NULL +date32_value DATE 0 NULL NULL +date_value DATE 0 NULL NULL +datetime64_value DATETIME 0 NULL NULL +datetime_value DATETIME 0 NULL NULL +decimal_value DECIMAL 0 NULL NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL NULL +fixed_string_value TEXT 0 NULL NULL +float32 FLOAT 0 NULL NULL +float64 DOUBLE 0 NULL NULL +int32 INT 0 NULL NULL +ipv4_value TEXT 0 NULL NULL +ipv6_value TEXT 0 NULL NULL +json_value JSON 0 NULL NULL +low_cardinality BLOB 0 NULL NULL +low_cardinality_date DATETIME 0 NULL NULL +map_value JSON 0 NULL NULL +nested.nested_int TEXT 0 NULL NULL +nested.nested_string TEXT 0 NULL NULL +nullable_value INT 0 NULL NULL +string_value BLOB 0 NULL NULL +tuple_value JSON 0 NULL NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL NULL +uuid_value CHAR 0 NULL NULL field type null key default extra -int32 int 0 NULL -nested.nested_int text 0 NULL -uint64 bigint unsigned 0 PRI SOR NULL +int32 INT 0 NULL +nested.nested_int TEXT 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL field type null key 
default extra -aggregate_function text 0 NULL -array_value text 0 NULL -boolean_value tinyint unsigned 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value datetime 0 NULL -datetime_value datetime 0 NULL -decimal_value decimal 0 NULL -enum_value enum('apple', 'banana', 'orange') 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value json 0 NULL -low_cardinality blob 0 NULL -low_cardinality_date datetime 0 NULL -map_value json 0 NULL -nested.nested_string text 0 NULL -nullable_value int 0 NULL -string_value blob 0 NULL -tuple_value json 0 NULL -uuid_value char 0 NULL +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_string TEXT 0 NULL +nullable_value INT 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uuid_value CHAR 0 NULL field type null key default extra -int32 int 0 NULL -nested.nested_int text 0 NULL -uint64 bigint unsigned 0 PRI SOR NULL +int32 INT 0 NULL +nested.nested_int TEXT 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL field type null key default extra -aggregate_function text 0 NULL -array_value text 0 NULL -boolean_value tinyint unsigned 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value datetime 0 NULL -datetime_value datetime 0 NULL -decimal_value decimal 0 NULL -enum_value enum('apple', 'banana', 'orange') 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value json 0 NULL -low_cardinality blob 0 NULL -low_cardinality_date datetime 0 NULL -map_value json 0 NULL -nested.nested_string text 0 NULL -nullable_value int 0 NULL -string_value blob 0 NULL -tuple_value json 0 NULL -uuid_value char 0 NULL +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_string TEXT 0 NULL +nullable_value INT 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uuid_value CHAR 0 NULL field type null key default extra -int32 int 0 NULL -nested.nested_int text 0 NULL -uint64 bigint unsigned 0 PRI SOR NULL +int32 INT 0 NULL +nested.nested_int TEXT 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL field type null key default extra -aggregate_function text 0 NULL +aggregate_function TEXT 0 NULL field type null key default extra -aggregate_function text 0 NULL -array_value text 0 NULL -boolean_value tinyint unsigned 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value datetime 0 NULL -datetime_value datetime 0 NULL -decimal_value decimal 0 NULL -enum_value enum('apple', 
'banana', 'orange') 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -int32 int 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value json 0 NULL -low_cardinality blob 0 NULL -low_cardinality_date datetime 0 NULL -map_value json 0 NULL -nested.nested_int text 0 NULL -nested.nested_string text 0 NULL -nullable_value int 0 NULL -string_value blob 0 NULL -tuple_value json 0 NULL -uint64 bigint unsigned 0 PRI SOR NULL -uuid_value char 0 NULL +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +int32 INT 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_int TEXT 0 NULL +nested.nested_string TEXT 0 NULL +nullable_value INT 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uuid_value CHAR 0 NULL field type null key default extra -aggregate_function text 0 NULL -array_value text 0 NULL -boolean_value tinyint unsigned 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value datetime 0 NULL -datetime_value datetime 0 NULL -decimal_value decimal 0 NULL -enum_value enum('apple', 'banana', 'orange') 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -int32 int 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value json 0 NULL -low_cardinality blob 0 NULL -low_cardinality_date datetime 0 NULL -map_value json 0 NULL -nested.nested_int text 0 NULL -nested.nested_string text 0 NULL -nullable_value int 0 NULL -string_value blob 0 NULL -tuple_value json 0 NULL -uint64 bigint unsigned 0 PRI SOR NULL -uuid_value char 0 NULL +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +int32 INT 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_int TEXT 0 NULL +nested.nested_string TEXT 0 NULL +nullable_value INT 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uuid_value CHAR 0 NULL field type null key default extra -aggregate_function text 0 NULL -array_value text 0 NULL -boolean_value tinyint unsigned 0 NULL -date32_value date 0 NULL -date_value date 0 NULL -datetime64_value datetime 0 NULL -datetime_value datetime 0 NULL -decimal_value decimal 0 NULL -enum_value enum('apple', 'banana', 'orange') 0 NULL -fixed_string_value text 0 NULL -float32 float 0 NULL -float64 double 0 NULL -int32 int 0 NULL -ipv4_value text 0 NULL -ipv6_value text 0 NULL -json_value json 0 NULL -low_cardinality blob 0 NULL -low_cardinality_date datetime 0 NULL -map_value json 0 NULL -nested.nested_int text 0 NULL -nested.nested_string text 0 NULL -nullable_value int 0 NULL -string_value blob 0 NULL -tuple_value json 0 NULL -uint64 bigint unsigned 0 
PRI SOR NULL -uuid_value char 0 NULL +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +int32 INT 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_int TEXT 0 NULL +nested.nested_string TEXT 0 NULL +nullable_value INT 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uuid_value CHAR 0 NULL diff --git a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh index a446c6e817e..fd1ad92f060 100755 --- a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh @@ -1,4 +1,8 @@ -#!/bin/bash +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh # This script tests the MySQL compatibility of the SHOW COLUMNS command in ClickHouse USER="default" From 97bd3f048316b421966b2e4d6ced8258808fdefe Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Mon, 22 May 2023 23:25:59 +0800 Subject: [PATCH 197/722] Proper mutation of skip indices and projections --- src/Interpreters/MutationsInterpreter.cpp | 93 ++++- src/Interpreters/MutationsInterpreter.h | 1 + src/Storages/MergeTree/IMergeTreeDataPart.cpp | 25 +- src/Storages/MergeTree/IMergeTreeDataPart.h | 2 +- src/Storages/MergeTree/MergeTreeData.cpp | 14 +- src/Storages/MergeTree/MergeTreeData.h | 6 +- .../MergeTree/MergeTreeDataWriter.cpp | 20 +- src/Storages/MergeTree/MutateTask.cpp | 366 +++++++++--------- src/Storages/StorageInMemoryMetadata.cpp | 16 +- src/Storages/StorageInMemoryMetadata.h | 9 +- ...ith_skip_indices_and_projections.reference | 0 ...part_with_skip_indices_and_projections.sql | 31 ++ 12 files changed, 359 insertions(+), 224 deletions(-) create mode 100644 tests/queries/0_stateless/02763_mutate_compact_part_with_skip_indices_and_projections.reference create mode 100644 tests/queries/0_stateless/02763_mutate_compact_part_with_skip_indices_and_projections.sql diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 713ebade1d5..791018a3f38 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -109,13 +109,16 @@ QueryTreeNodePtr prepareQueryAffectedQueryTree(const std::vector & has_index_or_projection) { NameSet new_updated_columns = updated_columns; ColumnDependencies dependencies; while (!new_updated_columns.empty()) { - auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns, true); + auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns, true, has_index_or_projection); new_updated_columns.clear(); for (const auto & dependency : new_dependencies) { @@ -288,6 +291,11 @@ bool MutationsInterpreter::Source::materializeTTLRecalculateOnly() const return data && data->getSettings()->materialize_ttl_recalculate_only; } +bool MutationsInterpreter::Source::hasIndexOrProjection(const String & file_name) const +{ + return part && 
part->checksums.has(file_name); +} + static Names getAvailableColumnsWithVirtuals(StorageMetadataPtr metadata_snapshot, const IStorage & storage) { auto all_columns = metadata_snapshot->getColumns().getNamesOfPhysical(); @@ -524,8 +532,54 @@ void MutationsInterpreter::prepare(bool dry_run) validateUpdateColumns(source, metadata_snapshot, updated_columns, column_to_affected_materialized); } + for (const auto & [_, names] : column_to_affected_materialized) + updated_columns.insert(names.begin(), names.end()); + + std::function has_index_or_projection + = [&](const String & file_name) { return source.hasIndexOrProjection(file_name); }; + if (settings.recalculate_dependencies_of_updated_columns) - dependencies = getAllColumnDependencies(metadata_snapshot, updated_columns); + dependencies = getAllColumnDependencies(metadata_snapshot, updated_columns, has_index_or_projection); + + for (const auto & index : metadata_snapshot->getSecondaryIndices()) + { + if (source.hasIndexOrProjection("skp_idx_" + index.name + ".idx") || source.hasIndexOrProjection("skp_idx_" + index.name + ".idx2")) + { + // If some dependent columns gets mutated + bool mutate = false; + const auto & index_cols = index.expression->getRequiredColumns(); + for (const auto & col : index_cols) + { + if (updated_columns.contains(col)) + { + mutate = true; + break; + } + } + if (mutate) + materialized_indices.insert(index.name); + } + } + + for (const auto & projection : metadata_snapshot->getProjections()) + { + if (source.hasIndexOrProjection(projection.getDirectoryName())) + { + // If some dependent columns gets mutated + bool mutate = false; + const auto & projection_cols = projection.required_columns; + for (const auto & col : projection_cols) + { + if (updated_columns.contains(col)) + { + mutate = true; + break; + } + } + if (mutate) + materialized_projections.insert(projection.name); + } + } std::vector read_columns; /// First, break a sequence of commands into stages. 
@@ -680,20 +734,27 @@ void MutationsInterpreter::prepare(bool dry_run) if (it == std::cend(indices_desc)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown index: {}", command.index_name); - auto query = (*it).expression_list_ast->clone(); - auto syntax_result = TreeRewriter(context).analyze(query, all_columns); - const auto required_columns = syntax_result->requiredSourceColumns(); - for (const auto & column : required_columns) - dependencies.emplace(column, ColumnDependency::SKIP_INDEX); - materialized_indices.emplace(command.index_name); + if (!source.hasIndexOrProjection("skp_idx_" + it->name + ".idx") + && !source.hasIndexOrProjection("skp_idx_" + it->name + ".idx2")) + { + auto query = (*it).expression_list_ast->clone(); + auto syntax_result = TreeRewriter(context).analyze(query, all_columns); + const auto required_columns = syntax_result->requiredSourceColumns(); + for (const auto & column : required_columns) + dependencies.emplace(column, ColumnDependency::SKIP_INDEX); + materialized_indices.emplace(command.index_name); + } } else if (command.type == MutationCommand::MATERIALIZE_PROJECTION) { mutation_kind.set(MutationKind::MUTATE_INDEX_PROJECTION); const auto & projection = projections_desc.get(command.projection_name); - for (const auto & column : projection.required_columns) - dependencies.emplace(column, ColumnDependency::PROJECTION); - materialized_projections.emplace(command.projection_name); + if (!source.hasIndexOrProjection(projection.getDirectoryName())) + { + for (const auto & column : projection.required_columns) + dependencies.emplace(column, ColumnDependency::PROJECTION); + materialized_projections.emplace(command.projection_name); + } } else if (command.type == MutationCommand::DROP_INDEX) { @@ -712,7 +773,8 @@ void MutationsInterpreter::prepare(bool dry_run) { // just recalculate ttl_infos without remove expired data auto all_columns_vec = all_columns.getNames(); - auto new_dependencies = metadata_snapshot->getColumnDependencies(NameSet(all_columns_vec.begin(), all_columns_vec.end()), false); + auto new_dependencies = metadata_snapshot->getColumnDependencies( + NameSet(all_columns_vec.begin(), all_columns_vec.end()), false, has_index_or_projection); for (const auto & dependency : new_dependencies) { if (dependency.kind == ColumnDependency::TTL_EXPRESSION) @@ -737,7 +799,8 @@ void MutationsInterpreter::prepare(bool dry_run) } auto all_columns_vec = all_columns.getNames(); - auto all_dependencies = getAllColumnDependencies(metadata_snapshot, NameSet(all_columns_vec.begin(), all_columns_vec.end())); + auto all_dependencies = getAllColumnDependencies( + metadata_snapshot, NameSet(all_columns_vec.begin(), all_columns_vec.end()), has_index_or_projection); for (const auto & dependency : all_dependencies) { @@ -746,7 +809,7 @@ void MutationsInterpreter::prepare(bool dry_run) } /// Recalc only skip indices and projections of columns which could be updated by TTL. 
- auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns, true); + auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns, true, has_index_or_projection); for (const auto & dependency : new_dependencies) { if (dependency.kind == ColumnDependency::SKIP_INDEX || dependency.kind == ColumnDependency::PROJECTION) diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index 49ba07641d9..d783b503531 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -120,6 +120,7 @@ public: bool supportsLightweightDelete() const; bool hasLightweightDeleteMask() const; bool materializeTTLRecalculateOnly() const; + bool hasIndexOrProjection(const String & file_name) const; void read( Stage & first_stage, diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index d27b03fff44..ca814a2afd5 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -632,7 +632,7 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks if (!parent_part) { loadTTLInfos(); - loadProjections(require_columns_checksums, check_consistency); + loadProjections(require_columns_checksums, check_consistency, false /* if_not_loaded */); } if (check_consistency) @@ -690,13 +690,13 @@ void IMergeTreeDataPart::addProjectionPart( const String & projection_name, std::shared_ptr && projection_part) { - /// Here should be a check that projection we are trying to add - /// does not exist, but unfortunately this check fails in tests. - /// TODO: fix. + if (hasProjection(projection_name)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Projection part {} in part {} is already loaded. This is a bug", projection_name, name); + projection_parts[projection_name] = std::move(projection_part); } -void IMergeTreeDataPart::loadProjections(bool require_columns_checksums, bool check_consistency) +void IMergeTreeDataPart::loadProjections(bool require_columns_checksums, bool check_consistency, bool if_not_loaded) { auto metadata_snapshot = storage.getInMemoryMetadataPtr(); for (const auto & projection : metadata_snapshot->projections) @@ -704,9 +704,18 @@ void IMergeTreeDataPart::loadProjections(bool require_columns_checksums, bool ch auto path = projection.name + ".proj"; if (getDataPartStorage().exists(path)) { - auto part = getProjectionPartBuilder(projection.name).withPartFormatFromDisk().build(); - part->loadColumnsChecksumsIndexes(require_columns_checksums, check_consistency); - addProjectionPart(projection.name, std::move(part)); + if (hasProjection(projection.name)) + { + if (!if_not_loaded) + throw Exception( + ErrorCodes::LOGICAL_ERROR, "Projection part {} in part {} is already loaded. 
This is a bug", projection.name, name); + } + else + { + auto part = getProjectionPartBuilder(projection.name).withPartFormatFromDisk().build(); + part->loadColumnsChecksumsIndexes(require_columns_checksums, check_consistency); + addProjectionPart(projection.name, std::move(part)); + } } } } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 411de3af982..b6b6d8c6693 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -388,7 +388,7 @@ public: bool hasProjection(const String & projection_name) const { return projection_parts.contains(projection_name); } - void loadProjections(bool require_columns_checksums, bool check_consistency); + void loadProjections(bool require_columns_checksums, bool check_consistency, bool if_not_loaded = false); /// Return set of metadata file names without checksums. For example, /// columns.txt or checksums.txt itself. diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 32665429051..0115ce07b2c 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -605,14 +605,14 @@ namespace ExpressionActionsPtr getCombinedIndicesExpression( const KeyDescription & key, - const IndicesDescription & indices, + const MergeTreeIndices & indices, const ColumnsDescription & columns, ContextPtr context) { ASTPtr combined_expr_list = key.expression_list_ast->clone(); for (const auto & index : indices) - for (const auto & index_expr : index.expression_list_ast->children) + for (const auto & index_expr : index->index.expression_list_ast->children) combined_expr_list->children.push_back(index_expr->clone()); auto syntax_result = TreeRewriter(context).analyze(combined_expr_list, columns.getAllPhysical()); @@ -644,14 +644,16 @@ DataTypes MergeTreeData::getMinMaxColumnsTypes(const KeyDescription & partition_ return {}; } -ExpressionActionsPtr MergeTreeData::getPrimaryKeyAndSkipIndicesExpression(const StorageMetadataPtr & metadata_snapshot) const +ExpressionActionsPtr +MergeTreeData::getPrimaryKeyAndSkipIndicesExpression(const StorageMetadataPtr & metadata_snapshot, const MergeTreeIndices & indices) const { - return getCombinedIndicesExpression(metadata_snapshot->getPrimaryKey(), metadata_snapshot->getSecondaryIndices(), metadata_snapshot->getColumns(), getContext()); + return getCombinedIndicesExpression(metadata_snapshot->getPrimaryKey(), indices, metadata_snapshot->getColumns(), getContext()); } -ExpressionActionsPtr MergeTreeData::getSortingKeyAndSkipIndicesExpression(const StorageMetadataPtr & metadata_snapshot) const +ExpressionActionsPtr +MergeTreeData::getSortingKeyAndSkipIndicesExpression(const StorageMetadataPtr & metadata_snapshot, const MergeTreeIndices & indices) const { - return getCombinedIndicesExpression(metadata_snapshot->getSortingKey(), metadata_snapshot->getSecondaryIndices(), metadata_snapshot->getColumns(), getContext()); + return getCombinedIndicesExpression(metadata_snapshot->getSortingKey(), indices, metadata_snapshot->getColumns(), getContext()); } diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 1c41de6fa19..6fd9d223f32 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -956,8 +956,10 @@ public: /// Get column types required for partition key static DataTypes getMinMaxColumnsTypes(const KeyDescription & partition_key); - ExpressionActionsPtr 
getPrimaryKeyAndSkipIndicesExpression(const StorageMetadataPtr & metadata_snapshot) const; - ExpressionActionsPtr getSortingKeyAndSkipIndicesExpression(const StorageMetadataPtr & metadata_snapshot) const; + ExpressionActionsPtr + getPrimaryKeyAndSkipIndicesExpression(const StorageMetadataPtr & metadata_snapshot, const MergeTreeIndices & indices) const; + ExpressionActionsPtr + getSortingKeyAndSkipIndicesExpression(const StorageMetadataPtr & metadata_snapshot, const MergeTreeIndices & indices) const; /// Get compression codec for part according to TTL rules and /// section from config.xml. diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index db486b163eb..6ff4d6be870 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -398,9 +398,11 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( temp_part.temporary_directory_lock = data.getTemporaryPartDirectoryHolder(part_dir); + auto indices = MergeTreeIndexFactory::instance().getMany(metadata_snapshot->getSecondaryIndices()); + /// If we need to calculate some columns to sort. if (metadata_snapshot->hasSortingKey() || metadata_snapshot->hasSecondaryIndices()) - data.getSortingKeyAndSkipIndicesExpression(metadata_snapshot)->execute(block); + data.getSortingKeyAndSkipIndicesExpression(metadata_snapshot, indices)->execute(block); Names sort_columns = metadata_snapshot->getSortingKeyColumns(); SortDescription sort_description; @@ -517,10 +519,16 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( /// either default lz4 or compression method with zero thresholds on absolute and relative part size. auto compression_codec = data.getContext()->chooseCompressionCodec(0, 0); - const auto & index_factory = MergeTreeIndexFactory::instance(); - auto out = std::make_unique(new_data_part, metadata_snapshot, columns, - index_factory.getMany(metadata_snapshot->getSecondaryIndices()), compression_codec, - context->getCurrentTransaction(), false, false, context->getWriteSettings()); + auto out = std::make_unique( + new_data_part, + metadata_snapshot, + columns, + indices, + compression_codec, + context->getCurrentTransaction(), + false, + false, + context->getWriteSettings()); out->writeWithPermutation(block, perm_ptr); @@ -606,7 +614,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( /// If we need to calculate some columns to sort. if (metadata_snapshot->hasSortingKey() || metadata_snapshot->hasSecondaryIndices()) - data.getSortingKeyAndSkipIndicesExpression(metadata_snapshot)->execute(block); + data.getSortingKeyAndSkipIndicesExpression(metadata_snapshot, {})->execute(block); Names sort_columns = metadata_snapshot->getSortingKeyColumns(); SortDescription sort_description; diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 76096d00641..d65897ac97d 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -200,8 +200,7 @@ static void splitAndModifyMutationCommands( { for_file_renames.push_back(command); } - /// If we don't have this column in source part, than we don't need - /// to materialize it + /// If we don't have this column in source part, we don't need to materialize it. 
else if (part_columns.has(command.column_name)) { if (command.type == MutationCommand::Type::READ_COLUMN) @@ -438,51 +437,13 @@ static ExecuteTTLType shouldExecuteTTL(const StorageMetadataPtr & metadata_snaps } -/// Get skip indices, that should exists in the resulting data part. -static MergeTreeIndices getIndicesForNewDataPart( - const IndicesDescription & all_indices, - const MutationCommands & commands_for_removes) -{ - NameSet removed_indices; - for (const auto & command : commands_for_removes) - if (command.type == MutationCommand::DROP_INDEX) - removed_indices.insert(command.column_name); - - MergeTreeIndices new_indices; - for (const auto & index : all_indices) - if (!removed_indices.contains(index.name)) - new_indices.push_back(MergeTreeIndexFactory::instance().get(index)); - - return new_indices; -} - -static std::vector getProjectionsForNewDataPart( - const ProjectionsDescription & all_projections, - const MutationCommands & commands_for_removes) -{ - NameSet removed_projections; - for (const auto & command : commands_for_removes) - if (command.type == MutationCommand::DROP_PROJECTION) - removed_projections.insert(command.column_name); - - std::vector new_projections; - for (const auto & projection : all_projections) - if (!removed_projections.contains(projection.name)) - new_projections.push_back(&projection); - - return new_projections; -} - - /// Return set of indices which should be recalculated during mutation also /// wraps input stream into additional expression stream static std::set getIndicesToRecalculate( QueryPipelineBuilder & builder, - const NameSet & updated_columns, const StorageMetadataPtr & metadata_snapshot, ContextPtr context, - const NameSet & materialized_indices, - const MergeTreeData::DataPartPtr & source_part) + const NameSet & materialized_indices) { /// Checks if columns used in skipping indexes modified. 
const auto & index_factory = MergeTreeIndexFactory::instance(); @@ -492,11 +453,7 @@ static std::set getIndicesToRecalculate( for (const auto & index : indices) { - bool has_index = - source_part->checksums.has(INDEX_FILE_PREFIX + index.name + ".idx") || - source_part->checksums.has(INDEX_FILE_PREFIX + index.name + ".idx2"); - // If we ask to materialize and it already exists - if (!has_index && materialized_indices.contains(index.name)) + if (materialized_indices.contains(index.name)) { if (indices_to_recalc.insert(index_factory.get(index)).second) { @@ -505,26 +462,6 @@ static std::set getIndicesToRecalculate( indices_recalc_expr_list->children.push_back(expr->clone()); } } - // If some dependent columns gets mutated - else - { - bool mutate = false; - const auto & index_cols = index.expression->getRequiredColumns(); - for (const auto & col : index_cols) - { - if (updated_columns.contains(col)) - { - mutate = true; - break; - } - } - if (mutate && indices_to_recalc.insert(index_factory.get(index)).second) - { - ASTPtr expr_list = index.expression_list_ast->clone(); - for (const auto & expr : expr_list->children) - indices_recalc_expr_list->children.push_back(expr->clone()); - } - } } if (!indices_to_recalc.empty() && builder.initialized()) @@ -545,37 +482,15 @@ static std::set getIndicesToRecalculate( return indices_to_recalc; } -std::set getProjectionsToRecalculate( - const NameSet & updated_columns, +static std::set getProjectionsToRecalculate( const StorageMetadataPtr & metadata_snapshot, - const NameSet & materialized_projections, - const MergeTreeData::DataPartPtr & source_part) + const NameSet & materialized_projections) { - /// Checks if columns used in projections modified. std::set projections_to_recalc; for (const auto & projection : metadata_snapshot->getProjections()) { - // If we ask to materialize and it doesn't exist - if (!source_part->checksums.has(projection.name + ".proj") && materialized_projections.contains(projection.name)) - { + if (materialized_projections.contains(projection.name)) projections_to_recalc.insert(&projection); - } - else - { - // If some dependent columns gets mutated - bool mutate = false; - const auto & projection_cols = projection.required_columns; - for (const auto & col : projection_cols) - { - if (updated_columns.contains(col)) - { - mutate = true; - break; - } - } - if (mutate) - projections_to_recalc.insert(&projection); - } } return projections_to_recalc; } @@ -618,33 +533,6 @@ static NameSet collectFilesToSkip( /// Do not hardlink this file because it's always rewritten at the end of mutation. files_to_skip.insert(IMergeTreeDataPart::SERIALIZATION_FILE_NAME); - auto new_stream_counts = getStreamCounts(new_part, new_part->getColumns().getNames()); - auto source_updated_stream_counts = getStreamCounts(source_part, updated_header.getNames()); - auto new_updated_stream_counts = getStreamCounts(new_part, updated_header.getNames()); - - /// Skip all modified files in new part. - for (const auto & [stream_name, _] : new_updated_stream_counts) - { - files_to_skip.insert(stream_name + ".bin"); - files_to_skip.insert(stream_name + mrk_extension); - } - - /// Skip files that we read from source part and do not write in new part. - /// E.g. ALTER MODIFY from LowCardinality(String) to String. - for (const auto & [stream_name, _] : source_updated_stream_counts) - { - /// If we read shared stream and do not write it - /// (e.g. 
while ALTER MODIFY COLUMN from array of Nested type to String), - /// we need to hardlink its files, because they will be lost otherwise. - bool need_hardlink = new_updated_stream_counts[stream_name] == 0 && new_stream_counts[stream_name] != 0; - - if (!need_hardlink) - { - files_to_skip.insert(stream_name + ".bin"); - files_to_skip.insert(stream_name + mrk_extension); - } - } - for (const auto & index : indices_to_recalc) { /// Since MinMax index has .idx2 extension, we need to add correct extension. @@ -655,6 +543,36 @@ static NameSet collectFilesToSkip( for (const auto & projection : projections_to_recalc) files_to_skip.insert(projection->getDirectoryName()); + if (isWidePart(source_part)) + { + auto new_stream_counts = getStreamCounts(new_part, new_part->getColumns().getNames()); + auto source_updated_stream_counts = getStreamCounts(source_part, updated_header.getNames()); + auto new_updated_stream_counts = getStreamCounts(new_part, updated_header.getNames()); + + /// Skip all modified files in new part. + for (const auto & [stream_name, _] : new_updated_stream_counts) + { + files_to_skip.insert(stream_name + ".bin"); + files_to_skip.insert(stream_name + mrk_extension); + } + + /// Skip files that we read from source part and do not write in new part. + /// E.g. ALTER MODIFY from LowCardinality(String) to String. + for (const auto & [stream_name, _] : source_updated_stream_counts) + { + /// If we read shared stream and do not write it + /// (e.g. while ALTER MODIFY COLUMN from array of Nested type to String), + /// we need to hardlink its files, because they will be lost otherwise. + bool need_hardlink = new_updated_stream_counts[stream_name] == 0 && new_stream_counts[stream_name] != 0; + + if (!need_hardlink) + { + files_to_skip.insert(stream_name + ".bin"); + files_to_skip.insert(stream_name + mrk_extension); + } + } + } + return files_to_skip; } @@ -701,57 +619,60 @@ static NameToNameVector collectFilesForRenames( if (source_part->checksums.has(command.column_name + ".proj")) add_rename(command.column_name + ".proj", ""); } - else if (command.type == MutationCommand::Type::DROP_COLUMN) + else if (isWidePart(source_part)) { - ISerialization::StreamCallback callback = [&](const ISerialization::SubstreamPath & substream_path) + if (command.type == MutationCommand::Type::DROP_COLUMN) { - String stream_name = ISerialization::getFileNameForStream({command.column_name, command.data_type}, substream_path); - /// Delete files if they are no longer shared with another column. - if (--stream_counts[stream_name] == 0) + ISerialization::StreamCallback callback = [&](const ISerialization::SubstreamPath & substream_path) { - add_rename(stream_name + ".bin", ""); - add_rename(stream_name + mrk_extension, ""); - } - }; + String stream_name = ISerialization::getFileNameForStream({command.column_name, command.data_type}, substream_path); + /// Delete files if they are no longer shared with another column. 
+ if (--stream_counts[stream_name] == 0) + { + add_rename(stream_name + ".bin", ""); + add_rename(stream_name + mrk_extension, ""); + } + }; - if (auto serialization = source_part->tryGetSerialization(command.column_name)) - serialization->enumerateStreams(callback); - } - else if (command.type == MutationCommand::Type::RENAME_COLUMN) - { - String escaped_name_from = escapeForFileName(command.column_name); - String escaped_name_to = escapeForFileName(command.rename_to); - - ISerialization::StreamCallback callback = [&](const ISerialization::SubstreamPath & substream_path) + if (auto serialization = source_part->tryGetSerialization(command.column_name)) + serialization->enumerateStreams(callback); + } + else if (command.type == MutationCommand::Type::RENAME_COLUMN) { - String stream_from = ISerialization::getFileNameForStream(command.column_name, substream_path); - String stream_to = boost::replace_first_copy(stream_from, escaped_name_from, escaped_name_to); + String escaped_name_from = escapeForFileName(command.column_name); + String escaped_name_to = escapeForFileName(command.rename_to); - if (stream_from != stream_to) + ISerialization::StreamCallback callback = [&](const ISerialization::SubstreamPath & substream_path) { - add_rename(stream_from + ".bin", stream_to + ".bin"); - add_rename(stream_from + mrk_extension, stream_to + mrk_extension); - } - }; + String stream_from = ISerialization::getFileNameForStream(command.column_name, substream_path); + String stream_to = boost::replace_first_copy(stream_from, escaped_name_from, escaped_name_to); - if (auto serialization = source_part->tryGetSerialization(command.column_name)) - serialization->enumerateStreams(callback); - } - else if (command.type == MutationCommand::Type::READ_COLUMN) - { - /// Remove files for streams that exist in source_part, - /// but were removed in new_part by MODIFY COLUMN from - /// type with higher number of streams (e.g. LowCardinality -> String). + if (stream_from != stream_to) + { + add_rename(stream_from + ".bin", stream_to + ".bin"); + add_rename(stream_from + mrk_extension, stream_to + mrk_extension); + } + }; - auto old_streams = getStreamCounts(source_part, source_part->getColumns().getNames()); - auto new_streams = getStreamCounts(new_part, source_part->getColumns().getNames()); - - for (const auto & [old_stream, _] : old_streams) + if (auto serialization = source_part->tryGetSerialization(command.column_name)) + serialization->enumerateStreams(callback); + } + else if (command.type == MutationCommand::Type::READ_COLUMN) { - if (!new_streams.contains(old_stream) && --stream_counts[old_stream] == 0) + /// Remove files for streams that exist in source_part, + /// but were removed in new_part by MODIFY COLUMN from + /// type with higher number of streams (e.g. LowCardinality -> String). 
+ + auto old_streams = getStreamCounts(source_part, source_part->getColumns().getNames()); + auto new_streams = getStreamCounts(new_part, source_part->getColumns().getNames()); + + for (const auto & [old_stream, _] : old_streams) { - add_rename(old_stream + ".bin", ""); - add_rename(old_stream + mrk_extension, ""); + if (!new_streams.contains(old_stream) && --stream_counts[old_stream] == 0) + { + add_rename(old_stream + ".bin", ""); + add_rename(old_stream + mrk_extension, ""); + } } } } @@ -851,11 +772,8 @@ void finalizeMutatedPart( new_data_part->minmax_idx = source_part->minmax_idx; new_data_part->modification_time = time(nullptr); - /// This line should not be here because at that moment - /// of executing of mutation all projections should be loaded. - /// But unfortunately without it some tests fail. - /// TODO: fix. - new_data_part->loadProjections(false, false); + /// Load rest projections which are hardlinked + new_data_part->loadProjections(false, false, true /* if_not_loaded */); /// All information about sizes is stored in checksums. /// It doesn't make sense to touch filesystem for sizes. @@ -917,9 +835,9 @@ struct MutationContext std::vector projections_to_build; IMergeTreeDataPart::MinMaxIndexPtr minmax_idx{nullptr}; - NameSet updated_columns; std::set indices_to_recalc; std::set projections_to_recalc; + MergeTreeData::DataPart::Checksums existing_indices_checksums; NameSet files_to_skip; NameToNameVector files_to_rename; @@ -1331,10 +1249,102 @@ private: /// (which is locked in shared mode when input streams are created) and when inserting new data /// the order is reverse. This annoys TSan even though one lock is locked in shared mode and thus /// deadlock is impossible. - ctx->compression_codec = ctx->data->getCompressionCodecForPart(ctx->source_part->getBytesOnDisk(), ctx->source_part->ttl_infos, ctx->time_of_mutation); + ctx->compression_codec + = ctx->data->getCompressionCodecForPart(ctx->source_part->getBytesOnDisk(), ctx->source_part->ttl_infos, ctx->time_of_mutation); - auto skip_part_indices = MutationHelpers::getIndicesForNewDataPart(ctx->metadata_snapshot->getSecondaryIndices(), ctx->for_file_renames); - ctx->projections_to_build = MutationHelpers::getProjectionsForNewDataPart(ctx->metadata_snapshot->getProjections(), ctx->for_file_renames); + NameSet entries_to_hardlink; + + NameSet removed_indices; + for (const auto & command : ctx->for_file_renames) + { + if (command.type == MutationCommand::DROP_INDEX) + removed_indices.insert(command.column_name); + } + + const auto & indices = ctx->metadata_snapshot->getSecondaryIndices(); + MergeTreeIndices skip_indices; + for (const auto & idx : indices) + { + if (removed_indices.contains(idx.name)) + continue; + + if (ctx->materialized_indices.contains(idx.name)) + skip_indices.push_back(MergeTreeIndexFactory::instance().get(idx)); + + auto hardlink_index = [&](const String & idx_name) + { + if (ctx->source_part->checksums.has(idx_name)) + { + auto it = ctx->source_part->checksums.files.find(idx_name); + if (it != ctx->source_part->checksums.files.end()) + { + entries_to_hardlink.insert(idx_name); + ctx->existing_indices_checksums.addFile(idx_name, it->second.file_size, it->second.file_hash); + } + } + }; + hardlink_index(INDEX_FILE_PREFIX + idx.name + ".idx"); + hardlink_index(INDEX_FILE_PREFIX + idx.name + ".idx2"); + } + + NameSet removed_projections; + for (const auto & command : ctx->for_file_renames) + { + if (command.type == MutationCommand::DROP_PROJECTION) + removed_projections.insert(command.column_name); + } + 
+ const auto & projections = ctx->metadata_snapshot->getProjections(); + for (const auto & projection : projections) + { + if (removed_projections.contains(projection.name)) + continue; + + if (ctx->materialized_projections.contains(projection.name)) + ctx->projections_to_build.push_back(&projection); + + if (ctx->source_part->checksums.has(projection.getDirectoryName())) + entries_to_hardlink.insert(projection.getDirectoryName()); + } + + NameSet hardlinked_files; + /// Create hardlinks for unchanged files + for (auto it = ctx->source_part->getDataPartStorage().iterate(); it->isValid(); it->next()) + { + if (!entries_to_hardlink.contains(it->name())) + continue; + + if (it->isFile()) + { + ctx->new_data_part->getDataPartStorage().createHardLinkFrom( + ctx->source_part->getDataPartStorage(), it->name(), it->name()); + hardlinked_files.insert(it->name()); + } + else + { + // it's a projection part directory + ctx->new_data_part->getDataPartStorage().createProjection(it->name()); + + auto projection_data_part_storage_src = ctx->source_part->getDataPartStorage().getProjection(it->name()); + auto projection_data_part_storage_dst = ctx->new_data_part->getDataPartStorage().getProjection(it->name()); + + for (auto p_it = projection_data_part_storage_src->iterate(); p_it->isValid(); p_it->next()) + { + projection_data_part_storage_dst->createHardLinkFrom( + *projection_data_part_storage_src, p_it->name(), p_it->name()); + + auto file_name_with_projection_prefix = fs::path(projection_data_part_storage_src->getPartDirectory()) / p_it->name(); + hardlinked_files.insert(file_name_with_projection_prefix); + } + } + } + + /// Tracking of hardlinked files required for zero-copy replication. + /// We don't remove them when we delete last copy of source part because + /// new part can use them. + ctx->hardlinked_files.source_table_shared_id = ctx->source_part->storage.getTableSharedID(); + ctx->hardlinked_files.source_part_name = ctx->source_part->name; + ctx->hardlinked_files.hardlinks_from_source_part = std::move(hardlinked_files); if (!ctx->mutating_pipeline_builder.initialized()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot mutate part columns with uninitialized mutations stream. It's a bug"); @@ -1343,8 +1353,8 @@ private: if (ctx->metadata_snapshot->hasPrimaryKey() || ctx->metadata_snapshot->hasSecondaryIndices()) { - builder.addTransform( - std::make_shared(builder.getHeader(), ctx->data->getPrimaryKeyAndSkipIndicesExpression(ctx->metadata_snapshot))); + builder.addTransform(std::make_shared( + builder.getHeader(), ctx->data->getPrimaryKeyAndSkipIndicesExpression(ctx->metadata_snapshot, skip_indices))); builder.addTransform(std::make_shared(builder.getHeader())); } @@ -1361,7 +1371,7 @@ private: ctx->new_data_part, ctx->metadata_snapshot, ctx->new_data_part->getColumns(), - skip_part_indices, + skip_indices, ctx->compression_codec, ctx->txn, /*reset_columns=*/ true, @@ -1381,10 +1391,12 @@ private: void finalize() { ctx->new_data_part->minmax_idx = std::move(ctx->minmax_idx); + ctx->new_data_part->loadProjections(false, false, true /* if_not_loaded */); ctx->mutating_executor.reset(); ctx->mutating_pipeline.reset(); - static_pointer_cast(ctx->out)->finalizePart(ctx->new_data_part, ctx->need_sync); + static_pointer_cast(ctx->out)->finalizePart( + ctx->new_data_part, ctx->need_sync, nullptr, &ctx->existing_indices_checksums); ctx->out.reset(); } @@ -1530,7 +1542,7 @@ private: /// new part can use them. 
ctx->hardlinked_files.source_table_shared_id = ctx->source_part->storage.getTableSharedID(); ctx->hardlinked_files.source_part_name = ctx->source_part->name; - ctx->hardlinked_files.hardlinks_from_source_part = hardlinked_files; + ctx->hardlinked_files.hardlinks_from_source_part = std::move(hardlinked_files); (*ctx->mutate_entry)->columns_written = ctx->storage_columns.size() - ctx->updated_header.columns(); @@ -1878,14 +1890,10 @@ bool MutateTask::prepare() } else /// TODO: check that we modify only non-key columns in this case. { - /// We will modify only some of the columns. Other columns and key values can be copied as-is. - for (const auto & name_type : ctx->updated_header.getNamesAndTypesList()) - ctx->updated_columns.emplace(name_type.name); - ctx->indices_to_recalc = MutationHelpers::getIndicesToRecalculate( - ctx->mutating_pipeline_builder, ctx->updated_columns, ctx->metadata_snapshot, ctx->context, ctx->materialized_indices, ctx->source_part); - ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate( - ctx->updated_columns, ctx->metadata_snapshot, ctx->materialized_projections, ctx->source_part); + ctx->mutating_pipeline_builder, ctx->metadata_snapshot, ctx->context, ctx->materialized_indices); + + ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate(ctx->metadata_snapshot, ctx->materialized_projections); ctx->files_to_skip = MutationHelpers::collectFilesToSkip( ctx->source_part, diff --git a/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp index 45abd4bebef..afe75349864 100644 --- a/src/Storages/StorageInMemoryMetadata.cpp +++ b/src/Storages/StorageInMemoryMetadata.cpp @@ -236,7 +236,10 @@ bool StorageInMemoryMetadata::hasAnyGroupByTTL() const return !table_ttl.group_by_ttl.empty(); } -ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet & updated_columns, bool include_ttl_target) const +ColumnDependencies StorageInMemoryMetadata::getColumnDependencies( + const NameSet & updated_columns, + bool include_ttl_target, + const std::function & has_indice_or_projection) const { if (updated_columns.empty()) return {}; @@ -264,10 +267,16 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet }; for (const auto & index : getSecondaryIndices()) - add_dependent_columns(index.expression, indices_columns); + { + if (has_indice_or_projection("skp_idx_" + index.name + ".idx") || has_indice_or_projection("skp_idx_" + index.name + ".idx2")) + add_dependent_columns(index.expression, indices_columns); + } for (const auto & projection : getProjections()) - add_dependent_columns(&projection, projections_columns); + { + if (has_indice_or_projection(projection.getDirectoryName())) + add_dependent_columns(&projection, projections_columns); + } auto add_for_rows_ttl = [&](const auto & expression, auto & to_set) { @@ -312,7 +321,6 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet res.emplace(column, ColumnDependency::TTL_TARGET); return res; - } Block StorageInMemoryMetadata::getSampleBlockInsertable() const diff --git a/src/Storages/StorageInMemoryMetadata.h b/src/Storages/StorageInMemoryMetadata.h index 25618c5b03f..4ed7eb8bf29 100644 --- a/src/Storages/StorageInMemoryMetadata.h +++ b/src/Storages/StorageInMemoryMetadata.h @@ -147,9 +147,12 @@ struct StorageInMemoryMetadata TTLDescriptions getGroupByTTLs() const; bool hasAnyGroupByTTL() const; - /// Returns columns, which will be needed to calculate dependencies (skip - /// indices, TTL 
expressions) if we update @updated_columns set of columns. - ColumnDependencies getColumnDependencies(const NameSet & updated_columns, bool include_ttl_target) const; + /// Returns columns, which will be needed to calculate dependencies (skip indices, projections, + /// TTL expressions) if we update @updated_columns set of columns. + ColumnDependencies getColumnDependencies( + const NameSet & updated_columns, + bool include_ttl_target, + const std::function & has_indice_or_projection) const; /// Block with ordinary + materialized columns. Block getSampleBlock() const; diff --git a/tests/queries/0_stateless/02763_mutate_compact_part_with_skip_indices_and_projections.reference b/tests/queries/0_stateless/02763_mutate_compact_part_with_skip_indices_and_projections.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02763_mutate_compact_part_with_skip_indices_and_projections.sql b/tests/queries/0_stateless/02763_mutate_compact_part_with_skip_indices_and_projections.sql new file mode 100644 index 00000000000..bb9825fe5a0 --- /dev/null +++ b/tests/queries/0_stateless/02763_mutate_compact_part_with_skip_indices_and_projections.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test ( col1 Int64, dt Date ) ENGINE = MergeTree PARTITION BY dt ORDER BY tuple(); + +INSERT INTO test FORMAT Values (1, today()); + +ALTER TABLE test ADD COLUMN col2 String; + +ALTER TABLE test ADD INDEX i1 (col1, col2) TYPE set(100) GRANULARITY 1; + +ALTER TABLE test MATERIALIZE INDEX i1; + +ALTER TABLE test ADD COLUMN col3 String; + +ALTER TABLE test DROP COLUMN col3; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test ( col1 Int64, dt Date ) ENGINE = MergeTree PARTITION BY dt ORDER BY tuple(); + +INSERT INTO test FORMAT Values (1, today()); + +ALTER TABLE test ADD COLUMN col2 String; + +ALTER TABLE test ADD PROJECTION p1 ( SELECT col2, sum(col1) GROUP BY col2 ); + +ALTER TABLE test MATERIALIZE PROJECTION p1; + +ALTER TABLE test ADD COLUMN col3 String; + +ALTER TABLE test DROP COLUMN col3; From 92b2200c55f27eefceb610535e813e62bd49ffce Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Tue, 23 May 2023 20:46:50 +0800 Subject: [PATCH 198/722] mutation stages can be empty --- src/Interpreters/MutationsInterpreter.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 791018a3f38..1059e1fdae5 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -901,6 +901,11 @@ void MutationsInterpreter::prepare(bool dry_run) } } + /// Stages might be empty when we materialize skip indices or projections which don't add any + /// column dependencies. 
+ if (stages.empty()) + stages.emplace_back(context); + is_prepared = true; prepareMutationStages(stages, dry_run); } From e24c9267bc12026ff3acde78675464a17f6d5cc1 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Sun, 4 Jun 2023 20:06:27 +0800 Subject: [PATCH 199/722] fix --- src/Interpreters/MutationsInterpreter.cpp | 68 +++++++++-------------- src/Storages/MergeTree/MutateTask.cpp | 39 +++++++------ 2 files changed, 47 insertions(+), 60 deletions(-) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 1059e1fdae5..25bb3fc5e82 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -532,55 +532,12 @@ void MutationsInterpreter::prepare(bool dry_run) validateUpdateColumns(source, metadata_snapshot, updated_columns, column_to_affected_materialized); } - for (const auto & [_, names] : column_to_affected_materialized) - updated_columns.insert(names.begin(), names.end()); - std::function has_index_or_projection = [&](const String & file_name) { return source.hasIndexOrProjection(file_name); }; if (settings.recalculate_dependencies_of_updated_columns) dependencies = getAllColumnDependencies(metadata_snapshot, updated_columns, has_index_or_projection); - for (const auto & index : metadata_snapshot->getSecondaryIndices()) - { - if (source.hasIndexOrProjection("skp_idx_" + index.name + ".idx") || source.hasIndexOrProjection("skp_idx_" + index.name + ".idx2")) - { - // If some dependent columns gets mutated - bool mutate = false; - const auto & index_cols = index.expression->getRequiredColumns(); - for (const auto & col : index_cols) - { - if (updated_columns.contains(col)) - { - mutate = true; - break; - } - } - if (mutate) - materialized_indices.insert(index.name); - } - } - - for (const auto & projection : metadata_snapshot->getProjections()) - { - if (source.hasIndexOrProjection(projection.getDirectoryName())) - { - // If some dependent columns gets mutated - bool mutate = false; - const auto & projection_cols = projection.required_columns; - for (const auto & col : projection_cols) - { - if (updated_columns.contains(col)) - { - mutate = true; - break; - } - } - if (mutate) - materialized_projections.insert(projection.name); - } - } - std::vector read_columns; /// First, break a sequence of commands into stages. 
for (auto & command : commands) @@ -869,6 +826,31 @@ void MutationsInterpreter::prepare(bool dry_run) for (const auto & column : changed_columns) stages.back().column_to_updated.emplace( column, std::make_shared(column)); + + for (const auto & index : metadata_snapshot->getSecondaryIndices()) + { + if (source.hasIndexOrProjection("skp_idx_" + index.name + ".idx") + || source.hasIndexOrProjection("skp_idx_" + index.name + ".idx2")) + { + const auto & index_cols = index.expression->getRequiredColumns(); + bool changed = std::any_of( + index_cols.begin(), index_cols.end(), [&](const auto & col) { return changed_columns.contains(col); }); + if (changed) + materialized_indices.insert(index.name); + } + } + + for (const auto & projection : metadata_snapshot->getProjections()) + { + if (source.hasIndexOrProjection(projection.getDirectoryName())) + { + const auto & projection_cols = projection.required_columns; + bool changed = std::any_of( + projection_cols.begin(), projection_cols.end(), [&](const auto & col) { return changed_columns.contains(col); }); + if (changed) + materialized_projections.insert(projection.name); + } + } } if (!unchanged_columns.empty()) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index d65897ac97d..7031027002d 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -1269,22 +1269,23 @@ private: continue; if (ctx->materialized_indices.contains(idx.name)) - skip_indices.push_back(MergeTreeIndexFactory::instance().get(idx)); - - auto hardlink_index = [&](const String & idx_name) { - if (ctx->source_part->checksums.has(idx_name)) + skip_indices.push_back(MergeTreeIndexFactory::instance().get(idx)); + } + else + { + auto prefix = fmt::format("{}{}.", INDEX_FILE_PREFIX, idx.name); + auto it = ctx->source_part->checksums.files.upper_bound(prefix); + while (it != ctx->source_part->checksums.files.end()) { - auto it = ctx->source_part->checksums.files.find(idx_name); - if (it != ctx->source_part->checksums.files.end()) - { - entries_to_hardlink.insert(idx_name); - ctx->existing_indices_checksums.addFile(idx_name, it->second.file_size, it->second.file_hash); - } + if (!startsWith(it->first, prefix)) + break; + + entries_to_hardlink.insert(it->first); + ctx->existing_indices_checksums.addFile(it->first, it->second.file_size, it->second.file_hash); + ++it; } - }; - hardlink_index(INDEX_FILE_PREFIX + idx.name + ".idx"); - hardlink_index(INDEX_FILE_PREFIX + idx.name + ".idx2"); + } } NameSet removed_projections; @@ -1301,10 +1302,14 @@ private: continue; if (ctx->materialized_projections.contains(projection.name)) + { ctx->projections_to_build.push_back(&projection); - - if (ctx->source_part->checksums.has(projection.getDirectoryName())) - entries_to_hardlink.insert(projection.getDirectoryName()); + } + else + { + if (ctx->source_part->checksums.has(projection.getDirectoryName())) + entries_to_hardlink.insert(projection.getDirectoryName()); + } } NameSet hardlinked_files; @@ -1354,7 +1359,7 @@ private: if (ctx->metadata_snapshot->hasPrimaryKey() || ctx->metadata_snapshot->hasSecondaryIndices()) { builder.addTransform(std::make_shared( - builder.getHeader(), ctx->data->getPrimaryKeyAndSkipIndicesExpression(ctx->metadata_snapshot, skip_indices))); + builder.getHeader(), ctx->data->getPrimaryKeyAndSkipIndicesExpression(ctx->metadata_snapshot, skip_indices))); builder.addTransform(std::make_shared(builder.getHeader())); } From a66f68e5df5584cf08a42ff6dd12b4e935f2cb3a Mon Sep 17 00:00:00 2001 From: Amos 
Bird Date: Mon, 5 Jun 2023 01:48:13 +0800 Subject: [PATCH 200/722] fix again --- src/Interpreters/MutationsInterpreter.cpp | 58 ++++++++++++----------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 25bb3fc5e82..25c52ad8925 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -38,6 +38,7 @@ #include #include #include +#include namespace DB @@ -804,10 +805,10 @@ void MutationsInterpreter::prepare(bool dry_run) /// We care about affected indices and projections because we also need to rewrite them /// when one of index columns updated or filtered with delete. /// The same about columns, that are needed for calculation of TTL expressions. + NameSet changed_columns; + NameSet unchanged_columns; if (!dependencies.empty()) { - NameSet changed_columns; - NameSet unchanged_columns; for (const auto & dependency : dependencies) { if (dependency.isReadOnly()) @@ -826,31 +827,6 @@ void MutationsInterpreter::prepare(bool dry_run) for (const auto & column : changed_columns) stages.back().column_to_updated.emplace( column, std::make_shared(column)); - - for (const auto & index : metadata_snapshot->getSecondaryIndices()) - { - if (source.hasIndexOrProjection("skp_idx_" + index.name + ".idx") - || source.hasIndexOrProjection("skp_idx_" + index.name + ".idx2")) - { - const auto & index_cols = index.expression->getRequiredColumns(); - bool changed = std::any_of( - index_cols.begin(), index_cols.end(), [&](const auto & col) { return changed_columns.contains(col); }); - if (changed) - materialized_indices.insert(index.name); - } - } - - for (const auto & projection : metadata_snapshot->getProjections()) - { - if (source.hasIndexOrProjection(projection.getDirectoryName())) - { - const auto & projection_cols = projection.required_columns; - bool changed = std::any_of( - projection_cols.begin(), projection_cols.end(), [&](const auto & col) { return changed_columns.contains(col); }); - if (changed) - materialized_projections.insert(projection.name); - } - } } if (!unchanged_columns.empty()) @@ -883,6 +859,34 @@ void MutationsInterpreter::prepare(bool dry_run) } } + for (const auto & index : metadata_snapshot->getSecondaryIndices()) + { + if (source.hasIndexOrProjection("skp_idx_" + index.name + ".idx") || source.hasIndexOrProjection("skp_idx_" + index.name + ".idx2")) + { + const auto & index_cols = index.expression->getRequiredColumns(); + bool changed = std::any_of( + index_cols.begin(), + index_cols.end(), + [&](const auto & col) { return updated_columns.contains(col) || changed_columns.contains(col); }); + if (changed) + materialized_indices.insert(index.name); + } + } + + for (const auto & projection : metadata_snapshot->getProjections()) + { + if (source.hasIndexOrProjection(projection.getDirectoryName())) + { + const auto & projection_cols = projection.required_columns; + bool changed = std::any_of( + projection_cols.begin(), + projection_cols.end(), + [&](const auto & col) { return updated_columns.contains(col) || changed_columns.contains(col); }); + if (changed) + materialized_projections.insert(projection.name); + } + } + /// Stages might be empty when we materialize skip indices or projections which don't add any /// column dependencies. 
if (stages.empty()) From 2cc457141ed83a50c7a6e4dc395325c6fd4a898d Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Sun, 4 Jun 2023 15:32:54 -0300 Subject: [PATCH 201/722] clean documentation of ip4 ip6 from domains --- docs/en/interfaces/formats.md | 34 +++++++++---------- docs/en/operations/system-tables/query_log.md | 4 +-- .../system-tables/query_thread_log.md | 4 +-- .../operations/system-tables/session_log.md | 2 +- .../operations/system-tables/zookeeper_log.md | 2 +- docs/en/sql-reference/data-types/index.md | 2 +- .../data-types/{domains => }/ipv4.md | 27 +++------------ .../data-types/{domains => }/ipv6.md | 29 +++------------- .../functions/ip-address-functions.md | 6 ++-- docs/redirects.txt | 10 +++--- 10 files changed, 41 insertions(+), 79 deletions(-) rename docs/en/sql-reference/data-types/{domains => }/ipv4.md (60%) rename docs/en/sql-reference/data-types/{domains => }/ipv6.md (61%) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 2ab9e8caec4..d75fb32b571 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -1292,8 +1292,8 @@ For output it uses the following correspondence between ClickHouse types and BSO | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `\x04` array | | [Named Tuple](/docs/en/sql-reference/data-types/tuple.md) | `\x03` document | | [Map](/docs/en/sql-reference/data-types/map.md) | `\x03` document | -| [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `\x10` int32 | -| [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `\x05` binary, `\x00` binary subtype | +| [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `\x10` int32 | +| [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `\x05` binary, `\x00` binary subtype | For input it uses the following correspondence between BSON types and ClickHouse types: @@ -1303,7 +1303,7 @@ For input it uses the following correspondence between BSON types and ClickHouse | `\x02` string | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | | `\x03` document | [Map](/docs/en/sql-reference/data-types/map.md)/[Named Tuple](/docs/en/sql-reference/data-types/tuple.md) | | `\x04` array | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) | -| `\x05` binary, `\x00` binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md)/[IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | +| `\x05` binary, `\x00` binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md)/[IPv6](/docs/en/sql-reference/data-types/ipv6.md) | | `\x05` binary, `\x02` old binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | | `\x05` binary, `\x03` old uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) | | `\x05` binary, `\x04` uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) | @@ -1313,7 +1313,7 @@ For input it uses the following correspondence between BSON types and ClickHouse | `\x0A` null value | [NULL](/docs/en/sql-reference/data-types/nullable.md) | | `\x0D` JavaScript code | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | | `\x0E` symbol | 
[String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | -| `\x10` int32 | [Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal32](/docs/en/sql-reference/data-types/decimal.md)/[IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md)/[Enum8/Enum16](/docs/en/sql-reference/data-types/enum.md) | +| `\x10` int32 | [Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal32](/docs/en/sql-reference/data-types/decimal.md)/[IPv4](/docs/en/sql-reference/data-types/ipv4.md)/[Enum8/Enum16](/docs/en/sql-reference/data-types/enum.md) | | `\x12` int64 | [Int64/UInt64](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal64](/docs/en/sql-reference/data-types/decimal.md)/[DateTime64](/docs/en/sql-reference/data-types/datetime64.md) | Other BSON types are not supported. Also, it performs conversion between different integer types (for example, you can insert BSON int32 value into ClickHouse UInt8). @@ -1663,8 +1663,8 @@ The table below shows supported data types and how they match ClickHouse [data t | `ENUM` | [Enum(8/16)](/docs/en/sql-reference/data-types/enum.md) | `ENUM` | | `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` | | `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` | -| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` | -| `DATA` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `DATA` | +| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` | +| `DATA` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `DATA` | | `DATA` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `DATA` | | `DATA` | [Decimal128/Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `DATA` | | `STRUCT(entries LIST(STRUCT(key Key, value Value)))` | [Map](/docs/en/sql-reference/data-types/map.md) | `STRUCT(entries LIST(STRUCT(key Key, value Value)))` | @@ -1866,8 +1866,8 @@ The table below shows supported data types and how they match ClickHouse [data t | `long (timestamp-millis)` \** | [DateTime64(3)](/docs/en/sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \** | | `long (timestamp-micros)` \** | [DateTime64(6)](/docs/en/sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \** | | `bytes (decimal)` \** | [DateTime64(N)](/docs/en/sql-reference/data-types/datetime.md) | `bytes (decimal)` \** | -| `int` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `int` | -| `fixed(16)` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `fixed(16)` | +| `int` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `int` | +| `fixed(16)` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `fixed(16)` | | `bytes (decimal)` \** | [Decimal(P, S)](/docs/en/sql-reference/data-types/decimal.md) | `bytes (decimal)` \** | | `string (uuid)` \** | [UUID](/docs/en/sql-reference/data-types/uuid.md) | `string (uuid)` \** | | `fixed(16)` | [Int128/UInt128](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(16)` | @@ -2001,9 +2001,9 @@ The table below shows supported data types and how they match ClickHouse [data t | `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` | | `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` | | `MAP` | [Map](/docs/en/sql-reference/data-types/map.md) | `MAP` | -| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` | -| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` 
| [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `FIXED_LENGTH_BYTE_ARRAY` | -| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_LENGTH_BYTE_ARRAY` | +| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` | +| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `FIXED_LENGTH_BYTE_ARRAY` | +| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_LENGTH_BYTE_ARRAY` | Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested. @@ -2057,7 +2057,7 @@ Special format for reading Parquet file metadata (https://parquet.apache.org/doc - logical_type - column logical type - compression - compression used for this column - total_uncompressed_size - total uncompressed bytes size of the column, calculated as the sum of total_uncompressed_size of the column from all row groups - - total_compressed_size - total compressed bytes size of the column, calculated as the sum of total_compressed_size of the column from all row groups + - total_compressed_size - total compressed bytes size of the column, calculated as the sum of total_compressed_size of the column from all row groups - space_saved - percent of space saved by compression, calculated as (1 - total_compressed_size/total_uncompressed_size). - encodings - the list of encodings used for this column - row_groups - the list of row groups metadata with the next structure: @@ -2204,9 +2204,9 @@ The table below shows supported data types and how they match ClickHouse [data t | `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` | | `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` | | `MAP` | [Map](/docs/en/sql-reference/data-types/map.md) | `MAP` | -| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` | -| `FIXED_SIZE_BINARY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `FIXED_SIZE_BINARY` | -| `FIXED_SIZE_BINARY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_SIZE_BINARY` | +| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` | +| `FIXED_SIZE_BINARY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `FIXED_SIZE_BINARY` | +| `FIXED_SIZE_BINARY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_SIZE_BINARY` | Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested. 
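To make the type mappings above concrete, a minimal sketch (the literal values here are arbitrary): per the tables, an `IPv4` value is written out as a 32-bit unsigned integer and an `IPv6` value as fixed-length binary when exported in one of these formats.

```sql
SELECT toIPv4('116.106.34.242') AS v4, toIPv6('2001:db8::1') AS v6
FORMAT Arrow;
```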
@@ -2272,7 +2272,7 @@ The table below shows supported data types and how they match ClickHouse [data t | `Struct` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `Struct` | | `Map` | [Map](/docs/en/sql-reference/data-types/map.md) | `Map` | | `Int` | [IPv4](/docs/en/sql-reference/data-types/int-uint.md) | `Int` | -| `Binary` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `Binary` | +| `Binary` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `Binary` | | `Binary` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `Binary` | | `Binary` | [Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `Binary` | @@ -2485,7 +2485,7 @@ ClickHouse supports reading and writing [MessagePack](https://msgpack.org/) data | `uint 64` | [DateTime64](/docs/en/sql-reference/data-types/datetime.md) | `uint 64` | | `fixarray`, `array 16`, `array 32` | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) | `fixarray`, `array 16`, `array 32` | | `fixmap`, `map 16`, `map 32` | [Map](/docs/en/sql-reference/data-types/map.md) | `fixmap`, `map 16`, `map 32` | -| `uint 32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `uint 32` | +| `uint 32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `uint 32` | | `bin 8` | [String](/docs/en/sql-reference/data-types/string.md) | `bin 8` | | `int 8` | [Enum8](/docs/en/sql-reference/data-types/enum.md) | `int 8` | | `bin 8` | [(U)Int128/(U)Int256](/docs/en/sql-reference/data-types/int-uint.md) | `bin 8` | diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md index 71e1452cef1..b9fdd19c643 100644 --- a/docs/en/operations/system-tables/query_log.md +++ b/docs/en/operations/system-tables/query_log.md @@ -71,11 +71,11 @@ Columns: - 0 — Query was initiated by another query as part of distributed query execution. - `user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query. - `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that was used to make the query. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the query. - `initial_user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution). - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution). -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from. +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that the parent query was launched from. - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query. - `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Initial query starting time (for distributed query execution). - `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Initial query starting time with microseconds precision (for distributed query execution). 
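Since `address` and `initial_address` above are typed as `IPv6`, they can be compared directly against values produced by `toIPv6`; a rough illustration (the results depend entirely on the local query log):

```sql
SELECT query_id, address, initial_address
FROM system.query_log
WHERE event_date = today() AND address = toIPv6('::1')
LIMIT 5;
```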
diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md index cdd23bb15db..a6d5632ade9 100644 --- a/docs/en/operations/system-tables/query_thread_log.md +++ b/docs/en/operations/system-tables/query_thread_log.md @@ -40,11 +40,11 @@ Columns: - 0 — Query was initiated by another query for distributed query execution. - `user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query. - `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that was used to make the query. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the query. - `initial_user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution). - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution). -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from. +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that the parent query was launched from. - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the parent query. - `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Interface that the query was initiated from. Possible values: - 1 — TCP. diff --git a/docs/en/operations/system-tables/session_log.md b/docs/en/operations/system-tables/session_log.md index 661d34677e4..5b1a2b2a489 100644 --- a/docs/en/operations/system-tables/session_log.md +++ b/docs/en/operations/system-tables/session_log.md @@ -28,7 +28,7 @@ Columns: - `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of profiles set for all roles and/or users. - `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of roles to which the profile is applied. - `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — Settings that were changed when the client logged in/out. -- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — The IP address that was used to log in/out. +- `client_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — The IP address that was used to log in/out. - `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to log in/out. - `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — The interface from which the login was initiated. 
Possible values: - `TCP` diff --git a/docs/en/operations/system-tables/zookeeper_log.md b/docs/en/operations/system-tables/zookeeper_log.md index b7cc4e22cd6..dce5be29f62 100644 --- a/docs/en/operations/system-tables/zookeeper_log.md +++ b/docs/en/operations/system-tables/zookeeper_log.md @@ -15,7 +15,7 @@ Columns with request parameters: - `Finalize` — The connection is lost, no response was received. - `event_date` ([Date](../../sql-reference/data-types/date.md)) — The date when the event happened. - `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — The date and time when the event happened. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address of ZooKeeper server that was used to make the request. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address of ZooKeeper server that was used to make the request. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The port of ZooKeeper server that was used to make the request. - `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — The session ID that the ZooKeeper server sets for each connection. - `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — The ID of the request within the session. This is usually a sequential request number. It is the same for the request row and the paired `response`/`finalize` row. diff --git a/docs/en/sql-reference/data-types/index.md b/docs/en/sql-reference/data-types/index.md index 508307a0543..ffd063590fa 100644 --- a/docs/en/sql-reference/data-types/index.md +++ b/docs/en/sql-reference/data-types/index.md @@ -28,6 +28,6 @@ ClickHouse data types include: - **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell - **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type. - **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column settings its default value for the data type) -- **IP addresses**: use [`IPv4`](./domains/ipv4.md) and [`IPv6`](./domains/ipv6.md) to efficiently store IP addresses +- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses - **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon` - **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md) diff --git a/docs/en/sql-reference/data-types/domains/ipv4.md b/docs/en/sql-reference/data-types/ipv4.md similarity index 60% rename from docs/en/sql-reference/data-types/domains/ipv4.md rename to docs/en/sql-reference/data-types/ipv4.md index b34814211fc..288806f47b3 100644 --- a/docs/en/sql-reference/data-types/domains/ipv4.md +++ b/docs/en/sql-reference/data-types/ipv4.md @@ -1,12 +1,12 @@ --- -slug: /en/sql-reference/data-types/domains/ipv4 +slug: /en/sql-reference/data-types/ipv4 sidebar_position: 59 sidebar_label: IPv4 --- ## IPv4 -`IPv4` is a domain based on `UInt32` type and serves as a typed replacement for storing IPv4 values. It provides compact storage with the human-friendly input-output format and column type information on inspection. +IPv4 addresses. Stored in 4 bytes as UInt32. 
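A minimal sketch of what the shortened description means in practice (the address literal is arbitrary): an `IPv4` value parses from a dotted-quad string, and `hex()` shows the underlying four-byte representation.

```sql
SELECT toIPv4('116.106.34.242') AS addr, toTypeName(addr), hex(addr);
```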
### Basic Usage @@ -57,25 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; └──────────────────┴───────────┘ ``` -Domain values are not implicitly convertible to types other than `UInt32`. -If you want to convert `IPv4` value to a string, you have to do that explicitly with `IPv4NumToString()` function: +**See Also** -``` sql -SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; -``` - - ┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐ - │ String │ 183.247.232.58 │ - └───────────────────────────────────┴────────────────┘ - -Or cast to a `UInt32` value: - -``` sql -SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐ -│ UInt32 │ 3086477370 │ -└──────────────────────────────────┴────────────┘ -``` +- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md) diff --git a/docs/en/sql-reference/data-types/domains/ipv6.md b/docs/en/sql-reference/data-types/ipv6.md similarity index 61% rename from docs/en/sql-reference/data-types/domains/ipv6.md rename to docs/en/sql-reference/data-types/ipv6.md index dcb22e3cb6d..284a1f80854 100644 --- a/docs/en/sql-reference/data-types/domains/ipv6.md +++ b/docs/en/sql-reference/data-types/ipv6.md @@ -1,12 +1,12 @@ --- -slug: /en/sql-reference/data-types/domains/ipv6 +slug: /en/sql-reference/data-types/ipv6 sidebar_position: 60 sidebar_label: IPv6 --- ## IPv6 -`IPv6` is a domain based on `FixedString(16)` type and serves as a typed replacement for storing IPv6 values. It provides compact storage with the human-friendly input-output format and column type information on inspection. +IPv6 addresses. Stored in 16 bytes as UInt128. ### Basic Usage @@ -57,27 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; └──────────────────┴──────────────────────────────────┘ ``` -Domain values are not implicitly convertible to types other than `FixedString(16)`. -If you want to convert `IPv6` value to a string, you have to do that explicitly with `IPv6NumToString()` function: +**See Also** -``` sql -SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(IPv6NumToString(from))─┬─s─────────────────────────────┐ -│ String │ 2001:44c8:129:2632:33:0:252:2 │ -└───────────────────────────────────┴───────────────────────────────┘ -``` - -Or cast to a `FixedString(16)` value: - -``` sql -SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(CAST(from, 'FixedString(16)'))─┬─i───────┐ -│ FixedString(16) │ ��� │ -└───────────────────────────────────────────┴─────────┘ -``` +- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md) diff --git a/docs/en/sql-reference/functions/ip-address-functions.md b/docs/en/sql-reference/functions/ip-address-functions.md index 0dc1db1161b..33c788a632e 100644 --- a/docs/en/sql-reference/functions/ip-address-functions.md +++ b/docs/en/sql-reference/functions/ip-address-functions.md @@ -248,7 +248,7 @@ SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); ## toIPv4(string) -An alias to `IPv4StringToNum()` that takes a string form of IPv4 address and returns value of [IPv4](../../sql-reference/data-types/domains/ipv4.md) type, which is binary equal to value returned by `IPv4StringToNum()`. 
+An alias to `IPv4StringToNum()` that takes a string form of IPv4 address and returns value of [IPv4](../../sql-reference/data-types/ipv4.md) type, which is binary equal to value returned by `IPv4StringToNum()`. ``` sql WITH @@ -296,7 +296,7 @@ Same as `toIPv6`, but if the IPv6 address has an invalid format, it returns null ## toIPv6 -Converts a string form of IPv6 address to [IPv6](../../sql-reference/data-types/domains/ipv6.md) type. If the IPv6 address has an invalid format, returns an empty value. +Converts a string form of IPv6 address to [IPv6](../../sql-reference/data-types/ipv6.md) type. If the IPv6 address has an invalid format, returns an empty value. Similar to [IPv6StringToNum](#ipv6stringtonums) function, which converts IPv6 address to binary format. If the input string contains a valid IPv4 address, then the IPv6 equivalent of the IPv4 address is returned. @@ -315,7 +315,7 @@ toIPv6(string) - IP address. -Type: [IPv6](../../sql-reference/data-types/domains/ipv6.md). +Type: [IPv6](../../sql-reference/data-types/ipv6.md). **Examples** diff --git a/docs/redirects.txt b/docs/redirects.txt index cea138f7237..3abc8df2b7f 100644 --- a/docs/redirects.txt +++ b/docs/redirects.txt @@ -14,7 +14,7 @@ data_types/datetime.md sql-reference/data-types/datetime.md data_types/datetime64.md sql-reference/data-types/datetime64.md data_types/decimal.md sql-reference/data-types/decimal.md data_types/domains/ipv4.md sql-reference/data-types/domains/ipv4.md -data_types/domains/ipv6.md sql-reference/data-types/domains/ipv6.md +data_types/domains/ipv6.md sql-reference/data-types/ipv6.md data_types/domains/overview.md sql-reference/data-types/domains/overview.md data_types/enum.md sql-reference/data-types/enum.md data_types/fixedstring.md sql-reference/data-types/fixedstring.md @@ -162,7 +162,7 @@ interfaces/third-party/client_libraries.md interfaces/third-party/client-librari interfaces/third-party_client_libraries.md interfaces/third-party/client-libraries.md interfaces/third-party_gui.md interfaces/third-party/gui.md interfaces/third_party/index.md interfaces/third-party/index.md -introduction/index.md +introduction/index.md introduction/distinctive_features.md introduction/distinctive-features.md introduction/features_considered_disadvantages.md introduction/distinctive-features.md introduction/possible_silly_questions.md faq/general.md @@ -305,8 +305,10 @@ sql_reference/data_types/datetime.md sql-reference/data-types/datetime.md sql_reference/data_types/datetime64.md sql-reference/data-types/datetime64.md sql_reference/data_types/decimal.md sql-reference/data-types/decimal.md sql_reference/data_types/domains/index.md sql-reference/data-types/domains/index.md -sql_reference/data_types/domains/ipv4.md sql-reference/data-types/domains/ipv4.md -sql_reference/data_types/domains/ipv6.md sql-reference/data-types/domains/ipv6.md +sql_reference/data_types/domains/ipv4.md sql-reference/data-types/ipv4.md +sql_reference/data-types/domains/ipv4.md sql-reference/data-types/ipv4.md +sql_reference/data_types/domains/ipv6.md sql-reference/data-types/ipv6.md +sql_reference/data-types/domains/ipv6.md sql-reference/data-types/ipv6.md sql_reference/data_types/domains/overview.md sql-reference/data-types/domains/overview.md sql_reference/data_types/enum.md sql-reference/data-types/enum.md sql_reference/data_types/fixedstring.md sql-reference/data-types/fixedstring.md From 2923d57757cd28dde82d8edd762c1912129a68e4 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Sun, 4 Jun 2023 15:37:32 -0300 Subject: [PATCH 202/722] 
Update redirects.txt --- docs/redirects.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/redirects.txt b/docs/redirects.txt index 3abc8df2b7f..98d6f6b8f7c 100644 --- a/docs/redirects.txt +++ b/docs/redirects.txt @@ -13,7 +13,7 @@ data_types/date.md sql-reference/data-types/date.md data_types/datetime.md sql-reference/data-types/datetime.md data_types/datetime64.md sql-reference/data-types/datetime64.md data_types/decimal.md sql-reference/data-types/decimal.md -data_types/domains/ipv4.md sql-reference/data-types/domains/ipv4.md +data_types/domains/ipv4.md sql-reference/data-types/ipv4.md data_types/domains/ipv6.md sql-reference/data-types/ipv6.md data_types/domains/overview.md sql-reference/data-types/domains/overview.md data_types/enum.md sql-reference/data-types/enum.md From aa35689cb10dbdbab0c8475a7f92b8978e6eb6b8 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Sun, 4 Jun 2023 19:39:30 -0300 Subject: [PATCH 203/722] fix links in other lang-s --- docs/ru/operations/system-tables/query_log.md | 4 ++-- docs/ru/operations/system-tables/query_thread_log.md | 4 ++-- docs/ru/operations/system-tables/session_log.md | 2 +- docs/ru/operations/system-tables/zookeeper_log.md | 2 +- docs/ru/sql-reference/data-types/{domains => }/ipv4.md | 2 +- docs/ru/sql-reference/data-types/{domains => }/ipv6.md | 2 +- docs/ru/sql-reference/functions/ip-address-functions.md | 4 ++-- docs/zh/operations/system-tables/query_log.md | 4 ++-- docs/zh/operations/system-tables/query_thread_log.md | 4 ++-- docs/zh/operations/system-tables/zookeeper_log.md | 2 +- docs/zh/sql-reference/data-types/{domains => }/ipv4.md | 2 +- docs/zh/sql-reference/data-types/{domains => }/ipv6.md | 2 +- 12 files changed, 17 insertions(+), 17 deletions(-) rename docs/ru/sql-reference/data-types/{domains => }/ipv4.md (98%) rename docs/ru/sql-reference/data-types/{domains => }/ipv6.md (98%) rename docs/zh/sql-reference/data-types/{domains => }/ipv4.md (98%) rename docs/zh/sql-reference/data-types/{domains => }/ipv6.md (98%) diff --git a/docs/ru/operations/system-tables/query_log.md b/docs/ru/operations/system-tables/query_log.md index a55528bd829..8f858c14fb1 100644 --- a/docs/ru/operations/system-tables/query_log.md +++ b/docs/ru/operations/system-tables/query_log.md @@ -69,11 +69,11 @@ ClickHouse не удаляет данные из таблица автомати - 0 — запрос был инициирован другим запросом при выполнении распределенного запроса. - `user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший текущий запрос. - `query_id` ([String](../../sql-reference/data-types/string.md)) — ID запроса. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел запрос. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел запрос. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт, с которого клиент сделал запрос - `initial_user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший первоначальный запрос (для распределенных запросов). - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID родительского запроса. -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел родительский запрос. +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел родительский запрос. 
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт, с которого клиент сделал родительский запрос. - `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время начала обработки запроса (для распределенных запросов). - `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — время начала обработки запроса с точностью до микросекунд (для распределенных запросов). diff --git a/docs/ru/operations/system-tables/query_thread_log.md b/docs/ru/operations/system-tables/query_thread_log.md index c9aabb02cad..1a256e1657a 100644 --- a/docs/ru/operations/system-tables/query_thread_log.md +++ b/docs/ru/operations/system-tables/query_thread_log.md @@ -39,11 +39,11 @@ ClickHouse не удаляет данные из таблицы автомати - 0 — запрос был инициирован другим запросом при распределенном запросе. - `user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший текущий запрос. - `query_id` ([String](../../sql-reference/data-types/string.md)) — ID запроса. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел запрос. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел запрос. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — порт, с которого пришел запрос. - `initial_user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший первоначальный запрос (для распределенных запросов). - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID родительского запроса. -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел родительский запрос. +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел родительский запрос. - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — порт, пришел родительский запрос. - `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — интерфейс, с которого ушёл запрос. Возможные значения: - 1 — TCP. diff --git a/docs/ru/operations/system-tables/session_log.md b/docs/ru/operations/system-tables/session_log.md index 1f313e7815a..5849cb51ab4 100644 --- a/docs/ru/operations/system-tables/session_log.md +++ b/docs/ru/operations/system-tables/session_log.md @@ -27,7 +27,7 @@ slug: /ru/operations/system-tables/session_log - `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — список профилей, установленных для всех ролей и (или) пользователей. - `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — список ролей, к которым применяется данный профиль. - `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — настройки, которые были изменены при входе или выходе клиента из системы. -- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP-адрес, который использовался для входа или выхода из системы. +- `client_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP-адрес, который использовался для входа или выхода из системы. 
- `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт клиента, который использовался для входа или выхода из системы. - `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — интерфейс, с которого был инициирован вход в систему. Возможные значения: - `TCP` diff --git a/docs/ru/operations/system-tables/zookeeper_log.md b/docs/ru/operations/system-tables/zookeeper_log.md index ccbdd5110ad..9874cb3a269 100644 --- a/docs/ru/operations/system-tables/zookeeper_log.md +++ b/docs/ru/operations/system-tables/zookeeper_log.md @@ -15,7 +15,7 @@ slug: /ru/operations/system-tables/zookeeper_log - `Finalize` — соединение разорвано, ответ не получен. - `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата, когда произошло событие. - `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — дата и время, когда произошло событие. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес сервера ZooKeeper, с которого был сделан запрос. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес сервера ZooKeeper, с которого был сделан запрос. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт сервера ZooKeeper, с которого был сделан запрос. - `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — идентификатор сессии, который сервер ZooKeeper создает для каждого соединения. - `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — идентификатор запроса внутри сессии. Обычно это последовательный номер запроса, одинаковый у строки запроса и у парной строки `response`/`finalize`. diff --git a/docs/ru/sql-reference/data-types/domains/ipv4.md b/docs/ru/sql-reference/data-types/ipv4.md similarity index 98% rename from docs/ru/sql-reference/data-types/domains/ipv4.md rename to docs/ru/sql-reference/data-types/ipv4.md index 57a19e282ae..8d308785eea 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv4.md +++ b/docs/ru/sql-reference/data-types/ipv4.md @@ -1,5 +1,5 @@ --- -slug: /ru/sql-reference/data-types/domains/ipv4 +slug: /ru/sql-reference/data-types/ipv4 sidebar_position: 59 sidebar_label: IPv4 --- diff --git a/docs/ru/sql-reference/data-types/domains/ipv6.md b/docs/ru/sql-reference/data-types/ipv6.md similarity index 98% rename from docs/ru/sql-reference/data-types/domains/ipv6.md rename to docs/ru/sql-reference/data-types/ipv6.md index fdfb26f68c1..808068ce90a 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv6.md +++ b/docs/ru/sql-reference/data-types/ipv6.md @@ -1,5 +1,5 @@ --- -slug: /ru/sql-reference/data-types/domains/ipv6 +slug: /ru/sql-reference/data-types/ipv6 sidebar_position: 60 sidebar_label: IPv6 --- diff --git a/docs/ru/sql-reference/functions/ip-address-functions.md b/docs/ru/sql-reference/functions/ip-address-functions.md index 96d4b737c88..d1a72b82b67 100644 --- a/docs/ru/sql-reference/functions/ip-address-functions.md +++ b/docs/ru/sql-reference/functions/ip-address-functions.md @@ -265,7 +265,7 @@ SELECT ## toIPv6 {#toipv6string} -Приводит строку с адресом в формате IPv6 к типу [IPv6](../../sql-reference/data-types/domains/ipv6.md). Возвращает пустое значение, если входящая строка не является корректным IP адресом. +Приводит строку с адресом в формате IPv6 к типу [IPv6](../../sql-reference/data-types/ipv6.md). Возвращает пустое значение, если входящая строка не является корректным IP адресом. Похоже на функцию [IPv6StringToNum](#ipv6stringtonums), которая представляет адрес IPv6 в двоичном виде. 
Если входящая строка содержит корректный IPv4 адрес, функция возвращает его IPv6 эквивалент. @@ -284,7 +284,7 @@ toIPv6(string) - IP адрес. -Тип: [IPv6](../../sql-reference/data-types/domains/ipv6.md). +Тип: [IPv6](../../sql-reference/data-types/ipv6.md). **Примеры** diff --git a/docs/zh/operations/system-tables/query_log.md b/docs/zh/operations/system-tables/query_log.md index 7149282dfcc..0ba669906cb 100644 --- a/docs/zh/operations/system-tables/query_log.md +++ b/docs/zh/operations/system-tables/query_log.md @@ -60,11 +60,11 @@ ClickHouse不会自动从表中删除数据。更多详情请看 [introduction]( - 0 — 由另一个查询发起的,作为分布式查询的一部分. - `user` ([String](../../sql-reference/data-types/string.md)) — 发起查询的用户. - `query_id` ([String](../../sql-reference/data-types/string.md)) — 查询ID. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起查询的客户端IP地址. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 发起查询的客户端IP地址. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 发起查询的客户端端口. - `initial_user` ([String](../../sql-reference/data-types/string.md)) — 初始查询的用户名(用于分布式查询执行). - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — 运行初始查询的ID(用于分布式查询执行). -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 运行父查询的IP地址. +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 运行父查询的IP地址. - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 发起父查询的客户端端口. - `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — 发起查询的接口. 可能的值: - 1 — TCP. diff --git a/docs/zh/operations/system-tables/query_thread_log.md b/docs/zh/operations/system-tables/query_thread_log.md index 8a41c1501a6..c4b7e2f1043 100644 --- a/docs/zh/operations/system-tables/query_thread_log.md +++ b/docs/zh/operations/system-tables/query_thread_log.md @@ -36,11 +36,11 @@ ClickHouse不会自动从表中删除数据。 欲了解更多详情,请参照 - 0 — 由其他查询发起的分布式查询。 - `user` ([字符串](../../sql-reference/data-types/string.md)) — 发起查询的用户名。 - `query_id` ([字符串](../../sql-reference/data-types/string.md)) — 查询的ID。 -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起查询的IP地址。 +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 发起查询的IP地址。 - `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起查询的端口。 - `initial_user` ([字符串](../../sql-reference/data-types/string.md)) — 首次发起查询的用户名(对于分布式查询)。 - `initial_query_id` ([字符串](../../sql-reference/data-types/string.md)) — 首次发起查询的ID(对于分布式查询)。 -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起该查询的父查询IP地址。 +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 发起该查询的父查询IP地址。 - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起该查询的父查询端口。 - `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起查询的界面,可能的值: - 1 — TCP. diff --git a/docs/zh/operations/system-tables/zookeeper_log.md b/docs/zh/operations/system-tables/zookeeper_log.md index 59dcdaecdc1..ebc51a2e79d 100644 --- a/docs/zh/operations/system-tables/zookeeper_log.md +++ b/docs/zh/operations/system-tables/zookeeper_log.md @@ -15,7 +15,7 @@ slug: /zh/operations/system-tables/zookeeper_log - `Finalize` — 连接丢失, 未收到响应. - `event_date` ([Date](../../sql-reference/data-types/date.md)) — 事件发生的日期. - `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — 事件发生的日期和时间. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 用于发出请求的 ZooKeeper 服务器的 IP 地址. 
+- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 用于发出请求的 ZooKeeper 服务器的 IP 地址. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 用于发出请求的 ZooKeeper 服务器的端口. - `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — ZooKeeper 服务器为每个连接设置的会话 ID. - `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — 会话中请求的 ID. 这通常是一个连续的请求编号. 请求行和配对的 `response`/`finalize` 行相同. diff --git a/docs/zh/sql-reference/data-types/domains/ipv4.md b/docs/zh/sql-reference/data-types/ipv4.md similarity index 98% rename from docs/zh/sql-reference/data-types/domains/ipv4.md rename to docs/zh/sql-reference/data-types/ipv4.md index 69e17b2f617..b89af974b87 100644 --- a/docs/zh/sql-reference/data-types/domains/ipv4.md +++ b/docs/zh/sql-reference/data-types/ipv4.md @@ -1,5 +1,5 @@ --- -slug: /zh/sql-reference/data-types/domains/ipv4 +slug: /zh/sql-reference/data-types/ipv4 --- ## IPv4 {#ipv4} diff --git a/docs/zh/sql-reference/data-types/domains/ipv6.md b/docs/zh/sql-reference/data-types/ipv6.md similarity index 98% rename from docs/zh/sql-reference/data-types/domains/ipv6.md rename to docs/zh/sql-reference/data-types/ipv6.md index 9dd88692c37..3896bb873d8 100644 --- a/docs/zh/sql-reference/data-types/domains/ipv6.md +++ b/docs/zh/sql-reference/data-types/ipv6.md @@ -1,5 +1,5 @@ --- -slug: /zh/sql-reference/data-types/domains/ipv6 +slug: /zh/sql-reference/data-types/ipv6 --- ## IPv6 {#ipv6} From 2e187e0a0eae7f0109c6af30bd6baad0e75c9b71 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Sun, 4 Jun 2023 20:12:35 -0300 Subject: [PATCH 204/722] try to fix redirect --- docs/redirects.txt | 2 -- docs/ru/sql-reference/data-types/ipv4.md | 27 +++--------------------- 2 files changed, 3 insertions(+), 26 deletions(-) diff --git a/docs/redirects.txt b/docs/redirects.txt index 98d6f6b8f7c..ddfc66aa48b 100644 --- a/docs/redirects.txt +++ b/docs/redirects.txt @@ -306,9 +306,7 @@ sql_reference/data_types/datetime64.md sql-reference/data-types/datetime64.md sql_reference/data_types/decimal.md sql-reference/data-types/decimal.md sql_reference/data_types/domains/index.md sql-reference/data-types/domains/index.md sql_reference/data_types/domains/ipv4.md sql-reference/data-types/ipv4.md -sql_reference/data-types/domains/ipv4.md sql-reference/data-types/ipv4.md sql_reference/data_types/domains/ipv6.md sql-reference/data-types/ipv6.md -sql_reference/data-types/domains/ipv6.md sql-reference/data-types/ipv6.md sql_reference/data_types/domains/overview.md sql-reference/data-types/domains/overview.md sql_reference/data_types/enum.md sql-reference/data-types/enum.md sql_reference/data_types/fixedstring.md sql-reference/data-types/fixedstring.md diff --git a/docs/ru/sql-reference/data-types/ipv4.md b/docs/ru/sql-reference/data-types/ipv4.md index 8d308785eea..5cb977c64c9 100644 --- a/docs/ru/sql-reference/data-types/ipv4.md +++ b/docs/ru/sql-reference/data-types/ipv4.md @@ -6,7 +6,7 @@ sidebar_label: IPv4 ## IPv4 {#ipv4} -`IPv4` — это домен, базирующийся на типе данных `UInt32` предназначенный для хранения адресов IPv4. Он обеспечивает компактное хранение данных с удобным для человека форматом ввода-вывода, и явно отображаемым типом данных в структуре таблицы. +IPv4-адреса. Хранится в 4 байтах как UInt32. ### Применение {#primenenie} @@ -57,27 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; └──────────────────┴───────────┘ ``` -Значения с доменным типом данных не преобразуются неявно в другие типы данных, кроме `UInt32`. 
-Если необходимо преобразовать значение типа `IPv4` в строку, то это необходимо делать явно с помощью функции `IPv4NumToString()`: +**См. также** -``` sql -SELECT toTypeName(s), IPv4NumToString(from) AS s FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐ -│ String │ 183.247.232.58 │ -└───────────────────────────────────┴────────────────┘ -``` - -Или приводить к типу данных `UInt32`: - -``` sql -SELECT toTypeName(i), CAST(from AS UInt32) AS i FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐ -│ UInt32 │ 3086477370 │ -└──────────────────────────────────┴────────────┘ -``` +- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md) From fe1354f22184181c3ed996e0928509917bdc5f7d Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Mon, 5 Jun 2023 12:32:08 +0000 Subject: [PATCH 205/722] Analyzer: Do not apply Query Tree optimizations on shards --- src/Interpreters/InterpreterSelectQueryAnalyzer.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp b/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp index 98f70c25dcd..4f2f05dc7eb 100644 --- a/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp +++ b/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp @@ -135,7 +135,8 @@ QueryTreeNodePtr buildQueryTreeAndRunPasses(const ASTPtr & query, QueryTreePassManager query_tree_pass_manager(context); addQueryTreePasses(query_tree_pass_manager); - if (select_query_options.ignore_ast_optimizations) + if (select_query_options.ignore_ast_optimizations + || context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY) query_tree_pass_manager.run(query_tree, 1 /*up_to_pass_index*/); else query_tree_pass_manager.run(query_tree); From 5fb4f1fc614a749bd0706e5dcd952224e821bf77 Mon Sep 17 00:00:00 2001 From: ltrk2 <107155950+ltrk2@users.noreply.github.com> Date: Mon, 5 Jun 2023 06:55:34 -0700 Subject: [PATCH 206/722] Implement review comments --- .../ReservoirSamplerDeterministic.h | 2 +- src/Common/TransformEndianness.hpp | 35 ++++++++++++------- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h index 9dea821e839..b1a39a5dcc5 100644 --- a/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ b/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -178,7 +178,7 @@ public: void write(DB::WriteBuffer & buf) const { - const auto size = samples.size(); + const size_t size = samples.size(); writeBinaryLittleEndian(size, buf); writeBinaryLittleEndian(total_values, buf); diff --git a/src/Common/TransformEndianness.hpp b/src/Common/TransformEndianness.hpp index 17cf441d17f..228490d24a1 100644 --- a/src/Common/TransformEndianness.hpp +++ b/src/Common/TransformEndianness.hpp @@ -8,18 +8,35 @@ namespace DB { template - requires is_big_int_v +requires std::is_integral_v +inline void transformEndianness(T & value) +{ + if constexpr (endian != std::endian::native) + value = std::byteswap(value); +} + +template +requires is_big_int_v inline void transformEndianness(T & x) { if constexpr (std::endian::native != endian) { - std::ranges::transform(x.items, std::begin(x.items), [](auto& item) { return std::byteswap(item); }); - std::ranges::reverse(x.items); + auto & items = x.items; + std::transform(std::begin(items), std::end(items), std::begin(items), [](auto & item) { return std::byteswap(item); }); + 
std::reverse(std::begin(items), std::end(items)); } } template - requires is_decimal || std::is_floating_point_v +requires is_decimal +inline void transformEndianness(T & x) +{ + if constexpr (std::endian::native != endian) + transformEndianness(x.value); +} + +template +requires std::is_floating_point_v inline void transformEndianness(T & value) { if constexpr (std::endian::native != endian) @@ -30,15 +47,7 @@ inline void transformEndianness(T & value) } template - requires std::is_integral_v && (sizeof(T) <= 8) -inline void transformEndianness(T & value) -{ - if constexpr (endian != std::endian::native) - value = std::byteswap(value); -} - -template - requires std::is_scoped_enum_v +requires std::is_scoped_enum_v inline void transformEndianness(T & x) { using UnderlyingType = std::underlying_type_t; From 50430ed304d8a4d4ce2bafc66bc1b7b74afec678 Mon Sep 17 00:00:00 2001 From: ltrk2 <107155950+ltrk2@users.noreply.github.com> Date: Mon, 5 Jun 2023 06:55:52 -0700 Subject: [PATCH 207/722] Configure rule for concepts requires clause --- .clang-format | 1 + 1 file changed, 1 insertion(+) diff --git a/.clang-format b/.clang-format index 2da3911dced..893d9c613f1 100644 --- a/.clang-format +++ b/.clang-format @@ -74,6 +74,7 @@ ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 DerivePointerAlignment: false DisableFormat: false +IndentRequiresClause: false IndentWidth: 4 IndentWrappedFunctionNames: false MacroBlockBegin: '' From 33e51d4f3b25aa1af5dedf751b5fec5229dc6eac Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 5 Jun 2023 15:22:04 +0000 Subject: [PATCH 208/722] Add setting to limit the number of bytes to read in schema inference --- docs/en/interfaces/schema-inference.md | 16 +++++++------ .../operations/settings/settings-formats.md | 6 +++++ src/Core/Settings.h | 1 + src/Formats/EscapingRuleUtils.cpp | 3 ++- src/Formats/FormatFactory.cpp | 1 + src/Formats/FormatSettings.h | 3 ++- src/Formats/ReadSchemaUtils.cpp | 14 +++++++---- src/Processors/Formats/ISchemaReader.cpp | 24 +++++++++++-------- src/Processors/Formats/ISchemaReader.h | 9 +++++-- .../Impl/JSONColumnsBlockInputFormat.cpp | 2 +- .../Impl/JSONColumnsBlockInputFormatBase.cpp | 12 ++++++---- .../Impl/JSONColumnsBlockInputFormatBase.h | 16 +++++++++++-- .../JSONCompactColumnsBlockInputFormat.cpp | 2 +- ...ytes_to_read_in_schema_inference.reference | 1 + ..._max_bytes_to_read_in_schema_inference.sql | 4 ++++ 15 files changed, 80 insertions(+), 34 deletions(-) create mode 100644 tests/queries/0_stateless/02783_max_bytes_to_read_in_schema_inference.reference create mode 100644 tests/queries/0_stateless/02783_max_bytes_to_read_in_schema_inference.sql diff --git a/docs/en/interfaces/schema-inference.md b/docs/en/interfaces/schema-inference.md index c448d0aee47..a757a032b7d 100644 --- a/docs/en/interfaces/schema-inference.md +++ b/docs/en/interfaces/schema-inference.md @@ -329,8 +329,8 @@ SELECT count() FROM system.schema_inference_cache WHERE storage='S3' ## Text formats {#text-formats} For text formats, ClickHouse reads the data row by row, extracts column values according to the format, -and then uses some recursive parsers and heuristics to determine the type for each value. The maximum number of rows read from the data in schema inference -is controlled by the setting `input_format_max_rows_to_read_for_schema_inference` with default value 25000. +and then uses some recursive parsers and heuristics to determine the type for each value. 
The maximum number of rows and bytes read from the data in schema inference +is controlled by the settings `input_format_max_rows_to_read_for_schema_inference` (25000 by default) and `input_format_max_bytes_to_read_for_schema_inference` (32Mb by default). By default, all inferred types are [Nullable](../sql-reference/data-types/nullable.md), but you can change this by setting `schema_inference_make_columns_nullable` (see examples in the [settings](#settings-for-text-formats) section). ### JSON formats {#json-formats} @@ -1144,13 +1144,15 @@ Line: value_1=2, value_2="Some string 2", value_3="[4, 5, NULL]"$$) ### Settings for text formats {#settings-for-text-formats} -#### input_format_max_rows_to_read_for_schema_inference +#### input_format_max_rows_to_read_for_schema_inference/input_format_max_bytes_to_read_for_schema_inference -This setting controls the maximum number of rows to be read while schema inference. -The more rows are read, the more time is spent on schema inference, but the greater the chance to +These settings control the amount of data to be read while schema inference. +The more rows/bytes are read, the more time is spent on schema inference, but the greater the chance to correctly determine the types (especially when the data contains a lot of nulls). -Default value: `25000`. +Default values: +- `25000` for `input_format_max_rows_to_read_for_schema_inference`. +- `33554432` (32 Mb) for `input_format_max_bytes_to_read_for_schema_inference`. #### column_names_for_schema_inference @@ -1623,7 +1625,7 @@ In schema inference for CapnProto format ClickHouse uses the following type matc ## Strong-typed binary formats {#strong-typed-binary-formats} In such formats, each serialized value contains information about its type (and possibly about its name), but there is no information about the whole table. -In schema inference for such formats, ClickHouse reads data row by row (up to `input_format_max_rows_to_read_for_schema_inference` rows) and extracts +In schema inference for such formats, ClickHouse reads data row by row (up to `input_format_max_rows_to_read_for_schema_inference` rows or `input_format_max_bytes_to_read_for_schema_inference` bytes) and extracts the type (and possibly name) for each value from the data and then converts these types to ClickHouse types. ### MsgPack {#msgpack} diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index 65038d3a256..e4a8c916bcf 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -137,6 +137,12 @@ The maximum rows of data to read for automatic schema inference. Default value: `25'000`. +## input_format_max_bytes_to_read_for_schema_inference {#input_format_max_bytes_to_read_for_schema_inference} + +The maximum amount of data in bytes to read for automatic schema inference. + +Default value: `33554432` (32 Mb). + ## column_names_for_schema_inference {#column_names_for_schema_inference} The list of column names to use in schema inference for formats without column names. The format: 'column1,column2,column3,...' diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 67c92a0be8b..f1e6c518f30 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -844,6 +844,7 @@ class IColumn; M(UInt64, input_format_msgpack_number_of_columns, 0, "The number of columns in inserted MsgPack data. 
Used for automatic schema inference from data.", 0) \ M(MsgPackUUIDRepresentation, output_format_msgpack_uuid_representation, FormatSettings::MsgPackUUIDRepresentation::EXT, "The way how to output UUID in MsgPack format.", 0) \ M(UInt64, input_format_max_rows_to_read_for_schema_inference, 25000, "The maximum rows of data to read for automatic schema inference", 0) \ + M(UInt64, input_format_max_bytes_to_read_for_schema_inference, 32 * 1024 * 1024, "The maximum bytes of data to read for automatic schema inference", 0) \ M(Bool, input_format_csv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in CSV format", 0) \ M(Bool, input_format_tsv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in TSV format", 0) \ M(Bool, input_format_csv_detect_header, true, "Automatically detect header with names and types in CSV format", 0) \ diff --git a/src/Formats/EscapingRuleUtils.cpp b/src/Formats/EscapingRuleUtils.cpp index 804f32e4b46..9f744218da2 100644 --- a/src/Formats/EscapingRuleUtils.cpp +++ b/src/Formats/EscapingRuleUtils.cpp @@ -408,9 +408,10 @@ DataTypes getDefaultDataTypeForEscapingRules(const std::vectormax_rows_to_read_for_schema_inference : context->getSettingsRef().input_format_max_rows_to_read_for_schema_inference; + size_t max_bytes_to_read = format_settings ? format_settings->max_bytes_to_read_for_schema_inference + : context->getSettingsRef().input_format_max_bytes_to_read_for_schema_inference; size_t iterations = 0; ColumnsDescription cached_columns; while (true) @@ -120,7 +122,7 @@ ColumnsDescription readSchemaFromFormat( try { schema_reader = FormatFactory::instance().getSchemaReader(format_name, *buf, context, format_settings); - schema_reader->setMaxRowsToRead(max_rows_to_read); + schema_reader->setMaxRowsAndBytesToRead(max_rows_to_read, max_bytes_to_read); names_and_types = schema_reader->readSchema(); break; } @@ -132,10 +134,14 @@ ColumnsDescription readSchemaFromFormat( size_t rows_read = schema_reader->getNumRowsRead(); assert(rows_read <= max_rows_to_read); max_rows_to_read -= schema_reader->getNumRowsRead(); - if (rows_read != 0 && max_rows_to_read == 0) + size_t bytes_read = buf->count(); + /// We could exceed max_bytes_to_read a bit to complete row parsing. 
+ max_bytes_to_read -= std::min(bytes_read, max_bytes_to_read); + if (rows_read != 0 && (max_rows_to_read == 0 || max_bytes_to_read == 0)) { - exception_message += "\nTo increase the maximum number of rows to read for structure determination, use setting " - "input_format_max_rows_to_read_for_schema_inference"; + exception_message += "\nTo increase the maximum number of rows/bytes to read for structure determination, use setting " + "input_format_max_rows_to_read_for_schema_inference/input_format_max_bytes_to_read_for_schema_inference"; + if (iterations > 1) { exception_messages += "\n" + exception_message; diff --git a/src/Processors/Formats/ISchemaReader.cpp b/src/Processors/Formats/ISchemaReader.cpp index c96cb373a2d..9f26a3543d0 100644 --- a/src/Processors/Formats/ISchemaReader.cpp +++ b/src/Processors/Formats/ISchemaReader.cpp @@ -57,11 +57,15 @@ void checkFinalInferredType( } IIRowSchemaReader::IIRowSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_, DataTypePtr default_type_) - : ISchemaReader(in_), default_type(default_type_), hints_str(format_settings_.schema_inference_hints), format_settings(format_settings_) + : ISchemaReader(in_) + , max_rows_to_read(format_settings_.max_rows_to_read_for_schema_inference) + , max_bytes_to_read(format_settings_.max_bytes_to_read_for_schema_inference) + , default_type(default_type_) + , hints_str(format_settings_.schema_inference_hints) + , format_settings(format_settings_) { } - void IIRowSchemaReader::setContext(ContextPtr & context) { ColumnsDescription columns; @@ -99,11 +103,11 @@ IRowSchemaReader::IRowSchemaReader(ReadBuffer & in_, const FormatSettings & form NamesAndTypesList IRowSchemaReader::readSchema() { - if (max_rows_to_read == 0) + if (max_rows_to_read == 0 || max_bytes_to_read == 0) throw Exception( ErrorCodes::BAD_ARGUMENTS, - "Cannot read rows to determine the schema, the maximum number of rows to read is set to 0. " - "Most likely setting input_format_max_rows_to_read_for_schema_inference is set to 0"); + "Cannot read rows to determine the schema, the maximum number of rows (or bytes) to read is set to 0. " + "Most likely setting input_format_max_rows_to_read_for_schema_inference or input_format_max_bytes_to_read_for_schema_inference is set to 0"); DataTypes data_types = readRowAndGetDataTypes(); @@ -143,7 +147,7 @@ NamesAndTypesList IRowSchemaReader::readSchema() data_types[i] = hint_it->second; } - for (rows_read = 1; rows_read < max_rows_to_read; ++rows_read) + for (rows_read = 1; rows_read < max_rows_to_read && in.count() < max_bytes_to_read; ++rows_read) { DataTypes new_data_types = readRowAndGetDataTypes(); if (new_data_types.empty()) @@ -220,11 +224,11 @@ IRowWithNamesSchemaReader::IRowWithNamesSchemaReader(ReadBuffer & in_, const For NamesAndTypesList IRowWithNamesSchemaReader::readSchema() { - if (max_rows_to_read == 0) + if (max_rows_to_read == 0 || max_bytes_to_read == 0) throw Exception( ErrorCodes::BAD_ARGUMENTS, - "Cannot read rows to determine the schema, the maximum number of rows to read is set to 0. " - "Most likely setting input_format_max_rows_to_read_for_schema_inference is set to 0"); + "Cannot read rows to determine the schema, the maximum number of rows (or bytes) to read is set to 0. 
" + "Most likely setting input_format_max_rows_to_read_for_schema_inference or input_format_max_bytes_to_read_for_schema_inference is set to 0"); bool eof = false; auto names_and_types = readRowAndGetNamesAndDataTypes(eof); @@ -245,7 +249,7 @@ NamesAndTypesList IRowWithNamesSchemaReader::readSchema() names_order.push_back(name); } - for (rows_read = 1; rows_read < max_rows_to_read; ++rows_read) + for (rows_read = 1; rows_read < max_rows_to_read && in.count() < max_bytes_to_read; ++rows_read) { auto new_names_and_types = readRowAndGetNamesAndDataTypes(eof); if (eof) diff --git a/src/Processors/Formats/ISchemaReader.h b/src/Processors/Formats/ISchemaReader.h index 78b34a07840..40702198a57 100644 --- a/src/Processors/Formats/ISchemaReader.h +++ b/src/Processors/Formats/ISchemaReader.h @@ -32,7 +32,7 @@ public: virtual bool needContext() const { return false; } virtual void setContext(ContextPtr &) {} - virtual void setMaxRowsToRead(size_t) {} + virtual void setMaxRowsAndBytesToRead(size_t, size_t) {} virtual size_t getNumRowsRead() const { return 0; } virtual ~ISchemaReader() = default; @@ -54,12 +54,17 @@ public: virtual void transformTypesIfNeeded(DataTypePtr & type, DataTypePtr & new_type); protected: - void setMaxRowsToRead(size_t max_rows) override { max_rows_to_read = max_rows; } + void setMaxRowsAndBytesToRead(size_t max_rows, size_t max_bytes) override + { + max_rows_to_read = max_rows; + max_bytes_to_read = max_bytes; + } size_t getNumRowsRead() const override { return rows_read; } virtual void transformFinalTypeIfNeeded(DataTypePtr &) {} size_t max_rows_to_read; + size_t max_bytes_to_read; size_t rows_read = 0; DataTypePtr default_type; String hints_str; diff --git a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormat.cpp b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormat.cpp index 8d4c4b0c6cf..3d003658e64 100644 --- a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormat.cpp @@ -55,7 +55,7 @@ void registerJSONColumnsSchemaReader(FormatFactory & factory) ); factory.registerAdditionalInfoForSchemaCacheGetter("JSONColumns", [](const FormatSettings & settings) { - return getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::JSON); + return getAdditionalFormatInfoForAllRowBasedFormats(settings) + getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::JSON); }); } diff --git a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.cpp b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.cpp index 2e264c59f56..84a07ebc8fb 100644 --- a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.cpp +++ b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.cpp @@ -176,6 +176,8 @@ JSONColumnsSchemaReaderBase::JSONColumnsSchemaReaderBase( , hints_str(format_settings_.schema_inference_hints) , reader(std::move(reader_)) , column_names_from_settings(splitColumnNames(format_settings_.column_names_for_schema_inference)) + , max_rows_to_read(format_settings_.max_rows_to_read_for_schema_inference) + , max_bytes_to_read(format_settings_.max_bytes_to_read_for_schema_inference) { } @@ -196,12 +198,12 @@ void JSONColumnsSchemaReaderBase::transformTypesIfNeeded(DataTypePtr & type, Dat NamesAndTypesList JSONColumnsSchemaReaderBase::readSchema() { - size_t total_rows_read = 0; std::unordered_map names_to_types; std::vector names_order; /// Read data block by block and determine the type for each column - /// until max_rows_to_read_for_schema_inference is 
reached. - while (total_rows_read < format_settings.max_rows_to_read_for_schema_inference) + /// until max_rows_to_read/max_bytes_to_read is reached. + /// Note that we can exceed max_bytes_to_read to compete block parsing. + while (total_rows_read < max_rows_to_read && in.count() < max_bytes_to_read) { if (in.eof()) break; @@ -268,7 +270,7 @@ NamesAndTypesList JSONColumnsSchemaReaderBase::readSchema() return result; } -DataTypePtr JSONColumnsSchemaReaderBase::readColumnAndGetDataType(const String & column_name, size_t & rows_read, size_t max_rows_to_read) +DataTypePtr JSONColumnsSchemaReaderBase::readColumnAndGetDataType(const String & column_name, size_t & rows_read, size_t max_rows) { /// Check for empty column. if (reader->checkColumnEnd()) @@ -279,7 +281,7 @@ DataTypePtr JSONColumnsSchemaReaderBase::readColumnAndGetDataType(const String & do { /// If we reached max_rows_to_read, skip the rest part of this column. - if (rows_read == max_rows_to_read) + if (rows_read == max_rows) { reader->skipColumn(); break; diff --git a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.h b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.h index 2babc0734f9..886c8841540 100644 --- a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.h +++ b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.h @@ -82,11 +82,19 @@ public: bool needContext() const override { return !hints_str.empty(); } void setContext(ContextPtr & ctx) override; + void setMaxRowsAndBytesToRead(size_t max_rows, size_t max_bytes) override + { + max_rows_to_read = max_rows; + max_bytes_to_read = max_bytes; + } + + size_t getNumRowsRead() const override { return total_rows_read; } + private: NamesAndTypesList readSchema() override; - /// Read whole column in the block (up to max_rows_to_read rows) and extract the data type. - DataTypePtr readColumnAndGetDataType(const String & column_name, size_t & rows_read, size_t max_rows_to_read); + /// Read whole column in the block (up to max_rows rows) and extract the data type. 
+ DataTypePtr readColumnAndGetDataType(const String & column_name, size_t & rows_read, size_t max_rows); const FormatSettings format_settings; String hints_str; @@ -95,6 +103,10 @@ private: std::unique_ptr reader; Names column_names_from_settings; JSONInferenceInfo inference_info; + + size_t total_rows_read = 0; + size_t max_rows_to_read; + size_t max_bytes_to_read; }; } diff --git a/src/Processors/Formats/Impl/JSONCompactColumnsBlockInputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactColumnsBlockInputFormat.cpp index ade18d21892..09df7beaa73 100644 --- a/src/Processors/Formats/Impl/JSONCompactColumnsBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactColumnsBlockInputFormat.cpp @@ -53,7 +53,7 @@ void registerJSONCompactColumnsSchemaReader(FormatFactory & factory) ); factory.registerAdditionalInfoForSchemaCacheGetter("JSONCompactColumns", [](const FormatSettings & settings) { - auto result = getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::JSON); + auto result = getAdditionalFormatInfoForAllRowBasedFormats(settings) + getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::JSON); return result + fmt::format(", column_names_for_schema_inference={}", settings.column_names_for_schema_inference); }); } diff --git a/tests/queries/0_stateless/02783_max_bytes_to_read_in_schema_inference.reference b/tests/queries/0_stateless/02783_max_bytes_to_read_in_schema_inference.reference new file mode 100644 index 00000000000..d45098ddc0f --- /dev/null +++ b/tests/queries/0_stateless/02783_max_bytes_to_read_in_schema_inference.reference @@ -0,0 +1 @@ +a Nullable(Int64) diff --git a/tests/queries/0_stateless/02783_max_bytes_to_read_in_schema_inference.sql b/tests/queries/0_stateless/02783_max_bytes_to_read_in_schema_inference.sql new file mode 100644 index 00000000000..9dbf176472d --- /dev/null +++ b/tests/queries/0_stateless/02783_max_bytes_to_read_in_schema_inference.sql @@ -0,0 +1,4 @@ +set input_format_max_rows_to_read_for_schema_inference=2; +desc format('JSONEachRow', '{"a" : null}, {"a" : 42}') settings input_format_max_bytes_to_read_for_schema_inference=10; -- {serverError ONLY_NULLS_WHILE_READING_SCHEMA} +desc format('JSONEachRow', '{"a" : null}, {"a" : 42}') settings input_format_max_bytes_to_read_for_schema_inference=20; + From 79cbebaf0dcc98b947a78dcfa490493cf18f076b Mon Sep 17 00:00:00 2001 From: ltrk2 <107155950+ltrk2@users.noreply.github.com> Date: Mon, 5 Jun 2023 15:34:04 +0000 Subject: [PATCH 209/722] Remove unnecessary conditional expression --- src/Common/TransformEndianness.hpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Common/TransformEndianness.hpp b/src/Common/TransformEndianness.hpp index 228490d24a1..4d690d75d9e 100644 --- a/src/Common/TransformEndianness.hpp +++ b/src/Common/TransformEndianness.hpp @@ -31,8 +31,7 @@ template requires is_decimal inline void transformEndianness(T & x) { - if constexpr (std::endian::native != endian) - transformEndianness(x.value); + transformEndianness(x.value); } template From 67af505ed63fc49d253f67f75b814dcf551e3a2c Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 5 Jun 2023 17:04:55 +0000 Subject: [PATCH 210/722] Respect setting input_format_as_default in schema inference --- src/Processors/Formats/ISchemaReader.cpp | 6 ++++++ .../02784_schema_inference_null_as_default.reference | 9 +++++++++ .../02784_schema_inference_null_as_default.sql | 7 +++++++ 3 files changed, 22 insertions(+) create mode 100644 
tests/queries/0_stateless/02784_schema_inference_null_as_default.reference create mode 100644 tests/queries/0_stateless/02784_schema_inference_null_as_default.sql diff --git a/src/Processors/Formats/ISchemaReader.cpp b/src/Processors/Formats/ISchemaReader.cpp index c96cb373a2d..0cb6499f423 100644 --- a/src/Processors/Formats/ISchemaReader.cpp +++ b/src/Processors/Formats/ISchemaReader.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -54,6 +55,11 @@ void checkFinalInferredType( if (settings.schema_inference_make_columns_nullable) type = makeNullableRecursively(type); + /// In case when data for some column could contain nulls and regular values, + /// resulting inferred type is Nullable. + /// If input_format_null_as_default is enabled, we should remove Nullable type. + else if (settings.null_as_default) + type = removeNullable(type); } IIRowSchemaReader::IIRowSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_, DataTypePtr default_type_) diff --git a/tests/queries/0_stateless/02784_schema_inference_null_as_default.reference b/tests/queries/0_stateless/02784_schema_inference_null_as_default.reference new file mode 100644 index 00000000000..c83819ab2e2 --- /dev/null +++ b/tests/queries/0_stateless/02784_schema_inference_null_as_default.reference @@ -0,0 +1,9 @@ +x Nullable(Int64) +\N +42 +x Nullable(Int64) +\N +42 +x Int64 +0 +42 diff --git a/tests/queries/0_stateless/02784_schema_inference_null_as_default.sql b/tests/queries/0_stateless/02784_schema_inference_null_as_default.sql new file mode 100644 index 00000000000..9c9f99d8283 --- /dev/null +++ b/tests/queries/0_stateless/02784_schema_inference_null_as_default.sql @@ -0,0 +1,7 @@ +desc format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable=1; +select * from format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable=1; +desc format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable=0, input_format_null_as_default=0; +select * from format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable=0, input_format_null_as_default=0; +desc format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable=0, input_format_null_as_default=1; +select * from format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable=0, input_format_null_as_default=1; + From 028e48dfa716c7e7aa9f5e3df56adb563d653b02 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 5 Jun 2023 17:33:10 +0000 Subject: [PATCH 211/722] Update docs --- docs/en/interfaces/schema-inference.md | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/docs/en/interfaces/schema-inference.md b/docs/en/interfaces/schema-inference.md index c448d0aee47..bef858eaba0 100644 --- a/docs/en/interfaces/schema-inference.md +++ b/docs/en/interfaces/schema-inference.md @@ -1192,7 +1192,7 @@ DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : nul #### schema_inference_make_columns_nullable Controls making inferred types `Nullable` in schema inference for formats without information about nullability. -If the setting is enabled, all inferred type will be `Nullable`, if disabled, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference. 
+If the setting is enabled, all inferred type will be `Nullable`, if disabled, the inferred type will be `Nullable` only if `input_format_null_as_default` is disabled and the column contains `NULL` in a sample that is parsed during schema inference. Enabled by default. @@ -1215,7 +1215,8 @@ DESC format(JSONEachRow, $$ └─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` ```sql -SET schema_inference_make_columns_nullable = 0 +SET schema_inference_make_columns_nullable = 0; +SET input_format_null_as_default = 0; DESC format(JSONEachRow, $$ {"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]} {"id" : 2, "age" : 19, "name" : "Alan", "status" : "married", "hobbies" : ["tennis", "art"]} @@ -1232,6 +1233,25 @@ DESC format(JSONEachRow, $$ └─────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` +```sql +SET schema_inference_make_columns_nullable = 0; +SET input_format_null_as_default = 1; +DESC format(JSONEachRow, $$ + {"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]} + {"id" : 2, "age" : 19, "name" : "Alan", "status" : "married", "hobbies" : ["tennis", "art"]} + $$) +``` +```response + +┌─name────┬─type──────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ +│ id │ Int64 │ │ │ │ │ │ +│ age │ Int64 │ │ │ │ │ │ +│ name │ String │ │ │ │ │ │ +│ status │ String │ │ │ │ │ │ +│ hobbies │ Array(String) │ │ │ │ │ │ +└─────────┴───────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +``` + #### input_format_try_infer_integers If enabled, ClickHouse will try to infer integers instead of floats in schema inference for text formats. 
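The schema-inference changes above (the new `input_format_max_bytes_to_read_for_schema_inference` limit and the interplay of `schema_inference_make_columns_nullable` with `input_format_null_as_default`) can be exercised directly from a client session. A minimal sketch, reusing the inline JSON samples from the 02783/02784 tests added in these patches; the 1 MiB cap below is an arbitrary example value, not a recommendation:

```sql
-- Cap the data sampled for schema inference at 1 MiB instead of the 32 MiB default (example value).
SET input_format_max_bytes_to_read_for_schema_inference = 1048576;

-- A per-query limit too small to reach a non-NULL sample fails with ONLY_NULLS_WHILE_READING_SCHEMA,
-- mirroring the 02783 test.
DESC format(JSONEachRow, '{"a" : null}, {"a" : 42}')
SETTINGS input_format_max_bytes_to_read_for_schema_inference = 10; -- expected to fail

-- With nullable inference disabled and null_as_default enabled, a column mixing NULL and 42
-- is inferred as plain Int64, as in the 02784 test.
DESC format(JSONEachRow, '{"x" : null}, {"x" : 42}')
SETTINGS schema_inference_make_columns_nullable = 0, input_format_null_as_default = 1;
```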
From ad85faabd1941b891b406225ecf7c6b568c8328f Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Mon, 5 Jun 2023 16:09:53 +0000 Subject: [PATCH 212/722] Fix test --- tests/integration/test_storage_mongodb/test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_storage_mongodb/test.py b/tests/integration/test_storage_mongodb/test.py index e6e77c64515..6ce71fb91fa 100644 --- a/tests/integration/test_storage_mongodb/test.py +++ b/tests/integration/test_storage_mongodb/test.py @@ -71,6 +71,7 @@ def test_simple_select(started_cluster): simple_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_simple_select_from_view(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] @@ -86,7 +87,7 @@ def test_simple_select_from_view(started_cluster): node = started_cluster.instances["node"] node.query( - "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo2:27017', 'test', 'simple_table_view', 'root', 'clickhouse')" + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table_view', 'root', 'clickhouse')" ) assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" From e8c6c7967b38ebdd467fb9c04966419731b5a689 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 5 Jun 2023 18:21:40 +0000 Subject: [PATCH 213/722] Move attaching gdb to separate lib --- docker/test/stateless/run.sh | 41 +++-------------------------------- tests/ci/attach_gdb.lib | 42 ++++++++++++++++++++++++++++++++++++ tests/ci/stress_tests.lib | 41 +++-------------------------------- 3 files changed, 48 insertions(+), 76 deletions(-) create mode 100644 tests/ci/attach_gdb.lib diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index dfee7d84cde..df650b37cc6 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -15,6 +15,8 @@ dpkg -i package_folder/clickhouse-client_*.deb ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test +source /usr/share/clickhouse-test/ci/attach_gdb.lib + # install test configs /usr/share/clickhouse-test/config/install.sh @@ -85,44 +87,7 @@ fi sleep 5 -# Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog -# and clickhouse-server can do fork-exec, for example, to run some bridge. -# Do not set nostop noprint for all signals, because some it may cause gdb to hang, -# explicitly ignore non-fatal signals that are used by server. -# Number of SIGRTMIN can be determined only in runtime. -RTMIN=$(kill -l SIGRTMIN) -echo " -set follow-fork-mode parent -handle SIGHUP nostop noprint pass -handle SIGINT nostop noprint pass -handle SIGQUIT nostop noprint pass -handle SIGPIPE nostop noprint pass -handle SIGTERM nostop noprint pass -handle SIGUSR1 nostop noprint pass -handle SIGUSR2 nostop noprint pass -handle SIG$RTMIN nostop noprint pass -info signals -continue -backtrace full -thread apply all backtrace full -info registers -disassemble /s -up -disassemble /s -up -disassemble /s -p \"done\" -detach -quit -" > script.gdb - -# FIXME Hung check may work incorrectly because of attached gdb -# 1. False positives are possible -# 2. 
We cannot attach another gdb to get stacktraces if some queries hung -gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log & -sleep 5 -# gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s) -time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||: +attach_gdb_to_clickhouse function run_tests() { diff --git a/tests/ci/attach_gdb.lib b/tests/ci/attach_gdb.lib new file mode 100644 index 00000000000..2df6243f796 --- /dev/null +++ b/tests/ci/attach_gdb.lib @@ -0,0 +1,42 @@ +#!/bin/bash + +function attach_gdb_to_clickhouse() +{ + # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog + # and clickhouse-server can do fork-exec, for example, to run some bridge. + # Do not set nostop noprint for all signals, because some it may cause gdb to hang, + # explicitly ignore non-fatal signals that are used by server. + # Number of SIGRTMIN can be determined only in runtime. + RTMIN=$(kill -l SIGRTMIN) + echo " +set follow-fork-mode parent +handle SIGHUP nostop noprint pass +handle SIGINT nostop noprint pass +handle SIGQUIT nostop noprint pass +handle SIGPIPE nostop noprint pass +handle SIGTERM nostop noprint pass +handle SIGUSR1 nostop noprint pass +handle SIGUSR2 nostop noprint pass +handle SIG$RTMIN nostop noprint pass +info signals +continue +backtrace full +thread apply all backtrace full +info registers +disassemble /s +up +disassemble /s +up +disassemble /s +p \"done\" +detach +quit +" > script.gdb + + # FIXME Hung check may work incorrectly because of attached gdb + # We cannot attach another gdb to get stacktraces if some queries hung + gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log & + sleep 5 + # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s) + time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||: +} diff --git a/tests/ci/stress_tests.lib b/tests/ci/stress_tests.lib index 04df50b3248..2b8ac77b952 100644 --- a/tests/ci/stress_tests.lib +++ b/tests/ci/stress_tests.lib @@ -9,6 +9,8 @@ FAIL="\tFAIL\t\\N\t" FAILURE_CONTEXT_LINES=100 FAILURE_CONTEXT_MAX_LINE_WIDTH=300 +source attach_gdb.lib + function escaped() { # That's the simplest way I found to escape a string in bash. Yep, bash is the most convenient programming language. @@ -184,44 +186,7 @@ function start() counter=$((counter + 1)) done - # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog - # and clickhouse-server can do fork-exec, for example, to run some bridge. - # Do not set nostop noprint for all signals, because some it may cause gdb to hang, - # explicitly ignore non-fatal signals that are used by server. - # Number of SIGRTMIN can be determined only in runtime. 
- RTMIN=$(kill -l SIGRTMIN) - echo " -set follow-fork-mode parent -handle SIGHUP nostop noprint pass -handle SIGINT nostop noprint pass -handle SIGQUIT nostop noprint pass -handle SIGPIPE nostop noprint pass -handle SIGTERM nostop noprint pass -handle SIGUSR1 nostop noprint pass -handle SIGUSR2 nostop noprint pass -handle SIG$RTMIN nostop noprint pass -info signals -continue -backtrace full -thread apply all backtrace full -info registers -disassemble /s -up -disassemble /s -up -disassemble /s -p \"done\" -detach -quit -" > script.gdb - - # FIXME Hung check may work incorrectly because of attached gdb - # 1. False positives are possible - # 2. We cannot attach another gdb to get stacktraces if some queries hung - gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log & - sleep 5 - # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s) - time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||: + attach_gdb_to_clickhouse } function check_server_start() From 59095c445d7a48ae12ac48c8748d4b48699a1274 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Mon, 5 Jun 2023 20:44:20 +0200 Subject: [PATCH 214/722] Revert "Revert "make filter push down through cross join"" --- .../Optimizations/filterPushDown.cpp | 6 +++--- .../01763_filter_push_down_bugs.reference | 19 +++++++++++++++++++ .../01763_filter_push_down_bugs.sql | 19 +++++++++++++++++++ 3 files changed, 41 insertions(+), 3 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index 37bc894339f..db29038999b 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -272,7 +272,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes { /// If totals step has HAVING expression, skip it for now. /// TODO: - /// We can merge HAVING expression with current filer. + /// We can merge HAVING expression with current filter. /// Also, we can push down part of HAVING which depend only on aggregation keys. if (totals_having->getActions()) return 0; @@ -323,9 +323,9 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes { const auto & table_join = join ? join->getJoin()->getTableJoin() : filled_join->getJoin()->getTableJoin(); - /// Only inner and left(/right) join are supported. Other types may generate default values for left table keys. + /// Only inner, cross and left(/right) join are supported. Other types may generate default values for left table keys. /// So, if we push down a condition like `key != 0`, not all rows may be filtered. 
- if (table_join.kind() != JoinKind::Inner && table_join.kind() != kind) + if (table_join.kind() != JoinKind::Inner && table_join.kind() != JoinKind::Cross && table_join.kind() != kind) return 0; bool is_left = kind == JoinKind::Left; diff --git a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference index 5aa2e645509..7df35e2948d 100644 --- a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference +++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference @@ -6,3 +6,22 @@ String1_0 String2_0 String3_0 String4_0 1 String1_0 String2_0 String3_0 String4_0 1 1 [0,1,2] 1 +Expression ((Projection + Before ORDER BY)) + Filter (WHERE) + Join (JOIN FillRightFirst) + Filter (( + Before JOIN)) + ReadFromMergeTree (default.t1) + Indexes: + PrimaryKey + Keys: + id + Condition: (id in [101, 101]) + Parts: 1/1 + Granules: 1/1 + Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) + ReadFromMergeTree (default.t2) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 1/1 diff --git a/tests/queries/0_stateless/01763_filter_push_down_bugs.sql b/tests/queries/0_stateless/01763_filter_push_down_bugs.sql index 1058bf75144..2ee249b5ce7 100644 --- a/tests/queries/0_stateless/01763_filter_push_down_bugs.sql +++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.sql @@ -38,6 +38,25 @@ DROP TABLE IF EXISTS Test; select x, y from (select [0, 1, 2] as y, 1 as a, 2 as b) array join y as x where a = 1 and b = 2 and (x = 1 or x != 1) and x = 1; +DROP TABLE IF EXISTS t; create table t(a UInt8) engine=MergeTree order by a; insert into t select * from numbers(2); select a from t t1 join t t2 on t1.a = t2.a where t1.a; +DROP TABLE IF EXISTS t; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 (id Int64, create_time DateTime) ENGINE = MergeTree ORDER BY id; +CREATE TABLE t2 (delete_time DateTime) ENGINE = MergeTree ORDER BY delete_time; + +insert into t1 values (101, '2023-05-28 00:00:00'), (102, '2023-05-28 00:00:00'); +insert into t2 values ('2023-05-31 00:00:00'); + +EXPLAIN indexes=1 SELECT id, delete_time FROM t1 + CROSS JOIN ( + SELECT delete_time + FROM t2 +) AS d WHERE create_time < delete_time AND id = 101; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; From fd39616e780ac216fd07aa2625119a65c85661ee Mon Sep 17 00:00:00 2001 From: Han Fei Date: Mon, 5 Jun 2023 22:09:32 +0200 Subject: [PATCH 215/722] suppress some tests for analyzer --- tests/queries/0_stateless/01479_cross_join_9855.sql | 4 ++-- tests/queries/0_stateless/01763_filter_push_down_bugs.sql | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01479_cross_join_9855.sql b/tests/queries/0_stateless/01479_cross_join_9855.sql index 6dc76f22057..9dcf209a1cd 100644 --- a/tests/queries/0_stateless/01479_cross_join_9855.sql +++ b/tests/queries/0_stateless/01479_cross_join_9855.sql @@ -2,8 +2,8 @@ SET cross_to_inner_join_rewrite = 1; SELECT count() FROM numbers(4) AS n1, numbers(3) AS n2 -WHERE n1.number > (select avg(n.number) from numbers(3) n); +WHERE n1.number > (select avg(n.number) from numbers(3) n) SETTINGS allow_experimental_analyzer=0; SELECT count() FROM numbers(4) AS n1, numbers(3) AS n2, numbers(6) AS n3 -WHERE n1.number > (select avg(n.number) from numbers(3) n); +WHERE n1.number > (select avg(n.number) from numbers(3) n) SETTINGS allow_experimental_analyzer=0; diff --git a/tests/queries/0_stateless/01763_filter_push_down_bugs.sql 
b/tests/queries/0_stateless/01763_filter_push_down_bugs.sql index 2ee249b5ce7..5f7f4379714 100644 --- a/tests/queries/0_stateless/01763_filter_push_down_bugs.sql +++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.sql @@ -56,7 +56,7 @@ EXPLAIN indexes=1 SELECT id, delete_time FROM t1 CROSS JOIN ( SELECT delete_time FROM t2 -) AS d WHERE create_time < delete_time AND id = 101; +) AS d WHERE create_time < delete_time AND id = 101 SETTINGS allow_experimental_analyzer=0; DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; From 495482cdb2b6a6a2d272c50bb3995b0409f7fb91 Mon Sep 17 00:00:00 2001 From: tpanetti Date: Mon, 5 Jun 2023 15:22:29 -0700 Subject: [PATCH 216/722] Refactor ClickHouse->MySQL Type conversion and add configuration setting to trigger type conversion --- src/Core/Settings.h | 1 + src/DataTypes/DataTypeAggregateFunction.h | 2 +- src/DataTypes/DataTypeArray.h | 2 +- src/DataTypes/DataTypeDate.h | 2 +- src/DataTypes/DataTypeDate32.h | 2 +- src/DataTypes/DataTypeDateTime.h | 2 +- src/DataTypes/DataTypeDateTime64.h | 2 +- src/DataTypes/DataTypeEnum.cpp | 1 - src/DataTypes/DataTypeEnum.h | 3 +- src/DataTypes/DataTypeFixedString.h | 3 +- src/DataTypes/DataTypeFunction.h | 2 +- src/DataTypes/DataTypeIPv4andIPv6.h | 4 +- src/DataTypes/DataTypeInterval.h | 2 +- src/DataTypes/DataTypeLowCardinality.cpp | 3 +- src/DataTypes/DataTypeLowCardinality.h | 3 +- src/DataTypes/DataTypeMap.h | 2 +- src/DataTypes/DataTypeNothing.h | 2 +- src/DataTypes/DataTypeNullable.h | 2 +- src/DataTypes/DataTypeNumberBase.cpp | 67 +++++-- src/DataTypes/DataTypeNumberBase.h | 4 +- src/DataTypes/DataTypeObject.h | 2 +- src/DataTypes/DataTypeSet.h | 2 +- src/DataTypes/DataTypeString.h | 3 +- src/DataTypes/DataTypeTuple.h | 2 +- src/DataTypes/DataTypeUUID.h | 2 +- src/DataTypes/DataTypesDecimal.cpp | 5 + src/DataTypes/DataTypesDecimal.h | 3 +- src/DataTypes/IDataType.h | 12 +- src/Storages/System/StorageSystemColumns.cpp | 11 +- ...show_columns_mysql_compatibility.reference | 187 +++++++++++++++--- .../02775_show_columns_mysql_compatibility.sh | 31 ++- 31 files changed, 278 insertions(+), 93 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 67c92a0be8b..1ce30ff121f 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -190,6 +190,7 @@ class IColumn; M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental inverted index.", 0) \ \ M(UInt64, mysql_max_rows_to_insert, 65536, "The maximum number of rows in MySQL batch insertion of the MySQL storage engine", 0) \ + M(Bool, output_format_mysql_types, false, "Use MySQL converted types when connected via MySQL compatibility", 0) \ \ M(UInt64, optimize_min_equality_disjunction_chain_length, 3, "The minimum length of the expression `expr = x1 OR ... 
expr = xN` for optimization ", 0) \ \ diff --git a/src/DataTypes/DataTypeAggregateFunction.h b/src/DataTypes/DataTypeAggregateFunction.h index 13ca3508580..83c9f10f407 100644 --- a/src/DataTypes/DataTypeAggregateFunction.h +++ b/src/DataTypes/DataTypeAggregateFunction.h @@ -45,7 +45,7 @@ public: String doGetName() const override; String getNameWithoutVersion() const; const char * getFamilyName() const override { return "AggregateFunction"; } - const char * getSQLCompatibleName() const override { return "TEXT"; } + String getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return TypeIndex::AggregateFunction; } Array getParameters() const { return parameters; } diff --git a/src/DataTypes/DataTypeArray.h b/src/DataTypes/DataTypeArray.h index 528062b60be..2714ca1d023 100644 --- a/src/DataTypes/DataTypeArray.h +++ b/src/DataTypes/DataTypeArray.h @@ -30,7 +30,7 @@ public: { return "Array"; } - const char * getSQLCompatibleName() const override + String getSQLCompatibleName() const override { return "TEXT"; } diff --git a/src/DataTypes/DataTypeDate.h b/src/DataTypes/DataTypeDate.h index 7b622ae04a3..0d557cad5f0 100644 --- a/src/DataTypes/DataTypeDate.h +++ b/src/DataTypes/DataTypeDate.h @@ -13,7 +13,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Date; } const char * getFamilyName() const override { return family_name; } - const char * getSQLCompatibleName() const override { return "DATE"; } + String getSQLCompatibleName() const override { return "DATE"; } bool canBeUsedAsVersion() const override { return true; } bool canBeInsideNullable() const override { return true; } diff --git a/src/DataTypes/DataTypeDate32.h b/src/DataTypes/DataTypeDate32.h index 65b0ec7407e..0879a404179 100644 --- a/src/DataTypes/DataTypeDate32.h +++ b/src/DataTypes/DataTypeDate32.h @@ -13,7 +13,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Date32; } const char * getFamilyName() const override { return family_name; } - const char * getSQLCompatibleName() const override { return "DATE"; } + String getSQLCompatibleName() const override { return "DATE"; } Field getDefault() const override { diff --git a/src/DataTypes/DataTypeDateTime.h b/src/DataTypes/DataTypeDateTime.h index 2facc758f90..edc8b016490 100644 --- a/src/DataTypes/DataTypeDateTime.h +++ b/src/DataTypes/DataTypeDateTime.h @@ -36,7 +36,7 @@ public: static constexpr auto family_name = "DateTime"; const char * getFamilyName() const override { return family_name; } - const char * getSQLCompatibleName() const override { return "DATETIME"; } + String getSQLCompatibleName() const override { return "DATETIME"; } String doGetName() const override; TypeIndex getTypeId() const override { return TypeIndex::DateTime; } diff --git a/src/DataTypes/DataTypeDateTime64.h b/src/DataTypes/DataTypeDateTime64.h index b836b84918f..e786cc09f28 100644 --- a/src/DataTypes/DataTypeDateTime64.h +++ b/src/DataTypes/DataTypeDateTime64.h @@ -28,7 +28,7 @@ public: DataTypeDateTime64(UInt32 scale_, const TimezoneMixin & time_zone_info); const char * getFamilyName() const override { return family_name; } - const char * getSQLCompatibleName() const override { return "DATETIME"; } + String getSQLCompatibleName() const override { return "DATETIME"; } std::string doGetName() const override; TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypeEnum.cpp b/src/DataTypes/DataTypeEnum.cpp index 24a3976179d..1750ae785bf 100644 --- a/src/DataTypes/DataTypeEnum.cpp +++ 
b/src/DataTypes/DataTypeEnum.cpp @@ -90,7 +90,6 @@ template DataTypeEnum::DataTypeEnum(const Values & values_) : EnumValues(values_) , type_name(generateName(this->getValues())) - , my_sql_type_name(generateMySQLName(this->getValues())) { } diff --git a/src/DataTypes/DataTypeEnum.h b/src/DataTypes/DataTypeEnum.h index 2cdaa2db06c..d148f753c82 100644 --- a/src/DataTypes/DataTypeEnum.h +++ b/src/DataTypes/DataTypeEnum.h @@ -45,7 +45,6 @@ public: private: std::string type_name; - std::string my_sql_type_name; static std::string generateName(const Values & values); static std::string generateMySQLName(const Values & values); @@ -54,7 +53,7 @@ public: std::string doGetName() const override { return type_name; } const char * getFamilyName() const override; - const char * getSQLCompatibleName() const override { return my_sql_type_name.c_str(); } + String getSQLCompatibleName() const override { return generateMySQLName(this->getValues()); } TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypeFixedString.h b/src/DataTypes/DataTypeFixedString.h index 2900efd5a34..22ec793208d 100644 --- a/src/DataTypes/DataTypeFixedString.h +++ b/src/DataTypes/DataTypeFixedString.h @@ -42,7 +42,8 @@ public: TypeIndex getTypeId() const override { return type_id; } const char * getFamilyName() const override { return "FixedString"; } - const char * getSQLCompatibleName() const override { return "TEXT"; } + /// Use TEXT for compatibility with MySQL to allow arbitrary bytes. + String getSQLCompatibleName() const override { return "TEXT"; } size_t getN() const { diff --git a/src/DataTypes/DataTypeFunction.h b/src/DataTypes/DataTypeFunction.h index df59f7738b2..b57c0587dde 100644 --- a/src/DataTypes/DataTypeFunction.h +++ b/src/DataTypes/DataTypeFunction.h @@ -24,7 +24,7 @@ public: std::string doGetName() const override; const char * getFamilyName() const override { return "Function"; } - const char * getSQLCompatibleName() const override { return "TEXT"; } + String getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return TypeIndex::Function; } const DataTypes & getArgumentTypes() const diff --git a/src/DataTypes/DataTypeIPv4andIPv6.h b/src/DataTypes/DataTypeIPv4andIPv6.h index be0ebb90f3c..487ce04f67c 100644 --- a/src/DataTypes/DataTypeIPv4andIPv6.h +++ b/src/DataTypes/DataTypeIPv4andIPv6.h @@ -19,7 +19,7 @@ public: static constexpr auto type_id = TypeToTypeIndex; const char * getFamilyName() const override { return TypeName.data(); } - const char * getSQLCompatibleName() const override { return "TEXT"; } + String getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return type_id; } @@ -61,7 +61,7 @@ public: static constexpr auto type_id = TypeToTypeIndex; const char * getFamilyName() const override { return TypeName.data(); } - const char * getSQLCompatibleName() const override { return "TEXT"; } + String getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypeInterval.h b/src/DataTypes/DataTypeInterval.h index ee2157431dd..7de56c13b56 100644 --- a/src/DataTypes/DataTypeInterval.h +++ b/src/DataTypes/DataTypeInterval.h @@ -26,7 +26,7 @@ public: std::string doGetName() const override { return fmt::format("Interval{}", kind.toString()); } const char * getFamilyName() const override { return "Interval"; } - const char * getSQLCompatibleName() const override { return "TEXT"; } + String 
getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return TypeIndex::Interval; } bool equals(const IDataType & rhs) const override; diff --git a/src/DataTypes/DataTypeLowCardinality.cpp b/src/DataTypes/DataTypeLowCardinality.cpp index e59613e6974..8293455cabc 100644 --- a/src/DataTypes/DataTypeLowCardinality.cpp +++ b/src/DataTypes/DataTypeLowCardinality.cpp @@ -28,8 +28,7 @@ namespace ErrorCodes } DataTypeLowCardinality::DataTypeLowCardinality(DataTypePtr dictionary_type_) - : dictionary_type(std::move(dictionary_type_)), - mysql_name(dictionary_type->getSQLCompatibleName()) + : dictionary_type(std::move(dictionary_type_)) { auto inner_type = dictionary_type; if (dictionary_type->isNullable()) diff --git a/src/DataTypes/DataTypeLowCardinality.h b/src/DataTypes/DataTypeLowCardinality.h index 4dee8565568..f6d8d07a312 100644 --- a/src/DataTypes/DataTypeLowCardinality.h +++ b/src/DataTypes/DataTypeLowCardinality.h @@ -11,7 +11,6 @@ class DataTypeLowCardinality : public IDataType { private: DataTypePtr dictionary_type; - std::string mysql_name; public: @@ -24,7 +23,7 @@ public: return "LowCardinality(" + dictionary_type->getName() + ")"; } const char * getFamilyName() const override { return "LowCardinality"; } - const char * getSQLCompatibleName() const override { return mysql_name.c_str(); } + String getSQLCompatibleName() const override { return dictionary_type->getSQLCompatibleName(); } TypeIndex getTypeId() const override { return TypeIndex::LowCardinality; } diff --git a/src/DataTypes/DataTypeMap.h b/src/DataTypes/DataTypeMap.h index 299119f1759..294c5d7ac77 100644 --- a/src/DataTypes/DataTypeMap.h +++ b/src/DataTypes/DataTypeMap.h @@ -30,7 +30,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Map; } std::string doGetName() const override; const char * getFamilyName() const override { return "Map"; } - const char * getSQLCompatibleName() const override { return "JSON"; } + String getSQLCompatibleName() const override { return "JSON"; } bool canBeInsideNullable() const override { return false; } diff --git a/src/DataTypes/DataTypeNothing.h b/src/DataTypes/DataTypeNothing.h index b35ced5dcb3..c3a7e2d09f0 100644 --- a/src/DataTypes/DataTypeNothing.h +++ b/src/DataTypes/DataTypeNothing.h @@ -16,7 +16,7 @@ public: static constexpr bool is_parametric = false; const char * getFamilyName() const override { return "Nothing"; } - const char * getSQLCompatibleName() const override { return "TEXT"; } + String getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return TypeIndex::Nothing; } diff --git a/src/DataTypes/DataTypeNullable.h b/src/DataTypes/DataTypeNullable.h index b5fe1bb2dd9..e3165414c07 100644 --- a/src/DataTypes/DataTypeNullable.h +++ b/src/DataTypes/DataTypeNullable.h @@ -16,7 +16,7 @@ public: explicit DataTypeNullable(const DataTypePtr & nested_data_type_); std::string doGetName() const override { return "Nullable(" + nested_data_type->getName() + ")"; } const char * getFamilyName() const override { return "Nullable"; } - const char * getSQLCompatibleName() const override { return nested_data_type->getSQLCompatibleName(); } + String getSQLCompatibleName() const override { return nested_data_type->getSQLCompatibleName(); } TypeIndex getTypeId() const override { return TypeIndex::Nullable; } MutableColumnPtr createColumn() const override; diff --git a/src/DataTypes/DataTypeNumberBase.cpp b/src/DataTypes/DataTypeNumberBase.cpp index db654448e83..e4c0fb96483 100644 --- 
a/src/DataTypes/DataTypeNumberBase.cpp +++ b/src/DataTypes/DataTypeNumberBase.cpp @@ -11,6 +11,55 @@ Field DataTypeNumberBase::getDefault() const { return NearestFieldType(); } +template +String DataTypeNumberBase::getSQLCompatibleName() const +{ + if constexpr (std::is_same_v) + { + return "TINYINT"; + } + else if constexpr (std::is_same_v) + { + return "SMALLINT"; + } + else if constexpr (std::is_same_v) + { + return "INTEGER"; + } + else if constexpr (std::is_same_v) + { + return "BIGINT"; + } + else if constexpr (std::is_same_v) + { + return "TINYINT UNSIGNED"; + } + else if constexpr (std::is_same_v) + { + return "SMALLINT UNSIGNED"; + } + else if constexpr (std::is_same_v) + { + return "INTEGER UNSIGNED"; + } + else if constexpr (std::is_same_v) + { + return "BIGINT UNSIGNED"; + } + else if constexpr (std::is_same_v) + { + return "FLOAT"; + } + else if constexpr (std::is_same_v) + { + return "DOUBLE"; + } + /// Unsupported types are converted to TEXT + else + { + return "TEXT"; + } +} template MutableColumnPtr DataTypeNumberBase::createColumn() const @@ -30,24 +79,6 @@ bool DataTypeNumberBase::isValueRepresentedByUnsignedInteger() const return is_integer && is_unsigned_v; } -template -const std::map DataTypeNumberBase::mysqlTypeMap = { - {"UInt8", "TINYINT UNSIGNED"}, - {"UInt16", "SMALLINT UNSIGNED"}, - {"UInt32", "MEDIUMINT UNSIGNEd"}, - {"UInt64", "BIGINT UNSIGNED"}, - {"UInt128", "TEXT"}, - {"UInt256", "TEXT"}, - {"Int8", "TINYINT"}, - {"Int16", "SMALLINT"}, - {"Int32", "INT"}, - {"Int64", "BIGINT"}, - {"Int128", "TEXT"}, - {"Int256", "TEXT"}, - {"Float32", "FLOAT"}, - {"Float64", "DOUBLE"}, -}; - /// Explicit template instantiations - to avoid code bloat in headers. template class DataTypeNumberBase; template class DataTypeNumberBase; diff --git a/src/DataTypes/DataTypeNumberBase.h b/src/DataTypes/DataTypeNumberBase.h index 1a855a974f0..d902c62505e 100644 --- a/src/DataTypes/DataTypeNumberBase.h +++ b/src/DataTypes/DataTypeNumberBase.h @@ -20,14 +20,12 @@ public: static constexpr bool is_parametric = false; static constexpr auto family_name = TypeName; static constexpr auto type_id = TypeToTypeIndex; - // Create a map from the name of the type to the name of the type in MySQL. 
- static const std::map mysqlTypeMap; using FieldType = T; using ColumnType = ColumnVector; const char * getFamilyName() const override { return TypeName.data(); } - const char * getSQLCompatibleName() const override { return mysqlTypeMap.at(TypeName.data()).c_str(); } + String getSQLCompatibleName() const override; TypeIndex getTypeId() const override { return TypeToTypeIndex; } Field getDefault() const override; diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 618c7389758..2e1e5398f7e 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -23,7 +23,7 @@ public: DataTypeObject(const String & schema_format_, bool is_nullable_); const char * getFamilyName() const override { return "Object"; } - const char * getSQLCompatibleName() const override { return "JSON"; } + String getSQLCompatibleName() const override { return "JSON"; } String doGetName() const override; TypeIndex getTypeId() const override { return TypeIndex::Object; } diff --git a/src/DataTypes/DataTypeSet.h b/src/DataTypes/DataTypeSet.h index 916b4f071a5..d88d76b31be 100644 --- a/src/DataTypes/DataTypeSet.h +++ b/src/DataTypes/DataTypeSet.h @@ -15,7 +15,7 @@ class DataTypeSet final : public IDataTypeDummy public: static constexpr bool is_parametric = true; const char * getFamilyName() const override { return "Set"; } - const char * getSQLCompatibleName() const override { return "TEXT"; } + String getSQLCompatibleName() const override { return "TEXT"; } TypeIndex getTypeId() const override { return TypeIndex::Set; } bool equals(const IDataType & rhs) const override { return typeid(rhs) == typeid(*this); } diff --git a/src/DataTypes/DataTypeString.h b/src/DataTypes/DataTypeString.h index 338b3846266..c39fa90f6e7 100644 --- a/src/DataTypes/DataTypeString.h +++ b/src/DataTypes/DataTypeString.h @@ -21,8 +21,7 @@ public: return "String"; } - // FIXME: string can contain arbitrary bytes, not only UTF-8 sequences - const char * getSQLCompatibleName() const override { return "BLOB"; } + String getSQLCompatibleName() const override { return "BLOB"; } TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h index 93fa87b1332..ea05e6ae59b 100644 --- a/src/DataTypes/DataTypeTuple.h +++ b/src/DataTypes/DataTypeTuple.h @@ -33,7 +33,7 @@ public: TypeIndex getTypeId() const override { return TypeIndex::Tuple; } std::string doGetName() const override; const char * getFamilyName() const override { return "Tuple"; } - const char * getSQLCompatibleName() const override { return "JSON"; } + String getSQLCompatibleName() const override { return "JSON"; } bool canBeInsideNullable() const override { return false; } bool supportsSparseSerialization() const override { return true; } diff --git a/src/DataTypes/DataTypeUUID.h b/src/DataTypes/DataTypeUUID.h index bbf35074df3..8664c3bcfd1 100644 --- a/src/DataTypes/DataTypeUUID.h +++ b/src/DataTypes/DataTypeUUID.h @@ -18,7 +18,7 @@ public: static constexpr auto type_id = TypeIndex::UUID; const char * getFamilyName() const override { return "UUID"; } - const char * getSQLCompatibleName() const override { return "CHAR"; } + String getSQLCompatibleName() const override { return "CHAR"; } TypeIndex getTypeId() const override { return type_id; } diff --git a/src/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp index 1c2a63371ee..fa044d4ac9c 100644 --- a/src/DataTypes/DataTypesDecimal.cpp +++ b/src/DataTypes/DataTypesDecimal.cpp @@ -28,6 +28,11 @@ std::string 
DataTypeDecimal::doGetName() const return fmt::format("Decimal({}, {})", this->precision, this->scale); } +template +std::string DataTypeDecimal::getSQLCompatibleName() const +{ + return fmt::format("DECIMAL({}, {})", this->precision, this->scale); +} template bool DataTypeDecimal::equals(const IDataType & rhs) const diff --git a/src/DataTypes/DataTypesDecimal.h b/src/DataTypes/DataTypesDecimal.h index 6f3bf582aeb..5e4cfab7928 100644 --- a/src/DataTypes/DataTypesDecimal.h +++ b/src/DataTypes/DataTypesDecimal.h @@ -37,10 +37,9 @@ public: using Base::Base; static constexpr auto family_name = "Decimal"; - static constexpr auto mysql_name = "DECIMAL"; const char * getFamilyName() const override { return family_name; } - const char * getSQLCompatibleName() const override { return mysql_name; } + String getSQLCompatibleName() const override; std::string doGetName() const override; TypeIndex getTypeId() const override { return TypeToTypeIndex; } diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 93fdbab05ef..51a9ecef0cc 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -71,19 +71,12 @@ public: return doGetName(); } - /// MySQL equivalent Name of data type (examples: UInt64, Array(String)). - String getMySQLTypeName() const - { - if (custom_name) - return custom_name->getName(); - else - return doGetMySQLName(); - } DataTypePtr getPtr() const { return shared_from_this(); } /// Name of data type family (example: FixedString, Array). virtual const char * getFamilyName() const = 0; - virtual const char * getSQLCompatibleName() const = 0; + /// Name of corresponding data type in MySQL (exampe: Bigint, Blob, etc) + virtual String getSQLCompatibleName() const = 0; /// Data type id. It's used for runtime type checks. virtual TypeIndex getTypeId() const = 0; @@ -135,7 +128,6 @@ public: protected: virtual String doGetName() const { return getFamilyName(); } - virtual String doGetMySQLName() const { return getSQLCompatibleName(); } virtual SerializationPtr doGetDefaultSerialization() const = 0; public: diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index f391a392dbb..684c35709a4 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -74,7 +74,8 @@ public: : ISource(header_) , columns_mask(std::move(columns_mask_)), max_block_size(max_block_size_) , databases(std::move(databases_)), tables(std::move(tables_)), storages(std::move(storages_)) - , clientInfo(context->getClientInfo()) + , client_info_interface(context->getClientInfo().interface) + , use_mysql_types(context->getSettingsRef().output_format_mysql_types) , total_tables(tables->size()), access(context->getAccess()) , query_id(context->getCurrentQueryId()), lock_acquire_timeout(context->getSettingsRef().lock_acquire_timeout) { @@ -132,9 +133,10 @@ protected: auto get_type_name = [this](const IDataType& type) -> std::string { - if (clientInfo.interface == DB::ClientInfo::Interface::MYSQL) + // Check if the output_format_mysql_types setting is enabled and client is connected via MySQL protocol + if (use_mysql_types && client_info_interface == DB::ClientInfo::Interface::MYSQL) { - return type.getMySQLTypeName(); + return type.getSQLCompatibleName(); } else { @@ -293,7 +295,8 @@ private: ColumnPtr databases; ColumnPtr tables; Storages storages; - ClientInfo clientInfo; + ClientInfo::Interface client_info_interface; + bool use_mysql_types; size_t db_table_num = 0; size_t total_tables; 
std::shared_ptr access; diff --git a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference index 1742cd9c90c..68e7be9ae6f 100644 --- a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference @@ -4,6 +4,44 @@ Create pseudo-random database name Create tab duplicate table Run MySQL test field type null key default extra +aggregate_function AggregateFunction(sum, Int32) 0 NULL +array_value Array(Int32) 0 NULL +boolean_value UInt8 0 NULL +date32_value Date32 0 NULL +date_value Date 0 NULL +datetime64_value DateTime64(3) 0 NULL +datetime_value DateTime 0 NULL +decimal_value Decimal(10, 2) 0 NULL +enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3) 0 NULL +fixed_string_value FixedString(10) 0 NULL +float32 Float32 0 NULL +float64 Float64 0 NULL +int128 Int128 0 NULL +int16 Int16 0 NULL +int256 Int256 0 NULL +int32 Int32 0 NULL +int64 Int64 0 NULL +int8 Int8 0 NULL +ipv4_value IPv4 0 NULL +ipv6_value IPv6 0 NULL +json_value Object('json') 0 NULL +low_cardinality LowCardinality(String) 0 NULL +low_cardinality_date LowCardinality(DateTime) 0 NULL +map_value Map(String, Int32) 0 NULL +nested.nested_int Array(Int32) 0 NULL +nested.nested_string Array(String) 0 NULL +nint32 Nullable(Int32) 1 NULL +nullable_value Nullable(Int32) 1 NULL +string_value String 0 NULL +tuple_value Tuple(Int32, String) 0 NULL +uint128 UInt128 0 NULL +uint16 UInt16 0 NULL +uint256 UInt256 0 NULL +uint32 UInt32 0 NULL +uint64 UInt64 0 PRI SOR NULL +uint8 UInt8 0 NULL +uuid_value UUID 0 NULL +field type null key default extra aggregate_function TEXT 0 NULL array_value TEXT 0 NULL boolean_value TINYINT UNSIGNED 0 NULL @@ -11,12 +49,17 @@ date32_value DATE 0 NULL date_value DATE 0 NULL datetime64_value DATETIME 0 NULL datetime_value DATETIME 0 NULL -decimal_value DECIMAL 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL enum_value ENUM('apple', 'banana', 'orange') 0 NULL fixed_string_value TEXT 0 NULL float32 FLOAT 0 NULL float64 DOUBLE 0 NULL -int32 INT 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL ipv4_value TEXT 0 NULL ipv6_value TEXT 0 NULL json_value JSON 0 NULL @@ -25,10 +68,16 @@ low_cardinality_date DATETIME 0 NULL map_value JSON 0 NULL nested.nested_int TEXT 0 NULL nested.nested_string TEXT 0 NULL -nullable_value INT 0 NULL +nint32 INTEGER 0 NULL +nullable_value INTEGER 0 NULL string_value BLOB 0 NULL tuple_value JSON 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL uuid_value CHAR 0 NULL field type null key default extra aggregate_function TEXT 0 NULL @@ -38,12 +87,17 @@ date32_value DATE 0 NULL date_value DATE 0 NULL datetime64_value DATETIME 0 NULL datetime_value DATETIME 0 NULL -decimal_value DECIMAL 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL enum_value ENUM('apple', 'banana', 'orange') 0 NULL fixed_string_value TEXT 0 NULL float32 FLOAT 0 NULL float64 DOUBLE 0 NULL -int32 INT 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL ipv4_value TEXT 0 NULL ipv6_value TEXT 0 NULL json_value JSON 0 NULL @@ -52,10 +106,16 @@ low_cardinality_date DATETIME 0 NULL map_value JSON 0 NULL nested.nested_int TEXT 0 NULL nested.nested_string 
TEXT 0 NULL -nullable_value INT 0 NULL +nint32 INTEGER 0 NULL +nullable_value INTEGER 0 NULL string_value BLOB 0 NULL tuple_value JSON 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL uuid_value CHAR 0 NULL field type null key default extra collation comment privileges aggregate_function TEXT 0 NULL NULL @@ -65,12 +125,17 @@ date32_value DATE 0 NULL NULL date_value DATE 0 NULL NULL datetime64_value DATETIME 0 NULL NULL datetime_value DATETIME 0 NULL NULL -decimal_value DECIMAL 0 NULL NULL +decimal_value DECIMAL(10, 2) 0 NULL NULL enum_value ENUM('apple', 'banana', 'orange') 0 NULL NULL fixed_string_value TEXT 0 NULL NULL float32 FLOAT 0 NULL NULL float64 DOUBLE 0 NULL NULL -int32 INT 0 NULL NULL +int128 TEXT 0 NULL NULL +int16 SMALLINT 0 NULL NULL +int256 TEXT 0 NULL NULL +int32 INTEGER 0 NULL NULL +int64 BIGINT 0 NULL NULL +int8 TINYINT 0 NULL NULL ipv4_value TEXT 0 NULL NULL ipv6_value TEXT 0 NULL NULL json_value JSON 0 NULL NULL @@ -79,15 +144,32 @@ low_cardinality_date DATETIME 0 NULL NULL map_value JSON 0 NULL NULL nested.nested_int TEXT 0 NULL NULL nested.nested_string TEXT 0 NULL NULL -nullable_value INT 0 NULL NULL +nint32 INTEGER 0 NULL NULL +nullable_value INTEGER 0 NULL NULL string_value BLOB 0 NULL NULL tuple_value JSON 0 NULL NULL +uint128 TEXT 0 NULL NULL +uint16 SMALLINT UNSIGNED 0 NULL NULL +uint256 TEXT 0 NULL NULL +uint32 INTEGER UNSIGNED 0 NULL NULL uint64 BIGINT UNSIGNED 0 PRI SOR NULL NULL +uint8 TINYINT UNSIGNED 0 NULL NULL uuid_value CHAR 0 NULL NULL field type null key default extra -int32 INT 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL nested.nested_int TEXT 0 NULL +nint32 INTEGER 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL field type null key default extra aggregate_function TEXT 0 NULL array_value TEXT 0 NULL @@ -96,7 +178,7 @@ date32_value DATE 0 NULL date_value DATE 0 NULL datetime64_value DATETIME 0 NULL datetime_value DATETIME 0 NULL -decimal_value DECIMAL 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL enum_value ENUM('apple', 'banana', 'orange') 0 NULL fixed_string_value TEXT 0 NULL float32 FLOAT 0 NULL @@ -108,14 +190,25 @@ low_cardinality BLOB 0 NULL low_cardinality_date DATETIME 0 NULL map_value JSON 0 NULL nested.nested_string TEXT 0 NULL -nullable_value INT 0 NULL +nullable_value INTEGER 0 NULL string_value BLOB 0 NULL tuple_value JSON 0 NULL uuid_value CHAR 0 NULL field type null key default extra -int32 INT 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL nested.nested_int TEXT 0 NULL +nint32 INTEGER 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL field type null key default extra aggregate_function TEXT 0 NULL array_value TEXT 0 NULL @@ -124,7 +217,7 @@ date32_value DATE 0 NULL date_value DATE 0 NULL datetime64_value DATETIME 0 NULL datetime_value DATETIME 0 NULL -decimal_value DECIMAL 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL enum_value ENUM('apple', 'banana', 'orange') 0 NULL fixed_string_value TEXT 0 NULL float32 FLOAT 0 NULL @@ -136,14 +229,25 @@ low_cardinality BLOB 0 NULL 
low_cardinality_date DATETIME 0 NULL map_value JSON 0 NULL nested.nested_string TEXT 0 NULL -nullable_value INT 0 NULL +nullable_value INTEGER 0 NULL string_value BLOB 0 NULL tuple_value JSON 0 NULL uuid_value CHAR 0 NULL field type null key default extra -int32 INT 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL nested.nested_int TEXT 0 NULL +nint32 INTEGER 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL field type null key default extra aggregate_function TEXT 0 NULL field type null key default extra @@ -154,12 +258,17 @@ date32_value DATE 0 NULL date_value DATE 0 NULL datetime64_value DATETIME 0 NULL datetime_value DATETIME 0 NULL -decimal_value DECIMAL 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL enum_value ENUM('apple', 'banana', 'orange') 0 NULL fixed_string_value TEXT 0 NULL float32 FLOAT 0 NULL float64 DOUBLE 0 NULL -int32 INT 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL ipv4_value TEXT 0 NULL ipv6_value TEXT 0 NULL json_value JSON 0 NULL @@ -168,10 +277,16 @@ low_cardinality_date DATETIME 0 NULL map_value JSON 0 NULL nested.nested_int TEXT 0 NULL nested.nested_string TEXT 0 NULL -nullable_value INT 0 NULL +nint32 INTEGER 0 NULL +nullable_value INTEGER 0 NULL string_value BLOB 0 NULL tuple_value JSON 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL uuid_value CHAR 0 NULL field type null key default extra aggregate_function TEXT 0 NULL @@ -181,12 +296,17 @@ date32_value DATE 0 NULL date_value DATE 0 NULL datetime64_value DATETIME 0 NULL datetime_value DATETIME 0 NULL -decimal_value DECIMAL 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL enum_value ENUM('apple', 'banana', 'orange') 0 NULL fixed_string_value TEXT 0 NULL float32 FLOAT 0 NULL float64 DOUBLE 0 NULL -int32 INT 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL ipv4_value TEXT 0 NULL ipv6_value TEXT 0 NULL json_value JSON 0 NULL @@ -195,10 +315,16 @@ low_cardinality_date DATETIME 0 NULL map_value JSON 0 NULL nested.nested_int TEXT 0 NULL nested.nested_string TEXT 0 NULL -nullable_value INT 0 NULL +nint32 INTEGER 0 NULL +nullable_value INTEGER 0 NULL string_value BLOB 0 NULL tuple_value JSON 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL uuid_value CHAR 0 NULL field type null key default extra aggregate_function TEXT 0 NULL @@ -208,12 +334,17 @@ date32_value DATE 0 NULL date_value DATE 0 NULL datetime64_value DATETIME 0 NULL datetime_value DATETIME 0 NULL -decimal_value DECIMAL 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL enum_value ENUM('apple', 'banana', 'orange') 0 NULL fixed_string_value TEXT 0 NULL float32 FLOAT 0 NULL float64 DOUBLE 0 NULL -int32 INT 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL ipv4_value TEXT 0 NULL ipv6_value TEXT 0 NULL json_value JSON 0 NULL @@ -222,8 +353,14 @@ low_cardinality_date DATETIME 0 NULL map_value JSON 0 NULL nested.nested_int TEXT 0 NULL 
nested.nested_string TEXT 0 NULL -nullable_value INT 0 NULL +nint32 INTEGER 0 NULL +nullable_value INTEGER 0 NULL string_value BLOB 0 NULL tuple_value JSON 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL uuid_value CHAR 0 NULL diff --git a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh index fd1ad92f060..938102cb5fc 100755 --- a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh @@ -17,15 +17,25 @@ ${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS tab" ${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde" ${CLICKHOUSE_LOCAL} --query "DROP TABLE IF EXISTS database_123456789abcde.tab" -#${CLICKHOUSE_LOCAL} --query "SET allow_suspicious_low_cardinality_types = 1;" echo "Create tab table " ${CLICKHOUSE_LOCAL} -n -q " SET allow_suspicious_low_cardinality_types=1; - SET allow_experimental_object_type =1; + SET allow_experimental_object_type=1; CREATE TABLE tab ( + uint8 UInt8, + uint16 UInt16, + uint32 UInt32, uint64 UInt64, - int32 Nullable(Int32), + uint128 UInt128, + uint256 UInt256, + int8 Int8, + int16 Int16, + int32 Int32, + int64 Int64, + int128 Int128, + int256 Int256, + nint32 Nullable(Int32), float32 Float32, float64 Float64, decimal_value Decimal(10, 2), @@ -67,8 +77,19 @@ ${CLICKHOUSE_LOCAL} -n -q " SET allow_experimental_object_type =1; CREATE TABLE database_123456789abcde.tab ( + uint8 UInt8, + uint16 UInt16, + uint32 UInt32, uint64 UInt64, - int32 Nullable(Int32), + uint128 UInt128, + uint256 UInt256, + int8 Int8, + int16 Int16, + int32 Int32, + int64 Int64, + int128 Int128, + int256 Int256, + nint32 Nullable(Int32), float32 Float32, float64 Float64, decimal_value Decimal(10, 2), @@ -105,6 +126,8 @@ TEMP_FILE=$(mktemp) cat < $TEMP_FILE SHOW COLUMNS FROM tab; +SET output_format_mysql_types=1; +SHOW COLUMNS FROM tab; SHOW EXTENDED COLUMNS FROM tab; SHOW FULL COLUMNS FROM tab; SHOW COLUMNS FROM tab LIKE '%int%'; From 6d25e5a0d75325ddeee2be0d689da6ea4395fccc Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Tue, 6 Jun 2023 07:37:14 +0300 Subject: [PATCH 217/722] Substitute missing year in parseDateTimeBestEffortImpl() --- src/IO/parseDateTimeBestEffort.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index b370bee6f3f..f753e3c0b4e 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -578,12 +578,16 @@ ReturnType parseDateTimeBestEffortImpl( if (!year && !month && !day_of_month && !has_time) return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: neither Date nor Time was parsed successfully"); - if (!year) - year = 2000; - if (!month) - month = 1; if (!day_of_month) day_of_month = 1; + if (!month) + month = 1; + if (!year) + { + time_t now = time(nullptr); + UInt16 curr_year = local_time_zone.toYear(now); + year = now < local_time_zone.makeDateTime(year, month, day_of_month, hour, minute, second) ? 
curr_year - 1 : curr_year; + } auto is_leap_year = (year % 400 == 0) || (year % 100 != 0 && year % 4 == 0); From 3f77b778e368c1c5e3cd3012d54107193094502b Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Tue, 6 Jun 2023 11:42:45 +0300 Subject: [PATCH 218/722] Fix runtime bug --- src/IO/parseDateTimeBestEffort.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index f753e3c0b4e..6bdba251c36 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -586,7 +586,7 @@ ReturnType parseDateTimeBestEffortImpl( { time_t now = time(nullptr); UInt16 curr_year = local_time_zone.toYear(now); - year = now < local_time_zone.makeDateTime(year, month, day_of_month, hour, minute, second) ? curr_year - 1 : curr_year; + year = now < local_time_zone.makeDateTime(curr_year, month, day_of_month, hour, minute, second) ? curr_year - 1 : curr_year; } auto is_leap_year = (year % 400 == 0) || (year % 100 != 0 && year % 4 == 0); From 5b3cece42eb540f5b05d04faa7fe3de7cd1ccb86 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Tue, 6 Jun 2023 12:23:00 +0200 Subject: [PATCH 219/722] Fix shellcheck --- docker/test/stateless/run.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index df650b37cc6..c0acb0291a4 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -15,6 +15,7 @@ dpkg -i package_folder/clickhouse-client_*.deb ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test +# shellcheck disable=SC1091 source /usr/share/clickhouse-test/ci/attach_gdb.lib # install test configs From 8d044e8880a588a7854a6f6e2513226a6335ff48 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 6 Jun 2023 10:57:13 +0000 Subject: [PATCH 220/722] Increase max array size in group bitmap --- src/AggregateFunctions/AggregateFunctionGroupBitmapData.h | 2 +- tests/queries/0_stateless/02782_bitmap_overflow.reference | 0 tests/queries/0_stateless/02782_bitmap_overflow.sql | 2 ++ 3 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/02782_bitmap_overflow.reference create mode 100644 tests/queries/0_stateless/02782_bitmap_overflow.sql diff --git a/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h index d99f0bf16ee..7ea1ebe7749 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h +++ b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h @@ -122,7 +122,7 @@ public: size_t size; readVarUInt(size, in); - static constexpr size_t max_size = 1_GiB; + static constexpr size_t max_size = 100_GiB; if (size == 0) throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect size (0) in groupBitmap."); diff --git a/tests/queries/0_stateless/02782_bitmap_overflow.reference b/tests/queries/0_stateless/02782_bitmap_overflow.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02782_bitmap_overflow.sql b/tests/queries/0_stateless/02782_bitmap_overflow.sql new file mode 100644 index 00000000000..656a3e7c144 --- /dev/null +++ b/tests/queries/0_stateless/02782_bitmap_overflow.sql @@ -0,0 +1,2 @@ +select unhex('0181808080908380808000')::AggregateFunction(groupBitmap, UInt64); -- {serverError TOO_LARGE_ARRAY_SIZE} + From cf886d8ced474a1a80a985587af251a39701d1b2 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Tue, 6 Jun 2023 11:08:21 
+0000 Subject: [PATCH 221/722] Remove IsConvertible() --- src/Functions/DateTimeTransforms.h | 46 +++++++++++++++-------------- src/Functions/FunctionsConversion.h | 41 ------------------------- 2 files changed, 24 insertions(+), 63 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index d154dd9ffa2..823272e0324 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -1444,31 +1444,33 @@ struct Transformer for (size_t i = 0; i < size; ++i) { - constexpr bool transformHasIsConvertible = requires(const Transform& t) + if constexpr (std::is_same_v + || std::is_same_v) { - t.IsConvertible(vec_from[i], time_zone); - }; + bool check_range_result = true; - if constexpr (transformHasIsConvertible) - { - if constexpr (std::is_same_v - || std::is_same_v) + if constexpr (std::is_same_v) { - bool checked = transform.IsConvertible(vec_from[i], time_zone); - if (!checked) + check_range_result = vec_from[i] >= 0 && vec_from[i] <= DATE_LUT_MAX_DAY_NUM; + } + else if constexpr (std::is_same_v) + { + check_range_result = vec_from[i] >= 0 && vec_from[i] <= 0xFFFFFFFFL; + } + + if (!check_range_result) + { + if (std::is_same_v) { - if (std::is_same_v) - { - vec_to[i] = 0; - if (vec_null_map_to) - (*vec_null_map_to)[i] = true; - continue; - } - else - { - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value in column {} cannot be safely converted into type {}", - TypeName, TypeName); - } + vec_to[i] = 0; + if (vec_null_map_to) + (*vec_null_map_to)[i] = true; + continue; + } + else + { + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value in column {} cannot be safely converted into type {}", + TypeName, TypeName); } } } @@ -1488,7 +1490,7 @@ struct DateTimeTransformImpl static ColumnPtr execute( const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/, const Transform & transform = {}) { - using Op = Transformer; + using Op = Transformer; const ColumnPtr source_col = arguments[0].column; if (const auto * sources = checkAndGetColumn(source_col.get())) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 6aa5843ff65..3a8ddcc9094 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -365,22 +365,11 @@ template struct ConvertImpl -static bool CheckDateRange(const FromType & value) -{ - return value >= 0 && value <= DATE_LUT_MAX_DAY_NUM; -} - template struct ToDateTransform32Or64 { static constexpr auto name = "toDate"; - static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) - { - return CheckDateRange(from); - } - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { // since converting to Date, no need in values outside of default LUT range. 
@@ -395,11 +384,6 @@ struct ToDateTransform32Or64Signed { static constexpr auto name = "toDate"; - static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) - { - return CheckDateRange(from); - } - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { // TODO: decide narrow or extended range based on FromType @@ -417,11 +401,6 @@ struct ToDateTransform8Or16Signed { static constexpr auto name = "toDate"; - static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) - { - return CheckDateRange(from); - } - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { if (from < 0) @@ -518,22 +497,12 @@ template struct ConvertImpl struct ConvertImpl : DateTimeTransformImpl> {}; -template -static bool CheckDateTimeRange(const FromType & value) -{ - return value >= 0 && value <= 0xFFFFFFFFL; -} template struct ToDateTimeTransform64 { static constexpr auto name = "toDateTime"; - static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) - { - return CheckDateTimeRange(from); - } - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { return static_cast(std::min(time_t(from), time_t(0xFFFFFFFF))); @@ -545,11 +514,6 @@ struct ToDateTimeTransformSigned { static constexpr auto name = "toDateTime"; - static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) - { - return CheckDateTimeRange(from); - } - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { if (from < 0) @@ -563,11 +527,6 @@ struct ToDateTimeTransform64Signed { static constexpr auto name = "toDateTime"; - static NO_SANITIZE_UNDEFINED bool IsConvertible(const FromType & from, const DateLUTImpl &) - { - return CheckDateTimeRange(from); - } - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { if (from < 0) From 6b41a02f7fbdd46ee2774651fea3518d26844407 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Tue, 6 Jun 2023 13:14:52 +0200 Subject: [PATCH 222/722] resolve tests --- src/Client/Suggest.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index 4ffa828dd40..e249aa1bb04 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -101,7 +101,9 @@ static String getLoadSuggestionQuery(Int32 suggestion_limit, bool basic_suggesti add_column("name", "columns", true, suggestion_limit); } - query = "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM (" + query + ") WHERE notEmpty(res)"; + /// FIXME: Forbid this query using new analyzer because of bug https://github.com/ClickHouse/ClickHouse/pull/50430#issuecomment-1576860893 + /// We should remove this restriction after resolving this bug. 
+ query = "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM (" + query + ") WHERE notEmpty(res) SETTINGS allow_experimental_analyzer=0"; return query; } From f096bfcad29b95ea02f720f0becd5931c5b7eb37 Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Tue, 6 Jun 2023 15:47:34 +0300 Subject: [PATCH 223/722] Adjust 00569_parse_date_time_best_effort and 01543_parse_datetime_besteffort_or_null_empty_string to the new feature --- .../00569_parse_date_time_best_effort.reference | 8 ++++---- .../0_stateless/00569_parse_date_time_best_effort.sql | 8 ++++---- ...543_parse_datetime_besteffort_or_null_empty_string.sql | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference b/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference index ad7c17b7717..0729a7628f2 100644 --- a/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference +++ b/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference @@ -2,8 +2,8 @@ 0 ᴺᵁᴸᴸ 1970-01-01 00:00:00 0000 ᴺᵁᴸᴸ 1970-01-01 00:00:00 - 00:00:00 2000-01-01 00:00:00 2000-01-01 00:00:00 - 01:00:00 2000-01-01 01:00:00 2000-01-01 01:00:00 + 2000-01-01 00:00:00 2000-01-01 00:00:00 2000-01-01 00:00:00 + 2000-01-01 01:00:00 2000-01-01 01:00:00 2000-01-01 01:00:00 02/01/17 010203 MSK 2017-01-01 22:02:03 2017-01-01 22:02:03 02/01/17 010203 MSK+0100 2017-01-01 21:02:03 2017-01-01 21:02:03 02/01/17 010203 UTC+0300 2017-01-01 22:02:03 2017-01-01 22:02:03 @@ -11,13 +11,13 @@ 02/01/1970 010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03 02/01/70 010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03 11 Feb 2018 06:40:50 +0300 2018-02-11 03:40:50 2018-02-11 03:40:50 - 17 Apr 2 1:2:3 2000-04-17 01:02:03 2000-04-17 01:02:03 + 17 Apr 2000 2 1:2:3 2000-04-17 01:02:03 2000-04-17 01:02:03 19700102 01:00:00 1970-01-02 01:00:00 1970-01-02 01:00:00 1970010201:00:00 ᴺᵁᴸᴸ 1970-01-01 00:00:00 19700102010203 1970-01-02 01:02:03 1970-01-02 01:02:03 19700102010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03 1970/01/02 010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03 - 20 2000-01-20 00:00:00 2000-01-20 00:00:00 + 20 2000 2000-01-20 00:00:00 2000-01-20 00:00:00 201 ᴺᵁᴸᴸ 1970-01-01 00:00:00 20160101 2016-01-01 00:00:00 2016-01-01 00:00:00 2016-01-01 2016-01-01 00:00:00 2016-01-01 00:00:00 diff --git a/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql b/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql index 5f71efa1485..511addb4e4d 100644 --- a/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql +++ b/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql @@ -7,8 +7,8 @@ FROM SELECT arrayJoin([ '0', '0000', -'00:00:00', -'01:00:00', +'2000-01-01 00:00:00', +'2000-01-01 01:00:00', '02/01/17 010203 MSK', '02/01/17 010203 MSK+0100', '02/01/17 010203 UTC+0300', @@ -16,13 +16,13 @@ FROM '02/01/1970 010203Z', '02/01/70 010203Z', '11 Feb 2018 06:40:50 +0300', -'17 Apr 2 1:2:3', +'17 Apr 2000 2 1:2:3', '19700102 01:00:00', '1970010201:00:00', '19700102010203', '19700102010203Z', '1970/01/02 010203Z', -'20', +'20 2000', '201', '20160101', '2016-01-01', diff --git a/tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.sql b/tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.sql index ad14c4ede06..7098028963d 100644 --- a/tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.sql +++ 
b/tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.sql @@ -4,7 +4,7 @@ SELECT parseDateTimeBestEffortOrNull('2020-01-01 11:01:01 am'); SELECT parseDateTimeBestEffortOrNull('2020-01-01 11:01:01 pm'); SELECT parseDateTimeBestEffortOrNull('2020-01-01 12:01:01 am'); SELECT parseDateTimeBestEffortOrNull('2020-01-01 12:01:01 pm'); -SELECT parseDateTimeBestEffortOrNull('01:01:01'); +SELECT parseDateTimeBestEffortOrNull('2000-01-01 01:01:01'); SELECT parseDateTimeBestEffortOrNull('20100'); SELECT parseDateTimeBestEffortOrNull('0100:0100:0000'); SELECT parseDateTimeBestEffortOrNull('x'); From 5ffbe2d9d4de6e47268be38ac84e5de45faded49 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Tue, 6 Jun 2023 10:14:31 -0300 Subject: [PATCH 224/722] Update docs/en/sql-reference/data-types/ipv6.md Co-authored-by: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> --- docs/en/sql-reference/data-types/ipv6.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/data-types/ipv6.md b/docs/en/sql-reference/data-types/ipv6.md index 284a1f80854..97959308b58 100644 --- a/docs/en/sql-reference/data-types/ipv6.md +++ b/docs/en/sql-reference/data-types/ipv6.md @@ -6,7 +6,7 @@ sidebar_label: IPv6 ## IPv6 -IPv6 addresses. Stored in 16 bytes as UInt128. +IPv6 addresses. Stored in 16 bytes as UInt128 big-endian. ### Basic Usage From df50833b709ea862ea49f24763c74cf898b10907 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 6 Jun 2023 17:33:05 +0000 Subject: [PATCH 225/722] Allow to skip trailing empty lines in CSV/TSV/CustomeSeparated formats --- docs/en/interfaces/formats.md | 6 +++++- .../en/operations/settings/settings-formats.md | 18 ++++++++++++++++++ src/Core/Settings.h | 3 +++ src/Formats/FormatFactory.cpp | 3 +++ src/Formats/FormatSettings.h | 3 +++ .../Formats/Impl/CSVRowInputFormat.cpp | 14 ++++++++++++++ .../Formats/Impl/CSVRowInputFormat.h | 1 + .../Impl/CustomSeparatedRowInputFormat.cpp | 4 ++++ .../Impl/TabSeparatedRowInputFormat.cpp | 14 ++++++++++++++ .../Formats/Impl/TabSeparatedRowInputFormat.h | 2 ++ ..._custom_skip_trailing_empty_lines.reference | 3 +++ ...sv_csv_custom_skip_trailing_empty_lines.sql | 12 ++++++++++++ 12 files changed, 82 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/02771_tsv_csv_custom_skip_trailing_empty_lines.reference create mode 100644 tests/queries/0_stateless/02771_tsv_csv_custom_skip_trailing_empty_lines.sql diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 2ab9e8caec4..70479b8ac71 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -193,6 +193,7 @@ SELECT * FROM nestedt FORMAT TSV - [output_format_tsv_crlf_end_of_line](/docs/en/operations/settings/settings-formats.md/#output_format_tsv_crlf_end_of_line) - if it is set true, end of line in TSV output format will be `\r\n` instead of `\n`. Default value - `false`. - [input_format_tsv_skip_first_lines](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_skip_first_lines) - skip specified number of lines at the beginning of data. Default value - `0`. - [input_format_tsv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_detect_header) - automatically detect header with names and types in TSV format. Default value - `true`. +- [input_format_tsv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. 
Default value - `false`. ## TabSeparatedRaw {#tabseparatedraw} @@ -467,6 +468,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe - [output_format_csv_crlf_end_of_line](/docs/en/operations/settings/settings-formats.md/#output_format_csv_crlf_end_of_line) - if it is set to true, end of line in CSV output format will be `\r\n` instead of `\n`. Default value - `false`. - [input_format_csv_skip_first_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_first_lines) - skip the specified number of lines at the beginning of data. Default value - `0`. - [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`. +- [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`. ## CSVWithNames {#csvwithnames} @@ -494,7 +496,9 @@ the types from input data will be compared with the types of the corresponding c Similar to [Template](#format-template), but it prints or reads all names and types of columns and uses escaping rule from [format_custom_escaping_rule](/docs/en/operations/settings/settings-formats.md/#format_custom_escaping_rule) setting and delimiters from [format_custom_field_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_field_delimiter), [format_custom_row_before_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_before_delimiter), [format_custom_row_after_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_after_delimiter), [format_custom_row_between_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_between_delimiter), [format_custom_result_before_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_result_before_delimiter) and [format_custom_result_after_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_result_after_delimiter) settings, not from format strings. -If setting [input_format_custom_detect_header](/docs/en/operations/settings/settings.md/#input_format_custom_detect_header) is enabled, ClickHouse will automatically detect header with names and types if any. +If setting [input_format_custom_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_custom_detect_header) is enabled, ClickHouse will automatically detect header with names and types if any. + +If setting [input_format_tsv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_custom_detect_header) is enabled, trailing empty lines at the end of file will be skipped. There is also `CustomSeparatedIgnoreSpaces` format, which is similar to [TemplateIgnoreSpaces](#templateignorespaces). diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index 65038d3a256..a1a75446c37 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -728,6 +728,12 @@ My NULL My NULL ``` +### input_format_tsv_skip_trailing_empty_lines {input_format_tsv_skip_trailing_empty_lines} + +When enabled, trailing empty lines at the end of TSV file will be skipped. + +Disabled by default. 
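
The reader-side change that implements this setting (shown further down in this patch for the CSV, TSV and CustomSeparated readers) peeks past any trailing `\n`/`\r` characters and accepts the input only if that reaches end of file; otherwise it rolls back and keeps parsing. Below is a minimal standalone sketch of that check — `PeekableBuffer` and its members are simplified stand-ins for ClickHouse's `PeekableReadBuffer`, not the real API.

```cpp
#include <cstddef>
#include <iostream>
#include <string_view>

// Simplified stand-in for ClickHouse's PeekableReadBuffer: a string view with a
// cursor that can be saved (checkpoint) and restored (rollback).
struct PeekableBuffer
{
    std::string_view data;
    size_t pos = 0;

    bool eof() const { return pos == data.size(); }

    bool checkChar(char c)
    {
        if (!eof() && data[pos] == c)
        {
            ++pos;
            return true;
        }
        return false;
    }
};

/// Mirrors the idea of the new checkForSuffix(): consume any trailing '\n'/'\r';
/// if that reaches EOF the input is accepted, otherwise roll back and keep parsing.
bool isAtEndSkippingTrailingEmptyLines(PeekableBuffer & buf, bool skip_trailing_empty_lines)
{
    if (!skip_trailing_empty_lines)
        return buf.eof();

    const size_t checkpoint = buf.pos;
    while (buf.checkChar('\n') || buf.checkChar('\r'))
        ;

    if (buf.eof())
        return true;

    buf.pos = checkpoint; /// rollback: the newline belongs to a following row
    return false;
}

int main()
{
    PeekableBuffer buf{"1\t2\n\n"};
    buf.pos = 4; /// pretend the row "1\t2\n" has already been read

    std::cout << isAtEndSkippingTrailingEmptyLines(buf, true) << '\n';  // 1: trailing empty line skipped

    buf.pos = 4;
    std::cout << isAtEndSkippingTrailingEmptyLines(buf, false) << '\n'; // 0: parser expects another row
}
```

With the setting disabled, the check degrades to a plain EOF test, which is why trailing empty lines then fail with `CANNOT_PARSE_INPUT_ASSERTION_FAILED`, as exercised by the new test added at the end of this patch.
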
+ ## CSV format settings {#csv-format-settings} ### format_csv_delimiter {#format_csv_delimiter} @@ -882,6 +888,12 @@ My NULL My NULL ``` +### input_format_csv_skip_trailing_empty_lines {input_format_csv_skip_trailing_empty_lines} + +When enabled, trailing empty lines at the end of CSV file will be skipped. + +Disabled by default. + ## Values format settings {#values-format-settings} ### input_format_values_interpret_expressions {#input_format_values_interpret_expressions} @@ -1443,6 +1455,12 @@ Sets the character that is interpreted as a suffix after the result set for [Cus Default value: `''`. +### input_format_custom_skip_trailing_empty_lines {input_format_custom_skip_trailing_empty_lines} + +When enabled, trailing empty lines at the end of file in CustomSeparated format will be skipped. + +Disabled by default. + ## Regexp format settings {#regexp-format-settings} ### format_regexp_escaping_rule {#format_regexp_escaping_rule} diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 67c92a0be8b..f688811028e 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -873,6 +873,9 @@ class IColumn; M(Bool, output_format_protobuf_nullables_with_google_wrappers, false, "When serializing Nullable columns with Google wrappers, serialize default values as empty wrappers. If turned off, default and null values are not serialized", 0) \ M(UInt64, input_format_csv_skip_first_lines, 0, "Skip specified number of lines at the beginning of data in CSV format", 0) \ M(UInt64, input_format_tsv_skip_first_lines, 0, "Skip specified number of lines at the beginning of data in TSV format", 0) \ + M(Bool, input_format_csv_skip_trailing_empty_lines, false, "Skip trailing empty lines in CSV format", 0) \ + M(Bool, input_format_tsv_skip_trailing_empty_lines, false, "Skip trailing empty lines in TSV format", 0) \ + M(Bool, input_format_custom_skip_trailing_empty_lines, false, "Skip trailing empty lines in CustomSeparated format", 0) \ \ M(Bool, input_format_native_allow_types_conversion, true, "Allow data types conversion in Native input format", 0) \ \ diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 586e1bb7251..021ccd35602 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -69,6 +69,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.use_best_effort_in_schema_inference = settings.input_format_csv_use_best_effort_in_schema_inference; format_settings.csv.skip_first_lines = settings.input_format_csv_skip_first_lines; format_settings.csv.try_detect_header = settings.input_format_csv_detect_header; + format_settings.csv.skip_trailing_empty_lines = settings.input_format_csv_skip_trailing_empty_lines; format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter; format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter; format_settings.hive_text.map_keys_delimiter = settings.input_format_hive_text_map_keys_delimiter; @@ -80,6 +81,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.custom.row_before_delimiter = settings.format_custom_row_before_delimiter; format_settings.custom.row_between_delimiter = settings.format_custom_row_between_delimiter; format_settings.custom.try_detect_header = settings.input_format_custom_detect_header; + format_settings.custom.skip_trailing_empty_lines = settings.input_format_custom_skip_trailing_empty_lines; 
format_settings.date_time_input_format = settings.date_time_input_format; format_settings.date_time_output_format = settings.date_time_output_format; format_settings.input_format_ipv4_default_on_conversion_error = settings.input_format_ipv4_default_on_conversion_error; @@ -149,6 +151,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.tsv.use_best_effort_in_schema_inference = settings.input_format_tsv_use_best_effort_in_schema_inference; format_settings.tsv.skip_first_lines = settings.input_format_tsv_skip_first_lines; format_settings.tsv.try_detect_header = settings.input_format_tsv_detect_header; + format_settings.tsv.skip_trailing_empty_lines = settings.input_format_tsv_skip_trailing_empty_lines; format_settings.values.accurate_types_of_literals = settings.input_format_values_accurate_types_of_literals; format_settings.values.deduce_templates_of_expressions = settings.input_format_values_deduce_templates_of_expressions; format_settings.values.interpret_expressions = settings.input_format_values_interpret_expressions; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index e332bd749a1..9d6cd384b68 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -136,6 +136,7 @@ struct FormatSettings UInt64 skip_first_lines = 0; String custom_delimiter; bool try_detect_header = true; + bool skip_trailing_empty_lines = false; } csv; struct HiveText @@ -156,6 +157,7 @@ struct FormatSettings std::string field_delimiter; EscapingRule escaping_rule = EscapingRule::Escaped; bool try_detect_header = true; + bool skip_trailing_empty_lines = false; } custom; struct @@ -291,6 +293,7 @@ struct FormatSettings bool use_best_effort_in_schema_inference = true; UInt64 skip_first_lines = 0; bool try_detect_header = true; + bool skip_trailing_empty_lines = false; } tsv; struct diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index de955d81651..f01f20a0a3c 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -322,6 +322,20 @@ void CSVFormatReader::setReadBuffer(ReadBuffer & in_) FormatWithNamesAndTypesReader::setReadBuffer(*buf); } +bool CSVFormatReader::checkForSuffix() +{ + if (!format_settings.csv.skip_trailing_empty_lines) + return buf->eof(); + + PeekableReadBufferCheckpoint checkpoint(*buf); + while (checkChar('\n', *buf) || checkChar('\r', *buf)); + if (buf->eof()) + return true; + + buf->rollbackToCheckpoint(); + return false; +} + CSVSchemaReader::CSVSchemaReader(ReadBuffer & in_, bool with_names_, bool with_types_, const FormatSettings & format_settings_) : FormatWithNamesAndTypesSchemaReader( buf, diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.h b/src/Processors/Formats/Impl/CSVRowInputFormat.h index f51f674e4af..0c8099a216c 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.h +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.h @@ -75,6 +75,7 @@ public: std::vector readRow() { return readRowImpl(); } std::vector readRowForHeaderDetection() override { return readHeaderRow(); } + bool checkForSuffix() override; template std::vector readRowImpl(); diff --git a/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp index 1c2efe3a41d..1e67db79a2c 100644 --- a/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp @@ 
-283,6 +283,8 @@ bool CustomSeparatedFormatReader::checkForSuffixImpl(bool check_eof) /// Allow optional \n before eof. checkChar('\n', *buf); + if (format_settings.custom.skip_trailing_empty_lines) + while (checkChar('\n', *buf) || checkChar('\r', *buf)); return buf->eof(); } @@ -294,6 +296,8 @@ bool CustomSeparatedFormatReader::checkForSuffixImpl(bool check_eof) /// Allow optional \n before eof. checkChar('\n', *buf); + if (format_settings.custom.skip_trailing_empty_lines) + while (checkChar('\n', *buf) || checkChar('\r', *buf)); if (buf->eof()) return true; } diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index af5f1f90732..2239c8539e3 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ -286,6 +286,20 @@ void TabSeparatedFormatReader::setReadBuffer(ReadBuffer & in_) FormatWithNamesAndTypesReader::setReadBuffer(*buf); } +bool TabSeparatedFormatReader::checkForSuffix() +{ + if (!format_settings.tsv.skip_trailing_empty_lines) + return buf->eof(); + + PeekableReadBufferCheckpoint checkpoint(*buf); + while (checkChar('\n', *buf) || checkChar('\r', *buf)); + if (buf->eof()) + return true; + + buf->rollbackToCheckpoint(); + return false; +} + TabSeparatedSchemaReader::TabSeparatedSchemaReader( ReadBuffer & in_, bool with_names_, bool with_types_, bool is_raw_, const FormatSettings & format_settings_) : FormatWithNamesAndTypesSchemaReader( diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h index 0f4bff8d7d0..8df57675cf5 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h @@ -75,6 +75,8 @@ public: void setReadBuffer(ReadBuffer & in_) override; + bool checkForSuffix() override; + private: template std::vector readRowImpl(); diff --git a/tests/queries/0_stateless/02771_tsv_csv_custom_skip_trailing_empty_lines.reference b/tests/queries/0_stateless/02771_tsv_csv_custom_skip_trailing_empty_lines.reference new file mode 100644 index 00000000000..37e32ce62ee --- /dev/null +++ b/tests/queries/0_stateless/02771_tsv_csv_custom_skip_trailing_empty_lines.reference @@ -0,0 +1,3 @@ +1 2 +1 2 +1 2 diff --git a/tests/queries/0_stateless/02771_tsv_csv_custom_skip_trailing_empty_lines.sql b/tests/queries/0_stateless/02771_tsv_csv_custom_skip_trailing_empty_lines.sql new file mode 100644 index 00000000000..917a434cd58 --- /dev/null +++ b/tests/queries/0_stateless/02771_tsv_csv_custom_skip_trailing_empty_lines.sql @@ -0,0 +1,12 @@ +select * from format(TSV, 'x UInt32, y UInt32', '1\t2\n\n') settings input_format_tsv_skip_trailing_empty_lines=0; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} +select * from format(TSV, 'x UInt32, y UInt32', '1\t2\n\n') settings input_format_tsv_skip_trailing_empty_lines=1; +select * from format(TSV, 'x UInt32, y UInt32', '1\t2\n\n1\t2\n') settings input_format_tsv_skip_trailing_empty_lines=1; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} + +select * from format(CSV, 'x UInt32, y UInt32', '1,2\n\n') settings input_format_csv_skip_trailing_empty_lines=0; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} +select * from format(CSV, 'x UInt32, y UInt32', '1,2\n\n') settings input_format_csv_skip_trailing_empty_lines=1; +select * from format(CSV, 'x UInt32, y UInt32', '1,2\n\n1,2\n') settings input_format_csv_skip_trailing_empty_lines=1; -- 
{serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} + +select * from format(CustomSeparated, 'x UInt32, y UInt32', '1\t2\n\n\n') settings input_format_custom_skip_trailing_empty_lines=0; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} +select * from format(CustomSeparated, 'x UInt32, y UInt32', '1\t2\n\n\n') settings input_format_custom_skip_trailing_empty_lines=1; +select * from format(CustomSeparated, 'x UInt32, y UInt32', '1\t2\n\n\n1\t2\n\n\n') settings input_format_custom_skip_trailing_empty_lines=1; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} + From 23a30268369c3166965d34815fd963db33740a64 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Wed, 7 Jun 2023 03:16:29 +0000 Subject: [PATCH 226/722] Implemented connection string --- docs/en/interfaces/cli.md | 110 +++++++++ docs/ru/interfaces/cli.md | 110 +++++++++ programs/client/Client.cpp | 13 +- src/Client/ConnectionString.cpp | 219 ++++++++++++++++++ src/Client/ConnectionString.h | 22 ++ .../02784_connection_string.reference | 125 ++++++++++ .../0_stateless/02784_connection_string.sh | 156 +++++++++++++ 7 files changed, 753 insertions(+), 2 deletions(-) create mode 100644 src/Client/ConnectionString.cpp create mode 100644 src/Client/ConnectionString.h create mode 100644 tests/queries/0_stateless/02784_connection_string.reference create mode 100755 tests/queries/0_stateless/02784_connection_string.sh diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index f670d464006..5255657ddfd 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -158,6 +158,116 @@ $ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" -q "SELECT * FROM $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="number" --query "SELECT {col:Identifier} FROM {db:Identifier}.{tbl:Identifier} LIMIT 10" ``` +## Connection string {#connection_string} + +The connection string for clickhouse-client is presented in URI format: + +```text +clickhouse://[user_info@][hosts_and_ports][/dbname][?query_parameters] +``` + +where user_info is: ```user[:password]``` +and hosts_and_ports is a list of values: ```[host][:port],[host][:port]``` Port is not mandatory. +and query_parameters is a list of parameter[=value]: ```param_name[=value]¶m_name[=value]...``` value may not be required for some of parameters. + +Allowed query_parameters keys: + +- **secure** or shorthanded **s** - no value. If specified, client will connect to the server over a secure connection (TLS). See **secure** in [command-line-options](#command-line-options) + +These examples illustrate valid connection strings for clickhouse-client: + +```text +clickhouse: +clickhouse://localhost +clickhouse://localhost:9000 +clickhouse://localhost/default +clickhouse://default@localhost +clickhouse://user:password@localhost +clickhouse://user_name@localhost/some_database?secure +clickhouse://host1:9000,host2:5000/some_database +``` + +The host component can either be an IP address or a host name. Put an IPv6 address in square brackets to specify it: + +```text +clickhouse://[2001:db8::1234] +``` + +If user or/and password are not specified, default values will be used. +If host is not specified, the default host will be used (localhost). +If port is not specified, the default port will be used (9000). +If database is not specified, the default database will be used. + +User, password, and database can be specified in the connection string either in --user command line option. 
+ +The connection string must be specified in the first argument of clickhouse-client. The connection string can be combined with other [command-line-options](#command-line-options) except **--host(h)** and **--port**. + +### Multiple hosts {#connection_string_multiple_hosts} + +URI allows multiple hosts to be connected to, and the client will try to connect to those hosts using the order from URI and command line options. The hosts and ports in the URI accept comma-separated lists of values. + +If more than one host is supplied, or if a single host name is translated to more than one address, each host and address will be attempted one at a time until one is successful. The remaining hosts after successful connection in the list are not tried. + +### Percent encoding {#connection_string_uri_percent_encoding} + +Hosts, user name, password, database and query parameters should be [Percent-Encoded](https://en.wikipedia.org/wiki/URL_encoding) if values contain URI invalid characters. + +### Examples {#connection_string_examples} + +Connect to localhost using port 9000 and executes the query "SELECT 1". + +``` bash +clickhouse-client "clickhouse://localhost:9000" --query "SELECT 1" +``` + +Connect to localhost using port 9000 in interactive, multiline mode. + +``` bash +clickhouse-client "clickhouse://localhost:9000" -m +``` + +Connect to localhost using port 9000 in interactive mode with the user specified in --user option. + +``` bash +clickhouse-client "clickhouse://localhost:9000" --user default +``` + +Connect to localhost using port 9000 in interactive mode with database 'my_database' specified in command line option + +``` bash +clickhouse-client "clickhouse://localhost:9000" --database my_database +``` + +Connect to localhost using port 9000 in interactive mode with the database specified in the connection string. + +``` bash +clickhouse-client "clickhouse://localhost:9000/my_database" +``` + +Connect to localhost using port 9000 in interactive mode with a database specified in the connection string and a secure connection using shorthanded 's' URI parameter. + +```bash +clickhouse-client "clickhouse://localhost/my_database?s" +``` + +Connect to default host using the default port, default user, and default database. + +``` bash +clickhouse-client "clickhouse:" +``` + +Connect to the default host using the default port, using user user_name and no password. + +``` bash +clickhouse-client "clickhouse://user_name@" +``` + +Connect to localhost using email user name. Symbol '@' is percent encoded to '%40'. + +``` bash +clickhouse-client "clickhouse://some_user%40some_mail.com@localhost:9000" +``` + ## Configuring {#interfaces_cli_configuration} You can pass parameters to `clickhouse-client` (all parameters have a default value) using: diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index 4c22eae0207..06642800cc6 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -110,6 +110,116 @@ $ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" -q "SELECT * FROM $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="number" --query "SELECT {col:Identifier} FROM {db:Identifier}.{tbl:Identifier} LIMIT 10" ``` +## Строка подключения {#connection_string} + +Строка подключения для clickhouse-client представлена в формате URI: + +```text +clickhouse://[user_info@][hosts_and_ports][/dbname][?query_parameters] +``` + +где user_info - это: ```user[:password]``` +hosts_and_ports - это список значений: ```[host][:port],[host][:port]```. 
Port может быть не задан. +query_parameters - это список пар ключ[=значение]: ```param_name[=value]¶m_name[=value]...```. Значение может быть пустым + +Допустимые ключи query_parameters: + +- **secure** или сокращенно **s** - без значение. Если параметр указан, то соединение с сервером будет осуществляться по защищенному каналу (TLS). См. **secure** в [command-line-options](#command-line-options). + +Эти примеры иллюстрируют допустимые строки подключения для clickhouse-client: + +```text +clickhouse: +clickhouse://localhost +clickhouse://localhost:9000 +clickhouse://localhost/default +clickhouse://default@localhost +clickhouse://user:password@localhost +clickhouse://имя_пользователя@localhost/some_database?secure +clickhouse://host1:9000,host2:5000/some_database +``` + +Параметр host может быть либо IP-адресом, либо именем хоста. Для указания IPv6-адреса поместите его в квадратные скобки: + +```text +clickhouse://[2001:db8::1234] +``` + +Если пользователь или/и пароль не указаны, будут использоваться значения по умолчанию. +Если host не указан, будет использован хост по умолчанию (localhost). +Если port не указан, будет использоваться порт по умолчанию (9000). +Если база данных не указана, будет использоваться база данных по умолчанию (default). + +Пользователь, пароль и база данных могут быть указаны в строке подключения либо в опциях командной строки --user, --password, --database. + +Строка подключения должна быть указана в первом аргументе clickhouse-client. Строка подключения может комбинироваться с другими [параметрами командной строки] (#command-line-options) кроме **--host(h)** и **--port**. + +### Несколько хостов {#connection_string_multiple_hosts} + +URI позволяет подключаться к нескольким хостам, и клиент будет пытаться подключиться к этим хостам, используя порядок из URI и опций командной строки. Хосты и порты в URI принимают списки значений, разделенные запятыми. + +Если указано более одного хоста или если одно имя хоста транслируется в несколько адресов, Клиент будет будет пытаться подключится к каждому хосту и адресу в порядке в котором они встречаются в URI И опциях клиента, пока не будет установлено соединение. Соединение разрывается, если соединение установлено и аутентификация прошла успешно, остальные хосты в списке игнорируются. + +### Кодирование URI {#connection_string_uri_percent_encoding} + +Хосты, имя пользователя, пароль, имя базы данных, и параметры запроса должны быть [закодированы](https://ru.wikipedia.org/wiki/URL#%D0%9A%D0%BE%D0%B4%D0%B8%D1%80%D0%BE%D0%B2%D0%B0%D0%BD%D0%B8%D0%B5_URL), если значения содержат невалидные символы URI. + +### Примеры {#connection_string_examples} + +Подключиться к localhost через порт 9000 и выполнить запрос "SELECT 1" + +``` bash +clickhouse-client "clickhouse://localhost:9000" --query "SELECT 1" +``` + +Подключиться к localhost через порт 9000 в интерактивном, многострочном режиме. + +``` bash +clickhouse-client "clickhouse://localhost:9000" -m +``` + +Подключиться к localhost через порт 9000 в интерактивном режиме с пользователем default, указанным в опции --user. + +``` bash +clickhouse-client "clickhouse://localhost:9000" --user default +``` + +Подключиться к localhost, используя порт 9000 в интерактивном режиме с базой данных 'my_database', указанной в опции командной строки. + +``` bash +clickhouse-client "clickhouse://localhost:9000" --database my_database +``` + +Подключиться к localhost через порт 9000 в интерактивном режиме с базой данных my_database, указанной в строке подключения. 
+ +``` bash +clickhouse-client "clickhouse://localhost:9000/my_database" +``` + +Подключиться к localhost через порт 9000 в интерактивном режиме с базой данных, указанной в строке подключения, и безопасным соединением с использованием сокращенного параметра URI 's'. + +``` bash +clickhouse-client "clickhouse://localhost/my_database?s" +``` + +Подключиться к хосту по умолчанию с использованием порта по умолчанию, пользователя по умолчанию, и базы данных по умолчанию. + +``` bash +clickhouse-client "clickhouse:" +``` + +Подключиться к хосту по умолчанию через порт по умолчанию, используя имя пользователя user_name без пароля. + +``` bash +clickhouse-client "clickhouse://user_name@" +``` + +Подключиться к localhost, используя электронную почту, как имя пользователя. Символ '@' закодирован как '%40'. + +``` bash +clickhouse-client "clickhouse://some_user%40some_mail.com@localhost:9000" +``` + ## Конфигурирование {#interfaces_cli_configuration} В `clickhouse-client` можно передавать различные параметры (все параметры имеют значения по умолчанию) с помощью: diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 32a07284d26..e513314387f 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -5,13 +5,13 @@ #include #include #include -#include #include #include #include #include #include #include "Client.h" +#include "Client/ConnectionString.h" #include "Core/Protocol.h" #include "Parsers/formatAST.h" @@ -1248,6 +1248,9 @@ void Client::readArguments( std::vector & external_tables_arguments, std::vector & hosts_and_ports_arguments) { + bool has_connection_string = argc >= 2 && tryParseConnectionString(std::string_view(argv[1]), common_arguments, hosts_and_ports_arguments); + int start_argument_index = has_connection_string ? 
2 : 1; + /** We allow different groups of arguments: * - common arguments; * - arguments for any number of external tables each in form "--external args...", @@ -1260,7 +1263,7 @@ void Client::readArguments( std::string prev_host_arg; std::string prev_port_arg; - for (int arg_num = 1; arg_num < argc; ++arg_num) + for (int arg_num = start_argument_index; arg_num < argc; ++arg_num) { std::string_view arg = argv[arg_num]; @@ -1322,6 +1325,9 @@ void Client::readArguments( } else if (arg.starts_with("--host") || arg.starts_with("-h")) { + if (has_connection_string) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Mixing connection string and --host/--port client arguments is prohibited"); + std::string host_arg; /// --host host if (arg == "--host" || arg == "-h") @@ -1353,6 +1359,9 @@ void Client::readArguments( } else if (arg.starts_with("--port")) { + if (has_connection_string) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Mixing connection string and --host/--port client arguments is prohibited"); + auto port_arg = String{arg}; /// --port port if (arg == "--port") diff --git a/src/Client/ConnectionString.cpp b/src/Client/ConnectionString.cpp new file mode 100644 index 00000000000..a8b87726a65 --- /dev/null +++ b/src/Client/ConnectionString.cpp @@ -0,0 +1,219 @@ +#include "ConnectionString.h" + +#include +#include +#include + +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +} + +namespace +{ + +using namespace std::string_literals; +using namespace std::literals::string_view_literals; + +constexpr auto CONNECTION_URI_SCHEME = "clickhouse:"sv; + +void uriDecode(std::string & uri_encoded_string, bool plus_as_space) +{ + std::string temp; + Poco::URI::decode(uri_encoded_string, temp, plus_as_space); + std::swap(temp, uri_encoded_string); +} + +void getHostAndPort(const Poco::URI & uri, std::vector> & hosts_and_ports_arguments) +{ + auto host = uri.getHost(); + std::vector host_and_port; + if (!host.empty()) + { + uriDecode(host, false); + host_and_port.push_back("--host="s + host); + } + + // Port can be written without host (":9000"). Empty host name equals to default host. + auto port = uri.getPort(); + if (port != 0) + host_and_port.push_back("--port="s + std::to_string(port)); + + if (!host_and_port.empty()) + hosts_and_ports_arguments.push_back(std::move(host_and_port)); +} + +void getHostAndPort( + Poco::URI & uri, + std::vector> & hosts_and_ports_arguments, + const char * host_begin, + const char * host_end, + const char * right_part_start, + const char * connection_string_end) +{ + // User info does not matter in sub URI + std::string uri_string = {CONNECTION_URI_SCHEME.begin(), CONNECTION_URI_SCHEME.end()}; + if (host_begin != nullptr && host_begin != host_end) + { + uri_string.append("//"); + uri_string.append(host_begin, host_end); + } + + // Right part from string includes '/database?[params]' + uri_string.append(right_part_start, connection_string_end); + try + { + uri = Poco::URI(uri_string); + } + catch (const Poco::URISyntaxException & invalid_uri_exception) + { + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, + "Invalid connection string syntax {}: {}", uri_string, invalid_uri_exception.what()); + } + + getHostAndPort(uri, hosts_and_ports_arguments); +} + +std::string makeArgument(const std::string & connection_string_parameter_name) +{ + return (connection_string_parameter_name.size() == 1 ? 
"-"s : "--"s) + connection_string_parameter_name; +} + +} + +namespace DB +{ + +bool tryParseConnectionString( + std::string_view connection_string, + std::vector & common_arguments, + std::vector> & hosts_and_ports_arguments) +{ + if (!connection_string.starts_with(CONNECTION_URI_SCHEME)) + return false; + + if (connection_string.size() == CONNECTION_URI_SCHEME.size()) + return true; + + auto offset = CONNECTION_URI_SCHEME.size(); + if ((connection_string.substr(offset).starts_with("//"))) + offset += 2; + + auto hosts_end_pos = std::string_view::npos; + auto hosts_or_user_info_end_pos = connection_string.find_first_of("?/@", offset); + + auto has_user_info = hosts_or_user_info_end_pos != std::string_view::npos && connection_string[hosts_or_user_info_end_pos] == '@'; + if (has_user_info) + { + // Move offset right after user info + offset = hosts_or_user_info_end_pos + 1; + hosts_end_pos = connection_string.find_first_of("?/@", offset); + // Several '@' symbols in connection string is prohibited. + // If user name contains '@' then it should be percent-encoded. + // several users: 'usr1@host1,@usr2@host2' is invalid. + if (hosts_end_pos != std::string_view::npos && connection_string[hosts_end_pos] == '@') + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Symbols '@' in URI in password or user name should be percent-encoded. Individual user names for different hosts also prohibited. {}", + connection_string); + } + } + else + hosts_end_pos = hosts_or_user_info_end_pos; + + auto hosts_end = hosts_end_pos != std::string_view::npos ? connection_string.begin() + hosts_end_pos + : connection_string.end(); + + try + { + // Poco::URI doesn't support several hosts in URI. + // Split string clickhouse:[user_info]host1:port1, ... , hostN:portN[database]?[query_parameters] + // into multiple string for each host: + // clickhouse:[user_info]host1:port1[database]?[query_parameters] + // ... + // clickhouse:[user_info]hostN:portN[database]?[query_parameters] + Poco::URI uri; + auto last_host_begin = connection_string.begin() + offset; + for (auto it = last_host_begin; it != hosts_end; ++it) + { + if (*it == ',') + { + getHostAndPort(uri, hosts_and_ports_arguments, last_host_begin, it, hosts_end, connection_string.end()); + last_host_begin = it + 1; + } + } + + if (uri.empty()) + { + // URI has no host specified + uri = std::string{connection_string.begin(), connection_string.end()}; + getHostAndPort(uri, hosts_and_ports_arguments); + } + else + getHostAndPort(uri, hosts_and_ports_arguments, last_host_begin, hosts_end, hosts_end, connection_string.end()); + + Poco::URI::QueryParameters params = uri.getQueryParameters(); + for (const auto & param : params) + { + if (param.first == "secure" || param.first == "s") + { + if (!param.second.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "secure URI argument does not require value"); + + common_arguments.push_back(makeArgument(param.first)); + } + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, "URI argument {} is unknown", param.first); + } + + auto user_info = uri.getUserInfo(); + if (!user_info.empty()) + { + // Poco::URI doesn't decode user name/password by default. 
+ // But ClickHouse allows to have users with email user name like: 'john@some_mail.com' + // john@some_mail.com should be percent-encoded: 'john%40some_mail.com' + uriDecode(user_info, true); + std::string::size_type pos = user_info.find(':'); + if (pos != std::string::npos) + { + common_arguments.push_back("--user"); + common_arguments.push_back(user_info.substr(0, pos)); + + ++pos; // Skip ':' + common_arguments.push_back("--password"); + common_arguments.push_back(user_info.substr(pos)); + } + else + { + common_arguments.push_back("--user"); + common_arguments.push_back(user_info); + } + } + + const auto & database_name = uri.getPath(); + size_t start_symbol = database_name.size() > 0u && database_name[0] == '/' ? 1u : 0u; + if (database_name.size() > start_symbol) + { + common_arguments.push_back("--database"); + common_arguments.push_back(start_symbol == 0u ? database_name : database_name.substr(start_symbol)); + } + } + catch (const Poco::URISyntaxException & invalid_uri_exception) + { + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, + "Invalid connection string {}: {}", connection_string, invalid_uri_exception.what()); + } + + return true; +} + +} diff --git a/src/Client/ConnectionString.h b/src/Client/ConnectionString.h new file mode 100644 index 00000000000..aafb1139b00 --- /dev/null +++ b/src/Client/ConnectionString.h @@ -0,0 +1,22 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ +/** Tries to parse ClickHouse connection string. + * if @connection_string starts with 'clickhouse:' then connection string will be parsed + * and converted into a set of arguments for the client. + * Connection string format is similar to URI "clickhouse:[//[user_info@][hosts_and_ports]][/dbname][?query_parameters]" + * with the difference that hosts_and_ports can contain multiple hosts separated by ','. + * example: clickhouse://user@host1:port1,host2:port2 + * @return returns true if there is a URI, false otherwise. + * @exception throws DB::Exception if URI has valid scheme (clickhouse:), but invalid internals. 
+*/ +bool tryParseConnectionString( + std::string_view connection_string, + std::vector & common_arguments, + std::vector> & hosts_and_ports_arguments); +} diff --git a/tests/queries/0_stateless/02784_connection_string.reference b/tests/queries/0_stateless/02784_connection_string.reference new file mode 100644 index 00000000000..6a36abae8e0 --- /dev/null +++ b/tests/queries/0_stateless/02784_connection_string.reference @@ -0,0 +1,125 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 +68 +500 +501 +502 +1000 +1001 +1002 +1003 +Bad arguments +Bad arguments +Bad arguments +Bad arguments +Bad arguments +Bad arguments +Bad arguments +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +BAD_ARGUMENTS +Authentication failed +Authentication failed diff --git a/tests/queries/0_stateless/02784_connection_string.sh b/tests/queries/0_stateless/02784_connection_string.sh new file mode 100755 index 00000000000..fce93fdad74 --- /dev/null +++ b/tests/queries/0_stateless/02784_connection_string.sh @@ -0,0 +1,156 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +USER_INFOS=('default' '') +HOSTS_PORTS=("$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP" "$CLICKHOUSE_HOST" "$CLICKHOUSE_HOST:" ":$CLICKHOUSE_PORT_TCP" "127.0.0.1" "127.0.0.1:$CLICKHOUSE_PORT_TCP" "$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP,invalid_host:9000" "[0000:0000:0000:0000:0000:0000:0000:0001]" "[::1]" "[::1]:$CLICKHOUSE_PORT_TCP" "" ) +DATABASES=("$CLICKHOUSE_DATABASE" "") + +TEST_INDEX=0 + +function runClient() +{ + $CLICKHOUSE_CLIENT_BINARY "$@" -q "SELECT $TEST_INDEX" --log_comment 02766_connection_string.sh --send_logs_level=warning + ((++TEST_INDEX)) +} + +function testConnectionString() +{ + if [ "$database" == "" ]; then + runClient "clickhouse:$1" + runClient "clickhouse:$1/" + else + runClient "clickhouse:$1/$database" + fi +} + +function testConnectionWithUserName() +{ +if [ "$user_info" == "" ] && [ "$host_port" == "" ]; then + testConnectionString "//" + testConnectionString "" + else + testConnectionString "//$user_info@$host_port" + fi +} + +for user_info in "${USER_INFOS[@]}" +do + for host_port in "${HOSTS_PORTS[@]}" + do + for database in "${DATABASES[@]}" + do + testConnectionWithUserName + done + done +done + +# Specific user and password +TEST_INDEX=500 +TEST_USER_NAME="test_user_02771_$$" +TEST_USER_EMAIL_NAME="test_user_02771_$$@some_mail.com" +TEST_USER_EMAIL_NAME_ENCODED="test_user_02771_$$%40some_mail.com" + +TEST_USER_PASSWORD="zyx%$&abc" +# %, $, & percent encoded +TEST_USER_PASSWORD_ENCODED="zyx%25%24%26abc" + +$CLICKHOUSE_CLIENT -q "CREATE USER '$TEST_USER_NAME'" +$CLICKHOUSE_CLIENT -q "CREATE USER '$TEST_USER_EMAIL_NAME' IDENTIFIED WITH plaintext_password BY '$TEST_USER_PASSWORD'" + +runClient "clickhouse://$TEST_USER_NAME@$CLICKHOUSE_HOST/$CLICKHOUSE_DATABASE" +runClient "clickhouse://$TEST_USER_EMAIL_NAME_ENCODED:$TEST_USER_PASSWORD_ENCODED@$CLICKHOUSE_HOST/$CLICKHOUSE_DATABASE" + +$CLICKHOUSE_CLIENT -q "DROP USER '$TEST_USER_NAME'" +$CLICKHOUSE_CLIENT -q "DROP USER '$TEST_USER_EMAIL_NAME'" + +# Percent-encoded database in non-ascii symbols +UTF8_DATABASE="БазаДанных_$$" +UTF8_DATABASE_PERCENT_ENCODED="%D0%91%D0%B0%D0%B7%D0%B0%D0%94%D0%B0%D0%BD%D0%BD%D1%8B%D1%85_$$" +$CLICKHOUSE_CLIENT -q "CREATE DATABASE IF NOT EXISTS \`$UTF8_DATABASE\`" +runClient "clickhouse://default@$CLICKHOUSE_HOST/$UTF8_DATABASE_PERCENT_ENCODED" +$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS \`$UTF8_DATABASE\`" + +# clickhouse-client extra options cases +TEST_INDEX=1000 + +runClient "clickhouse://$CLICKHOUSE_HOST/" --user 'default' +runClient "clickhouse://$CLICKHOUSE_HOST/default" --user 'default' +runClient "clickhouse:" --database "$CLICKHOUSE_DATABASE" + +# User 'default' and default host +runClient "clickhouse://default@" + +# Invalid URI cases +TEST_INDEX=10000 +runClient "clickhouse://default:@$CLICKHOUSE_HOST/" --user 'default' 2>&1 | grep -o 'Bad arguments' +runClient "clickhouse://default:pswrd@$CLICKHOUSE_HOST/" --user 'default' 2>&1 | grep -o 'Bad arguments' +runClient "clickhouse://default:pswrd@$CLICKHOUSE_HOST/" --password 'pswrd' 2>&1 | grep -o 'Bad arguments' +runClient "clickhouse:///$CLICKHOUSE_DATABASE" --database "$CLICKHOUSE_DATABASE" 2>&1 | grep -o 'Bad arguments' +runClient "clickhouse://$CLICKHOUSE_HOST/$CLICKHOUSE_DATABASE" --database "$CLICKHOUSE_DATABASE" 2>&1 | grep -o 'Bad arguments' +runClient "clickhouse://$CLICKHOUSE_HOST/$CLICKHOUSE_DATABASE?s" --database "$CLICKHOUSE_DATABASE" 2>&1 | grep -o 'Bad arguments' +runClient "clickhouse:/$CLICKHOUSE_DATABASE?s" --database "$CLICKHOUSE_DATABASE" 2>&1 | grep -o 'Bad arguments' + 
+runClient "http://" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "click_house:" 2>&1 | grep -o 'BAD_ARGUMENTS' + +TEST_INDEX=1000087 +# Using connection string prohibits to use --host and --port options +runClient "clickhouse://default:@$CLICKHOUSE_HOST/" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://default:@$CLICKHOUSE_HOST/" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://default:@$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP/" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://default:@$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP/" --port "$CLICKHOUSE_PORT_TCP" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://default:@$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP/" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://default:@$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP/" --host "$CLICKHOUSE_HOST" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://default:@$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP/" --port "$CLICKHOUSE_PORT_TCP" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://default:@$CLICKHOUSE_HOST/" --port "$CLICKHOUSE_PORT_TCP" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://$CLICKHOUSE_HOST/" --port "$CLICKHOUSE_PORT_TCP" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://:@$CLICKHOUSE_HOST/" --port "$CLICKHOUSE_PORT_TCP" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://$CLICKHOUSE_HOST/" --port "$CLICKHOUSE_PORT_TCP" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse:" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse:///" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse:///?" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://:/?" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse:" --database "$CLICKHOUSE_DATABASE" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' + +# Space is used in connection string (This is prohibited). 
+runClient " clickhouse:" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse: " 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://host1 /" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://host1, host2/" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://host1 ,host2/" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://host1 host2/" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://host1/ database:" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://user :password@host1" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://user: password@host1" 2>&1 | grep -o 'BAD_ARGUMENTS' + +# Query is not first argument +runClient --multiline "clickhouse://default:@$CLICKHOUSE_HOST/" 2>&1 | grep -o 'BAD_ARGUMENTS' +# Query used as the first and the second argument of client +runClient "clickhouse://default:@$CLICKHOUSE_HOST/" "clickhouse://default:@$CLICKHOUSE_HOST/" 2>&1 | grep -o 'BAD_ARGUMENTS' + +# Invalid hosts +runClient "clickhouse://host1,,," 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://," 2>&1 | grep -o 'BAD_ARGUMENTS' + +# Invalid parameters +runClient "clickhouse:?invalid_parameter" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse:?invalid_parameter&secure" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse:?s&invalid_parameter" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse:?s&invalid_parameter=val" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse:?invalid_parameter=arg" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse:?invalid_parameter=arg&s" 2>&1 | grep -o 'BAD_ARGUMENTS' +# Several users prohibited +runClient "clickhouse://user1@localhost,default@localhost/" 2>&1 | grep -o 'BAD_ARGUMENTS' +# Using '@' in user name is prohibited. User name should be percent-encoded. +runClient "clickhouse://my_mail@email.com@host/" 2>&1 | grep -o 'BAD_ARGUMENTS' + +# Wrong input cases +TEST_INDEX=100000 +# Invalid user name +runClient "clickhouse://non_exist_user@$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP/" 2>&1 | grep -o 'Authentication failed' +# Invalid password +runClient "clickhouse://default:invalid_password@$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP/" 2>&1 | grep -o 'Authentication failed' From 17754bf6941aa0754db2bb2de5c7098f890c2898 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Wed, 7 Jun 2023 05:59:13 +0000 Subject: [PATCH 227/722] minor changes in documentation --- docs/en/interfaces/cli.md | 4 ++-- docs/ru/interfaces/cli.md | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index 5255657ddfd..94f1fbf9e41 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -168,7 +168,7 @@ clickhouse://[user_info@][hosts_and_ports][/dbname][?query_parameters] where user_info is: ```user[:password]``` and hosts_and_ports is a list of values: ```[host][:port],[host][:port]``` Port is not mandatory. -and query_parameters is a list of parameter[=value]: ```param_name[=value]¶m_name[=value]...``` value may not be required for some of parameters. +and query_parameters is a list of parameter[=value]: ```param_name[=value]¶m_name[=value]...``` value may not be required for some of parameters. Parameter names are case sensitive. Allowed query_parameters keys: @@ -198,7 +198,7 @@ If host is not specified, the default host will be used (localhost). If port is not specified, the default port will be used (9000). If database is not specified, the default database will be used. 
-User, password, and database can be specified in the connection string either in --user command line option. +User, password, and database can be specified in the connection string either in --user, --password, --database command line options. The connection string must be specified in the first argument of clickhouse-client. The connection string can be combined with other [command-line-options](#command-line-options) except **--host(h)** and **--port**. diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index 06642800cc6..30cd9757ebb 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -120,7 +120,8 @@ clickhouse://[user_info@][hosts_and_ports][/dbname][?query_parameters] где user_info - это: ```user[:password]``` hosts_and_ports - это список значений: ```[host][:port],[host][:port]```. Port может быть не задан. -query_parameters - это список пар ключ[=значение]: ```param_name[=value]¶m_name[=value]...```. Значение может быть пустым +query_parameters - это список пар ключ[=значение]: ```param_name[=value]¶m_name[=value]...```. Значение может быть пустым. +Имена параметров чувствительны к регистру. Допустимые ключи query_parameters: From 4a0ccc25d21e9f333057a421bd8009d648df17ae Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Wed, 7 Jun 2023 06:15:10 +0000 Subject: [PATCH 228/722] Minor improvement --- src/Client/ConnectionString.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/ConnectionString.cpp b/src/Client/ConnectionString.cpp index a8b87726a65..7d76deb6238 100644 --- a/src/Client/ConnectionString.cpp +++ b/src/Client/ConnectionString.cpp @@ -61,7 +61,7 @@ void getHostAndPort( const char * connection_string_end) { // User info does not matter in sub URI - std::string uri_string = {CONNECTION_URI_SCHEME.begin(), CONNECTION_URI_SCHEME.end()}; + auto uri_string = std::string(CONNECTION_URI_SCHEME); if (host_begin != nullptr && host_begin != host_end) { uri_string.append("//"); From aaa4d0367e9d51cd5308a7d5a02fd8333e9e7bb1 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Wed, 7 Jun 2023 06:29:14 +0000 Subject: [PATCH 229/722] Minor improvement for connection string --- src/Client/ConnectionString.cpp | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/Client/ConnectionString.cpp b/src/Client/ConnectionString.cpp index 7d76deb6238..aeb1c1dca02 100644 --- a/src/Client/ConnectionString.cpp +++ b/src/Client/ConnectionString.cpp @@ -55,21 +55,19 @@ void getHostAndPort(const Poco::URI & uri, std::vector> void getHostAndPort( Poco::URI & uri, std::vector> & hosts_and_ports_arguments, - const char * host_begin, - const char * host_end, - const char * right_part_start, - const char * connection_string_end) + std::string_view host_and_port, + std::string_view right_part) { // User info does not matter in sub URI auto uri_string = std::string(CONNECTION_URI_SCHEME); - if (host_begin != nullptr && host_begin != host_end) + if (!host_and_port.empty()) { uri_string.append("//"); - uri_string.append(host_begin, host_end); + uri_string.append(host_and_port); } // Right part from string includes '/database?[params]' - uri_string.append(right_part_start, connection_string_end); + uri_string.append(right_part); try { uri = Poco::URI(uri_string); @@ -147,7 +145,7 @@ bool tryParseConnectionString( { if (*it == ',') { - getHostAndPort(uri, hosts_and_ports_arguments, last_host_begin, it, hosts_end, connection_string.end()); + getHostAndPort(uri, hosts_and_ports_arguments, {last_host_begin, 
it}, {hosts_end, connection_string.end()}); last_host_begin = it + 1; } } @@ -159,7 +157,7 @@ bool tryParseConnectionString( getHostAndPort(uri, hosts_and_ports_arguments); } else - getHostAndPort(uri, hosts_and_ports_arguments, last_host_begin, hosts_end, hosts_end, connection_string.end()); + getHostAndPort(uri, hosts_and_ports_arguments, {last_host_begin, hosts_end}, {hosts_end, connection_string.end()}); Poco::URI::QueryParameters params = uri.getQueryParameters(); for (const auto & param : params) From db1c03d6db270a4a6b059d4b7f09c5d264f13081 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 2 Jun 2023 15:54:39 +0000 Subject: [PATCH 230/722] Cleanup moving parts --- src/Storages/MergeTree/MergeTreeData.cpp | 26 +++++++++----- src/Storages/MergeTree/MergeTreeData.h | 1 + .../MergeTree/MergeTreePartsMover.cpp | 36 ++++++++++++++----- src/Storages/MergeTree/MergeTreePartsMover.h | 10 +++++- 4 files changed, 55 insertions(+), 18 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index e806e1bb93f..047f063cb7c 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1998,8 +1998,18 @@ static bool isOldPartDirectory(const DiskPtr & disk, const String & directory_pa return true; } - size_t MergeTreeData::clearOldTemporaryDirectories(size_t custom_directories_lifetime_seconds, const NameSet & valid_prefixes) +{ + size_t cleared_count = 0; + + cleared_count += clearOldTemporaryDirectories(relative_data_path, custom_directories_lifetime_seconds, valid_prefixes); + + /// Clear _all_ parts from the `moving` directory + cleared_count += clearOldTemporaryDirectories(fs::path(relative_data_path) / "moving", custom_directories_lifetime_seconds, {""}); + return cleared_count; +} + +size_t MergeTreeData::clearOldTemporaryDirectories(const String & root_path, size_t custom_directories_lifetime_seconds, const NameSet & valid_prefixes) { /// If the method is already called from another thread, then we don't need to do anything. 
std::unique_lock lock(clear_old_temporary_directories_mutex, std::defer_lock); @@ -2018,7 +2028,7 @@ size_t MergeTreeData::clearOldTemporaryDirectories(size_t custom_directories_lif if (disk->isBroken()) continue; - for (auto it = disk->iterateDirectory(relative_data_path); it->isValid(); it->next()) + for (auto it = disk->iterateDirectory(root_path); it->isValid(); it->next()) { const std::string & basename = it->name(); bool start_with_valid_prefix = false; @@ -7802,7 +7812,7 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & for (const auto & moving_part : moving_tagger->parts_to_move) { Stopwatch stopwatch; - MutableDataPartPtr cloned_part; + MergeTreePartsMover::TemporaryClonedPart cloned_part; ProfileEventsScope profile_events_scope; auto write_part_log = [&](const ExecutionStatus & execution_status) @@ -7812,7 +7822,7 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & execution_status, stopwatch.elapsed(), moving_part.part->name, - cloned_part, + cloned_part.part, {moving_part.part}, nullptr, profile_events_scope.getSnapshot()); @@ -7854,7 +7864,7 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & if (lock->isLocked()) { cloned_part = parts_mover.clonePart(moving_part); - parts_mover.swapClonedPart(cloned_part); + parts_mover.swapClonedPart(cloned_part.part); break; } else if (wait_for_move_if_zero_copy) @@ -7881,15 +7891,15 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & else /// Ordinary move as it should be { cloned_part = parts_mover.clonePart(moving_part); - parts_mover.swapClonedPart(cloned_part); + parts_mover.swapClonedPart(cloned_part.part); } write_part_log({}); } catch (...) { write_part_log(ExecutionStatus::fromCurrentException("", true)); - if (cloned_part) - cloned_part->remove(); + if (cloned_part.part) + cloned_part.part->remove(); throw; } diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 2f254f9a787..444bd8f47ac 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -676,6 +676,7 @@ public: /// Delete all directories which names begin with "tmp" /// Must be called with locked lockForShare() because it's using relative_data_path. 
size_t clearOldTemporaryDirectories(size_t custom_directories_lifetime_seconds, const NameSet & valid_prefixes = {"tmp_", "tmp-fetch_"}); + size_t clearOldTemporaryDirectories(const String & root_path, size_t custom_directories_lifetime_seconds, const NameSet & valid_prefixes); size_t clearEmptyParts(); diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index e1da57744b3..08815fa1f0c 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -11,6 +11,7 @@ namespace DB namespace ErrorCodes { extern const int ABORTED; + extern const int DIRECTORY_ALREADY_EXISTS; } namespace @@ -203,7 +204,7 @@ bool MergeTreePartsMover::selectPartsForMove( return false; } -MergeTreeMutableDataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEntry & moving_part) const +MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const MergeTreeMoveEntry & moving_part) const { if (moves_blocker.isCancelled()) throw Exception(ErrorCodes::ABORTED, "Cancelled moving parts."); @@ -222,8 +223,10 @@ MergeTreeMutableDataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEn String relative_path = part->getDataPartStorage().getPartDirectory(); if (disk->exists(path_to_clone + relative_path)) { - LOG_WARNING(log, "Path {} already exists. Will remove it and clone again.", fullPath(disk, path_to_clone + relative_path)); - disk->removeRecursive(fs::path(path_to_clone) / relative_path / ""); + throw Exception(ErrorCodes::DIRECTORY_ALREADY_EXISTS, + "Cannot clone part {} from '{}' to '{}': path '{}' already exists", + part->name, part->getDataPartStorage().getDiskName(), disk->getName(), + fullPath(disk, path_to_clone + relative_path)); } disk->createDirectories(path_to_clone); @@ -240,14 +243,22 @@ MergeTreeMutableDataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEn { cloned_part_storage = part->makeCloneOnDisk(disk, MergeTreeData::MOVING_DIR_NAME); } + String data_part_directory = cloned_part_storage->getFullPath(); + + TemporaryClonedPart cloned_part; + cloned_part.temporary_directory_lock = data->getTemporaryPartDirectoryHolder(data_part_directory); MergeTreeDataPartBuilder builder(*data, part->name, cloned_part_storage); - auto cloned_part = std::move(builder).withPartFormatFromDisk().build(); - LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part->getDataPartStorage().getFullPath()); + cloned_part.part = std::move(builder).withPartFormatFromDisk().build(); - cloned_part->loadColumnsChecksumsIndexes(true, true); - cloned_part->loadVersionMetadata(); - cloned_part->modification_time = cloned_part->getDataPartStorage().getLastModified().epochTime(); + String part_directory = cloned_part.part->getDataPartStorage().getFullPath(); + + LOG_TRACE(log, "Part {} was cloned to {}", part->name, data_part_directory); + + cloned_part.part->loadColumnsChecksumsIndexes(true, true); + cloned_part.part->loadVersionMetadata(); + cloned_part.part->modification_time = cloned_part.part->getDataPartStorage().getLastModified().epochTime(); + cloned_part.part->is_temp = true; return cloned_part; } @@ -262,10 +273,17 @@ void MergeTreePartsMover::swapClonedPart(const MergeTreeMutableDataPartPtr & clo /// It's ok, because we don't block moving parts for merges or mutations if (!active_part || active_part->name != cloned_part->name) { - LOG_INFO(log, "Failed to swap {}. Active part doesn't exist. Possible it was merged or mutated. 
Will remove copy on path '{}'.", cloned_part->name, cloned_part->getDataPartStorage().getFullPath()); + LOG_INFO(log, + "Failed to swap {}. Active part doesn't exist (containing part {}). " + "Possible it was merged or mutated. Will remove copy on path '{}'", + cloned_part->name, + active_part ? active_part->name : "doesn't exist", + cloned_part->getDataPartStorage().getFullPath()); return; } + cloned_part->is_temp = false; + /// Don't remove new directory but throw an error because it may contain part which is currently in use. cloned_part->renameTo(active_part->name, false); diff --git a/src/Storages/MergeTree/MergeTreePartsMover.h b/src/Storages/MergeTree/MergeTreePartsMover.h index 1cee98bcba9..dde2ff1a630 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.h +++ b/src/Storages/MergeTree/MergeTreePartsMover.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -43,12 +44,19 @@ private: using AllowedMovingPredicate = std::function &, String * reason)>; public: + explicit MergeTreePartsMover(MergeTreeData * data_) : data(data_) , log(&Poco::Logger::get("MergeTreePartsMover")) { } + struct TemporaryClonedPart + { + MergeTreeMutableDataPartPtr part; + scope_guard temporary_directory_lock; + }; + /// Select parts for background moves according to storage_policy configuration. /// Returns true if at least one part was selected for move. bool selectPartsForMove( @@ -57,7 +65,7 @@ public: const std::lock_guard & moving_parts_lock); /// Copies part to selected reservation in detached folder. Throws exception if part already exists. - MergeTreeMutableDataPartPtr clonePart(const MergeTreeMoveEntry & moving_part) const; + TemporaryClonedPart clonePart(const MergeTreeMoveEntry & moving_part) const; /// Replaces cloned part from detached directory into active data parts set. 
/// Replacing part changes state to DeleteOnDestroy and will be removed from disk after destructor of From 19bb802b04cb9c31585b24f6ebe4765f0dc46c87 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 2 Jun 2023 17:41:16 +0000 Subject: [PATCH 231/722] Set temporary_directories_lifetime to integration tests with MOVE --- .../test_consistant_parts_after_move_partition/test.py | 2 +- tests/integration/test_encrypted_disk/test.py | 2 +- tests/integration/test_merge_tree_azure_blob_storage/test.py | 1 + tests/integration/test_merge_tree_hdfs/test.py | 3 ++- tests/integration/test_merge_tree_s3/test.py | 1 + .../test_move_partition_to_disk_on_cluster/test.py | 2 +- .../configs/config.d/storage_configuration.xml | 4 ++++ .../configs/config.d/storage_configuration.xml | 3 ++- .../test_replicated_merge_tree_hdfs_zero_copy/test.py | 2 +- tests/integration/test_s3_zero_copy_replication/test.py | 2 +- tests/integration/test_ttl_move/test.py | 2 +- tests/integration/test_zero_copy_fetch/test.py | 2 +- 12 files changed, 17 insertions(+), 9 deletions(-) diff --git a/tests/integration/test_consistant_parts_after_move_partition/test.py b/tests/integration/test_consistant_parts_after_move_partition/test.py index 63a51472773..0b19e194e0e 100644 --- a/tests/integration/test_consistant_parts_after_move_partition/test.py +++ b/tests/integration/test_consistant_parts_after_move_partition/test.py @@ -18,7 +18,7 @@ def initialize_database(nodes, shard): CREATE TABLE `{database}`.dest (p UInt64, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard2{shard}/replicated', '{replica}') ORDER BY d PARTITION BY p - SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0, temporary_directories_lifetime=1; """.format( shard=shard, replica=node.name, database=CLICKHOUSE_DATABASE ) diff --git a/tests/integration/test_encrypted_disk/test.py b/tests/integration/test_encrypted_disk/test.py index 9f5415f4bea..fbf2b59785b 100644 --- a/tests/integration/test_encrypted_disk/test.py +++ b/tests/integration/test_encrypted_disk/test.py @@ -96,7 +96,7 @@ def test_part_move(policy, destination_disks): data String ) ENGINE=MergeTree() ORDER BY id - SETTINGS storage_policy='{}' + SETTINGS storage_policy='{}', temporary_directories_lifetime=1 """.format( policy ) diff --git a/tests/integration/test_merge_tree_azure_blob_storage/test.py b/tests/integration/test_merge_tree_azure_blob_storage/test.py index 8bf4df17c39..761b5257a34 100644 --- a/tests/integration/test_merge_tree_azure_blob_storage/test.py +++ b/tests/integration/test_merge_tree_azure_blob_storage/test.py @@ -66,6 +66,7 @@ def create_table(node, table_name, **additional_settings): "storage_policy": "blob_storage_policy", "old_parts_lifetime": 1, "index_granularity": 512, + "temporary_directories_lifetime": 1, } settings.update(additional_settings) diff --git a/tests/integration/test_merge_tree_hdfs/test.py b/tests/integration/test_merge_tree_hdfs/test.py index c79986c34f0..d1a145c00c1 100644 --- a/tests/integration/test_merge_tree_hdfs/test.py +++ b/tests/integration/test_merge_tree_hdfs/test.py @@ -29,7 +29,8 @@ def create_table(cluster, table_name, additional_settings=None): SETTINGS storage_policy='hdfs', old_parts_lifetime=0, - index_granularity=512 + index_granularity=512, + temporary_directories_lifetime=1 """.format( table_name ) diff 
--git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py index 2ccd517923a..3ab31f4728b 100644 --- a/tests/integration/test_merge_tree_s3/test.py +++ b/tests/integration/test_merge_tree_s3/test.py @@ -75,6 +75,7 @@ def create_table(node, table_name, **additional_settings): "storage_policy": "s3", "old_parts_lifetime": 0, "index_granularity": 512, + "temporary_directories_lifetime": 1, } settings.update(additional_settings) diff --git a/tests/integration/test_move_partition_to_disk_on_cluster/test.py b/tests/integration/test_move_partition_to_disk_on_cluster/test.py index 90753fc8ce3..c639e080cdf 100644 --- a/tests/integration/test_move_partition_to_disk_on_cluster/test.py +++ b/tests/integration/test_move_partition_to_disk_on_cluster/test.py @@ -46,7 +46,7 @@ def test_move_partition_to_disk_on_cluster(start_cluster): "(x UInt64) " "ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_local_table', '{replica}') " "ORDER BY tuple()" - "SETTINGS storage_policy = 'jbod_with_external';", + "SETTINGS storage_policy = 'jbod_with_external', temporary_directories_lifetime=1;", ) node1.query("INSERT INTO test_local_table VALUES (0)") diff --git a/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml b/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml index ef40bfb0a0e..e7a87fb77b1 100644 --- a/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml @@ -123,4 +123,8 @@ + + 1 + + diff --git a/tests/integration/test_rename_column/configs/config.d/storage_configuration.xml b/tests/integration/test_rename_column/configs/config.d/storage_configuration.xml index da297e40037..12a598c64b5 100644 --- a/tests/integration/test_rename_column/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_rename_column/configs/config.d/storage_configuration.xml @@ -24,9 +24,10 @@ - + 0 + 1 diff --git a/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py b/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py index bd1c890950a..eb3d62eb718 100644 --- a/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py +++ b/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py @@ -128,7 +128,7 @@ def test_hdfs_zero_copy_replication_single_move(cluster, storage_policy, init_ob CREATE TABLE single_node_move_test (dt DateTime, id Int64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/single_node_move_test', '{replica}') ORDER BY (dt, id) - SETTINGS storage_policy='$policy' + SETTINGS storage_policy='$policy',temporary_directories_lifetime=1 """ ).substitute(policy=storage_policy) ) diff --git a/tests/integration/test_s3_zero_copy_replication/test.py b/tests/integration/test_s3_zero_copy_replication/test.py index 100f062de2f..bc13c127610 100644 --- a/tests/integration/test_s3_zero_copy_replication/test.py +++ b/tests/integration/test_s3_zero_copy_replication/test.py @@ -163,7 +163,7 @@ def test_s3_zero_copy_on_hybrid_storage(started_cluster): CREATE TABLE hybrid_test ON CLUSTER test_cluster (id UInt32, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/hybrid_test', '{}') ORDER BY id - SETTINGS storage_policy='hybrid' + SETTINGS storage_policy='hybrid',temporary_directories_lifetime=1 """.format( "{replica}" ) diff --git a/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py index 
7635d784fef..a2f28e21666 100644 --- a/tests/integration/test_ttl_move/test.py +++ b/tests/integration/test_ttl_move/test.py @@ -1549,7 +1549,7 @@ def test_double_move_while_select(started_cluster, name, positive): ) ENGINE = MergeTree ORDER BY tuple() PARTITION BY n - SETTINGS storage_policy='small_jbod_with_external' + SETTINGS storage_policy='small_jbod_with_external',temporary_directories_lifetime=1 """.format( name=name ) diff --git a/tests/integration/test_zero_copy_fetch/test.py b/tests/integration/test_zero_copy_fetch/test.py index 9b9aa5e0da7..4f3d42096c3 100644 --- a/tests/integration/test_zero_copy_fetch/test.py +++ b/tests/integration/test_zero_copy_fetch/test.py @@ -45,7 +45,7 @@ CREATE TABLE test1 (EventDate Date, CounterID UInt32) ENGINE = ReplicatedMergeTree('/clickhouse-tables/test1', 'r1') PARTITION BY toMonday(EventDate) ORDER BY (CounterID, EventDate) -SETTINGS index_granularity = 8192, storage_policy = 's3'""" +SETTINGS index_granularity = 8192, storage_policy = 's3', temporary_directories_lifetime=1""" ) node1.query( From 09fecace434aaeb1c54049f94a855a2843766145 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 2 Jun 2023 17:43:08 +0000 Subject: [PATCH 232/722] upd --- src/Storages/MergeTree/MergeTreeData.cpp | 3 --- src/Storages/MergeTree/MergeTreePartsMover.cpp | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 047f063cb7c..0e542aa3407 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -7898,9 +7898,6 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & catch (...) { write_part_log(ExecutionStatus::fromCurrentException("", true)); - if (cloned_part.part) - cloned_part.part->remove(); - throw; } } diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index 08815fa1f0c..2c3b3d1a621 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -255,10 +255,10 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me LOG_TRACE(log, "Part {} was cloned to {}", part->name, data_part_directory); + cloned_part.part->is_temp = true; cloned_part.part->loadColumnsChecksumsIndexes(true, true); cloned_part.part->loadVersionMetadata(); cloned_part.part->modification_time = cloned_part.part->getDataPartStorage().getLastModified().epochTime(); - cloned_part.part->is_temp = true; return cloned_part; } From 80f918d4b77cbad22aeb0371ac2f7881fe603550 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 5 Jun 2023 18:22:41 +0000 Subject: [PATCH 233/722] Fixes for cleanup moving parts --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 13 ++++++-- src/Storages/MergeTree/MergeTreeData.cpp | 4 +-- .../MergeTree/MergeTreePartsMover.cpp | 31 +++++++++---------- src/Storages/MergeTree/MergeTreePartsMover.h | 2 +- 4 files changed, 28 insertions(+), 22 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index d27b03fff44..9084b5790af 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -492,13 +492,17 @@ void IMergeTreeDataPart::removeIfNeeded() if (is_temp) { - String file_name = fileName(getDataPartStorage().getPartDirectory()); + const auto & part_directory = getDataPartStorage().getPartDirectory(); + + String file_name = fileName(part_directory); if 
(file_name.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "relative_path {} of part {} is invalid or not set", getDataPartStorage().getPartDirectory(), name); - if (!startsWith(file_name, "tmp") && !endsWith(file_name, ".tmp_proj")) + const auto part_parent_directory = directoryPath(part_directory); + bool is_moving_part = part_parent_directory.ends_with("moving/"); + if (!startsWith(file_name, "tmp") && !endsWith(file_name, ".tmp_proj") && !is_moving_part) { LOG_ERROR( storage.log, @@ -507,6 +511,11 @@ void IMergeTreeDataPart::removeIfNeeded() path); return; } + + if (is_moving_part) + { + LOG_TRACE(storage.log, "Removing unneeded moved part from {}", path); + } } remove(); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 0e542aa3407..7fe3efaf6d5 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -7864,7 +7864,7 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & if (lock->isLocked()) { cloned_part = parts_mover.clonePart(moving_part); - parts_mover.swapClonedPart(cloned_part.part); + parts_mover.swapClonedPart(cloned_part); break; } else if (wait_for_move_if_zero_copy) @@ -7891,7 +7891,7 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & else /// Ordinary move as it should be { cloned_part = parts_mover.clonePart(moving_part); - parts_mover.swapClonedPart(cloned_part.part); + parts_mover.swapClonedPart(cloned_part); } write_part_log({}); } diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index 2c3b3d1a621..656167de986 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -213,6 +213,8 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me auto part = moving_part.part; auto disk = moving_part.reserved_space->getDisk(); LOG_DEBUG(log, "Cloning part {} from '{}' to '{}'", part->name, part->getDataPartStorage().getDiskName(), disk->getName()); + TemporaryClonedPart cloned_part; + cloned_part.temporary_directory_lock = data->getTemporaryPartDirectoryHolder(part->name); MutableDataPartStoragePtr cloned_part_storage; if (disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication) @@ -243,17 +245,10 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me { cloned_part_storage = part->makeCloneOnDisk(disk, MergeTreeData::MOVING_DIR_NAME); } - String data_part_directory = cloned_part_storage->getFullPath(); - - TemporaryClonedPart cloned_part; - cloned_part.temporary_directory_lock = data->getTemporaryPartDirectoryHolder(data_part_directory); MergeTreeDataPartBuilder builder(*data, part->name, cloned_part_storage); cloned_part.part = std::move(builder).withPartFormatFromDisk().build(); - - String part_directory = cloned_part.part->getDataPartStorage().getFullPath(); - - LOG_TRACE(log, "Part {} was cloned to {}", part->name, data_part_directory); + LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part.part->getDataPartStorage().getFullPath()); cloned_part.part->is_temp = true; cloned_part.part->loadColumnsChecksumsIndexes(true, true); @@ -263,34 +258,36 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me } -void MergeTreePartsMover::swapClonedPart(const MergeTreeMutableDataPartPtr & cloned_part) const +void MergeTreePartsMover::swapClonedPart(TemporaryClonedPart & cloned_part) 
const { if (moves_blocker.isCancelled()) throw Exception(ErrorCodes::ABORTED, "Cancelled moving parts."); - auto active_part = data->getActiveContainingPart(cloned_part->name); + auto active_part = data->getActiveContainingPart(cloned_part.part->name); /// It's ok, because we don't block moving parts for merges or mutations - if (!active_part || active_part->name != cloned_part->name) + if (!active_part || active_part->name != cloned_part.part->name) { LOG_INFO(log, "Failed to swap {}. Active part doesn't exist (containing part {}). " "Possible it was merged or mutated. Will remove copy on path '{}'", - cloned_part->name, + cloned_part.part->name, active_part ? active_part->name : "doesn't exist", - cloned_part->getDataPartStorage().getFullPath()); + cloned_part.part->getDataPartStorage().getFullPath()); return; } - cloned_part->is_temp = false; + cloned_part.part->is_temp = false; /// Don't remove new directory but throw an error because it may contain part which is currently in use. - cloned_part->renameTo(active_part->name, false); + cloned_part.part->renameTo(active_part->name, false); /// TODO what happen if server goes down here? - data->swapActivePart(cloned_part); + data->swapActivePart(cloned_part.part); - LOG_TRACE(log, "Part {} was moved to {}", cloned_part->name, cloned_part->getDataPartStorage().getFullPath()); + LOG_TRACE(log, "Part {} was moved to {}", cloned_part.part->name, cloned_part.part->getDataPartStorage().getFullPath()); + + cloned_part.temporary_directory_lock = {}; } } diff --git a/src/Storages/MergeTree/MergeTreePartsMover.h b/src/Storages/MergeTree/MergeTreePartsMover.h index dde2ff1a630..82fd271ee5f 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.h +++ b/src/Storages/MergeTree/MergeTreePartsMover.h @@ -72,7 +72,7 @@ public: /// IMergeTreeDataPart called. If replacing part doesn't exists or not active (committed) than /// cloned part will be removed and log message will be reported. It may happen in case of concurrent /// merge or mutation. 
- void swapClonedPart(const MergeTreeMutableDataPartPtr & cloned_parts) const; + void swapClonedPart(TemporaryClonedPart & cloned_part) const; /// Can stop background moves and moves from queries ActionBlocker moves_blocker; From b410a4d44ce6f1ac1048efb565f87bae0e97c183 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 5 Jun 2023 18:23:24 +0000 Subject: [PATCH 234/722] Add test test_alter_moving_garbage --- .../test_alter_moving_garbage/__init__.py | 0 .../configs/config.d/storage_conf.xml | 26 ++++++ .../configs/config.xml | 7 ++ .../test_alter_moving_garbage/test.py | 90 +++++++++++++++++++ 4 files changed, 123 insertions(+) create mode 100644 tests/integration/test_alter_moving_garbage/__init__.py create mode 100644 tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml create mode 100644 tests/integration/test_alter_moving_garbage/configs/config.xml create mode 100644 tests/integration/test_alter_moving_garbage/test.py diff --git a/tests/integration/test_alter_moving_garbage/__init__.py b/tests/integration/test_alter_moving_garbage/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml b/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml new file mode 100644 index 00000000000..659f59a41b2 --- /dev/null +++ b/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml @@ -0,0 +1,26 @@ + + + + + + + s3 + http://minio1:9001/root/data/ + minio + minio123 + + + + + + + default + + + s3 + + + + + + diff --git a/tests/integration/test_alter_moving_garbage/configs/config.xml b/tests/integration/test_alter_moving_garbage/configs/config.xml new file mode 100644 index 00000000000..f4be5ab6b7c --- /dev/null +++ b/tests/integration/test_alter_moving_garbage/configs/config.xml @@ -0,0 +1,7 @@ + + 9000 + 127.0.0.1 + 500 + ./clickhouse/ + users.xml + diff --git a/tests/integration/test_alter_moving_garbage/test.py b/tests/integration/test_alter_moving_garbage/test.py new file mode 100644 index 00000000000..b369c9ad377 --- /dev/null +++ b/tests/integration/test_alter_moving_garbage/test.py @@ -0,0 +1,90 @@ +import logging +import time + +import pytest +import threading + +from helpers.client import QueryRuntimeException +from helpers.cluster import ClickHouseCluster + + +@pytest.fixture(scope="module") +def cluster(): + try: + cluster = ClickHouseCluster(__file__) + cluster.add_instance( + "node1", + main_configs=[ + "configs/config.d/storage_conf.xml", + ], + with_minio=True, + ) + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + cluster.shutdown() + + +def create_table(node, table_name, **additional_settings): + settings = { + "storage_policy": "two_disks", + "old_parts_lifetime": 1, + "index_granularity": 512, + "temporary_directories_lifetime": 0, + "merge_tree_clear_old_temporary_directories_interval_seconds": 1, + } + settings.update(additional_settings) + + create_table_statement = f""" + CREATE TABLE {table_name} ( + dt Date, + id Int64, + data String, + INDEX min_max (id) TYPE minmax GRANULARITY 3 + ) ENGINE=MergeTree() + PARTITION BY dt + ORDER BY (dt, id) + SETTINGS {",".join((k+"="+repr(v) for k, v in settings.items()))}""" + + node.query(create_table_statement) + + +def test_create_table(cluster): + node = cluster.instances["node1"] + create_table(node, "test_table") + node.query( + "INSERT INTO test_table SELECT toDate('2021-01-01') + INTERVAL number % 10 
DAY, number, toString(sipHash64(number)) FROM numbers(100_000)" + ) + + stop_alter = False + + def alter(): + d = 0 + node.query(f"ALTER TABLE test_table ADD COLUMN col0 String") + while not stop_alter: + d = d + 1 + node.query(f"DELETE FROM test_table WHERE id < {d}") + time.sleep(0.1) + + alter_thread = threading.Thread(target=alter) + alter_thread.start() + + for i in range(1, 10): + partition = f"2021-01-{i:02d}" + try: + node.query( + f"ALTER TABLE test_table MOVE PARTITION '{partition}' TO DISK 's3'", + ) + except QueryRuntimeException as e: + # PART_IS_TEMPORARILY_LOCKED + assert 384 == e.returncode + continue + + # clear old temporary directories wakes up every 1 second + time.sleep(0.5) + + stop_alter = True + alter_thread.join() From ea1aa4bd9e312a36b578fff3ec3573ff0844d9d5 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Wed, 7 Jun 2023 16:02:16 +0200 Subject: [PATCH 235/722] update comment --- src/Client/Suggest.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index e249aa1bb04..4e38add0ef5 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -101,7 +101,7 @@ static String getLoadSuggestionQuery(Int32 suggestion_limit, bool basic_suggesti add_column("name", "columns", true, suggestion_limit); } - /// FIXME: Forbid this query using new analyzer because of bug https://github.com/ClickHouse/ClickHouse/pull/50430#issuecomment-1576860893 + /// FIXME: Forbid this query using new analyzer because of bug https://github.com/ClickHouse/ClickHouse/issues/50669 /// We should remove this restriction after resolving this bug. query = "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM (" + query + ") WHERE notEmpty(res) SETTINGS allow_experimental_analyzer=0"; return query; From 4fd64a28b20966246743ec6408a3018e8724249e Mon Sep 17 00:00:00 2001 From: Han Fei Date: Wed, 7 Jun 2023 16:07:18 +0200 Subject: [PATCH 236/722] and add more tests --- .../01763_filter_push_down_bugs.reference | 23 +++++++++++++++---- .../01763_filter_push_down_bugs.sql | 6 +++++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference index 7df35e2948d..db9cd7a2d16 100644 --- a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference +++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference @@ -9,13 +9,11 @@ String1_0 String2_0 String3_0 String4_0 1 Expression ((Projection + Before ORDER BY)) Filter (WHERE) Join (JOIN FillRightFirst) - Filter (( + Before JOIN)) + Expression (Before JOIN) ReadFromMergeTree (default.t1) Indexes: PrimaryKey - Keys: - id - Condition: (id in [101, 101]) + Condition: true Parts: 1/1 Granules: 1/1 Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) @@ -25,3 +23,20 @@ Expression ((Projection + Before ORDER BY)) Condition: true Parts: 1/1 Granules: 1/1 +Expression ((Project names + Projection)) + Filter ((WHERE + DROP unused columns after JOIN)) + Join (JOIN FillRightFirst) + Expression (Change column names to column identifiers) + ReadFromMergeTree (default.t1) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 1/1 + Expression ((Change column names to column identifiers + (Project names + (Projection + Change column names to column identifiers)))) + ReadFromMergeTree (default.t2) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 1/1 diff --git 
a/tests/queries/0_stateless/01763_filter_push_down_bugs.sql b/tests/queries/0_stateless/01763_filter_push_down_bugs.sql index 5f7f4379714..9a5ef4727c5 100644 --- a/tests/queries/0_stateless/01763_filter_push_down_bugs.sql +++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.sql @@ -58,5 +58,11 @@ EXPLAIN indexes=1 SELECT id, delete_time FROM t1 FROM t2 ) AS d WHERE create_time < delete_time AND id = 101 SETTINGS allow_experimental_analyzer=0; +EXPLAIN indexes=1 SELECT id, delete_time FROM t1 + CROSS JOIN ( + SELECT delete_time + FROM t2 +) AS d WHERE create_time < delete_time AND id = 101 SETTINGS allow_experimental_analyzer=1; + DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; From 0dd75d7648d3ba12b9593b312ce428e1b12799f8 Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Wed, 7 Jun 2023 17:50:20 +0300 Subject: [PATCH 237/722] Add 02783_parseDateTimeBestEffort_syslog test --- ...3_parseDateTimeBestEffort_syslog.reference | 20 +++++ .../02783_parseDateTimeBestEffort_syslog.sql | 83 +++++++++++++++++++ 2 files changed, 103 insertions(+) create mode 100644 tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference create mode 100644 tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference new file mode 100644 index 00000000000..7409b413260 --- /dev/null +++ b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference @@ -0,0 +1,20 @@ +parseDateTimeBestEffort + dt_ref res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc + + Jun 7 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 + Jun 7 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 +parseDateTimeBestEffortUS + dt_ref res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc + + Jun 7 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 + Jun 7 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 +parseDateTime64BestEffort + dt_ref res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc + + Jun 7 04:55:00 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 + Jun 7 04:56:00 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 +parseDateTime64BestEffortUS + dt_ref res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc + + Jun 7 04:55:00 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 
04:55:00.000 + Jun 7 04:56:00 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql new file mode 100644 index 00000000000..91ae230205b --- /dev/null +++ b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql @@ -0,0 +1,83 @@ +SELECT 'parseDateTimeBestEffort'; + +WITH + now() AS ts_now, + '2023-06-07 04:55:30' AS ref_point, + dateDiff('second', toDateTime(ref_point), ts_now) AS impedimenta, + formatDateTime(ts_around, '%b %e %T') AS dt_curr +SELECT + formatDateTime(ts_around - impedimenta, '%b %e %H:%i:%s') AS dt_ref, + parseDateTimeBestEffort(dt_curr) - impedimenta AS res, + parseDateTimeBestEffort(dt_curr, 'US/Samoa') - impedimenta AS res_sam, + parseDateTimeBestEffort(dt_curr, 'Pacific/Auckland') - impedimenta AS res_auc, + parseDateTimeBestEffortOrNull(dt_curr) - impedimenta AS res_null, + parseDateTimeBestEffortOrNull(dt_curr, 'US/Samoa') - impedimenta AS res_null_sam, + parseDateTimeBestEffortOrNull(dt_curr, 'Pacific/Auckland') - impedimenta AS res_null_auc, + parseDateTimeBestEffortOrZero(dt_curr) - impedimenta AS res_zero, + parseDateTimeBestEffortOrZero(dt_curr, 'US/Samoa') - impedimenta AS res_zero_sam, + parseDateTimeBestEffortOrZero(dt_curr, 'Pacific/Auckland') - impedimenta AS res_zero_auc +FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around) +FORMAT PrettySpaceNoEscapes; + +SELECT 'parseDateTimeBestEffortUS'; + +WITH + now() AS ts_now, + '2023-06-07 04:55:30' AS ref_point, + dateDiff('second', toDateTime(ref_point), ts_now) AS impedimenta, + formatDateTime(ts_around, '%b %e %T') AS dt_curr +SELECT + formatDateTime(ts_around - impedimenta, '%b %e %H:%i:%s') AS dt_ref, + parseDateTimeBestEffortUS(dt_curr) - impedimenta AS res, + parseDateTimeBestEffortUS(dt_curr, 'US/Samoa') - impedimenta AS res_sam, + parseDateTimeBestEffortUS(dt_curr, 'Pacific/Auckland') - impedimenta AS res_auc, + parseDateTimeBestEffortUSOrNull(dt_curr) - impedimenta AS res_null, + parseDateTimeBestEffortUSOrNull(dt_curr, 'US/Samoa') - impedimenta AS res_null_sam, + parseDateTimeBestEffortUSOrNull(dt_curr, 'Pacific/Auckland') - impedimenta AS res_null_auc, + parseDateTimeBestEffortUSOrZero(dt_curr) - impedimenta AS res_zero, + parseDateTimeBestEffortUSOrZero(dt_curr, 'US/Samoa') - impedimenta AS res_zero_sam, + parseDateTimeBestEffortUSOrZero(dt_curr, 'Pacific/Auckland') - impedimenta AS res_zero_auc +FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around) +FORMAT PrettySpaceNoEscapes; + +SELECT 'parseDateTime64BestEffort'; + +WITH + now() AS ts_now, + '2023-06-07 04:55:30' AS ref_point, + dateDiff('second', toDateTime(ref_point), ts_now) AS impedimenta, + formatDateTime(ts_around, '%b %e %T') AS dt_curr +SELECT + formatDateTime(ts_around - impedimenta, '%b %e %H:%i:%s') AS dt_ref, + parseDateTime64BestEffort(dt_curr) - impedimenta AS res, + parseDateTime64BestEffort(dt_curr, 3, 'US/Samoa') - impedimenta AS res_sam, + parseDateTime64BestEffort(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_auc, + parseDateTime64BestEffortOrNull(dt_curr) - impedimenta AS res_null, + parseDateTime64BestEffortOrNull(dt_curr, 3, 'US/Samoa') - impedimenta AS res_null_sam, + parseDateTime64BestEffortOrNull(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_null_auc, + 
parseDateTime64BestEffortOrZero(dt_curr) - impedimenta AS res_zero, + parseDateTime64BestEffortOrZero(dt_curr, 3, 'US/Samoa') - impedimenta AS res_zero_sam, + parseDateTime64BestEffortOrZero(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_zero_auc +FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around) +FORMAT PrettySpaceNoEscapes; + +SELECT 'parseDateTime64BestEffortUS'; + +WITH + now() AS ts_now, + '2023-06-07 04:55:30' AS ref_point, + dateDiff('second', toDateTime(ref_point), ts_now) AS impedimenta, + formatDateTime(ts_around, '%b %e %T') AS dt_curr +SELECT + formatDateTime(ts_around - impedimenta, '%b %e %H:%i:%s') AS dt_ref, + parseDateTime64BestEffortUS(dt_curr) - impedimenta AS res, + parseDateTime64BestEffortUS(dt_curr, 3, 'US/Samoa') - impedimenta AS res_sam, + parseDateTime64BestEffortUS(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_auc, + parseDateTime64BestEffortUSOrNull(dt_curr) - impedimenta AS res_null, + parseDateTime64BestEffortUSOrNull(dt_curr, 3, 'US/Samoa') - impedimenta AS res_null_sam, + parseDateTime64BestEffortUSOrNull(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_null_auc, + parseDateTime64BestEffortUSOrZero(dt_curr) - impedimenta AS res_zero, + parseDateTime64BestEffortUSOrZero(dt_curr, 3, 'US/Samoa') - impedimenta AS res_zero_sam, + parseDateTime64BestEffortUSOrZero(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_zero_auc +FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around) +FORMAT PrettySpaceNoEscapes; From b567dc2a1dce59d3ced34463601e63326d56aa50 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Wed, 7 Jun 2023 17:48:06 +0200 Subject: [PATCH 238/722] fix test --- .../0_stateless/01763_filter_push_down_bugs.reference | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference index db9cd7a2d16..c8045dd26f5 100644 --- a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference +++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference @@ -9,11 +9,13 @@ String1_0 String2_0 String3_0 String4_0 1 Expression ((Projection + Before ORDER BY)) Filter (WHERE) Join (JOIN FillRightFirst) - Expression (Before JOIN) + Filter (( + Before JOIN)) ReadFromMergeTree (default.t1) Indexes: PrimaryKey - Condition: true + Keys: + id + Condition: (id in [101, 101]) Parts: 1/1 Granules: 1/1 Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) @@ -30,7 +32,9 @@ Expression ((Project names + Projection)) ReadFromMergeTree (default.t1) Indexes: PrimaryKey - Condition: true + Keys: + id + Condition: (id in [101, 101]) Parts: 1/1 Granules: 1/1 Expression ((Change column names to column identifiers + (Project names + (Projection + Change column names to column identifiers)))) From 2df0e0c66962ce91e47eb4c98bae4fabe1ce9cc1 Mon Sep 17 00:00:00 2001 From: serxa Date: Wed, 7 Jun 2023 16:25:52 +0000 Subject: [PATCH 239/722] Unify priorities for connections --- src/Client/ConnectionPool.cpp | 4 ++-- src/Client/ConnectionPool.h | 13 +++++++------ src/Client/ConnectionPoolWithFailover.cpp | 2 +- src/Client/ConnectionPoolWithFailover.h | 2 +- src/Common/GetPriorityForLoadBalancing.cpp | 15 ++++++++------- src/Common/GetPriorityForLoadBalancing.h | 2 +- src/Common/PoolWithFailoverBase.h | 9 +++++---- src/Common/ZooKeeper/ZooKeeper.cpp | 2 +- src/Common/ZooKeeper/ZooKeeper.h | 2 +- src/Databases/DatabaseReplicated.cpp | 2 +- src/Functions/hasColumnInTable.cpp | 2 +- 
.../tests/gtest_resource_manager_static.cpp | 4 ++-- src/Interpreters/Cluster.cpp | 2 +- src/Interpreters/Cluster.h | 5 +++-- src/Interpreters/ClusterDiscovery.cpp | 2 +- src/TableFunctions/TableFunctionRemote.cpp | 2 +- 16 files changed, 37 insertions(+), 33 deletions(-) diff --git a/src/Client/ConnectionPool.cpp b/src/Client/ConnectionPool.cpp index 8433b0833fa..5cabb1465d1 100644 --- a/src/Client/ConnectionPool.cpp +++ b/src/Client/ConnectionPool.cpp @@ -18,7 +18,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get( String client_name, Protocol::Compression compression, Protocol::Secure secure, - Int64 priority) + Priority priority) { Key key{ max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority}; @@ -74,7 +74,7 @@ size_t ConnectionPoolFactory::KeyHash::operator()(const ConnectionPoolFactory::K hash_combine(seed, hash_value(k.client_name)); hash_combine(seed, hash_value(k.compression)); hash_combine(seed, hash_value(k.secure)); - hash_combine(seed, hash_value(k.priority)); + hash_combine(seed, hash_value(k.priority.value)); return seed; } diff --git a/src/Client/ConnectionPool.h b/src/Client/ConnectionPool.h index aacd0a063c7..b6d03daacfb 100644 --- a/src/Client/ConnectionPool.h +++ b/src/Client/ConnectionPool.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -34,7 +35,7 @@ public: const Settings * settings = nullptr, bool force_connected = true) = 0; - virtual Int64 getPriority() const { return 1; } + virtual Priority getPriority() const { return Priority{1}; } }; using ConnectionPoolPtr = std::shared_ptr; @@ -60,7 +61,7 @@ public: const String & client_name_, Protocol::Compression compression_, Protocol::Secure secure_, - Int64 priority_ = 1) + Priority priority_ = Priority{1}) : Base(max_connections_, &Poco::Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")), host(host_), @@ -103,7 +104,7 @@ public: return host + ":" + toString(port); } - Int64 getPriority() const override + Priority getPriority() const override { return priority; } @@ -134,7 +135,7 @@ private: String client_name; Protocol::Compression compression; /// Whether to compress data when interacting with the server. Protocol::Secure secure; /// Whether to encrypt data when interacting with the server. 
- Int64 priority; /// priority from + Priority priority; /// priority from }; /** @@ -157,7 +158,7 @@ public: String client_name; Protocol::Compression compression; Protocol::Secure secure; - Int64 priority; + Priority priority; }; struct KeyHash @@ -180,7 +181,7 @@ public: String client_name, Protocol::Compression compression, Protocol::Secure secure, - Int64 priority); + Priority priority); private: mutable std::mutex mutex; using ConnectionPoolWeakPtr = std::weak_ptr; diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp index 129bc10bc27..feb4c01c374 100644 --- a/src/Client/ConnectionPoolWithFailover.cpp +++ b/src/Client/ConnectionPoolWithFailover.cpp @@ -71,7 +71,7 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts return Base::get(max_ignored_errors, fallback_to_stale_replicas, try_get_entry, get_priority); } -Int64 ConnectionPoolWithFailover::getPriority() const +Priority ConnectionPoolWithFailover::getPriority() const { return (*std::max_element(nested_pools.begin(), nested_pools.end(), [](const auto & a, const auto & b) { diff --git a/src/Client/ConnectionPoolWithFailover.h b/src/Client/ConnectionPoolWithFailover.h index 0273ce41589..75a0dafd977 100644 --- a/src/Client/ConnectionPoolWithFailover.h +++ b/src/Client/ConnectionPoolWithFailover.h @@ -48,7 +48,7 @@ public: const Settings * settings, bool force_connected) override; /// From IConnectionPool - Int64 getPriority() const override; /// From IConnectionPool + Priority getPriority() const override; /// From IConnectionPool /** Allocates up to the specified number of connections to work. * Connections provide access to different replicas of one shard. diff --git a/src/Common/GetPriorityForLoadBalancing.cpp b/src/Common/GetPriorityForLoadBalancing.cpp index 5da60fb1bae..c4d36acc70c 100644 --- a/src/Common/GetPriorityForLoadBalancing.cpp +++ b/src/Common/GetPriorityForLoadBalancing.cpp @@ -1,4 +1,5 @@ #include +#include namespace DB { @@ -8,23 +9,23 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -std::function GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const +std::function GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const { - std::function get_priority; + std::function get_priority; switch (load_balance) { case LoadBalancing::NEAREST_HOSTNAME: if (hostname_differences.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "It's a bug: hostname_differences is not initialized"); - get_priority = [this](size_t i) { return hostname_differences[i]; }; + get_priority = [this](size_t i) { return Priority{static_cast(hostname_differences[i])}; }; break; case LoadBalancing::IN_ORDER: - get_priority = [](size_t i) { return i; }; + get_priority = [](size_t i) { return Priority{static_cast(i)}; }; break; case LoadBalancing::RANDOM: break; case LoadBalancing::FIRST_OR_RANDOM: - get_priority = [offset](size_t i) -> size_t { return i != offset; }; + get_priority = [offset](size_t i) { return i != offset ? Priority{1} : Priority{0}; }; break; case LoadBalancing::ROUND_ROBIN: if (last_used >= pool_size) @@ -38,8 +39,8 @@ std::function GetPriorityForLoadBalancing::getPriorityFunc * */ get_priority = [this, pool_size](size_t i) { - ++i; - return i < last_used ? pool_size - i : i - last_used; + ++i; // To make `i` indexing start with 1 instead of 0 as `last_used` does + return Priority{static_cast(i < last_used ? 
pool_size - i : i - last_used)}; }; break; } diff --git a/src/Common/GetPriorityForLoadBalancing.h b/src/Common/GetPriorityForLoadBalancing.h index e57b02b5e90..8052185ac13 100644 --- a/src/Common/GetPriorityForLoadBalancing.h +++ b/src/Common/GetPriorityForLoadBalancing.h @@ -21,7 +21,7 @@ public: return !(*this == other); } - std::function getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const; + std::function getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const; std::vector hostname_differences; /// Distances from name of this host to the names of hosts of pools. diff --git a/src/Common/PoolWithFailoverBase.h b/src/Common/PoolWithFailoverBase.h index 646e10d6443..c6f44a7701a 100644 --- a/src/Common/PoolWithFailoverBase.h +++ b/src/Common/PoolWithFailoverBase.h @@ -13,6 +13,7 @@ #include #include #include +#include namespace DB @@ -34,7 +35,7 @@ namespace ProfileEvents /// This class provides a pool with fault tolerance. It is used for pooling of connections to replicated DB. /// Initialized by several PoolBase objects. /// When a connection is requested, tries to create or choose an alive connection from one of the nested pools. -/// Pools are tried in the order consistent with lexicographical order of (error count, priority, random number) tuples. +/// Pools are tried in the order consistent with lexicographical order of (error count, slowdown count, config priority, priority, random number) tuples. /// Number of tries for a single pool is limited by max_tries parameter. /// The client can set nested pool priority by passing a GetPriority functor. /// @@ -113,7 +114,7 @@ public: /// The client can provide this functor to affect load balancing - the index of a pool is passed to /// this functor. The pools with lower result value will be tried first. - using GetPriorityFunc = std::function; + using GetPriorityFunc = std::function; /// Returns at least min_entries and at most max_entries connections (at most one connection per nested pool). /// The method will throw if it is unable to get min_entries alive connections or @@ -336,9 +337,9 @@ struct PoolWithFailoverBase::PoolState /// The number of slowdowns that led to changing replica in HedgedRequestsFactory UInt64 slowdown_count = 0; /// Priority from the configuration. - Int64 config_priority = 1; + Priority config_priority{1}; /// Priority from the GetPriorityFunc. 
- Int64 priority = 0; + Priority priority{0}; UInt64 random = 0; void randomize() diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index a587ad6caf4..62807fe2433 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -179,7 +179,7 @@ ZooKeeper::ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std std::vector ZooKeeper::shuffleHosts() const { - std::function get_priority = args.get_priority_load_balancing.getPriorityFunc(args.get_priority_load_balancing.load_balancing, 0, args.hosts.size()); + std::function get_priority = args.get_priority_load_balancing.getPriorityFunc(args.get_priority_load_balancing.load_balancing, 0, args.hosts.size()); std::vector shuffle_hosts; for (size_t i = 0; i < args.hosts.size(); ++i) { diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index 96f9914b597..d48ca0a4ef5 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -49,7 +49,7 @@ constexpr size_t MULTI_BATCH_SIZE = 100; struct ShuffleHost { String host; - Int64 priority = 0; + Priority priority; UInt64 random = 0; void randomize() diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 9bbf5b9565d..583607bda1d 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -252,7 +252,7 @@ ClusterPtr DatabaseReplicated::getClusterImpl() const treat_local_as_remote, treat_local_port_as_remote, cluster_auth_info.cluster_secure_connection, - /*priority=*/ 1, + Priority{1}, TSA_SUPPRESS_WARNING_FOR_READ(database_name), /// FIXME cluster_auth_info.cluster_secret}; diff --git a/src/Functions/hasColumnInTable.cpp b/src/Functions/hasColumnInTable.cpp index 4676b4083b7..66ed515e490 100644 --- a/src/Functions/hasColumnInTable.cpp +++ b/src/Functions/hasColumnInTable.cpp @@ -137,7 +137,7 @@ ColumnPtr FunctionHasColumnInTable::executeImpl(const ColumnsWithTypeAndName & a treat_local_as_remote, treat_local_port_as_remote, /* secure= */ false, - /* priority= */ 1, + /* priority= */ Priority{1}, /* cluster_name= */ "", /* password= */ "" }; diff --git a/src/IO/Resource/tests/gtest_resource_manager_static.cpp b/src/IO/Resource/tests/gtest_resource_manager_static.cpp index 091f6923714..976eac41a49 100644 --- a/src/IO/Resource/tests/gtest_resource_manager_static.cpp +++ b/src/IO/Resource/tests/gtest_resource_manager_static.cpp @@ -44,8 +44,8 @@ TEST(IOResourceStaticResourceManager, Smoke) TEST(IOResourceStaticResourceManager, Prioritization) { - std::optional last_priority; - auto check = [&] (Int64 priority) + std::optional last_priority; + auto check = [&] (Priority priority) { // Lock is not required here because this is called during request execution and we have max_requests = 1 if (last_priority) diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index 8c30dbe230f..edbef77ef02 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -108,7 +108,7 @@ Cluster::Address::Address( password = config.getString(config_prefix + ".password", ""); default_database = config.getString(config_prefix + ".default_database", ""); secure = ConfigHelper::getBool(config, config_prefix + ".secure", false, /* empty_as */true) ? Protocol::Secure::Enable : Protocol::Secure::Disable; - priority = config.getInt(config_prefix + ".priority", 1); + priority = Priority{config.getInt(config_prefix + ".priority", 1)}; const char * port_type = secure == Protocol::Secure::Enable ? 
"tcp_port_secure" : "tcp_port"; auto default_port = config.getInt(port_type, 0); diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h index 4798384f29c..de10a445d01 100644 --- a/src/Interpreters/Cluster.h +++ b/src/Interpreters/Cluster.h @@ -4,6 +4,7 @@ #include #include #include +#include #include @@ -44,7 +45,7 @@ struct ClusterConnectionParameters bool treat_local_as_remote; bool treat_local_port_as_remote; bool secure = false; - Int64 priority = 1; + Priority priority{1}; String cluster_name; String cluster_secret; }; @@ -131,7 +132,7 @@ public: Protocol::Compression compression = Protocol::Compression::Enable; Protocol::Secure secure = Protocol::Secure::Disable; - Int64 priority = 1; + Priority priority{1}; Address() = default; diff --git a/src/Interpreters/ClusterDiscovery.cpp b/src/Interpreters/ClusterDiscovery.cpp index 884e3b87343..553488edf50 100644 --- a/src/Interpreters/ClusterDiscovery.cpp +++ b/src/Interpreters/ClusterDiscovery.cpp @@ -246,7 +246,7 @@ ClusterPtr ClusterDiscovery::makeCluster(const ClusterInfo & cluster_info) /* treat_local_as_remote= */ false, /* treat_local_port_as_remote= */ false, /// should be set only for clickhouse-local, but cluster discovery is not used there /* secure= */ secure, - /* priority= */ 1, + /* priority= */ Priority{1}, /* cluster_name= */ "", /* password= */ ""}; auto cluster = std::make_shared( diff --git a/src/TableFunctions/TableFunctionRemote.cpp b/src/TableFunctions/TableFunctionRemote.cpp index b2f09adf773..4143014a7b3 100644 --- a/src/TableFunctions/TableFunctionRemote.cpp +++ b/src/TableFunctions/TableFunctionRemote.cpp @@ -262,7 +262,7 @@ void TableFunctionRemote::parseArguments(const ASTPtr & ast_function, ContextPtr treat_local_as_remote, treat_local_port_as_remote, secure, - /* priority= */ 1, + /* priority= */ Priority{1}, /* cluster_name= */ "", /* password= */ "" }; From 32372967e9814e629cbad2ce2ff57f82aba86e97 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 7 Jun 2023 16:55:14 +0200 Subject: [PATCH 240/722] fix --- src/Storages/StorageReplicatedMergeTree.cpp | 10 +++------- src/Storages/StorageReplicatedMergeTree.h | 2 +- .../0_stateless/02432_s3_parallel_parts_cleanup.sql | 6 +----- .../0_stateless/02448_clone_replica_lost_part.sql | 9 ++++++--- 4 files changed, 11 insertions(+), 16 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 6edd7531ec1..36bc3476e91 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -6743,14 +6743,12 @@ size_t StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() if (parts.empty()) return total_parts_to_remove; - size_t res = 0; - NOEXCEPT_SCOPE({ res = clearOldPartsAndRemoveFromZKImpl(zookeeper, std::move(parts)); }); - return res; + NOEXCEPT_SCOPE({ clearOldPartsAndRemoveFromZKImpl(zookeeper, std::move(parts)); }); + return total_parts_to_remove; } -size_t StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZKImpl(zkutil::ZooKeeperPtr zookeeper, DataPartsVector && parts) +void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZKImpl(zkutil::ZooKeeperPtr zookeeper, DataPartsVector && parts) { - DataPartsVector parts_to_delete_only_from_filesystem; // Only duplicates DataPartsVector parts_to_delete_completely; // All parts except duplicates DataPartsVector parts_to_retry_deletion; // Parts that should be retried due to network problems @@ -6861,8 +6859,6 @@ size_t 
StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZKImpl(zkutil::ZooK /// Otherwise nobody will try to remove them again (see grabOldParts). delete_parts_from_fs_and_rollback_in_case_of_error(parts_to_remove_from_filesystem, "old"); } - - return total_parts_to_remove; } diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 368d7d1b948..290266ca00c 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -344,7 +344,7 @@ private: /// Delete old parts from disk and from ZooKeeper. Returns the number of removed parts size_t clearOldPartsAndRemoveFromZK(); - size_t clearOldPartsAndRemoveFromZKImpl(zkutil::ZooKeeperPtr zookeeper, DataPartsVector && parts); + void clearOldPartsAndRemoveFromZKImpl(zkutil::ZooKeeperPtr zookeeper, DataPartsVector && parts); template friend class ReplicatedMergeTreeSinkImpl; diff --git a/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql b/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql index 3f8aa545298..948ec9e9e8a 100644 --- a/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql +++ b/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql @@ -38,11 +38,7 @@ select count(), sum(n), sum(m) from rmt; -- New table can assign merges/mutations and can remove old parts create table rmt2 (n int, m int, k String) engine=ReplicatedMergeTree('/test/02432/{database}', '2') order by tuple() settings storage_policy = 's3_cache', allow_remote_fs_zero_copy_replication=1, -<<<<<<< HEAD - max_part_removal_threads=10, concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0, -======= - concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, ->>>>>>> master + concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0, min_bytes_for_wide_part=0, min_rows_for_wide_part=0, max_replicated_merges_in_queue=1, old_parts_lifetime=0; diff --git a/tests/queries/0_stateless/02448_clone_replica_lost_part.sql b/tests/queries/0_stateless/02448_clone_replica_lost_part.sql index 1e99e1869cc..eb4d0f255a7 100644 --- a/tests/queries/0_stateless/02448_clone_replica_lost_part.sql +++ b/tests/queries/0_stateless/02448_clone_replica_lost_part.sql @@ -7,11 +7,11 @@ drop table if exists rmt2; create table rmt1 (n int) engine=ReplicatedMergeTree('/test/02448/{database}/rmt', '1') order by tuple() settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, cleanup_delay_period=0, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4, - merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=500; + merge_selecting_sleep_ms=1000, max_merge_selecting_sleep_ms=2000; create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02448/{database}/rmt', '2') order by tuple() settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, cleanup_delay_period=0, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4, - merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=500; + merge_selecting_sleep_ms=1000, max_merge_selecting_sleep_ms=2000; -- insert part only on one replica system stop replicated sends rmt1; @@ -141,7 +141,10 @@ system sync replica rmt2; -- merge through gap 
optimize table rmt2; -- give it a chance to cleanup log -select sleep(2) format Null; -- increases probability of reproducing the issue + +select sleepEachRow(2) from url('http://localhost:8123/?param_tries={1..10}&query=' || encodeURLComponent( + 'select value from system.zookeeper where path=''//test/02448/' || currentDatabase() || '/rmt/replicas/1/is_lost'' and value=''1''' + ), 'LineAsString', 's String') settings max_threads=1 format Null; -- rmt1 will mimic rmt2, but will not be able to fetch parts for a while system stop replicated sends rmt2; From 6f6d806f927ba1e2c539a92e78906334bee20ce4 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 6 Jun 2023 15:17:06 +0000 Subject: [PATCH 241/722] Upd test test_alter_moving_garbage --- .../configs/config.d/storage_conf.xml | 2 -- .../test_alter_moving_garbage/test.py | 31 +++++++++++++------ 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml b/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml index 659f59a41b2..f6898ed1d7e 100644 --- a/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml +++ b/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml @@ -1,6 +1,4 @@ - - diff --git a/tests/integration/test_alter_moving_garbage/test.py b/tests/integration/test_alter_moving_garbage/test.py index b369c9ad377..4a42b73b8d4 100644 --- a/tests/integration/test_alter_moving_garbage/test.py +++ b/tests/integration/test_alter_moving_garbage/test.py @@ -28,7 +28,7 @@ def cluster(): cluster.shutdown() -def create_table(node, table_name, **additional_settings): +def create_table(node, table_name, additional_settings): settings = { "storage_policy": "two_disks", "old_parts_lifetime": 1, @@ -52,21 +52,32 @@ def create_table(node, table_name, **additional_settings): node.query(create_table_statement) -def test_create_table(cluster): +@pytest.mark.parametrize("allow_remote_fs_zero_copy_replication", [False, True]) +def test_create_table(cluster, allow_remote_fs_zero_copy_replication): node = cluster.instances["node1"] - create_table(node, "test_table") + + additional_settings = {} + table_name = "test_table" + + if allow_remote_fs_zero_copy_replication: + # different names for logs readability + table_name = "test_table_zero_copy" + additional_settings["allow_remote_fs_zero_copy_replication"] = 1 + + create_table(node, table_name, additional_settings) + node.query( - "INSERT INTO test_table SELECT toDate('2021-01-01') + INTERVAL number % 10 DAY, number, toString(sipHash64(number)) FROM numbers(100_000)" + f"INSERT INTO {table_name} SELECT toDate('2021-01-01') + INTERVAL number % 10 DAY, number, toString(sipHash64(number)) FROM numbers(100_000)" ) stop_alter = False def alter(): d = 0 - node.query(f"ALTER TABLE test_table ADD COLUMN col0 String") + node.query(f"ALTER TABLE {table_name} ADD COLUMN col0 String") while not stop_alter: d = d + 1 - node.query(f"DELETE FROM test_table WHERE id < {d}") + node.query(f"DELETE FROM {table_name} WHERE id < {d}") time.sleep(0.1) alter_thread = threading.Thread(target=alter) @@ -76,12 +87,12 @@ def test_create_table(cluster): partition = f"2021-01-{i:02d}" try: node.query( - f"ALTER TABLE test_table MOVE PARTITION '{partition}' TO DISK 's3'", + f"ALTER TABLE {table_name} MOVE PARTITION '{partition}' TO DISK 's3'", ) except QueryRuntimeException as e: - # PART_IS_TEMPORARILY_LOCKED - assert 384 == e.returncode - continue + if "PART_IS_TEMPORARILY_LOCKED" in str(e): 
+ continue + raise e # clear old temporary directories wakes up every 1 second time.sleep(0.5) From 989540e5b1ae4c10605e7609a7906e789ad755a4 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 7 Jun 2023 17:37:32 +0000 Subject: [PATCH 242/722] Upd test_alter_moving_garbage: use replicated engine --- .../configs/config.d/remote_servers.xml | 16 +++ .../test_alter_moving_garbage/test.py | 99 ++++++++++++++----- 2 files changed, 88 insertions(+), 27 deletions(-) create mode 100644 tests/integration/test_alter_moving_garbage/configs/config.d/remote_servers.xml diff --git a/tests/integration/test_alter_moving_garbage/configs/config.d/remote_servers.xml b/tests/integration/test_alter_moving_garbage/configs/config.d/remote_servers.xml new file mode 100644 index 00000000000..45713eaed59 --- /dev/null +++ b/tests/integration/test_alter_moving_garbage/configs/config.d/remote_servers.xml @@ -0,0 +1,16 @@ + + + + + + node1 + 9000 + + + node2 + 9000 + + + + + diff --git a/tests/integration/test_alter_moving_garbage/test.py b/tests/integration/test_alter_moving_garbage/test.py index 4a42b73b8d4..dc3f6c35ead 100644 --- a/tests/integration/test_alter_moving_garbage/test.py +++ b/tests/integration/test_alter_moving_garbage/test.py @@ -3,22 +3,29 @@ import time import pytest import threading +import random from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster +# two replicas in remote_servers.xml +REPLICA_COUNT = 2 @pytest.fixture(scope="module") def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance( - "node1", - main_configs=[ - "configs/config.d/storage_conf.xml", - ], - with_minio=True, - ) + for i in range(1, REPLICA_COUNT + 1): + cluster.add_instance( + f"node{i}", + main_configs=[ + "configs/config.d/storage_conf.xml", + "configs/config.d/remote_servers.xml", + ], + with_minio=True, + with_zookeeper=True, + ) + logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -28,7 +35,7 @@ def cluster(): cluster.shutdown() -def create_table(node, table_name, additional_settings): +def create_table(node, table_name, replicated, additional_settings): settings = { "storage_policy": "two_disks", "old_parts_lifetime": 1, @@ -38,55 +45,91 @@ def create_table(node, table_name, additional_settings): } settings.update(additional_settings) + table_engine = ( + f"ReplicatedMergeTree('/clickhouse/tables/0/{table_name}', '{node.name}')" + if replicated + else "MergeTree()" + ) + create_table_statement = f""" CREATE TABLE {table_name} ( dt Date, id Int64, data String, INDEX min_max (id) TYPE minmax GRANULARITY 3 - ) ENGINE=MergeTree() + ) ENGINE = {table_engine} PARTITION BY dt ORDER BY (dt, id) SETTINGS {",".join((k+"="+repr(v) for k, v in settings.items()))}""" - node.query(create_table_statement) + if replicated: + node.query_with_retry(create_table_statement) + else: + node.query(create_table_statement) -@pytest.mark.parametrize("allow_remote_fs_zero_copy_replication", [False, True]) -def test_create_table(cluster, allow_remote_fs_zero_copy_replication): - node = cluster.instances["node1"] +@pytest.mark.parametrize( + "allow_remote_fs_zero_copy_replication,replicated_engine", + [(False, False), (False, True), (True, True)], +) +def test_create_table( + cluster, allow_remote_fs_zero_copy_replication, replicated_engine +): + if replicated_engine: + nodes = list(cluster.instances.values()) + else: + nodes = [cluster.instances["node1"]] additional_settings = {} - table_name = "test_table" + # different names for logs readability 
+ table_name = "test_table" if allow_remote_fs_zero_copy_replication: - # different names for logs readability table_name = "test_table_zero_copy" additional_settings["allow_remote_fs_zero_copy_replication"] = 1 + if replicated_engine: + table_name = table_name + "_replicated" - create_table(node, table_name, additional_settings) + for node in nodes: + create_table(node, table_name, replicated_engine, additional_settings) - node.query( - f"INSERT INTO {table_name} SELECT toDate('2021-01-01') + INTERVAL number % 10 DAY, number, toString(sipHash64(number)) FROM numbers(100_000)" - ) + for i in range(1, 11): + partition = f"2021-01-{i:02d}" + random.choice(nodes).query( + f"INSERT INTO {table_name} SELECT toDate('{partition}'), number as id, toString(sipHash64(number, {i})) FROM numbers(10_000)" + ) + + def check_count(): + if replicated_engine: + return random.choice(nodes).query_with_retry( + f"SELECT countDistinct(dt, data) FROM clusterAllReplicas(test_cluster, default.{table_name}) WHERE id % 100 = 0" + ) + else: + return random.choice(nodes).query( + f"SELECT countDistinct(dt, data) FROM {table_name} WHERE id % 100 = 0" + ) + + assert check_count() == "1000\n" stop_alter = False def alter(): - d = 0 - node.query(f"ALTER TABLE {table_name} ADD COLUMN col0 String") - while not stop_alter: - d = d + 1 - node.query(f"DELETE FROM {table_name} WHERE id < {d}") + random.choice(nodes).query(f"ALTER TABLE {table_name} ADD COLUMN col0 String") + for d in range(1, 100): + if stop_alter: + break + # I managed to reproduce issue with DELETE, but it can be any other lightweight mutation + # Do not delete rows with id % 100 = 0, because they are used in check_count to check that data is not corrupted + random.choice(nodes).query(f"DELETE FROM {table_name} WHERE id % 100 = {d}") time.sleep(0.1) alter_thread = threading.Thread(target=alter) alter_thread.start() - for i in range(1, 10): + for i in range(1, 11): partition = f"2021-01-{i:02d}" try: - node.query( + random.choice(nodes).query( f"ALTER TABLE {table_name} MOVE PARTITION '{partition}' TO DISK 's3'", ) except QueryRuntimeException as e: @@ -94,8 +137,10 @@ def test_create_table(cluster, allow_remote_fs_zero_copy_replication): continue raise e - # clear old temporary directories wakes up every 1 second + # Function to clear old temporary directories wakes up every 1 second, sleep to make sure it is called time.sleep(0.5) stop_alter = True alter_thread.join() + + assert check_count() == "1000\n" From d3eb0805d44564fbbf093c5c1d39fbad532b0f59 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Wed, 7 Jun 2023 23:28:19 +0000 Subject: [PATCH 243/722] clang-tidy run + changes in docs --- docs/en/interfaces/cli.md | 12 ++++++------ docs/ru/interfaces/cli.md | 10 +++++----- src/Client/ConnectionString.cpp | 23 ++++++++++++----------- 3 files changed, 23 insertions(+), 22 deletions(-) diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index 94f1fbf9e41..ba54694faa9 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -198,9 +198,9 @@ If host is not specified, the default host will be used (localhost). If port is not specified, the default port will be used (9000). If database is not specified, the default database will be used. -User, password, and database can be specified in the connection string either in --user, --password, --database command line options. +User, password, and database can be specified in the connection string either in `--user`, `--password`, `--database` command line options. 
-The connection string must be specified in the first argument of clickhouse-client. The connection string can be combined with other [command-line-options](#command-line-options) except **--host(h)** and **--port**. +The connection string must be specified in the first argument of clickhouse-client. The connection string can be combined with other [command-line-options](#command-line-options) except `--host(h)` and `--port`. ### Multiple hosts {#connection_string_multiple_hosts} @@ -226,25 +226,25 @@ Connect to localhost using port 9000 in interactive, multiline mode. clickhouse-client "clickhouse://localhost:9000" -m ``` -Connect to localhost using port 9000 in interactive mode with the user specified in --user option. +Connect to localhost using port 9000 in interactive mode with the user specified in `--user` option. ``` bash clickhouse-client "clickhouse://localhost:9000" --user default ``` -Connect to localhost using port 9000 in interactive mode with database 'my_database' specified in command line option +Connect to localhost using port 9000 in interactive mode to `my_database` database specified in command line option ``` bash clickhouse-client "clickhouse://localhost:9000" --database my_database ``` -Connect to localhost using port 9000 in interactive mode with the database specified in the connection string. +Connect to localhost using port 9000 in interactive mode to `my_database` database specified in the connection string. ``` bash clickhouse-client "clickhouse://localhost:9000/my_database" ``` -Connect to localhost using port 9000 in interactive mode with a database specified in the connection string and a secure connection using shorthanded 's' URI parameter. +Connect to localhost using port 9000 in interactive mode to `my_database` database specified in the connection string and a secure connection using shorthanded 's' URI parameter. ```bash clickhouse-client "clickhouse://localhost/my_database?s" diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index 30cd9757ebb..5f119ad9544 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -151,9 +151,9 @@ clickhouse://[2001:db8::1234] Если port не указан, будет использоваться порт по умолчанию (9000). Если база данных не указана, будет использоваться база данных по умолчанию (default). -Пользователь, пароль и база данных могут быть указаны в строке подключения либо в опциях командной строки --user, --password, --database. +Пользователь, пароль и база данных могут быть указаны в строке подключения либо в опциях командной строки `--user`, `--password`, `--database`. -Строка подключения должна быть указана в первом аргументе clickhouse-client. Строка подключения может комбинироваться с другими [параметрами командной строки] (#command-line-options) кроме **--host(h)** и **--port**. +Строка подключения должна быть указана в первом аргументе clickhouse-client. Строка подключения может комбинироваться с другими [параметрами командной строки] (#command-line-options) кроме `--host (h)` и `--port`. ### Несколько хостов {#connection_string_multiple_hosts} @@ -185,19 +185,19 @@ clickhouse-client "clickhouse://localhost:9000" -m clickhouse-client "clickhouse://localhost:9000" --user default ``` -Подключиться к localhost, используя порт 9000 в интерактивном режиме с базой данных 'my_database', указанной в опции командной строки. +Подключиться к localhost, используя порт 9000 в интерактивном режиме с базой данных `my_database`, указанной в опции командной строки. 
``` bash clickhouse-client "clickhouse://localhost:9000" --database my_database ``` -Подключиться к localhost через порт 9000 в интерактивном режиме с базой данных my_database, указанной в строке подключения. +Подключиться к localhost через порт 9000 в интерактивном режиме с базой данных `my_database`, указанной в строке подключения. ``` bash clickhouse-client "clickhouse://localhost:9000/my_database" ``` -Подключиться к localhost через порт 9000 в интерактивном режиме с базой данных, указанной в строке подключения, и безопасным соединением с использованием сокращенного параметра URI 's'. +Подключиться к localhost через порт 9000 в интерактивном режиме с базой данных `my_database`, указанной в строке подключения, и безопасным соединением, используя короткий вариант команды URI 's'. ``` bash clickhouse-client "clickhouse://localhost/my_database?s" diff --git a/src/Client/ConnectionString.cpp b/src/Client/ConnectionString.cpp index aeb1c1dca02..95fec5b52ee 100644 --- a/src/Client/ConnectionString.cpp +++ b/src/Client/ConnectionString.cpp @@ -128,20 +128,21 @@ bool tryParseConnectionString( else hosts_end_pos = hosts_or_user_info_end_pos; - auto hosts_end = hosts_end_pos != std::string_view::npos ? connection_string.begin() + hosts_end_pos - : connection_string.end(); + const auto * hosts_end = hosts_end_pos != std::string_view::npos ? connection_string.begin() + hosts_end_pos + : connection_string.end(); try { - // Poco::URI doesn't support several hosts in URI. - // Split string clickhouse:[user_info]host1:port1, ... , hostN:portN[database]?[query_parameters] - // into multiple string for each host: - // clickhouse:[user_info]host1:port1[database]?[query_parameters] - // ... - // clickhouse:[user_info]hostN:portN[database]?[query_parameters] + /** Poco::URI doesn't support several hosts in URI. + * Split string clickhouse:[user_info]host1:port1, ... , hostN:portN[database]?[query_parameters] + * into multiple string for each host: + * clickhouse:[user_info]host1:port1[database]?[query_parameters] + * ... + * clickhouse:[user_info]hostN:portN[database]?[query_parameters] + */ Poco::URI uri; - auto last_host_begin = connection_string.begin() + offset; - for (auto it = last_host_begin; it != hosts_end; ++it) + const auto * last_host_begin = connection_string.begin() + offset; + for (const auto * it = last_host_begin; it != hosts_end; ++it) { if (*it == ',') { @@ -198,7 +199,7 @@ bool tryParseConnectionString( } const auto & database_name = uri.getPath(); - size_t start_symbol = database_name.size() > 0u && database_name[0] == '/' ? 1u : 0u; + size_t start_symbol = !database_name.empty() && database_name[0] == '/' ? 
1u : 0u; if (database_name.size() > start_symbol) { common_arguments.push_back("--database"); From cf24d70bfd54dd45606ad1c53a136495c317bb9f Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Thu, 8 Jun 2023 02:20:50 +0000 Subject: [PATCH 244/722] minor documentation changes --- docs/en/interfaces/cli.md | 18 +++++++++--------- docs/ru/interfaces/cli.md | 10 +++++----- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index ba54694faa9..c36887672c7 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -166,13 +166,13 @@ The connection string for clickhouse-client is presented in URI format: clickhouse://[user_info@][hosts_and_ports][/dbname][?query_parameters] ``` -where user_info is: ```user[:password]``` -and hosts_and_ports is a list of values: ```[host][:port],[host][:port]``` Port is not mandatory. -and query_parameters is a list of parameter[=value]: ```param_name[=value]¶m_name[=value]...``` value may not be required for some of parameters. Parameter names are case sensitive. +where user_info is: `user[:password]` +and hosts_and_ports is a list of values: `[host][:port],[host][:port]` Port is not mandatory. +and query_parameters is a list of parameter[=value]: `param_name[=value]¶m_name[=value]...` value may not be required for some of the parameters. Parameter names are case sensitive. Allowed query_parameters keys: -- **secure** or shorthanded **s** - no value. If specified, client will connect to the server over a secure connection (TLS). See **secure** in [command-line-options](#command-line-options) +- `secure` or shorthanded `s` - no value. If specified, client will connect to the server over a secure connection (TLS). See `secure` in [command-line-options](#command-line-options) These examples illustrate valid connection strings for clickhouse-client: @@ -210,11 +210,11 @@ If more than one host is supplied, or if a single host name is translated to mor ### Percent encoding {#connection_string_uri_percent_encoding} -Hosts, user name, password, database and query parameters should be [Percent-Encoded](https://en.wikipedia.org/wiki/URL_encoding) if values contain URI invalid characters. +Hosts, user name, password, database, and query parameters should be [Percent-Encoded](https://en.wikipedia.org/wiki/URL_encoding) if values contain invalid URI characters. ### Examples {#connection_string_examples} -Connect to localhost using port 9000 and executes the query "SELECT 1". +Connect to localhost using port 9000 and execute the query "SELECT 1". ``` bash clickhouse-client "clickhouse://localhost:9000" --query "SELECT 1" @@ -232,7 +232,7 @@ Connect to localhost using port 9000 in interactive mode with the user specified clickhouse-client "clickhouse://localhost:9000" --user default ``` -Connect to localhost using port 9000 in interactive mode to `my_database` database specified in command line option +Connect to localhost using port 9000 in interactive mode to `my_database` database specified in the command line option. ``` bash clickhouse-client "clickhouse://localhost:9000" --database my_database @@ -250,7 +250,7 @@ Connect to localhost using port 9000 in interactive mode to `my_database` databa clickhouse-client "clickhouse://localhost/my_database?s" ``` -Connect to default host using the default port, default user, and default database. +Connect to default host using default port, default user, and default database. 
``` bash clickhouse-client "clickhouse:" @@ -262,7 +262,7 @@ Connect to the default host using the default port, using user user_name and no clickhouse-client "clickhouse://user_name@" ``` -Connect to localhost using email user name. Symbol '@' is percent encoded to '%40'. +Connect to localhost using email as the user name. `@` symbol is percent encoded to `%40`. ``` bash clickhouse-client "clickhouse://some_user%40some_mail.com@localhost:9000" diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index 5f119ad9544..801a72e48ec 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -118,14 +118,14 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe clickhouse://[user_info@][hosts_and_ports][/dbname][?query_parameters] ``` -где user_info - это: ```user[:password]``` -hosts_and_ports - это список значений: ```[host][:port],[host][:port]```. Port может быть не задан. -query_parameters - это список пар ключ[=значение]: ```param_name[=value]¶m_name[=value]...```. Значение может быть пустым. +где user_info - это: `user[:password]` +hosts_and_ports - это список значений: `[host][:port],[host][:port]`. Port может быть не задан. +query_parameters - это список пар ключ[=значение]: `param_name[=value]¶m_name[=value]...`. Значение может быть пустым. Имена параметров чувствительны к регистру. Допустимые ключи query_parameters: -- **secure** или сокращенно **s** - без значение. Если параметр указан, то соединение с сервером будет осуществляться по защищенному каналу (TLS). См. **secure** в [command-line-options](#command-line-options). +- `secure` или сокращенно `s` - без значение. Если параметр указан, то соединение с сервером будет осуществляться по защищенному каналу (TLS). См. `secure` в [command-line-options](#command-line-options). Эти примеры иллюстрируют допустимые строки подключения для clickhouse-client: @@ -215,7 +215,7 @@ clickhouse-client "clickhouse:" clickhouse-client "clickhouse://user_name@" ``` -Подключиться к localhost, используя электронную почту, как имя пользователя. Символ '@' закодирован как '%40'. +Подключиться к localhost, используя электронную почту, как имя пользователя. Символ `@` закодирован как `%40`. 
``` bash clickhouse-client "clickhouse://some_user%40some_mail.com@localhost:9000" From 7263769d20a30747f7a80a45a6def3abf41cccfa Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 8 Jun 2023 06:12:54 +0000 Subject: [PATCH 245/722] Add constexpr / fix date check --- src/Functions/DateTimeTransforms.h | 8 ++------ .../0_stateless/01746_convert_type_with_default.reference | 1 - .../0_stateless/01746_convert_type_with_default.sql | 1 - 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 823272e0324..09b0d71daf8 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -1449,18 +1449,14 @@ struct Transformer { bool check_range_result = true; - if constexpr (std::is_same_v) - { - check_range_result = vec_from[i] >= 0 && vec_from[i] <= DATE_LUT_MAX_DAY_NUM; - } - else if constexpr (std::is_same_v) + if constexpr (std::is_same_v || std::is_same_v) { check_range_result = vec_from[i] >= 0 && vec_from[i] <= 0xFFFFFFFFL; } if (!check_range_result) { - if (std::is_same_v) + if constexpr (std::is_same_v) { vec_to[i] = 0; if (vec_null_map_to) diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.reference b/tests/queries/0_stateless/01746_convert_type_with_default.reference index 85bf2064fdc..959ee29b5e7 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.reference +++ b/tests/queries/0_stateless/01746_convert_type_with_default.reference @@ -45,7 +45,6 @@ 2023-05-30 14:38:20 2023-05-30 14:38:20 2023-05-30 14:38:20 -2023-05-30 14:38:20 1970-01-01 00:00:19 1970-01-01 00:00:19 1970-01-01 00:00:19 diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index 1065eefa94d..099652a8a39 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -56,7 +56,6 @@ select toDateOrDefault(cast(19 as UInt256)); select toDateOrDefault(19507, '2000-01-01'::Date); select toDateOrDefault(-1, '2023-05-30'::Date); -select toDateTimeOrDefault('2023-05-30 14:38:20'); select toDateTimeOrDefault('2023-05-30 14:38:20', 'UTC'); select toDateTimeOrDefault('1xxx', 'UTC', '2023-05-30 14:38:20'::DateTime('UTC')); select toDateTimeOrDefault(1685457500, 'UTC'); From f5816c27aa3c675c3cd02ce292675d2214e5e56f Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 8 Jun 2023 06:20:14 +0000 Subject: [PATCH 246/722] Use hex value in tests --- tests/queries/0_stateless/01601_accurate_cast.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01601_accurate_cast.sql b/tests/queries/0_stateless/01601_accurate_cast.sql index f7f4d588ccc..5555129f0ad 100644 --- a/tests/queries/0_stateless/01601_accurate_cast.sql +++ b/tests/queries/0_stateless/01601_accurate_cast.sql @@ -24,13 +24,13 @@ SELECT accurateCast('123', 'FixedString(2)'); -- { serverError 131 } SELECT accurateCast('12', 'FixedString(2)'); SELECT accurateCast(-1, 'DateTime'); -- { serverError 70 } -SELECT accurateCast(5000000000, 'DateTime'); -- { serverError 70 } +SELECT accurateCast(0xFFFFFFFF + 1, 'DateTime'); -- { serverError 70 } SELECT accurateCast('1xxx', 'DateTime'); -- { serverError 41 } SELECT accurateCast('2023-05-30 14:38:20', 'DateTime'); SELECT toString(accurateCast(19, 'DateTime'), 'UTC'); SELECT accurateCast(-1, 'Date'); -- { serverError 70 } -SELECT accurateCast(999999, 'Date'); -- { serverError 
70 } +SELECT accurateCast(0xFFFFFFFF + 1, 'Date'); -- { serverError 70 } SELECT accurateCast('1xxx', 'Date'); -- { serverError 38 } SELECT accurateCast('2023-05-30', 'Date'); SELECT accurateCast(19, 'Date'); From 17a560cca7cd5bb71bf673fafea76695855b3c6a Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 8 Jun 2023 09:11:02 +0000 Subject: [PATCH 247/722] Add datetime test --- .../0_stateless/01746_convert_type_with_default.reference | 2 ++ tests/queries/0_stateless/01746_convert_type_with_default.sql | 3 +++ 2 files changed, 5 insertions(+) diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.reference b/tests/queries/0_stateless/01746_convert_type_with_default.reference index 959ee29b5e7..541580d67f5 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.reference +++ b/tests/queries/0_stateless/01746_convert_type_with_default.reference @@ -39,6 +39,8 @@ 1970-01-20 1970-01-20 1970-01-20 +2149-06-06 +1970-01-01 2023-05-30 2023-05-30 2023-05-30 14:38:20 diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index 099652a8a39..2620780cfb9 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -53,6 +53,9 @@ select toDateOrDefault(cast(19 as UInt128)); select toDateOrDefault(cast(19 as Int256)); select toDateOrDefault(cast(19 as UInt256)); +select toDateOrDefault(65535); +select toDateOrDefault(65536); + select toDateOrDefault(19507, '2000-01-01'::Date); select toDateOrDefault(-1, '2023-05-30'::Date); From 44c68ffdab34642192f16fb1f9ecb9bf96bdd73b Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 8 Jun 2023 10:17:02 +0000 Subject: [PATCH 248/722] Add config param allow_remove_stale_moving_parts --- src/Storages/MergeTree/MergeTreeData.cpp | 15 +++++++++++++-- src/Storages/MergeTree/MergeTreeData.h | 3 +++ src/Storages/MergeTree/MergeTreePartsMover.cpp | 7 ++++--- .../configs/config.d/storage_conf.xml | 2 ++ .../configs/remote_servers.xml | 1 + .../test_encrypted_disk/configs/storage.xml | 1 + .../configs/config.xml | 1 + .../configs/config.d/storage_conf.xml | 1 + .../test_merge_tree_s3/configs/config.xml | 1 + .../configs/config.d/storage_configuration.xml | 1 + .../configs/config.d/storage_configuration.xml | 1 + .../configs/config.d/storage_configuration.xml | 1 + .../configs/config.d/storage_conf.xml | 1 + .../configs/config.d/s3.xml | 1 + .../configs/config.d/storage_configuration.xml | 1 + .../test_zero_copy_fetch/configs/storage_conf.xml | 1 + 16 files changed, 34 insertions(+), 5 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 7fe3efaf6d5..9ce4e55e341 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -1998,14 +1999,19 @@ static bool isOldPartDirectory(const DiskPtr & disk, const String & directory_pa return true; } + size_t MergeTreeData::clearOldTemporaryDirectories(size_t custom_directories_lifetime_seconds, const NameSet & valid_prefixes) { size_t cleared_count = 0; cleared_count += clearOldTemporaryDirectories(relative_data_path, custom_directories_lifetime_seconds, valid_prefixes); - /// Clear _all_ parts from the `moving` directory - cleared_count += clearOldTemporaryDirectories(fs::path(relative_data_path) / "moving", custom_directories_lifetime_seconds, {""}); + if 
(allowRemoveStaleMovingParts()) + { + /// Clear _all_ parts from the `moving` directory + cleared_count += clearOldTemporaryDirectories(fs::path(relative_data_path) / "moving", custom_directories_lifetime_seconds, {""}); + } + return cleared_count; } @@ -8412,6 +8418,11 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::createEmptyPart( return new_data_part; } +bool MergeTreeData::allowRemoveStaleMovingParts() const +{ + return ConfigHelper::getBool(getContext()->getConfigRef(), "allow_remove_stale_moving_parts"); +} + CurrentlySubmergingEmergingTagger::~CurrentlySubmergingEmergingTagger() { std::lock_guard lock(storage.currently_submerging_emerging_mutex); diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 444bd8f47ac..8755ffebd7e 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -1060,6 +1060,9 @@ public: void waitForOutdatedPartsToBeLoaded() const; bool canUsePolymorphicParts() const; + /// TODO: make enabled by default in the next release if no problems found. + bool allowRemoveStaleMovingParts() const; + protected: friend class IMergeTreeDataPart; friend class MergeTreeDataMergerMutator; diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index 656167de986..8fa4ac6c78a 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -250,7 +250,7 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me cloned_part.part = std::move(builder).withPartFormatFromDisk().build(); LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part.part->getDataPartStorage().getFullPath()); - cloned_part.part->is_temp = true; + cloned_part.part->is_temp = data->allowRemoveStaleMovingParts(); cloned_part.part->loadColumnsChecksumsIndexes(true, true); cloned_part.part->loadVersionMetadata(); cloned_part.part->modification_time = cloned_part.part->getDataPartStorage().getLastModified().epochTime(); @@ -270,10 +270,11 @@ void MergeTreePartsMover::swapClonedPart(TemporaryClonedPart & cloned_part) cons { LOG_INFO(log, "Failed to swap {}. Active part doesn't exist (containing part {}). " - "Possible it was merged or mutated. Will remove copy on path '{}'", + "Possible it was merged or mutated. Part on path '{}' {}", cloned_part.part->name, active_part ? active_part->name : "doesn't exist", - cloned_part.part->getDataPartStorage().getFullPath()); + cloned_part.part->getDataPartStorage().getFullPath(), + data->allowRemoveStaleMovingParts() ? 
"will be removed" : "will remain intact (set in config.xml, exercise caution when using)"); return; } diff --git a/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml b/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml index f6898ed1d7e..1450a459257 100644 --- a/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml +++ b/tests/integration/test_alter_moving_garbage/configs/config.d/storage_conf.xml @@ -21,4 +21,6 @@ + + true diff --git a/tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml b/tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml index 3d4ccd584b1..f22b7dc4032 100644 --- a/tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml +++ b/tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml @@ -16,4 +16,5 @@ + true diff --git a/tests/integration/test_encrypted_disk/configs/storage.xml b/tests/integration/test_encrypted_disk/configs/storage.xml index 1e48c80d50f..2b84e0d6daa 100644 --- a/tests/integration/test_encrypted_disk/configs/storage.xml +++ b/tests/integration/test_encrypted_disk/configs/storage.xml @@ -105,4 +105,5 @@ + true diff --git a/tests/integration/test_merge_tree_azure_blob_storage/configs/config.xml b/tests/integration/test_merge_tree_azure_blob_storage/configs/config.xml index feb537ebbce..a6e0d26f695 100644 --- a/tests/integration/test_merge_tree_azure_blob_storage/configs/config.xml +++ b/tests/integration/test_merge_tree_azure_blob_storage/configs/config.xml @@ -15,4 +15,5 @@ 500 ./clickhouse/ users.xml + true diff --git a/tests/integration/test_merge_tree_hdfs/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_hdfs/configs/config.d/storage_conf.xml index 890c396ed95..858d77e9ea0 100644 --- a/tests/integration/test_merge_tree_hdfs/configs/config.d/storage_conf.xml +++ b/tests/integration/test_merge_tree_hdfs/configs/config.d/storage_conf.xml @@ -29,4 +29,5 @@ 0 + true diff --git a/tests/integration/test_merge_tree_s3/configs/config.xml b/tests/integration/test_merge_tree_s3/configs/config.xml index 314f23f5788..a25da96215e 100644 --- a/tests/integration/test_merge_tree_s3/configs/config.xml +++ b/tests/integration/test_merge_tree_s3/configs/config.xml @@ -8,4 +8,5 @@ true + true diff --git a/tests/integration/test_move_partition_to_disk_on_cluster/configs/config.d/storage_configuration.xml b/tests/integration/test_move_partition_to_disk_on_cluster/configs/config.d/storage_configuration.xml index 3289186c175..cd2f0867c61 100644 --- a/tests/integration/test_move_partition_to_disk_on_cluster/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_move_partition_to_disk_on_cluster/configs/config.d/storage_configuration.xml @@ -24,5 +24,6 @@ +true diff --git a/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml b/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml index e7a87fb77b1..033699f4634 100644 --- a/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml @@ -122,6 +122,7 @@ +true 1 diff --git a/tests/integration/test_rename_column/configs/config.d/storage_configuration.xml b/tests/integration/test_rename_column/configs/config.d/storage_configuration.xml index 12a598c64b5..65cac905e9a 100644 --- 
a/tests/integration/test_rename_column/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_rename_column/configs/config.d/storage_configuration.xml @@ -29,5 +29,6 @@ 0 1 +true diff --git a/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/configs/config.d/storage_conf.xml b/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/configs/config.d/storage_conf.xml index cb444c728c9..bb4aba94e0b 100644 --- a/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/configs/config.d/storage_conf.xml +++ b/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/configs/config.d/storage_conf.xml @@ -89,4 +89,5 @@ test_cluster 1 + true diff --git a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml index f7d9efc2cae..63162c3c19b 100644 --- a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml +++ b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml @@ -93,4 +93,5 @@ test_cluster + true diff --git a/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml b/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml index ae1dc9dd038..09e6fc99411 100644 --- a/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml @@ -107,4 +107,5 @@ +true diff --git a/tests/integration/test_zero_copy_fetch/configs/storage_conf.xml b/tests/integration/test_zero_copy_fetch/configs/storage_conf.xml index b3ce0735a3c..9e9dab6a972 100644 --- a/tests/integration/test_zero_copy_fetch/configs/storage_conf.xml +++ b/tests/integration/test_zero_copy_fetch/configs/storage_conf.xml @@ -38,4 +38,5 @@ true + true From ac638615aeace304b6a196e68561d73a03e86344 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 8 Jun 2023 10:29:01 +0000 Subject: [PATCH 249/722] Upd test_alter_moving_garbage --- .../test_alter_moving_garbage/test.py | 39 ++++++++++++------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/tests/integration/test_alter_moving_garbage/test.py b/tests/integration/test_alter_moving_garbage/test.py index dc3f6c35ead..330df3ac490 100644 --- a/tests/integration/test_alter_moving_garbage/test.py +++ b/tests/integration/test_alter_moving_garbage/test.py @@ -11,6 +11,7 @@ from helpers.cluster import ClickHouseCluster # two replicas in remote_servers.xml REPLICA_COUNT = 2 + @pytest.fixture(scope="module") def cluster(): try: @@ -82,7 +83,7 @@ def test_create_table( additional_settings = {} - # different names for logs readability + # Different names for logs readability table_name = "test_table" if allow_remote_fs_zero_copy_replication: table_name = "test_table_zero_copy" @@ -99,17 +100,7 @@ def test_create_table( f"INSERT INTO {table_name} SELECT toDate('{partition}'), number as id, toString(sipHash64(number, {i})) FROM numbers(10_000)" ) - def check_count(): - if replicated_engine: - return random.choice(nodes).query_with_retry( - f"SELECT countDistinct(dt, data) FROM clusterAllReplicas(test_cluster, default.{table_name}) WHERE id % 100 = 0" - ) - else: - return random.choice(nodes).query( - f"SELECT countDistinct(dt, data) FROM {table_name} WHERE id % 100 = 0" - ) - - assert check_count() == "1000\n" + # Run ALTER in parallel with moving parts stop_alter = False @@ -118,9 +109,14 @@ def test_create_table( for d in range(1, 100): if stop_alter: break - # I managed to reproduce issue with DELETE, but it can be 
any other lightweight mutation - # Do not delete rows with id % 100 = 0, because they are used in check_count to check that data is not corrupted + + # Some lightweight mutation should change moving part before it is swapped, then we will have to cleanup it. + # Messages `Failed to swap {}. Active part doesn't exist` should appear in logs. + # + # I managed to reproduce issue with DELETE (`ALTER TABLE {table_name} ADD/DROP COLUMN` also works on real s3 instead of minio) + # Note: do not delete rows with id % 100 = 0, because they are used in `check_count` to use them in check that data is not corrupted random.choice(nodes).query(f"DELETE FROM {table_name} WHERE id % 100 = {d}") + time.sleep(0.1) alter_thread = threading.Thread(target=alter) @@ -143,4 +139,17 @@ def test_create_table( stop_alter = True alter_thread.join() - assert check_count() == "1000\n" + # Check that no data was lost + + data_digest = None + if replicated_engine: + # We don't know what data was replicated, so we need to check all replicas and take unique values + data_digest = random.choice(nodes).query_with_retry( + f"SELECT countDistinct(dt, data) FROM clusterAllReplicas(test_cluster, default.{table_name}) WHERE id % 100 == 0" + ) + else: + data_digest = random.choice(nodes).query( + f"SELECT countDistinct(dt, data) FROM {table_name} WHERE id % 100 == 0" + ) + + assert data_digest == "1000\n" From e66531affac094567d95d038cfbf8c9ca90027ae Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 8 Jun 2023 12:50:26 +0200 Subject: [PATCH 250/722] Update test --- tests/queries/0_stateless/02782_bitmap_overflow.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02782_bitmap_overflow.sql b/tests/queries/0_stateless/02782_bitmap_overflow.sql index 656a3e7c144..71ddce5c3b9 100644 --- a/tests/queries/0_stateless/02782_bitmap_overflow.sql +++ b/tests/queries/0_stateless/02782_bitmap_overflow.sql @@ -1,2 +1,4 @@ +-- Tags: no-msan, no-asan + select unhex('0181808080908380808000')::AggregateFunction(groupBitmap, UInt64); -- {serverError TOO_LARGE_ARRAY_SIZE} From 44ee530b4d50ed09c39133fb81e827e6c3402a31 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 8 Jun 2023 11:04:01 +0000 Subject: [PATCH 251/722] Kill gdb in clickhouse-test before getting stacktraces --- tests/clickhouse-test | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index d8fad77b95c..a7ec7b15e16 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -340,9 +340,22 @@ def get_transactions_list(args): return f"Cannot get list of transactions: {e}" +def kill_gdb(): + for i in range(5): + code = subprocess.call("kill -TERM $(pidof gdb)", shell=True, stderr=subprocess.STDOUT, timeout=30) + if code != 0: + time.sleep(i) + else: + break + # collect server stacktraces using gdb def get_stacktraces_from_gdb(server_pid): try: + # We could attach gdb to clickhouse-server before running some tests + # to print stacktraces of all crashes even if clickhouse cannot print it for some reason. + # We should kill existing gdb if any before starting new one. 
+ kill_gdb() + cmd = f"gdb -batch -ex 'thread apply all backtrace' -p {server_pid}" return subprocess.check_output(cmd, shell=True).decode("utf-8") except Exception as e: From 54414be47b33f352bea49ee51cc2502f1f41b21d Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 8 Jun 2023 11:14:43 +0000 Subject: [PATCH 252/722] Better --- tests/clickhouse-test | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index a7ec7b15e16..56cf2f0ce0f 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -340,9 +340,14 @@ def get_transactions_list(args): return f"Cannot get list of transactions: {e}" -def kill_gdb(): +def kill_gdb_if_any(): + # Check if we have running gdb. + code = subprocess.call("pidof gdb", shell=True) + if code != 0: + return + for i in range(5): - code = subprocess.call("kill -TERM $(pidof gdb)", shell=True, stderr=subprocess.STDOUT, timeout=30) + code = subprocess.call("kill -TERM $(pidof gdb)", shell=True, timeout=30) if code != 0: time.sleep(i) else: @@ -354,7 +359,7 @@ def get_stacktraces_from_gdb(server_pid): # We could attach gdb to clickhouse-server before running some tests # to print stacktraces of all crashes even if clickhouse cannot print it for some reason. # We should kill existing gdb if any before starting new one. - kill_gdb() + kill_gdb_if_any() cmd = f"gdb -batch -ex 'thread apply all backtrace' -p {server_pid}" return subprocess.check_output(cmd, shell=True).decode("utf-8") From 7079b4c885656fba788400002f012d3ff43e01de Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 8 Jun 2023 12:40:16 +0300 Subject: [PATCH 253/722] ReverseTransform small improvement --- src/Processors/Transforms/ReverseTransform.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Processors/Transforms/ReverseTransform.cpp b/src/Processors/Transforms/ReverseTransform.cpp index 98f2bf54aa5..66b774ab50d 100644 --- a/src/Processors/Transforms/ReverseTransform.cpp +++ b/src/Processors/Transforms/ReverseTransform.cpp @@ -6,11 +6,11 @@ namespace DB void ReverseTransform::transform(Chunk & chunk) { - IColumn::Permutation permutation; - size_t num_rows = chunk.getNumRows(); + IColumn::Permutation permutation(num_rows); + for (size_t i = 0; i < num_rows; ++i) - permutation.emplace_back(num_rows - 1 - i); + permutation[i] = num_rows - 1 - i; auto columns = chunk.detachColumns(); From 1cbcd2f2ef4032cfb718433befbd8742c1e4b9cf Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 8 Jun 2023 13:39:05 +0200 Subject: [PATCH 254/722] Refactor reading from object storages --- src/Disks/IO/ReadBufferFromRemoteFSGather.cpp | 59 ++++++--- src/Disks/IO/ReadBufferFromRemoteFSGather.h | 10 +- src/Disks/IO/ReadBufferFromWebServer.h | 2 - .../IO/ReadIndirectBufferFromRemoteFS.cpp | 118 ------------------ src/Disks/IO/ReadIndirectBufferFromRemoteFS.h | 46 ------- .../AzureBlobStorage/AzureObjectStorage.cpp | 44 ++++--- .../AzureBlobStorage/AzureObjectStorage.h | 1 - .../ObjectStorages/HDFS/HDFSObjectStorage.cpp | 8 +- .../Local/LocalObjectStorage.cpp | 39 +++--- .../ObjectStorages/S3/S3ObjectStorage.cpp | 45 ++++--- .../ObjectStorages/Web/WebObjectStorage.cpp | 43 ++++--- src/IO/SeekAvoidingReadBuffer.cpp | 35 ------ src/IO/SeekAvoidingReadBuffer.h | 26 ---- src/Storages/StorageS3.cpp | 2 +- 14 files changed, 149 insertions(+), 329 deletions(-) delete mode 100644 src/Disks/IO/ReadIndirectBufferFromRemoteFS.cpp delete mode 100644 src/Disks/IO/ReadIndirectBufferFromRemoteFS.h delete mode 100644 
src/IO/SeekAvoidingReadBuffer.cpp delete mode 100644 src/IO/SeekAvoidingReadBuffer.h diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index 04030fe5f8f..eb9c509e459 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -22,13 +22,15 @@ ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather( ReadBufferCreator && read_buffer_creator_, const StoredObjects & blobs_to_read_, const ReadSettings & settings_, - std::shared_ptr cache_log_) - : ReadBufferFromFileBase(0, nullptr, 0) + std::shared_ptr cache_log_, + bool use_external_buffer_) + : ReadBufferFromFileBase(use_external_buffer_ ? 0 : settings_.remote_fs_buffer_size, nullptr, 0) , settings(settings_) , blobs_to_read(blobs_to_read_) , read_buffer_creator(std::move(read_buffer_creator_)) , cache_log(settings.enable_filesystem_cache_log ? cache_log_ : nullptr) , query_id(CurrentThread::isInitialized() && CurrentThread::get().getQueryContext() != nullptr ? CurrentThread::getQueryId() : "") + , use_external_buffer(use_external_buffer_) , log(&Poco::Logger::get("ReadBufferFromRemoteFSGather")) { if (!blobs_to_read.empty()) @@ -36,7 +38,9 @@ ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather( with_cache = settings.remote_fs_cache && settings.enable_filesystem_cache - && (!query_id.empty() || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache || !settings.avoid_readthrough_cache_outside_query_context); + && (!query_id.empty() + || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache + || !settings.avoid_readthrough_cache_outside_query_context); } SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(const StoredObject & object) @@ -235,22 +239,49 @@ void ReadBufferFromRemoteFSGather::reset() off_t ReadBufferFromRemoteFSGather::seek(off_t offset, int whence) { - if (whence != SEEK_SET) - throw Exception(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Only seeking with SEEK_SET is allowed"); + if (offset == getPosition() && whence == SEEK_SET) + return offset; + + if (whence != SEEK_SET) + throw Exception(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Only SEEK_SET mode is allowed."); + + if (use_external_buffer) + { + /// In case use_external_buffer == true, the buffer manages seeks itself. 
+ reset(); + } + else + { + if (!working_buffer.empty() + && static_cast(offset) >= file_offset_of_buffer_end - working_buffer.size() + && static_cast(offset) < file_offset_of_buffer_end) + { + pos = working_buffer.end() - (file_offset_of_buffer_end - offset); + assert(pos >= working_buffer.begin()); + assert(pos < working_buffer.end()); + + return getPosition(); + } + + off_t position = getPosition(); + if (current_buf && offset > position) + { + size_t diff = offset - position; + if (diff < settings.remote_read_min_bytes_for_seek) + { + ignore(diff); + return offset; + } + } + + resetWorkingBuffer(); + reset(); + } - reset(); file_offset_of_buffer_end = offset; return file_offset_of_buffer_end; } -size_t ReadBufferFromRemoteFSGather::getImplementationBufferOffset() const -{ - if (!current_buf) - return file_offset_of_buffer_end; - - return current_buf->getFileOffsetOfBufferEnd(); -} - ReadBufferFromRemoteFSGather::~ReadBufferFromRemoteFSGather() { if (!with_cache) diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h index 39b81d6f9ac..cb98ac6d9f3 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h @@ -27,7 +27,8 @@ public: ReadBufferCreator && read_buffer_creator_, const StoredObjects & blobs_to_read_, const ReadSettings & settings_, - std::shared_ptr cache_log_); + std::shared_ptr cache_log_, + bool use_external_buffer_); ~ReadBufferFromRemoteFSGather() override; @@ -37,16 +38,14 @@ public: void setReadUntilPosition(size_t position) override; + void setReadUntilEnd() override { return setReadUntilPosition(getFileSize()); } + IAsynchronousReader::Result readInto(char * data, size_t size, size_t offset, size_t ignore) override; size_t getFileSize() override { return getTotalSize(blobs_to_read); } size_t getFileOffsetOfBufferEnd() const override { return file_offset_of_buffer_end; } - bool initialized() const { return current_buf != nullptr; } - - size_t getImplementationBufferOffset() const; - off_t seek(off_t offset, int whence) override; off_t getPosition() override { return file_offset_of_buffer_end - available() + bytes_to_ignore; } @@ -71,6 +70,7 @@ private: const ReadBufferCreator read_buffer_creator; const std::shared_ptr cache_log; const String query_id; + const bool use_external_buffer; bool with_cache; size_t read_until_position = 0; diff --git a/src/Disks/IO/ReadBufferFromWebServer.h b/src/Disks/IO/ReadBufferFromWebServer.h index dd9cf63224f..fa899cf2c5e 100644 --- a/src/Disks/IO/ReadBufferFromWebServer.h +++ b/src/Disks/IO/ReadBufferFromWebServer.h @@ -12,8 +12,6 @@ namespace DB /* Read buffer, which reads via http, but is used as ReadBufferFromFileBase. * Used to read files, hosted on a web server with static files. - * - * Usage: ReadIndirectBufferFromRemoteFS -> SeekAvoidingReadBuffer -> ReadBufferFromWebServer -> ReadWriteBufferFromHTTP. 
*/ class ReadBufferFromWebServer : public ReadBufferFromFileBase { diff --git a/src/Disks/IO/ReadIndirectBufferFromRemoteFS.cpp b/src/Disks/IO/ReadIndirectBufferFromRemoteFS.cpp deleted file mode 100644 index a559b47f2cc..00000000000 --- a/src/Disks/IO/ReadIndirectBufferFromRemoteFS.cpp +++ /dev/null @@ -1,118 +0,0 @@ -#include "ReadIndirectBufferFromRemoteFS.h" - -#include - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int CANNOT_SEEK_THROUGH_FILE; -} - - -ReadIndirectBufferFromRemoteFS::ReadIndirectBufferFromRemoteFS( - std::shared_ptr impl_, const ReadSettings & settings) - : ReadBufferFromFileBase(settings.remote_fs_buffer_size, nullptr, 0) - , impl(impl_) - , read_settings(settings) -{ -} - -size_t ReadIndirectBufferFromRemoteFS::getFileSize() -{ - return impl->getFileSize(); -} - -off_t ReadIndirectBufferFromRemoteFS::getPosition() -{ - return impl->file_offset_of_buffer_end - available(); -} - - -String ReadIndirectBufferFromRemoteFS::getFileName() const -{ - return impl->getFileName(); -} - - -void ReadIndirectBufferFromRemoteFS::setReadUntilPosition(size_t position) -{ - impl->setReadUntilPosition(position); -} - - -void ReadIndirectBufferFromRemoteFS::setReadUntilEnd() -{ - impl->setReadUntilPosition(impl->getFileSize()); -} - - -off_t ReadIndirectBufferFromRemoteFS::seek(off_t offset_, int whence) -{ - if (whence == SEEK_CUR) - { - /// If position within current working buffer - shift pos. - if (!working_buffer.empty() && size_t(getPosition() + offset_) < impl->file_offset_of_buffer_end) - { - pos += offset_; - return getPosition(); - } - else - { - impl->file_offset_of_buffer_end += offset_; - } - } - else if (whence == SEEK_SET) - { - /// If position within current working buffer - shift pos. - if (!working_buffer.empty() - && size_t(offset_) >= impl->file_offset_of_buffer_end - working_buffer.size() - && size_t(offset_) < impl->file_offset_of_buffer_end) - { - pos = working_buffer.end() - (impl->file_offset_of_buffer_end - offset_); - return getPosition(); - } - else - { - impl->file_offset_of_buffer_end = offset_; - } - } - else - throw Exception(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Only SEEK_SET or SEEK_CUR modes are allowed."); - - impl->seek(impl->file_offset_of_buffer_end, SEEK_SET); - resetWorkingBuffer(); - - file_offset_of_buffer_end = impl->file_offset_of_buffer_end; - return impl->file_offset_of_buffer_end; -} - - -bool ReadIndirectBufferFromRemoteFS::nextImpl() -{ - chassert(internal_buffer.size() == read_settings.remote_fs_buffer_size); - chassert(file_offset_of_buffer_end <= impl->getFileSize()); - - auto [size, offset, _] = impl->readInto(internal_buffer.begin(), internal_buffer.size(), file_offset_of_buffer_end, /* ignore */0); - - chassert(offset <= size); - chassert(size <= internal_buffer.size()); - - size_t bytes_read = size - offset; - if (bytes_read) - working_buffer = Buffer(internal_buffer.begin() + offset, internal_buffer.begin() + size); - - file_offset_of_buffer_end = impl->getFileOffsetOfBufferEnd(); - - /// In case of multiple files for the same file in clickhouse (i.e. 
log family) - /// file_offset_of_buffer_end will not match getImplementationBufferOffset() - /// so we use [impl->getImplementationBufferOffset(), impl->getFileSize()] - chassert(file_offset_of_buffer_end >= impl->getImplementationBufferOffset()); - chassert(file_offset_of_buffer_end <= impl->getFileSize()); - - return bytes_read; -} - -} diff --git a/src/Disks/IO/ReadIndirectBufferFromRemoteFS.h b/src/Disks/IO/ReadIndirectBufferFromRemoteFS.h deleted file mode 100644 index 19647b1fa39..00000000000 --- a/src/Disks/IO/ReadIndirectBufferFromRemoteFS.h +++ /dev/null @@ -1,46 +0,0 @@ -#pragma once - -#include "config.h" -#include -#include -#include - - -namespace DB -{ - -class ReadBufferFromRemoteFSGather; - -/** -* Reads data from S3/HDFS/Web using stored paths in metadata. -* There is asynchronous version of this class -- AsynchronousReadIndirectBufferFromRemoteFS. -*/ -class ReadIndirectBufferFromRemoteFS : public ReadBufferFromFileBase -{ - -public: - explicit ReadIndirectBufferFromRemoteFS(std::shared_ptr impl_, const ReadSettings & settings); - - off_t seek(off_t offset_, int whence) override; - - off_t getPosition() override; - - String getFileName() const override; - - void setReadUntilPosition(size_t position) override; - - void setReadUntilEnd() override; - - size_t getFileSize() override; - -private: - bool nextImpl() override; - - std::shared_ptr impl; - - ReadSettings read_settings; - - size_t file_offset_of_buffer_end = 0; -}; - -} diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index 23a0da39dd3..3636c5780fb 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include @@ -218,24 +217,33 @@ std::unique_ptr AzureObjectStorage::readObjects( /// NOL read_until_position); }; - auto reader_impl = std::make_unique( - std::move(read_buffer_creator), - objects, - disk_read_settings, - global_context->getFilesystemCacheLog()); + switch (read_settings.remote_fs_method) + { + case RemoteFSReadMethod::read: + { + return std::make_unique( + std::move(read_buffer_creator), + objects, + disk_read_settings, + global_context->getFilesystemCacheLog(), + /* use_external_buffer */false); - if (disk_read_settings.remote_fs_method == RemoteFSReadMethod::threadpool) - { - auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); - return std::make_unique( - std::move(reader_impl), reader, disk_read_settings, - global_context->getAsyncReadCounters(), - global_context->getFilesystemReadPrefetchesLog()); - } - else - { - auto buf = std::make_unique(std::move(reader_impl), disk_read_settings); - return std::make_unique(std::move(buf), settings_ptr->min_bytes_for_seek); + } + case RemoteFSReadMethod::threadpool: + { + auto impl = std::make_unique( + std::move(read_buffer_creator), + objects, + disk_read_settings, + global_context->getFilesystemCacheLog(), + /* use_external_buffer */true); + + auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); + return std::make_unique( + std::move(impl), reader, disk_read_settings, + global_context->getAsyncReadCounters(), + global_context->getFilesystemReadPrefetchesLog()); + } } } diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h 
b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h index 5b08ceb80e3..2fbd1514abd 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h @@ -5,7 +5,6 @@ #include #include -#include #include #include diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp index a3092bc6f12..60230ce2fb0 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp @@ -1,13 +1,10 @@ #include -#include #include - #include #include #include -#include #include #include @@ -72,9 +69,8 @@ std::unique_ptr HDFSObjectStorage::readObjects( /// NOLI hdfs_uri, hdfs_path, config, disk_read_settings, /* read_until_position */0, /* use_external_buffer */true); }; - auto hdfs_impl = std::make_unique(std::move(read_buffer_creator), objects, disk_read_settings, nullptr); - auto buf = std::make_unique(std::move(hdfs_impl), read_settings); - return std::make_unique(std::move(buf), settings->min_bytes_for_seek); + return std::make_unique( + std::move(read_buffer_creator), objects, disk_read_settings, nullptr, /* use_external_buffer */false); } std::unique_ptr HDFSObjectStorage::writeObject( /// NOLINT diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index 05c0c8f3961..69ccf309096 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -4,11 +4,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -59,25 +57,26 @@ std::unique_ptr LocalObjectStorage::readObjects( /// NOL return createReadBufferFromFileBase(file_path, modified_settings, read_hint, file_size); }; - auto impl = std::make_unique( - std::move(read_buffer_creator), objects, modified_settings, - global_context->getFilesystemCacheLog()); + switch (read_settings.remote_fs_method) + { + case RemoteFSReadMethod::read: + { + return std::make_unique( + std::move(read_buffer_creator), objects, modified_settings, + global_context->getFilesystemCacheLog(), /* use_external_buffer */false); + } + case RemoteFSReadMethod::threadpool: + { + auto impl = std::make_unique( + std::move(read_buffer_creator), objects, modified_settings, + global_context->getFilesystemCacheLog(), /* use_external_buffer */true); - /// We use `remove_fs_method` (not `local_fs_method`) because we are about to use - /// AsynchronousBoundedReadBuffer which works by the remote_fs_* settings. 
- if (modified_settings.remote_fs_method == RemoteFSReadMethod::threadpool) - { - auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); - return std::make_unique( - std::move(impl), reader, modified_settings, - global_context->getAsyncReadCounters(), - global_context->getFilesystemReadPrefetchesLog()); - } - else - { - auto buf = std::make_unique(std::move(impl), modified_settings); - return std::make_unique( - std::move(buf), modified_settings.remote_read_min_bytes_for_seek); + auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); + return std::make_unique( + std::move(impl), reader, read_settings, + global_context->getAsyncReadCounters(), + global_context->getFilesystemReadPrefetchesLog()); + } } } diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index d19be20f920..e48924326e1 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -8,11 +8,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -182,24 +180,33 @@ std::unique_ptr S3ObjectStorage::readObjects( /// NOLINT /* restricted_seek */true); }; - auto s3_impl = std::make_unique( - std::move(read_buffer_creator), - objects, - disk_read_settings, - global_context->getFilesystemCacheLog()); + switch (read_settings.remote_fs_method) + { + case RemoteFSReadMethod::read: + { + return std::make_unique( + std::move(read_buffer_creator), + objects, + disk_read_settings, + global_context->getFilesystemCacheLog(), + /* use_external_buffer */false); - if (read_settings.remote_fs_method == RemoteFSReadMethod::threadpool) - { - auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); - return std::make_unique( - std::move(s3_impl), reader, disk_read_settings, - global_context->getAsyncReadCounters(), - global_context->getFilesystemReadPrefetchesLog()); - } - else - { - auto buf = std::make_unique(std::move(s3_impl), disk_read_settings); - return std::make_unique(std::move(buf), settings_ptr->min_bytes_for_seek); + } + case RemoteFSReadMethod::threadpool: + { + auto impl = std::make_unique( + std::move(read_buffer_creator), + objects, + disk_read_settings, + global_context->getFilesystemCacheLog(), + /* use_external_buffer */true); + + auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); + return std::make_unique( + std::move(impl), reader, disk_read_settings, + global_context->getAsyncReadCounters(), + global_context->getFilesystemReadPrefetchesLog()); + } } } diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp index 4f34f3eed9c..690a0d3372c 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp @@ -4,11 +4,9 @@ #include #include -#include #include #include -#include #include #include #include @@ -181,24 +179,33 @@ std::unique_ptr WebObjectStorage::readObject( /// NOLINT }; auto global_context = Context::getGlobalContextInstance(); - auto web_impl = std::make_unique( - std::move(read_buffer_creator), - StoredObjects{object}, - read_settings, - global_context->getFilesystemCacheLog()); - if (read_settings.remote_fs_method == RemoteFSReadMethod::threadpool) + switch (read_settings.remote_fs_method) { - auto & reader = 
global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); - return std::make_unique( - std::move(web_impl), reader, read_settings, - global_context->getAsyncReadCounters(), - global_context->getFilesystemReadPrefetchesLog()); - } - else - { - auto buf = std::make_unique(std::move(web_impl), read_settings); - return std::make_unique(std::move(buf), min_bytes_for_seek); + case RemoteFSReadMethod::read: + { + return std::make_unique( + std::move(read_buffer_creator), + StoredObjects{object}, + read_settings, + global_context->getFilesystemCacheLog(), + /* use_external_buffer */false); + } + case RemoteFSReadMethod::threadpool: + { + auto impl = std::make_unique( + std::move(read_buffer_creator), + StoredObjects{object}, + read_settings, + global_context->getFilesystemCacheLog(), + /* use_external_buffer */true); + + auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); + return std::make_unique( + std::move(impl), reader, read_settings, + global_context->getAsyncReadCounters(), + global_context->getFilesystemReadPrefetchesLog()); + } } } diff --git a/src/IO/SeekAvoidingReadBuffer.cpp b/src/IO/SeekAvoidingReadBuffer.cpp deleted file mode 100644 index 4d6406d8ddf..00000000000 --- a/src/IO/SeekAvoidingReadBuffer.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include - - -namespace DB -{ - -SeekAvoidingReadBuffer::SeekAvoidingReadBuffer(std::unique_ptr impl_, UInt64 min_bytes_for_seek_) - : ReadBufferFromFileDecorator(std::move(impl_)) - , min_bytes_for_seek(min_bytes_for_seek_) -{ -} - - -off_t SeekAvoidingReadBuffer::seek(off_t off, int whence) -{ - off_t position = getPosition(); - - if (whence == SEEK_CUR) - { - off += position; - whence = SEEK_SET; - } - - if (whence == SEEK_SET && off >= position && off < position + static_cast(min_bytes_for_seek)) - { - swap(*impl); - impl->ignore(off - position); - swap(*impl); - return off; - } - - return ReadBufferFromFileDecorator::seek(off, whence); -} - -} diff --git a/src/IO/SeekAvoidingReadBuffer.h b/src/IO/SeekAvoidingReadBuffer.h deleted file mode 100644 index 716d7c5046c..00000000000 --- a/src/IO/SeekAvoidingReadBuffer.h +++ /dev/null @@ -1,26 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ - -/// `SeekAvoidingReadBuffer` prefers sequential reads over seeks within specified window. -/// It is useful in network and spinning disk storage media when seek is relatively expensive -/// operation. -/// See also: `merge_tree_min_rows_for_seek`. -class SeekAvoidingReadBuffer : public ReadBufferFromFileDecorator -{ -public: - SeekAvoidingReadBuffer(std::unique_ptr impl_, UInt64 min_bytes_for_seek_); - - off_t seek(off_t off, int whence) override; - - void prefetch(Priority priority) override { impl->prefetch(priority); } - -private: - UInt64 min_bytes_for_seek; /// Minimum positive seek offset which shall be executed using seek operation. 
-}; - -} diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index c3ed0f1af16..8bab596901c 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -657,7 +657,7 @@ std::unique_ptr StorageS3Source::createAsyncS3ReadBuffer( std::move(read_buffer_creator), StoredObjects{StoredObject{key, object_size}}, read_settings, - /* cache_log */nullptr); + /* cache_log */nullptr, /* use_external_buffer */true); auto modified_settings{read_settings}; /// FIXME: Changing this setting to default value breaks something around parquet reading From 48e1b21aabd8ef642c1bcab7ba863f8c61123723 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Thu, 8 Jun 2023 20:34:30 +0800 Subject: [PATCH 255/722] Add feature to support read csv by space & tab delimiter --- src/Core/Settings.h | 1 + src/Formats/FormatFactory.cpp | 1 + src/Formats/FormatSettings.h | 1 + .../Formats/Impl/CSVRowInputFormat.cpp | 36 ++++++++++--------- ...h_whitespace_tab_field_delimiter.reference | 2 ++ ...ext_with_whitespace_tab_field_delimiter.sh | 18 ++++++++++ .../data_csv/csv_with_space_delimiter.csv | 1 + .../data_csv/csv_with_tab_delimiter.csv | 1 + 8 files changed, 45 insertions(+), 16 deletions(-) create mode 100644 tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.reference create mode 100755 tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh create mode 100644 tests/queries/0_stateless/data_csv/csv_with_space_delimiter.csv create mode 100644 tests/queries/0_stateless/data_csv/csv_with_tab_delimiter.csv diff --git a/src/Core/Settings.h b/src/Core/Settings.h index a87e321bed2..45641e76689 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -850,6 +850,7 @@ class IColumn; M(Bool, input_format_csv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in CSV format", 0) \ M(Bool, input_format_tsv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in TSV format", 0) \ M(Bool, input_format_csv_detect_header, true, "Automatically detect header with names and types in CSV format", 0) \ + M(Bool, input_format_csv_skip_whitespaces_tabs, true, "Skips spaces and tabs(\\t) characters in the CSV strings", 0) \ M(Bool, input_format_csv_trim_whitespaces, true, "Trims spaces and tabs (\\t) characters at the beginning and end in CSV strings", 0) \ M(Bool, input_format_tsv_detect_header, true, "Automatically detect header with names and types in TSV format", 0) \ M(Bool, input_format_custom_detect_header, true, "Automatically detect header with names and types in CustomSeparated format", 0) \ diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 39b28e025a6..73a7d4f73f2 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -70,6 +70,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.skip_first_lines = settings.input_format_csv_skip_first_lines; format_settings.csv.try_detect_header = settings.input_format_csv_detect_header; format_settings.csv.trim_whitespaces = settings.input_format_csv_trim_whitespaces; + format_settings.csv.skip_whitespaces_tabs = settings.input_format_csv_skip_whitespaces_tabs; format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter; format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter; format_settings.hive_text.map_keys_delimiter = 
settings.input_format_hive_text_map_keys_delimiter; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 6b4caffbf43..434389e31a1 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -137,6 +137,7 @@ struct FormatSettings String custom_delimiter; bool try_detect_header = true; bool trim_whitespaces = true; + bool skip_whitespaces_tabs = true; } csv; struct HiveText diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 8b4dbbffe1d..4094285e1ad 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -25,10 +25,10 @@ namespace ErrorCodes namespace { - void checkBadDelimiter(char delimiter) + void checkBadDelimiter(char delimiter, bool skip_whitespaces_tabs) { constexpr std::string_view bad_delimiters = " \t\"'.UL"; - if (bad_delimiters.find(delimiter) != std::string_view::npos) + if (bad_delimiters.find(delimiter) != std::string_view::npos && skip_whitespaces_tabs) throw Exception( ErrorCodes::BAD_ARGUMENTS, "CSV format may not work correctly with delimiter '{}'. Try use CustomSeparated format instead", @@ -68,7 +68,7 @@ CSVRowInputFormat::CSVRowInputFormat( format_settings_.csv.try_detect_header), buf(std::move(in_)) { - checkBadDelimiter(format_settings_.csv.delimiter); + checkBadDelimiter(format_settings_.csv.delimiter, format_settings_.csv.skip_whitespaces_tabs); } CSVRowInputFormat::CSVRowInputFormat( @@ -90,7 +90,7 @@ CSVRowInputFormat::CSVRowInputFormat( format_settings_.csv.try_detect_header), buf(std::move(in_)) { - checkBadDelimiter(format_settings_.csv.delimiter); + checkBadDelimiter(format_settings_.csv.delimiter, format_settings_.csv.skip_whitespaces_tabs); } void CSVRowInputFormat::syncAfterError() @@ -134,8 +134,12 @@ static void skipEndOfLine(ReadBuffer & in) } /// Skip `whitespace` symbols allowed in CSV. 
-static inline void skipWhitespacesAndTabs(ReadBuffer & in) +static inline void skipWhitespacesAndTabs(ReadBuffer & in, const bool & skip_whitespaces_tabs) { + if (!skip_whitespaces_tabs) + { + return; + } while (!in.eof() && (*in.position() == ' ' || *in.position() == '\t')) ++in.position(); } @@ -146,7 +150,7 @@ CSVFormatReader::CSVFormatReader(PeekableReadBuffer & buf_, const FormatSettings void CSVFormatReader::skipFieldDelimiter() { - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); assertChar(format_settings.csv.delimiter, *buf); } @@ -154,7 +158,7 @@ template String CSVFormatReader::readCSVFieldIntoString() { if (format_settings.csv.trim_whitespaces) [[likely]] - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); String field; if constexpr (read_string) @@ -166,14 +170,14 @@ String CSVFormatReader::readCSVFieldIntoString() void CSVFormatReader::skipField() { - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); NullOutput out; readCSVStringInto(out, *buf, format_settings.csv); } void CSVFormatReader::skipRowEndDelimiter() { - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); if (buf->eof()) return; @@ -182,7 +186,7 @@ void CSVFormatReader::skipRowEndDelimiter() if (*buf->position() == format_settings.csv.delimiter) ++buf->position(); - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); if (buf->eof()) return; @@ -194,7 +198,7 @@ void CSVFormatReader::skipHeaderRow() do { skipField(); - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); } while (checkChar(format_settings.csv.delimiter, *buf)); skipRowEndDelimiter(); @@ -207,7 +211,7 @@ std::vector CSVFormatReader::readRowImpl() do { fields.push_back(readCSVFieldIntoString()); - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); } while (checkChar(format_settings.csv.delimiter, *buf)); skipRowEndDelimiter(); @@ -220,7 +224,7 @@ bool CSVFormatReader::parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out) try { - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); assertChar(delimiter, *buf); } catch (const DB::Exception &) @@ -246,7 +250,7 @@ bool CSVFormatReader::parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out) bool CSVFormatReader::parseRowEndWithDiagnosticInfo(WriteBuffer & out) { - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); if (buf->eof()) return true; @@ -255,7 +259,7 @@ bool CSVFormatReader::parseRowEndWithDiagnosticInfo(WriteBuffer & out) if (*buf->position() == format_settings.csv.delimiter) { ++buf->position(); - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); if (buf->eof()) return true; } @@ -283,7 +287,7 @@ bool CSVFormatReader::readField( const String & /*column_name*/) { if (format_settings.csv.trim_whitespaces || !isStringOrFixedString(removeNullable(type))) [[likely]] - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); const bool at_delimiter = !buf->eof() && *buf->position() == format_settings.csv.delimiter; const bool at_last_column_line_end = is_last_file_column && (buf->eof() || *buf->position() == '\n' || 
*buf->position() == '\r'); diff --git a/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.reference b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.reference new file mode 100644 index 00000000000..531391394a7 --- /dev/null +++ b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.reference @@ -0,0 +1,2 @@ +1 a b +2 c d diff --git a/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh new file mode 100755 index 00000000000..19d343c352f --- /dev/null +++ b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# NOTE: this sh wrapper is required because of shell_config + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "drop table if exists test_whitespace" +$CLICKHOUSE_CLIENT -q "drop table if exists test_tab" +$CLICKHOUSE_CLIENT -q "create table test_whitespace (x UInt32, y String, z String) engine=MergeTree order by x" +$CLICKHOUSE_CLIENT -q "create table test_tab (x UInt32, y String, z String) engine=MergeTree order by x" +$CUR_DIR/data_csv/csv_with_space_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_whitespace SETTINGS format_csv_delimiter=' ', input_format_csv_skip_whitespaces_tabs=false FORMAT CSV" +$CUR_DIR/data_csv/csv_with_tab_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_tab SETTINGS format_csv_delimiter='\t', input_format_csv_skip_whitespaces_tabs=false FORMAT CSV" +$CLICKHOUSE_CLIENT -q "select * from test_whitespace" +$CLICKHOUSE_CLIENT -q "select * from test_tab" +$CLICKHOUSE_CLIENT -q "drop table test_whitespace" +$CLICKHOUSE_CLIENT -q "drop table test_tab"; diff --git a/tests/queries/0_stateless/data_csv/csv_with_space_delimiter.csv b/tests/queries/0_stateless/data_csv/csv_with_space_delimiter.csv new file mode 100644 index 00000000000..967f8ae450e --- /dev/null +++ b/tests/queries/0_stateless/data_csv/csv_with_space_delimiter.csv @@ -0,0 +1 @@ +1 a b diff --git a/tests/queries/0_stateless/data_csv/csv_with_tab_delimiter.csv b/tests/queries/0_stateless/data_csv/csv_with_tab_delimiter.csv new file mode 100644 index 00000000000..f3b63950ea8 --- /dev/null +++ b/tests/queries/0_stateless/data_csv/csv_with_tab_delimiter.csv @@ -0,0 +1 @@ +2 c d From f4202963ad04f52da3aa0ada96c6151cfadd4a69 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Thu, 8 Jun 2023 21:06:38 +0800 Subject: [PATCH 256/722] test modify --- .../02785_text_with_whitespace_tab_field_delimiter.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh index 19d343c352f..e3f61262674 100755 --- a/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh +++ b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh @@ -10,8 +10,8 @@ $CLICKHOUSE_CLIENT -q "drop table if exists test_whitespace" $CLICKHOUSE_CLIENT -q "drop table if exists test_tab" $CLICKHOUSE_CLIENT -q "create table test_whitespace (x UInt32, y String, z String) engine=MergeTree order by x" $CLICKHOUSE_CLIENT -q "create table test_tab (x UInt32, y String, z String) engine=MergeTree order by x" -$CUR_DIR/data_csv/csv_with_space_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_whitespace 
SETTINGS format_csv_delimiter=' ', input_format_csv_skip_whitespaces_tabs=false FORMAT CSV" -$CUR_DIR/data_csv/csv_with_tab_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_tab SETTINGS format_csv_delimiter='\t', input_format_csv_skip_whitespaces_tabs=false FORMAT CSV" +cat $CURDIR/data_csv/csv_with_space_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_whitespace SETTINGS format_csv_delimiter=' ', input_format_csv_skip_whitespaces_tabs=false FORMAT CSV" +cat $CURDIR/data_csv/csv_with_tab_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_tab SETTINGS format_csv_delimiter='\t', input_format_csv_skip_whitespaces_tabs=false FORMAT CSV" $CLICKHOUSE_CLIENT -q "select * from test_whitespace" $CLICKHOUSE_CLIENT -q "select * from test_tab" $CLICKHOUSE_CLIENT -q "drop table test_whitespace" From fa877f456185b5de5b5f2e36e775acdb8dec7f31 Mon Sep 17 00:00:00 2001 From: zvonand Date: Thu, 8 Jun 2023 16:05:14 +0200 Subject: [PATCH 257/722] cosmetic changes --- src/Common/DateLUT.h | 2 +- src/Common/LocalDate.h | 13 +++++-------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/src/Common/DateLUT.h b/src/Common/DateLUT.h index 23698331afe..833b2291254 100644 --- a/src/Common/DateLUT.h +++ b/src/Common/DateLUT.h @@ -39,7 +39,7 @@ public: /// Timezone is passed in query_context, but on CH-Client we have no query context, /// and each time we modify client's global context - const auto global_context = DB::CurrentThread::get().getGlobalContext(); + const DB::ContextPtr global_context = DB::CurrentThread::get().getGlobalContext(); if (global_context) { context_timezone = extractTimezoneFromContext(global_context); diff --git a/src/Common/LocalDate.h b/src/Common/LocalDate.h index 4a383129ae4..2331a40fd12 100644 --- a/src/Common/LocalDate.h +++ b/src/Common/LocalDate.h @@ -24,9 +24,8 @@ private: unsigned char m_month; unsigned char m_day; - void init(time_t time) + void init(time_t time, const DateLUTImpl & date_lut) { - const auto & date_lut = DateLUT::instance(); const auto & values = date_lut.getValues(time); m_year = values.year; @@ -56,9 +55,9 @@ private: } public: - explicit LocalDate(time_t time) + explicit LocalDate(time_t time, const DateLUTImpl & time_zone = DateLUT::instance()) { - init(time); + init(time, time_zone); } LocalDate(DayNum day_num, const DateLUTImpl & time_zone = DateLUT::instance()) /// NOLINT @@ -99,15 +98,13 @@ public: LocalDate(const LocalDate &) noexcept = default; LocalDate & operator= (const LocalDate &) noexcept = default; - DayNum getDayNum() const + DayNum getDayNum(const DateLUTImpl & lut = DateLUT::instance()) const { - const auto & lut = DateLUT::instance(); return DayNum(lut.makeDayNum(m_year, m_month, m_day).toUnderType()); } - ExtendedDayNum getExtenedDayNum() const + ExtendedDayNum getExtenedDayNum(const DateLUTImpl & lut = DateLUT::instance()) const { - const auto & lut = DateLUT::instance(); return ExtendedDayNum (lut.makeDayNum(m_year, m_month, m_day).toUnderType()); } From 9f61c786ed67ae571384f90ef934bb839ab20e0a Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Thu, 8 Jun 2023 17:25:53 +0300 Subject: [PATCH 258/722] Amend the tests --- ...3_parseDateTimeBestEffort_syslog.reference | 21 +--- .../02783_parseDateTimeBestEffort_syslog.sql | 97 ++++++------------- 2 files changed, 34 insertions(+), 84 deletions(-) diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference index 7409b413260..63e7e367941 100644 --- 
a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference +++ b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference @@ -1,20 +1,5 @@ parseDateTimeBestEffort - dt_ref res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc + dt_ref res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc res_us res_us_sam res_us_auc res_us_null res_us_null_sam res_us_null_auc res_us_zero res_us_zero_sam res_us_zero_auc res64 res64_sam res64_auc res64_null res64_null_sam res64_null_auc res64_zero res64_zero_sam res64_zero_auc res64_us res64_us_sam res64_us_auc res64_us_null res64_us_null_sam res64_us_null_auc res64_us_zero res64_us_zero_sam res64_us_zero_auc - Jun 7 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 - Jun 7 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 -parseDateTimeBestEffortUS - dt_ref res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc - - Jun 7 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 2023-06-07 04:55:00 2022-06-07 04:55:00 2023-06-07 04:55:00 - Jun 7 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 2022-06-07 04:56:00 2022-06-07 04:56:00 2023-06-07 04:56:00 -parseDateTime64BestEffort - dt_ref res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc - - Jun 7 04:55:00 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 - Jun 7 04:56:00 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 -parseDateTime64BestEffortUS - dt_ref res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc - - Jun 7 04:55:00 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 2023-06-07 04:55:00.000 2022-06-07 04:55:00.000 2023-06-07 04:55:00.000 - Jun 7 04:56:00 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 2022-06-07 04:56:00.000 2022-06-07 04:56:00.000 2023-06-07 04:56:00.000 + Jun 6 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 
2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 + Jun 8 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql index 91ae230205b..59211d3e6a0 100644 --- a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql +++ b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql @@ -1,8 +1,9 @@ SELECT 'parseDateTimeBestEffort'; WITH + 86400 AS secs_in_day, now() AS ts_now, - '2023-06-07 04:55:30' AS ref_point, + '2023-06-07' AS ref_point, dateDiff('second', toDateTime(ref_point), ts_now) AS impedimenta, formatDateTime(ts_around, '%b %e %T') AS dt_curr SELECT @@ -15,69 +16,33 @@ SELECT parseDateTimeBestEffortOrNull(dt_curr, 'Pacific/Auckland') - impedimenta AS res_null_auc, parseDateTimeBestEffortOrZero(dt_curr) - impedimenta AS res_zero, parseDateTimeBestEffortOrZero(dt_curr, 'US/Samoa') - impedimenta AS res_zero_sam, - parseDateTimeBestEffortOrZero(dt_curr, 'Pacific/Auckland') - impedimenta AS res_zero_auc -FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around) -FORMAT PrettySpaceNoEscapes; - -SELECT 'parseDateTimeBestEffortUS'; - -WITH - now() AS ts_now, - '2023-06-07 04:55:30' AS ref_point, - dateDiff('second', toDateTime(ref_point), ts_now) AS impedimenta, - formatDateTime(ts_around, '%b %e %T') AS dt_curr -SELECT - formatDateTime(ts_around - impedimenta, '%b %e %H:%i:%s') AS dt_ref, - parseDateTimeBestEffortUS(dt_curr) - impedimenta AS res, - parseDateTimeBestEffortUS(dt_curr, 'US/Samoa') - impedimenta AS res_sam, - parseDateTimeBestEffortUS(dt_curr, 'Pacific/Auckland') - impedimenta AS res_auc, - parseDateTimeBestEffortUSOrNull(dt_curr) - impedimenta AS res_null, - parseDateTimeBestEffortUSOrNull(dt_curr, 'US/Samoa') - impedimenta AS res_null_sam, - parseDateTimeBestEffortUSOrNull(dt_curr, 'Pacific/Auckland') - impedimenta AS res_null_auc, - parseDateTimeBestEffortUSOrZero(dt_curr) - impedimenta AS res_zero, - parseDateTimeBestEffortUSOrZero(dt_curr, 'US/Samoa') - impedimenta AS res_zero_sam, - parseDateTimeBestEffortUSOrZero(dt_curr, 'Pacific/Auckland') - impedimenta AS res_zero_auc -FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around) -FORMAT PrettySpaceNoEscapes; - -SELECT 'parseDateTime64BestEffort'; - -WITH - now() AS ts_now, - '2023-06-07 04:55:30' AS ref_point, - dateDiff('second', toDateTime(ref_point), ts_now) AS impedimenta, - formatDateTime(ts_around, '%b %e %T') AS dt_curr -SELECT - formatDateTime(ts_around - impedimenta, '%b %e %H:%i:%s') AS dt_ref, - parseDateTime64BestEffort(dt_curr) - impedimenta AS res, - parseDateTime64BestEffort(dt_curr, 3, 'US/Samoa') - impedimenta AS res_sam, - 
parseDateTime64BestEffort(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_auc, - parseDateTime64BestEffortOrNull(dt_curr) - impedimenta AS res_null, - parseDateTime64BestEffortOrNull(dt_curr, 3, 'US/Samoa') - impedimenta AS res_null_sam, - parseDateTime64BestEffortOrNull(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_null_auc, - parseDateTime64BestEffortOrZero(dt_curr) - impedimenta AS res_zero, - parseDateTime64BestEffortOrZero(dt_curr, 3, 'US/Samoa') - impedimenta AS res_zero_sam, - parseDateTime64BestEffortOrZero(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_zero_auc -FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around) -FORMAT PrettySpaceNoEscapes; - -SELECT 'parseDateTime64BestEffortUS'; - -WITH - now() AS ts_now, - '2023-06-07 04:55:30' AS ref_point, - dateDiff('second', toDateTime(ref_point), ts_now) AS impedimenta, - formatDateTime(ts_around, '%b %e %T') AS dt_curr -SELECT - formatDateTime(ts_around - impedimenta, '%b %e %H:%i:%s') AS dt_ref, - parseDateTime64BestEffortUS(dt_curr) - impedimenta AS res, - parseDateTime64BestEffortUS(dt_curr, 3, 'US/Samoa') - impedimenta AS res_sam, - parseDateTime64BestEffortUS(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_auc, - parseDateTime64BestEffortUSOrNull(dt_curr) - impedimenta AS res_null, - parseDateTime64BestEffortUSOrNull(dt_curr, 3, 'US/Samoa') - impedimenta AS res_null_sam, - parseDateTime64BestEffortUSOrNull(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_null_auc, - parseDateTime64BestEffortUSOrZero(dt_curr) - impedimenta AS res_zero, - parseDateTime64BestEffortUSOrZero(dt_curr, 3, 'US/Samoa') - impedimenta AS res_zero_sam, - parseDateTime64BestEffortUSOrZero(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res_zero_auc -FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around) + parseDateTimeBestEffortOrZero(dt_curr, 'Pacific/Auckland') - impedimenta AS res_zero_auc, + parseDateTimeBestEffortUS(dt_curr) - impedimenta AS res_us, + parseDateTimeBestEffortUS(dt_curr, 'US/Samoa') - impedimenta AS res_us_sam, + parseDateTimeBestEffortUS(dt_curr, 'Pacific/Auckland') - impedimenta AS res_us_auc, + parseDateTimeBestEffortUSOrNull(dt_curr) - impedimenta AS res_us_null, + parseDateTimeBestEffortUSOrNull(dt_curr, 'US/Samoa') - impedimenta AS res_us_null_sam, + parseDateTimeBestEffortUSOrNull(dt_curr, 'Pacific/Auckland') - impedimenta AS res_us_null_auc, + parseDateTimeBestEffortUSOrZero(dt_curr) - impedimenta AS res_us_zero, + parseDateTimeBestEffortUSOrZero(dt_curr, 'US/Samoa') - impedimenta AS res_us_zero_sam, + parseDateTimeBestEffortUSOrZero(dt_curr, 'Pacific/Auckland') - impedimenta AS res_us_zero_auc, + parseDateTime64BestEffort(dt_curr) - impedimenta AS res64, + parseDateTime64BestEffort(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_sam, + parseDateTime64BestEffort(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_auc, + parseDateTime64BestEffortOrNull(dt_curr) - impedimenta AS res64_null, + parseDateTime64BestEffortOrNull(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_null_sam, + parseDateTime64BestEffortOrNull(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_null_auc, + parseDateTime64BestEffortOrZero(dt_curr) - impedimenta AS res64_zero, + parseDateTime64BestEffortOrZero(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_zero_sam, + parseDateTime64BestEffortOrZero(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_zero_auc, + parseDateTime64BestEffortUS(dt_curr) - impedimenta AS res64_us, + parseDateTime64BestEffortUS(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_us_sam, + 
parseDateTime64BestEffortUS(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_us_auc, + parseDateTime64BestEffortUSOrNull(dt_curr) - impedimenta AS res64_us_null, + parseDateTime64BestEffortUSOrNull(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_us_null_sam, + parseDateTime64BestEffortUSOrNull(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_us_null_auc, + parseDateTime64BestEffortUSOrZero(dt_curr) - impedimenta AS res64_us_zero, + parseDateTime64BestEffortUSOrZero(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_us_zero_sam, + parseDateTime64BestEffortUSOrZero(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_us_zero_auc +FROM (SELECT arrayJoin([ts_now - secs_in_day, ts_now + secs_in_day]) AS ts_around) FORMAT PrettySpaceNoEscapes; From b8178088d020d956cdf27d390f3d3b7a72a813e8 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Jun 2023 08:10:40 +0000 Subject: [PATCH 259/722] Misc Annoy fixes --- .../mergetree-family/annindexes.md | 169 +++++--- .../mergetree-family/mergetree.md | 2 +- src/Parsers/ParserCreateIndexQuery.cpp | 11 +- src/Parsers/ParserCreateQuery.cpp | 12 +- ...pproximateNearestNeighborIndexesCommon.cpp | 2 +- .../ApproximateNearestNeighborIndexesCommon.h | 4 +- .../MergeTree/MergeTreeIndexAnnoy.cpp | 43 +- src/Storages/MergeTree/MergeTreeIndexAnnoy.h | 16 +- .../0_stateless/02354_annoy_index.reference | 260 ++++++------ .../queries/0_stateless/02354_annoy_index.sql | 371 +++++++++++------- .../aspell-ignore/en/aspell-dict.txt | 3 + 11 files changed, 548 insertions(+), 345 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index f600f9a015c..2b0b77a0735 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -1,104 +1,142 @@ # Approximate Nearest Neighbor Search Indexes [experimental] {#table_engines-ANNIndex} -Nearest neighborhood search refers to the problem of finding the point(s) with the smallest distance to a given point in an n-dimensional -space. Since exact search is in practice usually typically too slow, the task is often solved with approximate algorithms. A popular use -case of of neighbor search is finding similar pictures (texts) for a given picture (text). Pictures (texts) can be decomposed into -[embeddings](https://cloud.google.com/architecture/overview-extracting-and-serving-feature-embeddings-for-machine-learning), and instead of -comparing pictures (texts) pixel-by-pixel (character-by-character), only the embeddings are compared. +Nearest neighborhood search is the problem of finding the M closest points for a given point in an N-dimensional vector space. The most +straightforward approach to solve this problem is a brute force search where the distance between all points in the vector space and the +reference point is computed. This method guarantees perfect accuracy but it is usually too slow for practical applications. Thus, nearest +neighborhood search problems are often solved with [approximative algorithms](https://github.com/erikbern/ann-benchmarks). Approximative +nearest neighborhood search techniques, in conjunction with [embedding +methods](https://cloud.google.com/architecture/overview-extracting-and-serving-feature-embeddings-for-machine-learning) allow to search huge +amounts of media (pictures, songs, articles, etc.) in milliseconds. 
-In terms of SQL, the problem can be expressed as follows:
+Blogs:
+- [Vector Search with ClickHouse - Part 1](https://clickhouse.com/blog/vector-search-clickhouse-p1)
+- [Vector Search with ClickHouse - Part 2](https://clickhouse.com/blog/vector-search-clickhouse-p2)
+
+
+In terms of SQL, the nearest neighborhood problem can be expressed as follows:
 
 ``` sql
 SELECT *
 FROM table
-WHERE L2Distance(column, Point) < MaxDistance
+ORDER BY Distance(vectors, Point)
 LIMIT N
 ```
 
+`vectors` contains N-dimensional values of type [Array](../../../sql-reference/data-types/array.md) or
+[Tuple](../../../sql-reference/data-types/tuple.md), for example embeddings. Function `Distance` computes the distance between two vectors.
+Often, the Euclidean (L2) distance is chosen as the distance function, but [other
+distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point, e.g. `(0.17,
+0.33, ...)`, and `N` limits the number of search results.
+
+An alternative formulation of the nearest neighborhood search problem looks as follows:
+
 ``` sql
 SELECT *
 FROM table
-ORDER BY L2Distance(column, Point)
+WHERE Distance(vectors, Point) < MaxDistance
 LIMIT N
 ```
 
-The queries are expensive because the L2 (Euclidean) distance between `Point` and all points in `column` and must be computed. To speed this process up, Approximate Nearest Neighbor Search Indexes (ANN indexes) store a compact representation of the search space (using clustering, search trees, etc.) which allows to compute an approximate answer quickly.
+While the first query returns the top-`N` closest points to the reference point, the second query returns all points closer to the reference
+point than a maximally allowed radius `MaxDistance`. Parameter `N` limits the number of returned values, which is useful for situations where
+`MaxDistance` is difficult to determine in advance.
 
-# Creating ANN Indexes
+With brute force search, both queries are expensive (linear in the number of points) because the distance between all points in `vectors` and
+`Point` must be computed. To speed this process up, Approximate Nearest Neighbor Search Indexes (ANN indexes) store a compact representation
+of the search space (using clustering, search trees, etc.) which allows to compute an approximate answer much quicker (in sub-linear time).
 
-As long as ANN indexes are experimental, you first need to `SET allow_experimental_annoy_index = 1`.
+# Creating and Using ANN Indexes
 
-Syntax to create an ANN index over an `Array` column:
+Syntax to create an ANN index over an [Array](../../../sql-reference/data-types/array.md) column:
 
 ```sql
 CREATE TABLE table
 (
   `id` Int64,
-  `embedding` Array(Float32),
-  INDEX embedding TYPE () GRANULARITY 
+  `vectors` Array(Float32),
+  INDEX vectors TYPE () [GRANULARITY ]
 )
 ENGINE = MergeTree
 ORDER BY id;
 ```
 
-Syntax to create an ANN index over a `Tuple` column:
+Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:
 
 ```sql
 CREATE TABLE table
 (
   `id` Int64,
-  `embedding` Tuple(Float32[, Float32[, ...]]),
-  INDEX embedding TYPE () GRANULARITY 
+  `vectors` Tuple(Float32[, Float32[, ...]]),
+  INDEX vectors TYPE () [GRANULARITY ]
 )
 ENGINE = MergeTree
 ORDER BY id;
 ```
 
-ANN indexes are built during column insertion and merge and `INSERT` and `OPTIMIZE` statements will be slower than for ordinary tables. ANNIndexes are ideally used only with immutable or rarely changed data, respectively there are much more read requests than write requests. 
-
-Similar to regular skip indexes, ANN indexes are constructed over granules and each indexed block consists of `GRANULARITY = `-many
-granules. For example, if the primary index granularity of the table is 8192 (setting `index_granularity = 8192`) and `GRANULARITY = 2`,
-then each indexed block will consist of 16384 rows. However, unlike skip indexes, ANN indexes are not only able to skip the entire indexed
-block, they are able to skip individual granules in indexed blocks. As a result, the `GRANULARITY` parameter has a different meaning in ANN
-indexes than in normal skip indexes. Basically, the bigger `GRANULARITY` is chosen, the more data is provided to a single ANN index, and the
-higher the chance that with the right hyper parameters, the index will remember the data structure better.
-
-# Using ANN Indexes
+ANN indexes are built during column insertion and merge. As a result, `INSERT` and `OPTIMIZE` statements will be slower than for ordinary
+tables. ANNIndexes are ideally used only with immutable or rarely changed data, respectively when there are far more read requests than write
+requests.
 
 ANN indexes support two types of queries:
 
-- WHERE queries:
-
-  ``` sql
-  SELECT *
-  FROM table
-  WHERE DistanceFunction(column, Point) < MaxDistance
-  LIMIT N
-  ```
-
 - ORDER BY queries:
 
   ``` sql
   SELECT *
   FROM table
   [WHERE ...]
-  ORDER BY DistanceFunction(column, Point)
+  ORDER BY Distance(vectors, Point)
   LIMIT N
   ```
 
-`DistanceFunction` is a [distance function](/docs/en/sql-reference/functions/distance-functions.md), `Point` is a reference vector (e.g. `(0.17, 0.33, ...)`) and `MaxDistance` is a floating point value which restricts the size of the neighbourhood.
+- WHERE queries:
+
+  ``` sql
+  SELECT *
+  FROM table
+  WHERE Distance(vectors, Point) < MaxDistance
+  LIMIT N
+  ```
 
 :::tip
-To avoid writing out large vectors, you can use [query parameters](/docs/en//interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters), e.g.
+To avoid writing out large vectors, you can use [query
+parameters](/docs/en//interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters), e.g.
 
 ```bash
-clickhouse-client --param_vec='hello' --query="SELECT * FROM table WHERE L2Distance(embedding, {vec: Array(Float32)}) < 1.0"
+clickhouse-client --param_vec='hello' --query="SELECT * FROM table WHERE L2Distance(vectors, {vec: Array(Float32)}) < 1.0"
 ```
 :::
 
-ANN indexes cannot speed up queries that contain both a `WHERE DistanceFunction(column, Point) < MaxDistance` and an `ORDER BY DistanceFunction(column, Point)` clause. Also, the approximate algorithms used to determine the nearest neighbors require a limit, hence queries that use an ANN index must have a `LIMIT` clause.
+**Restrictions**: Queries that contain both a `WHERE Distance(vectors, Point) < MaxDistance` and an `ORDER BY Distance(vectors, Point)`
+clause cannot use ANN indexes. Also, the approximate algorithms used to determine the nearest neighbors require a limit, hence queries
+without a `LIMIT` clause cannot utilize ANN indexes. Also, ANN indexes are only used if the query has a `LIMIT` value smaller than setting
+`max_limit_for_ann_queries` (default: 1 million rows). This is a safeguard to prevent large memory allocations by external libraries for
+approximate neighbor search. 
+
+**Differences to Skip Indexes** Similar to regular [skip indexes](https://clickhouse.com/docs/en/optimize/skipping-indexes), ANN indexes are
+constructed over granules and each indexed block consists of `GRANULARITY = `-many granules (`` = 1 by default for normal skip
+indexes). For example, if the primary index granularity of the table is 8192 (setting `index_granularity = 8192`) and `GRANULARITY = 2`,
+then each indexed block will contain 16384 rows. However, data structures and algorithms for approximate neighborhood search (usually
+provided by external libraries) are inherently row-oriented. They store a compact representation of a set of rows and also return rows for
+ANN queries. This causes some rather unintuitive differences in the way ANN indexes behave compared to normal skip indexes.
+
+When a user defines an ANN index on a column, ClickHouse internally creates an ANN "sub-index" for each index block. The sub-index is "local"
+in the sense that it only knows about the rows of its containing index block. In the previous example and assuming that a column has 65536
+rows, we obtain four index blocks (spanning eight granules) and an ANN sub-index for each index block. A sub-index is theoretically able to
+return the rows with the N closest points within its index block directly. However, since ClickHouse loads data from disk to memory at the
+granularity of granules, sub-indexes extrapolate matching rows to granule granularity. This is different from regular skip indexes which
+skip data at the granularity of index blocks.
+
+The `GRANULARITY` parameter determines how many ANN sub-indexes are created. Bigger `GRANULARITY` values mean fewer but larger ANN
+sub-indexes, up to the point where a column (or a column part) has only a single sub-index. In that case, the sub-index has a "global" view of
+all column rows and can directly return all granules of the column (part) with relevant rows (there are at most `LIMIT `-many
+such granules). In a second step, ClickHouse will load these granules and identify the actually best rows by performing a brute-force distance
+calculation over all rows of the granules. With a small `GRANULARITY` value, each of the sub-indexes returns up to `LIMIT N`-many granules.
+As a result, more granules need to be loaded and post-filtered. Note that the search accuracy is equally good in both cases, only the
+processing performance differs. It is generally recommended to use a large `GRANULARITY` for ANN indexes and fall back to smaller
+`GRANULARITY` values only in case of problems like excessive memory consumption of the ANN structures. If no `GRANULARITY` was specified for
+ANN indexes, the default value is 100 million.
 
-An ANN index is only used if the query has a `LIMIT` value smaller than setting `max_limit_for_ann_queries` (default: 1 million rows). This is a safety measure which helps to avoid large memory consumption by external libraries for approximate neighbor search.
 
 # Available ANN Indexes
 
@@ -106,51 +144,68 @@ An ANN index is only used if the query has a `LIMIT` value smaller than setting
 
 ## Annoy {#annoy}
 
-(currently disabled on ARM due to memory safety problems with the algorithm)
+Annoy indexes are currently experimental; to use them you first need to `SET allow_experimental_annoy_index = 1`. They are also currently
+disabled on ARM due to memory safety problems with the algorithm. 
-This type of ANN index implements [the Annoy algorithm](https://github.com/spotify/annoy) which uses a recursive division of the space in random linear surfaces (lines in 2D, planes in 3D etc.). +This type of ANN index implements [the Annoy algorithm](https://github.com/spotify/annoy) which is based on a recursive division of the +space in random linear surfaces (lines in 2D, planes in 3D etc.). -Syntax to create a Annoy index over a `Array` column: +
+ +
+ +Syntax to create an Annoy index over an [Array](../../../sql-reference/data-types/array.md) column: ```sql CREATE TABLE table ( id Int64, - embedding Array(Float32), - INDEX embedding TYPE annoy([DistanceName[, NumTrees]]) GRANULARITY N + vectors Array(Float32), + INDEX vectors TYPE annoy([Distance[, NumTrees]]) [GRANULARITY N] ) ENGINE = MergeTree ORDER BY id; ``` -Syntax to create a Annoy index over a `Tuple` column: +Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column: ```sql CREATE TABLE table ( id Int64, - embedding Tuple(Float32[, Float32[, ...]]), - INDEX embedding TYPE annoy([DistanceName[, NumTrees]]) GRANULARITY N + vectors Tuple(Float32[, Float32[, ...]]), + INDEX vectors TYPE annoy([Distance[, NumTrees]]) [GRANULARITY N] ) ENGINE = MergeTree ORDER BY id; ``` -Parameter `DistanceName` is name of a distance function (default `L2Distance`). Annoy currently supports `L2Distance` and `cosineDistance` as distance functions. Parameter `NumTrees` (default: 100) is the number of trees which the algorithm will create. Higher values of `NumTree` mean slower `CREATE` and `SELECT` statements (approximately linearly), but increase the accuracy of search results. +Annoy currently supports `L2Distance` and `cosineDistance` as distance function `Distance`. If no distance function was specified during +index creation, `L2Distance` is used as default. Parameter `NumTrees` is the number of trees which the algorithm creates (default if not +specified: 100). Higher values of `NumTree` mean more accurate search results but slower index creation / query times (approximately +linearly) as well as larger index sizes. :::note -Indexes over columns of type `Array` will generally work faster than indexes on `Tuple` columns. All arrays **must** have same length. Use [CONSTRAINT](/docs/en/sql-reference/statements/create/table.md#constraints) to avoid errors. For example, `CONSTRAINT constraint_name_1 CHECK length(embedding) = 256`. +Indexes over columns of type `Array` will generally work faster than indexes on `Tuple` columns. All arrays **must** have same length. Use +[CONSTRAINT](/docs/en/sql-reference/statements/create/table.md#constraints) to avoid errors. For example, `CONSTRAINT constraint_name_1 +CHECK length(vectors) = 256`. ::: -Setting `annoy_index_search_k_nodes` (default: `NumTrees * LIMIT`) determines how many tree nodes are inspected during SELECTs. It can be used to -balance runtime and accuracy at runtime. - -Example: +Setting `annoy_index_search_k_nodes` (default: `NumTrees * LIMIT`) determines how many tree nodes are inspected during SELECTs. Larger +values mean more accurate results at the cost of longer query runtime: ``` sql SELECT * FROM table_name [WHERE ...] -ORDER BY L2Distance(column, Point) +ORDER BY L2Distance(vectors, Point) LIMIT N SETTINGS annoy_index_search_k_nodes=100 ``` diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 07f706af91d..61276110138 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -491,7 +491,7 @@ Syntax: `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, ran #### Special-purpose -- An experimental index to support approximate nearest neighbor (ANN) search. See [here](annindexes.md) for details. +- Experimental indexes to support approximate nearest neighbor (ANN) search. See [here](annindexes.md) for details. 
- An experimental inverted index to support full-text search. See [here](invertedindexes.md) for details. ### Functions Support {#functions-support} diff --git a/src/Parsers/ParserCreateIndexQuery.cpp b/src/Parsers/ParserCreateIndexQuery.cpp index 7323c5da141..57afd3fb99e 100644 --- a/src/Parsers/ParserCreateIndexQuery.cpp +++ b/src/Parsers/ParserCreateIndexQuery.cpp @@ -46,7 +46,16 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected index->part_of_create_index_query = true; index->set(index->expr, expr); index->set(index->type, type); - index->granularity = granularity ? granularity->as().value.safeGet() : 1; + + if (granularity) + index->granularity = granularity->as().value.safeGet(); + else + { + if (index->type->name == "annoy") + index->granularity = 100'000'000; + else + index->granularity = 1; + } node = index; return true; diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index f975e8ba3c8..c6273f369b1 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -141,7 +141,17 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe index->name = name->as().name(); index->set(index->expr, expr); index->set(index->type, type); - index->granularity = granularity ? granularity->as().value.safeGet() : 1; + + if (granularity) + index->granularity = granularity->as().value.safeGet(); + else + { + if (index->type->name == "annoy") + index->granularity = 100'000'000; + else + index->granularity = 1; + } + node = index; return true; diff --git a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp b/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp index bf277c55863..69e54dd5f0c 100644 --- a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp +++ b/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp @@ -88,7 +88,7 @@ std::vector ApproximateNearestNeighborCondition::getReferenceVector() con throw Exception(ErrorCodes::LOGICAL_ERROR, "Reference vector was requested for useless or uninitialized index."); } -size_t ApproximateNearestNeighborCondition::getNumOfDimensions() const +size_t ApproximateNearestNeighborCondition::getDimensions() const { if (index_is_useful && query_information.has_value()) return query_information->reference_vector.size(); diff --git a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.h b/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.h index 4fb95c3f492..310890eba1e 100644 --- a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.h +++ b/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.h @@ -90,8 +90,8 @@ public: /// Distance should be calculated regarding to referenceVector std::vector getReferenceVector() const; - /// Reference vector's dimension size - size_t getNumOfDimensions() const; + /// Reference vector's dimension count + size_t getDimensions() const; String getColumnName() const; diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp index 1a28f28f746..f77cfe4fed0 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp @@ -27,13 +27,13 @@ namespace ErrorCodes template -AnnoyIndexWithSerialization::AnnoyIndexWithSerialization(uint64_t dim) - : Base::AnnoyIndex(dim) +AnnoyIndexWithSerialization::AnnoyIndexWithSerialization(size_t dimensions) + : Base::AnnoyIndex(dimensions) { } template -void 
AnnoyIndexWithSerialization::serialize(WriteBuffer& ostr) const +void AnnoyIndexWithSerialization::serialize(WriteBuffer & ostr) const { chassert(Base::_built); writeIntBinary(Base::_s, ostr); @@ -43,11 +43,11 @@ void AnnoyIndexWithSerialization::serialize(WriteBuffer& ostr) const writeIntBinary(Base::_K, ostr); writeIntBinary(Base::_seed, ostr); writeVectorBinary(Base::_roots, ostr); - ostr.write(reinterpret_cast(Base::_nodes), Base::_s * Base::_n_nodes); + ostr.write(reinterpret_cast(Base::_nodes), Base::_s * Base::_n_nodes); } template -void AnnoyIndexWithSerialization::deserialize(ReadBuffer& istr) +void AnnoyIndexWithSerialization::deserialize(ReadBuffer & istr) { chassert(!Base::_built); readIntBinary(Base::_s, istr); @@ -69,7 +69,7 @@ void AnnoyIndexWithSerialization::deserialize(ReadBuffer& istr) } template -uint64_t AnnoyIndexWithSerialization::getNumOfDimensions() const +size_t AnnoyIndexWithSerialization::getDimensions() const { return Base::get_f(); } @@ -97,14 +97,14 @@ void MergeTreeIndexGranuleAnnoy::serializeBinary(WriteBuffer & ostr) c { /// Number of dimensions is required in the index constructor, /// so it must be written and read separately from the other part - writeIntBinary(index->getNumOfDimensions(), ostr); // write dimension + writeIntBinary(static_cast(index->getDimensions()), ostr); // write dimension index->serialize(ostr); } template void MergeTreeIndexGranuleAnnoy::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion /*version*/) { - uint64_t dimension; + UInt64 dimension; readIntBinary(dimension, istr); index = std::make_shared>(dimension); index->deserialize(istr); @@ -114,7 +114,7 @@ template MergeTreeIndexAggregatorAnnoy::MergeTreeIndexAggregatorAnnoy( const String & index_name_, const Block & index_sample_block_, - uint64_t trees_) + UInt64 trees_) : index_name(index_name_) , index_sample_block(index_sample_block_) , trees(trees_) @@ -251,10 +251,10 @@ std::vector MergeTreeIndexConditionAnnoy::getUsefulRangesImpl(MergeTreeI const AnnoyIndexWithSerializationPtr annoy = granule->index; - if (ann_condition.getNumOfDimensions() != annoy->getNumOfDimensions()) + if (ann_condition.getDimensions() != annoy->getDimensions()) throw Exception(ErrorCodes::INCORRECT_QUERY, "The dimension of the space in the request ({}) " "does not match the dimension in the index ({})", - ann_condition.getNumOfDimensions(), annoy->getNumOfDimensions()); + ann_condition.getDimensions(), annoy->getDimensions()); std::vector neighbors; /// indexes of dots which were closest to the reference vector std::vector distances; @@ -281,7 +281,7 @@ std::vector MergeTreeIndexConditionAnnoy::getUsefulRangesImpl(MergeTreeI return granule_numbers; } -MergeTreeIndexAnnoy::MergeTreeIndexAnnoy(const IndexDescription & index_, uint64_t trees_, const String & distance_function_) +MergeTreeIndexAnnoy::MergeTreeIndexAnnoy(const IndexDescription & index_, UInt64 trees_, const String & distance_function_) : IMergeTreeIndex(index_) , trees(trees_) , distance_function(distance_function_) @@ -320,9 +320,9 @@ MergeTreeIndexPtr annoyIndexCreator(const IndexDescription & index) if (!index.arguments.empty()) distance_function = index.arguments[0].get(); - uint64_t trees = default_trees; + UInt64 trees = default_trees; if (index.arguments.size() > 1) - trees = index.arguments[1].get(); + trees = index.arguments[1].get(); return std::make_shared(index, trees, distance_function); } @@ -338,7 +338,7 @@ void annoyIndexValidator(const IndexDescription & index, bool /* attach */) throw 
Exception(ErrorCodes::INCORRECT_QUERY, "Distance function argument of Annoy index must be of type String"); if (index.arguments.size() > 1 && index.arguments[1].getType() != Field::Types::UInt64) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Number of trees argument of Annoy index must be UInt64"); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Number of trees argument of Annoy index must be of type UInt64"); /// Check that the index is created on a single column @@ -351,17 +351,16 @@ void annoyIndexValidator(const IndexDescription & index, bool /* attach */) { String distance_name = index.arguments[0].get(); if (distance_name != "L2Distance" && distance_name != "cosineDistance") - throw Exception(ErrorCodes::INCORRECT_DATA, "Annoy index supports only distance functions 'L2Distance' and 'cosineDistance'. Given distance function: {}", distance_name); + throw Exception(ErrorCodes::INCORRECT_DATA, "Annoy index only supports distance functions 'L2Distance' and 'cosineDistance'"); } /// Check data type of indexed column: - auto throw_unsupported_underlying_column_exception = [](DataTypePtr data_type) + auto throw_unsupported_underlying_column_exception = []() { throw Exception( ErrorCodes::ILLEGAL_COLUMN, - "Annoy indexes can only be created on columns of type Array(Float32) and Tuple(Float32). Given type: {}", - data_type->getName()); + "Annoy indexes can only be created on columns of type Array(Float32) and Tuple(Float32)"); }; DataTypePtr data_type = index.sample_block.getDataTypes()[0]; @@ -370,7 +369,7 @@ void annoyIndexValidator(const IndexDescription & index, bool /* attach */) { TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); if (!WhichDataType(nested_type_index).isFloat32()) - throw_unsupported_underlying_column_exception(data_type); + throw_unsupported_underlying_column_exception(); } else if (const auto * data_type_tuple = typeid_cast(data_type.get())) { @@ -379,11 +378,11 @@ void annoyIndexValidator(const IndexDescription & index, bool /* attach */) { TypeIndex nested_type_index = inner_type->getTypeId(); if (!WhichDataType(nested_type_index).isFloat32()) - throw_unsupported_underlying_column_exception(data_type); + throw_unsupported_underlying_column_exception(); } } else - throw_unsupported_underlying_column_exception(data_type); + throw_unsupported_underlying_column_exception(); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h index 5204ff07b27..cfc3b7519b8 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h @@ -16,10 +16,10 @@ class AnnoyIndexWithSerialization : public Annoy::AnnoyIndex; public: - explicit AnnoyIndexWithSerialization(uint64_t dim); - void serialize(WriteBuffer& ostr) const; - void deserialize(ReadBuffer& istr); - uint64_t getNumOfDimensions() const; + explicit AnnoyIndexWithSerialization(size_t dimensions); + void serialize(WriteBuffer & ostr) const; + void deserialize(ReadBuffer & istr); + size_t getDimensions() const; }; template @@ -46,7 +46,7 @@ struct MergeTreeIndexGranuleAnnoy final : public IMergeTreeIndexGranule template struct MergeTreeIndexAggregatorAnnoy final : IMergeTreeIndexAggregator { - MergeTreeIndexAggregatorAnnoy(const String & index_name_, const Block & index_sample_block, uint64_t trees); + MergeTreeIndexAggregatorAnnoy(const String & index_name_, const Block & index_sample_block, UInt64 trees); ~MergeTreeIndexAggregatorAnnoy() override = default; bool empty() const override { return !index || 
index->get_n_items() == 0; } @@ -55,7 +55,7 @@ struct MergeTreeIndexAggregatorAnnoy final : IMergeTreeIndexAggregator const String index_name; const Block index_sample_block; - const uint64_t trees; + const UInt64 trees; AnnoyIndexWithSerializationPtr index; }; @@ -89,7 +89,7 @@ class MergeTreeIndexAnnoy : public IMergeTreeIndex { public: - MergeTreeIndexAnnoy(const IndexDescription & index_, uint64_t trees_, const String & distance_function_); + MergeTreeIndexAnnoy(const IndexDescription & index_, UInt64 trees_, const String & distance_function_); ~MergeTreeIndexAnnoy() override = default; @@ -100,7 +100,7 @@ public: bool mayBenefitFromIndexForIn(const ASTPtr & /*node*/) const override { return false; } private: - const uint64_t trees; + const UInt64 trees; const String distance_function; }; diff --git a/tests/queries/0_stateless/02354_annoy_index.reference b/tests/queries/0_stateless/02354_annoy_index.reference index 45515bc7733..5e01a6e566e 100644 --- a/tests/queries/0_stateless/02354_annoy_index.reference +++ b/tests/queries/0_stateless/02354_annoy_index.reference @@ -1,118 +1,144 @@ ---- Test with Array --- -WHERE type, L2Distance -1 [0,0,10] -2 [0,0,10.5] -3 [0,0,9.5] -4 [0,0,9.7] -5 [0,0,10.2] -ORDER BY type, L2Distance -1 [0,0,10] -5 [0,0,10.2] -4 [0,0,9.7] -WHERE type, L2Distance, check that index is used -Expression ((Projection + Before ORDER BY)) - Limit (preliminary LIMIT (without OFFSET)) - ReadFromMergeTree (default.tab) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 3/3 - Skip - Name: annoy_index - Description: annoy GRANULARITY 1 - Parts: 1/1 - Granules: 1/3 -ORDER BY type, L2Distance, check that index is used -Expression (Projection) - Limit (preliminary LIMIT (without OFFSET)) - Sorting (Sorting for ORDER BY) - Expression (Before ORDER BY) - ReadFromMergeTree (default.tab) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 3/3 - Skip - Name: annoy_index - Description: annoy GRANULARITY 1 - Parts: 1/1 - Granules: 3/3 -parameter annoy_index_search_k_nodes -parameter max_limit_for_ann_queries -Expression (Projection) - Limit (preliminary LIMIT (without OFFSET)) - Sorting (Sorting for ORDER BY) - Expression (Before ORDER BY) - ReadFromMergeTree (default.tab) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 3/3 ---- Test with Tuple --- -WHERE type, L2Distance -1 (0,0,10) -2 (0,0,10.5) -3 (0,0,9.5) -4 (0,0,9.7) -5 (0,0,10.2) -ORDER BY type, L2Distance -1 (0,0,10) -5 (0,0,10.2) -4 (0,0,9.7) -WHERE type, L2Distance, check that index is used -Expression ((Projection + Before ORDER BY)) - Limit (preliminary LIMIT (without OFFSET)) - ReadFromMergeTree (default.tab) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 3/3 - Skip - Name: annoy_index - Description: annoy GRANULARITY 1 - Parts: 1/1 - Granules: 1/3 -ORDER BY type, L2Distance, check that index is used -Expression (Projection) - Limit (preliminary LIMIT (without OFFSET)) - Sorting (Sorting for ORDER BY) - Expression (Before ORDER BY) - ReadFromMergeTree (default.tab) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 3/3 - Skip - Name: annoy_index - Description: annoy GRANULARITY 1 - Parts: 1/1 - Granules: 3/3 -parameter annoy_index_search_k_nodes -parameter max_limit_for_ann_queries -Expression (Projection) - Limit (preliminary LIMIT (without OFFSET)) - Sorting (Sorting for ORDER BY) - Expression (Before ORDER BY) - ReadFromMergeTree (default.tab) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 3/3 ---- Test alternative 
metric (cosine distance) and non-default NumTrees --- -WHERE type, L2Distance -1 [0,0,10] -2 [0,0,10.5] -3 [0,0,9.5] -4 [0,0,9.7] -5 [0,0,10.2] -ORDER BY type, L2Distance -1 [0,0,10] -5 [0,0,10.2] -4 [0,0,9.7] --- Negative tests --- +--- Test default GRANULARITY (should be 100 mio. for annoy)--- +CREATE TABLE default.tab\n(\n `id` Int32,\n `vector` Array(Float32),\n INDEX annoy_index vector TYPE annoy GRANULARITY 100000000\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `id` Int32,\n `vector` Array(Float32),\n INDEX annoy_index vector TYPE annoy GRANULARITY 100000000\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 8192 +--- Test with Array, GRANULARITY = 1, index_granularity = 5 --- +WHERE type, L2Distance, check that index is used +Expression ((Projection + Before ORDER BY)) + Limit (preliminary LIMIT (without OFFSET)) + ReadFromMergeTree (default.tab) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 3/3 + Skip + Name: annoy_index + Description: annoy GRANULARITY 1 + Parts: 1/1 + Granules: 1/3 +ORDER BY type, L2Distance, check that index is used +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 3/3 + Skip + Name: annoy_index + Description: annoy GRANULARITY 1 + Parts: 1/1 + Granules: 3/3 +Reference ARRAYs with non-matching dimension are rejected +Special case: MaximumDistance is negative +WHERE type, L2Distance +Special case: setting annoy_index_search_k_nodes +Special case: setting max_limit_for_ann_queries +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 3/3 +--- Test with Tuple, GRANULARITY = 1, index_granularity = 5 --- +WHERE type, L2Distance, check that index is used +Expression ((Projection + Before ORDER BY)) + Limit (preliminary LIMIT (without OFFSET)) + ReadFromMergeTree (default.tab) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 3/3 + Skip + Name: annoy_index + Description: annoy GRANULARITY 1 + Parts: 1/1 + Granules: 1/3 +ORDER BY type, L2Distance, check that index is used +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 3/3 + Skip + Name: annoy_index + Description: annoy GRANULARITY 1 + Parts: 1/1 + Granules: 3/3 +--- Test non-default metric (cosine distance) + non-default NumTrees (200) --- +--- Test with Array, GRANULARITY = 2, index_granularity = 4 --- +WHERE type, L2Distance, check that index is used +Expression ((Projection + Before ORDER BY)) + Limit (preliminary LIMIT (without OFFSET)) + ReadFromMergeTree (default.tab) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: annoy_index + Description: annoy GRANULARITY 2 + Parts: 0/1 + Granules: 2/4 +ORDER BY type, L2Distance, check that index is used +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: annoy_index + Description: annoy 
GRANULARITY 2 + Parts: 1/1 + Granules: 4/4 +--- Test with Array, GRANULARITY = 4, index_granularity = 4 --- +WHERE type, L2Distance, check that index is used +Expression ((Projection + Before ORDER BY)) + Limit (preliminary LIMIT (without OFFSET)) + ReadFromMergeTree (default.tab) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: annoy_index + Description: annoy GRANULARITY 4 + Parts: 0/1 + Granules: 3/4 +ORDER BY type, L2Distance, check that index is used +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: annoy_index + Description: annoy GRANULARITY 4 + Parts: 1/1 + Granules: 4/4 diff --git a/tests/queries/0_stateless/02354_annoy_index.sql b/tests/queries/0_stateless/02354_annoy_index.sql index abee5e8a6e4..fefb51dfcc9 100644 --- a/tests/queries/0_stateless/02354_annoy_index.sql +++ b/tests/queries/0_stateless/02354_annoy_index.sql @@ -1,150 +1,251 @@ --- Tags: disabled, no-fasttest, no-ubsan, no-cpu-aarch64, no-upgrade-check +-- Tags: no-fasttest, no-ubsan, no-cpu-aarch64, no-upgrade-check SET allow_experimental_annoy_index = 1; - -SELECT '--- Test with Array ---'; - -DROP TABLE IF EXISTS tab; -CREATE TABLE tab(id Int32, embedding Array(Float32), INDEX annoy_index embedding TYPE annoy()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity=5; -INSERT INTO tab VALUES (1, [0.0, 0.0, 10.0]), (2, [0.0, 0.0, 10.5]), (3, [0.0, 0.0, 9.5]), (4, [0.0, 0.0, 9.7]), (5, [0.0, 0.0, 10.2]), (6, [10.0, 0.0, 0.0]), (7, [9.5, 0.0, 0.0]), (8, [9.7, 0.0, 0.0]), (9, [10.2, 0.0, 0.0]), (10, [10.5, 0.0, 0.0]), (11, [0.0, 10.0, 0.0]), (12, [0.0, 9.5, 0.0]), (13, [0.0, 9.7, 0.0]), (14, [0.0, 10.2, 0.0]), (15, [0.0, 10.5, 0.0]); - -SELECT 'WHERE type, L2Distance'; -SELECT * -FROM tab -WHERE L2Distance(embedding, [0.0, 0.0, 10.0]) < 1.0 -LIMIT 5; - -SELECT 'ORDER BY type, L2Distance'; -SELECT * -FROM tab -ORDER BY L2Distance(embedding, [0.0, 0.0, 10.0]) -LIMIT 3; - --- Produces different error code with analyzer, TODO: check --- SELECT 'Reference ARRAYs with non-matching dimension are rejected'; --- SELECT * --- FROM tab --- ORDER BY L2Distance(embedding, [0.0, 0.0]) --- LIMIT 3; -- { serverError INCORRECT_QUERY } - -SELECT 'WHERE type, L2Distance, check that index is used'; -EXPLAIN indexes=1 -SELECT * -FROM tab -WHERE L2Distance(embedding, [0.0, 0.0, 10.0]) < 1.0 -LIMIT 5; - -SELECT 'ORDER BY type, L2Distance, check that index is used'; -EXPLAIN indexes=1 -SELECT * -FROM tab -ORDER BY L2Distance(embedding, [0.0, 0.0, 10.0]) -LIMIT 3; - -SELECT 'parameter annoy_index_search_k_nodes'; -SELECT * -FROM tab -ORDER BY L2Distance(embedding, [5.3, 7.3, 2.1]) -LIMIT 5 -SETTINGS annoy_index_search_k_nodes=0; -- searches zero nodes --> no results - -SELECT 'parameter max_limit_for_ann_queries'; -EXPLAIN indexes=1 -SELECT * -FROM tab -ORDER BY L2Distance(embedding, [5.3, 7.3, 2.1]) -LIMIT 5 -SETTINGS max_limit_for_ann_queries=2; -- doesn't use the ann index - -DROP TABLE tab; - -SELECT '--- Test with Tuple ---'; - -CREATE TABLE tab(id Int32, embedding Tuple(Float32, Float32, Float32), INDEX annoy_index embedding TYPE annoy()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity=5; -INSERT INTO tab VALUES (1, (0.0, 0.0, 10.0)), (2, (0.0, 0.0, 10.5)), (3, (0.0, 0.0, 9.5)), (4, (0.0, 0.0, 9.7)), (5, (0.0, 0.0, 10.2)), (6, (10.0, 0.0, 0.0)), (7, (9.5, 0.0, 0.0)), (8, (9.7, 0.0, 
0.0)), (9, (10.2, 0.0, 0.0)), (10, (10.5, 0.0, 0.0)), (11, (0.0, 10.0, 0.0)), (12, (0.0, 9.5, 0.0)), (13, (0.0, 9.7, 0.0)), (14, (0.0, 10.2, 0.0)), (15, (0.0, 10.5, 0.0)); - -SELECT 'WHERE type, L2Distance'; -SELECT * -FROM tab -WHERE L2Distance(embedding, (0.0, 0.0, 10.0)) < 1.0 -LIMIT 5; - -SELECT 'ORDER BY type, L2Distance'; -SELECT * -FROM tab -ORDER BY L2Distance(embedding, (0.0, 0.0, 10.0)) -LIMIT 3; - -SELECT 'WHERE type, L2Distance, check that index is used'; -EXPLAIN indexes=1 -SELECT * -FROM tab -WHERE L2Distance(embedding, (0.0, 0.0, 10.0)) < 1.0 -LIMIT 5; - -SELECT 'ORDER BY type, L2Distance, check that index is used'; -EXPLAIN indexes=1 -SELECT * -FROM tab -ORDER BY L2Distance(embedding, (0.0, 0.0, 10.0)) -LIMIT 3; - -SELECT 'parameter annoy_index_search_k_nodes'; -SELECT * -FROM tab -ORDER BY L2Distance(embedding, (5.3, 7.3, 2.1)) -LIMIT 5 -SETTINGS annoy_index_search_k_nodes=0; -- searches zero nodes --> no results - -SELECT 'parameter max_limit_for_ann_queries'; -EXPLAIN indexes=1 -SELECT * -FROM tab -ORDER BY L2Distance(embedding, (5.3, 7.3, 2.1)) -LIMIT 5 -SETTINGS max_limit_for_ann_queries=2; -- doesn't use the ann index - -DROP TABLE tab; - -SELECT '--- Test alternative metric (cosine distance) and non-default NumTrees ---'; - -CREATE TABLE tab(id Int32, embedding Array(Float32), INDEX annoy_index embedding TYPE annoy('cosineDistance', 200)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity=5; -INSERT INTO tab VALUES (1, [0.0, 0.0, 10.0]), (2, [0.0, 0.0, 10.5]), (3, [0.0, 0.0, 9.5]), (4, [0.0, 0.0, 9.7]), (5, [0.0, 0.0, 10.2]), (6, [10.0, 0.0, 0.0]), (7, [9.5, 0.0, 0.0]), (8, [9.7, 0.0, 0.0]), (9, [10.2, 0.0, 0.0]), (10, [10.5, 0.0, 0.0]), (11, [0.0, 10.0, 0.0]), (12, [0.0, 9.5, 0.0]), (13, [0.0, 9.7, 0.0]), (14, [0.0, 10.2, 0.0]), (15, [0.0, 10.5, 0.0]); - -SELECT 'WHERE type, L2Distance'; -SELECT * -FROM tab -WHERE L2Distance(embedding, [0.0, 0.0, 10.0]) < 1.0 -LIMIT 5; - -SELECT 'ORDER BY type, L2Distance'; -SELECT * -FROM tab -ORDER BY L2Distance(embedding, [0.0, 0.0, 10.0]) -LIMIT 3; - -DROP TABLE tab; +SET allow_experimental_analyzer = 0; SELECT '--- Negative tests ---'; +DROP TABLE IF EXISTS tab; + -- must have at most 2 arguments -CREATE TABLE tab(id Int32, embedding Array(Float32), INDEX annoy_index embedding TYPE annoy('too', 'many', 'arguments')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vector Array(Float32), INDEX annoy_index vector TYPE annoy('too', 'many', 'arguments')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -- first argument (distance_function) must be String -CREATE TABLE tab(id Int32, embedding Array(Float32), INDEX annoy_index embedding TYPE annoy(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vector Array(Float32), INDEX annoy_index vector TYPE annoy(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -- 2nd argument (number of trees) must be UInt64 -CREATE TABLE tab(id Int32, embedding Array(Float32), INDEX annoy_index embedding TYPE annoy('L2Distance', 'not an UInt64')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } - --- reject unsupported distance functions -CREATE TABLE tab(id Int32, embedding Array(Float32), INDEX annoy_index embedding TYPE annoy('wormholeDistance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +CREATE TABLE tab(id Int32, vector Array(Float32), INDEX annoy_index vector TYPE annoy('L2Distance', 'not an UInt64')) ENGINE = MergeTree 
ORDER BY id; -- { serverError INCORRECT_QUERY } -- must be created on single column -CREATE TABLE tab(id Int32, embedding Array(Float32), INDEX annoy_index (embedding, id) TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } +CREATE TABLE tab(id Int32, vector Array(Float32), INDEX annoy_index (vector, id) TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } + +-- reject unsupported distance functions +CREATE TABLE tab(id Int32, vector Array(Float32), INDEX annoy_index vector TYPE annoy('wormholeDistance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } -- must be created on Array/Tuple(Float32) columns SET allow_suspicious_low_cardinality_types = 1; -CREATE TABLE tab(id Int32, embedding Float32, INDEX annoy_index embedding TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, embedding Array(Float64), INDEX annoy_index embedding TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, embedding LowCardinality(Float32), INDEX annoy_index embedding TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, embedding Nullable(Float32), INDEX annoy_index embedding TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vector Float32, INDEX annoy_index vector TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vector Array(Float64), INDEX annoy_index vector TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vector Tuple(Float64), INDEX annoy_index vector TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vector LowCardinality(Float32), INDEX annoy_index vector TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vector Nullable(Float32), INDEX annoy_index vector TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } + +SELECT '--- Test default GRANULARITY (should be 100 mio. 
for annoy)---'; + +CREATE TABLE tab (id Int32, vector Array(Float32), INDEX annoy_index(vector) TYPE annoy) ENGINE=MergeTree ORDER BY id; +SHOW CREATE TABLE tab; +DROP TABLE tab; + +CREATE TABLE tab (id Int32, vector Array(Float32)) ENGINE=MergeTree ORDER BY id; +ALTER TABLE tab ADD INDEX annoy_index(vector) TYPE annoy; +SHOW CREATE TABLE tab; + +DROP TABLE tab; + +SELECT '--- Test with Array, GRANULARITY = 1, index_granularity = 5 ---'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab(id Int32, vector Array(Float32), INDEX annoy_index vector TYPE annoy() GRANULARITY 1) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 5; +INSERT INTO tab VALUES (1, [0.0, 0.0, 10.0]), (2, [0.0, 0.0, 10.5]), (3, [0.0, 0.0, 9.5]), (4, [0.0, 0.0, 9.7]), (5, [0.0, 0.0, 10.2]), (6, [10.0, 0.0, 0.0]), (7, [9.5, 0.0, 0.0]), (8, [9.7, 0.0, 0.0]), (9, [10.2, 0.0, 0.0]), (10, [10.5, 0.0, 0.0]), (11, [0.0, 10.0, 0.0]), (12, [0.0, 9.5, 0.0]), (13, [0.0, 9.7, 0.0]), (14, [0.0, 10.2, 0.0]), (15, [0.0, 10.5, 0.0]); + +-- rows = 15, index_granularity = 5, GRANULARITY = 1 gives 3 annoy-indexed blocks (each comprising a single granule) +-- condition 'L2Distance(vector, reference_vector) < 1.0' ensures that only one annoy-indexed block produces results --> "Granules: 1/3" + +-- See (*) why commented out +-- SELECT 'WHERE type, L2Distance'; +-- SELECT * +-- FROM tab +-- WHERE L2Distance(vector, [0.0, 0.0, 10.0]) < 1.0 +-- LIMIT 3; + +SELECT 'WHERE type, L2Distance, check that index is used'; +EXPLAIN indexes=1 +SELECT * +FROM tab +WHERE L2Distance(vector, [0.0, 0.0, 10.0]) < 1.0 +LIMIT 3; + +-- See (*) why commented out +-- SELECT 'ORDER BY type, L2Distance'; +-- SELECT * +-- FROM tab +-- ORDER BY L2Distance(vector, [0.0, 0.0, 10.0]) +-- LIMIT 3; + +SELECT 'ORDER BY type, L2Distance, check that index is used'; +EXPLAIN indexes=1 +SELECT * +FROM tab +ORDER BY L2Distance(vector, [0.0, 0.0, 10.0]) +LIMIT 3; + +-- Test special cases. Corresponding special case tests are omitted from later tests. + +SELECT 'Reference ARRAYs with non-matching dimension are rejected'; +SELECT * +FROM tab +ORDER BY L2Distance(vector, [0.0, 0.0]) +LIMIT 3; -- { serverError INCORRECT_QUERY } + +SELECT 'Special case: MaximumDistance is negative'; +SELECT 'WHERE type, L2Distance'; +SELECT * +FROM tab +WHERE L2Distance(vector, [0.0, 0.0, 10.0]) < -1.0 +LIMIT 3; -- { serverError INCORRECT_QUERY } + +SELECT 'Special case: setting annoy_index_search_k_nodes'; +SELECT * +FROM tab +ORDER BY L2Distance(vector, [5.3, 7.3, 2.1]) +LIMIT 3 +SETTINGS annoy_index_search_k_nodes=0; -- searches zero nodes --> no results + +SELECT 'Special case: setting max_limit_for_ann_queries'; +EXPLAIN indexes=1 +SELECT * +FROM tab +ORDER BY L2Distance(vector, [5.3, 7.3, 2.1]) +LIMIT 3 +SETTINGS max_limit_for_ann_queries=2; -- doesn't use the ann index + +DROP TABLE tab; + +-- Test Tuple embeddings. Triggers different logic than Array inside MergeTreeIndexAnnoy but the same logic as Array above MergeTreeIndexAnnoy. +-- Therefore test Tuple case just once. 
+ +SELECT '--- Test with Tuple, GRANULARITY = 1, index_granularity = 5 ---'; + +CREATE TABLE tab(id Int32, vector Tuple(Float32, Float32, Float32), INDEX annoy_index vector TYPE annoy() GRANULARITY 1) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 5; +INSERT INTO tab VALUES (1, (0.0, 0.0, 10.0)), (2, (0.0, 0.0, 10.5)), (3, (0.0, 0.0, 9.5)), (4, (0.0, 0.0, 9.7)), (5, (0.0, 0.0, 10.2)), (6, (10.0, 0.0, 0.0)), (7, (9.5, 0.0, 0.0)), (8, (9.7, 0.0, 0.0)), (9, (10.2, 0.0, 0.0)), (10, (10.5, 0.0, 0.0)), (11, (0.0, 10.0, 0.0)), (12, (0.0, 9.5, 0.0)), (13, (0.0, 9.7, 0.0)), (14, (0.0, 10.2, 0.0)), (15, (0.0, 10.5, 0.0)); + +-- See (*) why commented out +-- SELECT 'WHERE type, L2Distance'; +-- SELECT * +-- FROM tab +-- WHERE L2Distance(vector, (0.0, 0.0, 10.0)) < 1.0 +-- LIMIT 3; + +SELECT 'WHERE type, L2Distance, check that index is used'; +EXPLAIN indexes=1 +SELECT * +FROM tab +WHERE L2Distance(vector, (0.0, 0.0, 10.0)) < 1.0 +LIMIT 3; + +-- See (*) why commented out +-- SELECT 'ORDER BY type, L2Distance'; +-- SELECT * +-- FROM tab +-- ORDER BY L2Distance(vector, (0.0, 0.0, 10.0)) +-- LIMIT 3; + +SELECT 'ORDER BY type, L2Distance, check that index is used'; +EXPLAIN indexes=1 +SELECT * +FROM tab +ORDER BY L2Distance(vector, (0.0, 0.0, 10.0)) +LIMIT 3; + +DROP TABLE tab; + +-- Not a systematic test, just to make sure no bad things happen +SELECT '--- Test non-default metric (cosine distance) + non-default NumTrees (200) ---'; + +CREATE TABLE tab(id Int32, vector Array(Float32), INDEX annoy_index vector TYPE annoy('cosineDistance', 200) GRANULARITY 1) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 5; +INSERT INTO tab VALUES (1, [0.0, 0.0, 10.0]), (2, [0.0, 0.0, 10.5]), (3, [0.0, 0.0, 9.5]), (4, [0.0, 0.0, 9.7]), (5, [0.0, 0.0, 10.2]), (6, [10.0, 0.0, 0.0]), (7, [9.5, 0.0, 0.0]), (8, [9.7, 0.0, 0.0]), (9, [10.2, 0.0, 0.0]), (10, [10.5, 0.0, 0.0]), (11, [0.0, 10.0, 0.0]), (12, [0.0, 9.5, 0.0]), (13, [0.0, 9.7, 0.0]), (14, [0.0, 10.2, 0.0]), (15, [0.0, 10.5, 0.0]); + +-- See (*) why commented out +-- SELECT 'WHERE type, L2Distance'; +-- SELECT * +-- FROM tab +-- WHERE L2Distance(vector, [0.0, 0.0, 10.0]) < 1.0 +-- LIMIT 3; + +-- See (*) why commented out +-- SELECT 'ORDER BY type, L2Distance'; +-- SELECT * +-- FROM tab +-- ORDER BY L2Distance(vector, [0.0, 0.0, 10.0]) +-- LIMIT 3; + +DROP TABLE tab; + +SELECT '--- Test with Array, GRANULARITY = 2, index_granularity = 4 ---'; + +CREATE TABLE tab(id Int32, vector Array(Float32), INDEX annoy_index vector TYPE annoy() GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 4; +INSERT INTO tab VALUES (1, [0.0, 0.0, 10.0, 0.0]), (2, [0.0, 0.0, 10.5, 0.0]), (3, [0.0, 0.0, 9.5, 0.0]), (4, [0.0, 0.0, 9.7, 0.0]), (5, [10.0, 0.0, 0.0, 0.0]), (6, [9.5, 0.0, 0.0, 0.0]), (7, [9.7, 0.0, 0.0, 0.0]), (8, [10.2, 0.0, 0.0, 0.0]), (9, [0.0, 10.0, 0.0, 0.0]), (10, [0.0, 9.5, 0.0, 0.0]), (11, [0.0, 9.7, 0.0, 0.0]), (12, [0.0, 9.7, 0.0, 0.0]), (13, [0.0, 0.0, 0.0, 10.3]), (14, [0.0, 0.0, 0.0, 9.5]), (15, [0.0, 0.0, 0.0, 10.0]), (16, [0.0, 0.0, 0.0, 10.5]); + +-- rows = 16, index_granularity = 4, GRANULARITY = 2 gives 2 annoy-indexed blocks (each comprising two granules) +-- condition 'L2Distance(vector, reference_vector) < 1.0' ensures that only one annoy-indexed block produces results --> "Granules: 2/4" + +-- See (*) why commented out +-- SELECT 'WHERE type, L2Distance'; +-- SELECT * +-- FROM tab +-- WHERE L2Distance(vector, [10.0, 0.0, 10.0, 0.0]) < 5.0 +-- LIMIT 3; + +SELECT 'WHERE type, L2Distance, check that index is used'; 
+EXPLAIN indexes=1 +SELECT * +FROM tab +WHERE L2Distance(vector, [10.0, 0.0, 10.0, 0.0]) < 5.0 +LIMIT 3; + +-- See (*) why commented out +-- SELECT 'ORDER BY type, L2Distance'; +-- SELECT * +-- FROM tab +-- ORDER BY L2Distance(vector, [10.0, 0.0, 10.0, 0.0]) +-- LIMIT 3; + +SELECT 'ORDER BY type, L2Distance, check that index is used'; +EXPLAIN indexes=1 +SELECT * +FROM tab +ORDER BY L2Distance(vector, [10.0, 0.0, 10.0, 0.0]) +LIMIT 3; + +DROP TABLE tab; + +SELECT '--- Test with Array, GRANULARITY = 4, index_granularity = 4 ---'; + +CREATE TABLE tab(id Int32, vector Array(Float32), INDEX annoy_index vector TYPE annoy() GRANULARITY 4) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 4; +INSERT INTO tab VALUES (1, [0.0, 0.0, 10.0, 0.0]), (2, [0.0, 0.0, 10.5, 0.0]), (3, [0.0, 0.0, 9.5, 0.0]), (4, [0.0, 0.0, 9.7, 0.0]), (5, [10.0, 0.0, 0.0, 0.0]), (6, [9.5, 0.0, 0.0, 0.0]), (7, [9.7, 0.0, 0.0, 0.0]), (8, [10.2, 0.0, 0.0, 0.0]), (9, [0.0, 10.0, 0.0, 0.0]), (10, [0.0, 9.5, 0.0, 0.0]), (11, [0.0, 9.7, 0.0, 0.0]), (12, [0.0, 9.7, 0.0, 0.0]), (13, [0.0, 0.0, 0.0, 10.3]), (14, [0.0, 0.0, 0.0, 9.5]), (15, [0.0, 0.0, 0.0, 10.0]), (16, [0.0, 0.0, 0.0, 10.5]); + +-- rows = 16, index_granularity = 4, GRANULARITY = 4 gives a single annoy-indexed block (comprising all granules) +-- no two matches happen to be located in the same granule, so with LIMIT = 3, we'll get "Granules: 2/4" + +-- See (*) why commented out +-- SELECT 'WHERE type, L2Distance'; +-- SELECT * +-- FROM tab +-- WHERE L2Distance(vector, [10.0, 0.0, 10.0, 0.0]) < 5.0 +-- LIMIT 3; + +SELECT 'WHERE type, L2Distance, check that index is used'; +EXPLAIN indexes=1 +SELECT * +FROM tab +WHERE L2Distance(vector, [10.0, 0.0, 10.0, 0.0]) < 5.0 +LIMIT 3; + +-- See (*) why commented out +-- SELECT 'ORDER BY type, L2Distance'; +-- SELECT * +-- FROM tab +-- ORDER BY L2Distance(vector, [10.0, 0.0, 10.0, 0.0]) +-- LIMIT 3; + +SELECT 'ORDER BY type, L2Distance, check that index is used'; +EXPLAIN indexes=1 +SELECT * +FROM tab +ORDER BY L2Distance(vector, [10.0, 0.0, 10.0, 0.0]) +LIMIT 3; + +DROP TABLE tab; + +-- (*) Storage and search in Annoy indexes is inherently random. Tests which check for exact row matches would be unstable. Therefore, +-- comment them out. 
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index d6cef1883f4..021855e399f 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -26,6 +26,7 @@ AlertManager Alexey AnyEvent AppleClang +Approximative ArrayJoin ArrowStream AsyncInsertCacheSize @@ -1005,6 +1006,7 @@ anyLast anyheavy anylast appendTrailingCharIfAbsent +approximative argMax argMin argmax @@ -2419,6 +2421,7 @@ unescaping unhex unicode unidimensional +unintuitive uniq uniqCombined uniqExact From 4d4e5c690e446db23f8a1ef7fc1e577df93e9373 Mon Sep 17 00:00:00 2001 From: zvonand Date: Thu, 8 Jun 2023 17:10:51 +0200 Subject: [PATCH 260/722] update docs spelling check failed --- docs/en/operations/settings/settings.md | 2 +- docs/en/sql-reference/functions/date-time-functions.md | 4 +--- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 77c9238e4c7..6c9c8349519 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4116,7 +4116,7 @@ SELECT *, timezone() FROM test_tz WHERE d = '2000-01-01 00:00:00' SETTINGS sessi This happens due to different parsing pipelines: - `toDateTime('2000-01-01 00:00:00')` creates a new DateTime in a usual way, and thus `session_timezone` setting from query context is applied. - - `2000-01-01 00:00:00` is parsed to a DateTime inheriting type of `d` column, including DateTime's time zone, and `session_timezone` has no impact on this value. + - `2000-01-01 00:00:00` is parsed to a DateTime inheriting type of `d` column, including its time zone, and `session_timezone` has no impact on this value. Possible values: diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 1a5b0dcabf9..89ac6d438ff 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -163,7 +163,7 @@ Type: [String](../../sql-reference/data-types/string.md). ## serverTimeZone Returns the default timezone of the server, i.e. the value of setting [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). -If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value. +If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise, it produces a constant value. **Syntax** @@ -171,8 +171,6 @@ If it is executed in the context of a distributed table, then it generates a nor serverTimeZone() ``` -Alias: `ServerTimezone`, `servertimezone`. - **Returned value** - Timezone. 
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index ded7a4643a9..8301579b6a8 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -484,6 +484,7 @@ russian rw sasl schemas +servertimezone simdjson skippingerrors sparsehash From ea9d0f6c3c94ae6fef1bb552958455c7ab9b25e7 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 8 Jun 2023 18:18:12 +0200 Subject: [PATCH 261/722] Fix --- .../IO/CachedOnDiskReadBufferFromFile.cpp | 55 +++++++++---------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 6bf72434580..6317aba20e9 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -1038,34 +1038,6 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() current_file_segment_counters.increment(ProfileEvents::FileSegmentUsedBytes, available()); - // No necessary because of the SCOPE_EXIT above, but useful for logging below. - if (download_current_segment) - file_segment.completePartAndResetDownloader(); - - chassert(!file_segment.isDownloader()); - - LOG_TEST( - log, - "Key: {}. Returning with {} bytes, buffer position: {} (offset: {}, predownloaded: {}), " - "buffer available: {}, current range: {}, file offset of buffer end: {}, impl offset: {}, file segment state: {}, " - "current write offset: {}, read_type: {}, reading until position: {}, started with offset: {}, " - "remaining ranges: {}", - cache_key.toString(), - working_buffer.size(), - getPosition(), - offset(), - needed_to_predownload, - available(), - current_read_range.toString(), - file_offset_of_buffer_end, - implementation_buffer->getFileOffsetOfBufferEnd(), - FileSegment::stateToString(file_segment.state()), - file_segment.getCurrentWriteOffset(false), - toString(read_type), - read_until_position, - first_offset, - file_segments->toString()); - if (size == 0 && file_offset_of_buffer_end < read_until_position) { size_t cache_file_size = getFileSizeFromReadBuffer(*implementation_buffer); @@ -1086,6 +1058,33 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() file_segment.getInfoForLog()); } + // No necessary because of the SCOPE_EXIT above, but useful for logging below. + if (download_current_segment) + file_segment.completePartAndResetDownloader(); + + chassert(!file_segment.isDownloader()); + + LOG_TEST( + log, + "Key: {}. 
Returning with {} bytes, buffer position: {} (offset: {}, predownloaded: {}), " + "buffer available: {}, current range: {}, file offset of buffer end: {}, file segment state: {}, " + "current write offset: {}, read_type: {}, reading until position: {}, started with offset: {}, " + "remaining ranges: {}", + cache_key.toString(), + working_buffer.size(), + getPosition(), + offset(), + needed_to_predownload, + available(), + current_read_range.toString(), + file_offset_of_buffer_end, + FileSegment::stateToString(file_segment.state()), + file_segment.getCurrentWriteOffset(false), + toString(read_type), + read_until_position, + first_offset, + file_segments->toString()); + return result; } From a1b1e12e5bb5d6a1937f9081eb43374afef60f9b Mon Sep 17 00:00:00 2001 From: zvonand Date: Thu, 8 Jun 2023 18:38:51 +0200 Subject: [PATCH 262/722] upd spell --- docs/en/sql-reference/functions/date-time-functions.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 89ac6d438ff..62bbb84053a 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -158,9 +158,9 @@ Type: [String](../../sql-reference/data-types/string.md). **See also** -- [serverTimeZone](#serverTimeZone) +- [serverTimezone](#serverTimeZone) -## serverTimeZone +## serverTimezone Returns the default timezone of the server, i.e. the value of setting [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise, it produces a constant value. From f34937687e6316b11fb2d61b95f818fd4828a9ce Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 8 Jun 2023 18:00:54 +0000 Subject: [PATCH 263/722] enable settings for mutation throttling by default --- src/Storages/MergeTree/MergeTreeSettings.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 33aea358078..a3d475b74b2 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -67,8 +67,8 @@ struct Settings; M(Bool, remove_rolled_back_parts_immediately, 1, "Setting for an incomplete experimental feature.", 0) \ M(CleanDeletedRows, clean_deleted_rows, CleanDeletedRows::Never, "Is the Replicated Merge cleanup has to be done automatically at each merge or manually (possible values are 'Always'/'Never' (default))", 0) \ M(UInt64, replicated_max_mutations_in_one_entry, 10000, "Max number of mutation commands that can be merged together and executed in one MUTATE_PART entry (0 means unlimited)", 0) \ - M(UInt64, number_of_mutations_to_delay, 0, "If table has at least that many unfinished mutations, artificially slow down mutations of table. Disabled if set to 0", 0) \ - M(UInt64, number_of_mutations_to_throw, 0, "If table has at least that many unfinished mutations, throw 'Too many mutations' exception. Disabled if set to 0", 0) \ + M(UInt64, number_of_mutations_to_delay, 500, "If table has at least that many unfinished mutations, artificially slow down mutations of table. Disabled if set to 0", 0) \ + M(UInt64, number_of_mutations_to_throw, 1000, "If table has at least that many unfinished mutations, throw 'Too many mutations' exception. 
Disabled if set to 0", 0) \ M(UInt64, min_delay_to_mutate_ms, 10, "Min delay of mutating MergeTree table in milliseconds, if there are a lot of unfinished mutations", 0) \ M(UInt64, max_delay_to_mutate_ms, 1000, "Max delay of mutating MergeTree table in milliseconds, if there are a lot of unfinished mutations", 0) \ \ From a2355673d8349f4a689f46a354c335a7647a5c59 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 8 Jun 2023 21:18:29 +0000 Subject: [PATCH 264/722] fix tests --- tests/queries/0_stateless/02125_many_mutations.sh | 2 +- tests/queries/0_stateless/02125_many_mutations_2.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02125_many_mutations.sh b/tests/queries/0_stateless/02125_many_mutations.sh index 7a89e5f7c4f..c3108df5ae3 100755 --- a/tests/queries/0_stateless/02125_many_mutations.sh +++ b/tests/queries/0_stateless/02125_many_mutations.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x" +$CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0" $CLICKHOUSE_CLIENT -q "insert into many_mutations values (0, 0), (1, 1)" $CLICKHOUSE_CLIENT -q "system stop merges many_mutations" diff --git a/tests/queries/0_stateless/02125_many_mutations_2.sh b/tests/queries/0_stateless/02125_many_mutations_2.sh index df170a402c6..52866a54974 100755 --- a/tests/queries/0_stateless/02125_many_mutations_2.sh +++ b/tests/queries/0_stateless/02125_many_mutations_2.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x" +$CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0" $CLICKHOUSE_CLIENT -q "insert into many_mutations select number, number + 1 from numbers(2000)" $CLICKHOUSE_CLIENT -q "system stop merges many_mutations" From 7578203b46b1657373fc27a5f05e56531581916b Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Fri, 9 Jun 2023 03:44:43 +0000 Subject: [PATCH 265/722] Changes after review --- docs/en/interfaces/cli.md | 240 +++++++++++++++++--------------- docs/ru/interfaces/cli.md | 235 ++++++++++++++++--------------- programs/client/Client.cpp | 4 +- src/Client/ConnectionString.cpp | 49 ++++--- 4 files changed, 281 insertions(+), 247 deletions(-) diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index c36887672c7..2126c538c5d 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -158,116 +158,6 @@ $ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" -q "SELECT * FROM $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="number" --query "SELECT {col:Identifier} FROM {db:Identifier}.{tbl:Identifier} LIMIT 10" ``` -## Connection string {#connection_string} - -The connection string for clickhouse-client is presented in URI format: - -```text -clickhouse://[user_info@][hosts_and_ports][/dbname][?query_parameters] -``` - -where user_info is: `user[:password]` -and hosts_and_ports is a list of values: `[host][:port],[host][:port]` Port is not mandatory. 
-and query_parameters is a list of parameter[=value]: `param_name[=value]¶m_name[=value]...` value may not be required for some of the parameters. Parameter names are case sensitive. - -Allowed query_parameters keys: - -- `secure` or shorthanded `s` - no value. If specified, client will connect to the server over a secure connection (TLS). See `secure` in [command-line-options](#command-line-options) - -These examples illustrate valid connection strings for clickhouse-client: - -```text -clickhouse: -clickhouse://localhost -clickhouse://localhost:9000 -clickhouse://localhost/default -clickhouse://default@localhost -clickhouse://user:password@localhost -clickhouse://user_name@localhost/some_database?secure -clickhouse://host1:9000,host2:5000/some_database -``` - -The host component can either be an IP address or a host name. Put an IPv6 address in square brackets to specify it: - -```text -clickhouse://[2001:db8::1234] -``` - -If user or/and password are not specified, default values will be used. -If host is not specified, the default host will be used (localhost). -If port is not specified, the default port will be used (9000). -If database is not specified, the default database will be used. - -User, password, and database can be specified in the connection string either in `--user`, `--password`, `--database` command line options. - -The connection string must be specified in the first argument of clickhouse-client. The connection string can be combined with other [command-line-options](#command-line-options) except `--host(h)` and `--port`. - -### Multiple hosts {#connection_string_multiple_hosts} - -URI allows multiple hosts to be connected to, and the client will try to connect to those hosts using the order from URI and command line options. The hosts and ports in the URI accept comma-separated lists of values. - -If more than one host is supplied, or if a single host name is translated to more than one address, each host and address will be attempted one at a time until one is successful. The remaining hosts after successful connection in the list are not tried. - -### Percent encoding {#connection_string_uri_percent_encoding} - -Hosts, user name, password, database, and query parameters should be [Percent-Encoded](https://en.wikipedia.org/wiki/URL_encoding) if values contain invalid URI characters. - -### Examples {#connection_string_examples} - -Connect to localhost using port 9000 and execute the query "SELECT 1". - -``` bash -clickhouse-client "clickhouse://localhost:9000" --query "SELECT 1" -``` - -Connect to localhost using port 9000 in interactive, multiline mode. - -``` bash -clickhouse-client "clickhouse://localhost:9000" -m -``` - -Connect to localhost using port 9000 in interactive mode with the user specified in `--user` option. - -``` bash -clickhouse-client "clickhouse://localhost:9000" --user default -``` - -Connect to localhost using port 9000 in interactive mode to `my_database` database specified in the command line option. - -``` bash -clickhouse-client "clickhouse://localhost:9000" --database my_database -``` - -Connect to localhost using port 9000 in interactive mode to `my_database` database specified in the connection string. - -``` bash -clickhouse-client "clickhouse://localhost:9000/my_database" -``` - -Connect to localhost using port 9000 in interactive mode to `my_database` database specified in the connection string and a secure connection using shorthanded 's' URI parameter. 
-
-```bash
-clickhouse-client "clickhouse://localhost/my_database?s"
-```
-
-Connect to default host using default port, default user, and default database.
-
-``` bash
-clickhouse-client "clickhouse:"
-```
-
-Connect to the default host using the default port, using user user_name and no password.
-
-``` bash
-clickhouse-client "clickhouse://user_name@"
-```
-
-Connect to localhost using email as the user name. `@` symbol is percent encoded to `%40`.
-
-``` bash
-clickhouse-client "clickhouse://some_user%40some_mail.com@localhost:9000"
-```
-
 ## Configuring {#interfaces_cli_configuration}
 
 You can pass parameters to `clickhouse-client` (all parameters have a default value) using:
@@ -304,7 +194,135 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
 - `--print-profile-events` – Print `ProfileEvents` packets.
 - `--profile-events-delay-ms` – Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet).
 
-Since version 20.5, `clickhouse-client` has automatic syntax highlighting (always enabled).
+Instead of --host, --port, --user and --password options, ClickHouse client also supports connection strings.
+
+
+## Connection string {#connection_string}
+
+clickhouse-client alternatively supports connecting to clickhouse server using a connection string similar to [MongoDB](https://www.mongodb.com/docs/manual/reference/connection-string/), [PostreSQL](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), [MySQL](https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html#connecting-using-uri). It has the following syntax:
+
+```text
+clickhouse:[//[user_info@][hosts_and_ports]][/database][?query_parameters]
+```
+
+Where
+
+- `user_info` - (optional) is a user and an optional password,
+- `hosts_and_ports` - (optional) is a list of hosts and optional ports `host[:port] [, host[:port]], ...`,
+- `database` - (optional) is the database name,
+- `query_parameters` - (optional) is a list of key-value pairs `param1=value1[,&param2=value2], ...`. For some parameters, no value is required. Parameter names and values are case-sensitive.
+
+
+
+The host component can either be an IP address or a host name. Put an IPv6 address in square brackets to specify it:
+
+```text
+clickhouse://[2001:db8::1234]
+```
+
+If user is not specified, the `default` user without password will be used.
+If host is not specified, `localhost` will be used.
+If port is not specified, `9000` will be used as the port.
+If database is not specified, the `default` database will be used.
+
+If the user name, password or database was specified in the connection string, it cannot be specified using `--user`, `--password` or `--database` (and vice versa).
+
+The connection string must be specified in the first argument of clickhouse-client. The connection string can be combined with other [command-line-options](#command-line-options) except `--host(h)` and `--port`.
+
+### Multiple hosts {#connection_string_multiple_hosts}
+
+URI allows multiple hosts to be connected to. Connection strings can contain multiple hosts. ClickHouse-client will try to connect to these hosts in order (i.e. from left to right). After the connection is established, no attempt to connect to the remaining hosts is made.
+
+### Allowed query_parameters keys {#connection_string_query_parameters}
+
+- `secure` or shorthanded `s` - no value. If specified, client will connect to the server over a secure connection (TLS). See `secure` in [command-line-options](#command-line-options)
+
+### Percent encoding {#connection_string_uri_percent_encoding}
+
+Non-US ASCII characters in the user name, password, hosts, database or query parameters must be [percent-encoded](https://en.wikipedia.org/wiki/URL_encoding).
+
+### Examples {#connection_string_examples}
+
+Connect to localhost using port 9000 and execute the query "SELECT 1".
+
+``` bash
+clickhouse-client clickhouse://localhost:9000 --query "SELECT 1"
+```
+
+Connect to localhost using user `john` with password `secret`, host `127.0.0.1` and port `9000`.
+
+``` bash
+clickhouse-client clickhouse://john:secret@127.0.0.1:9000
+```
+
+Connect to localhost using the default user, host with IPv6 address `[::1]` and port `9000`.
+
+``` bash
+clickhouse-client clickhouse://[::1]:9000
+```
+
+Connect to localhost using the default user, host with IPv6 address `[2001:db8:3333:4444:5555:6666:7777:8888]` and port `9000`.
+
+``` bash
+clickhouse-client clickhouse://[2001:db8:3333:4444:5555:6666:7777:8888]:9000
+```
+
+Connect to localhost using port 9000 in multiline mode.
+
+``` bash
+clickhouse-client clickhouse://localhost:9000 '-m'
+```
+
+Connect to localhost using port 9000 with the user `default`.
+
+``` bash
+clickhouse-client clickhouse://default@localhost:9000
+
+# equivalent to:
+clickhouse-client clickhouse://localhost:9000 --user default
+```
+
+Connect to localhost using port 9000 to `my_database` database.
+
+``` bash
+clickhouse-client clickhouse://localhost:9000/my_database
+
+# equivalent to:
+clickhouse-client clickhouse://localhost:9000 --database my_database
+```
+
+Connect to localhost using port 9000 to `my_database` database specified in the connection string and a secure connection using shorthanded 's' URI parameter.
+
+```bash
+clickhouse-client clickhouse://localhost/my_database?s
+
+# equivalent to:
+clickhouse-client clickhouse://localhost/my_database -s
+```
+
+Connect to default host using default port, default user, and default database.
+
+``` bash
+clickhouse-client clickhouse:
+```
+
+Connect to the default host using the default port, using user user_name and no password.
+
+``` bash
+clickhouse-client clickhouse://user_name@
+```
+
+Connect to localhost using email as the user name. `@` symbol is percent encoded to `%40`.
+
+``` bash
+clickhouse-client clickhouse://some_user%40some_mail.com@localhost:9000
+```
+
+Connect to one of the provided hosts: `192.168.1.15`, `192.168.1.25`.
+
+``` bash
+clickhouse-client clickhouse://192.168.1.15,192.168.1.25
+```
 
 ### Configuration Files {#configuration_files}
 
diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md
index 801a72e48ec..ee29b122afb 100644
--- a/docs/ru/interfaces/cli.md
+++ b/docs/ru/interfaces/cli.md
@@ -110,117 +110,6 @@ $ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" -q "SELECT * FROM
 $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="number" --query "SELECT {col:Identifier} FROM {db:Identifier}.{tbl:Identifier} LIMIT 10"
 ```
 
-## Строка подключения {#connection_string}
-
-Строка подключения для clickhouse-client представлена в формате URI:
-
-```text
-clickhouse://[user_info@][hosts_and_ports][/dbname][?query_parameters]
-```
-
-где user_info - это: `user[:password]`
-hosts_and_ports - это список значений: `[host][:port],[host][:port]`. Port может быть не задан.
-query_parameters - это список пар ключ[=значение]: `param_name[=value]&param_name[=value]...`. Значение может быть пустым.
-Имена параметров чувствительны к регистру.
- -Допустимые ключи query_parameters: - -- `secure` или сокращенно `s` - без значение. Если параметр указан, то соединение с сервером будет осуществляться по защищенному каналу (TLS). См. `secure` в [command-line-options](#command-line-options). - -Эти примеры иллюстрируют допустимые строки подключения для clickhouse-client: - -```text -clickhouse: -clickhouse://localhost -clickhouse://localhost:9000 -clickhouse://localhost/default -clickhouse://default@localhost -clickhouse://user:password@localhost -clickhouse://имя_пользователя@localhost/some_database?secure -clickhouse://host1:9000,host2:5000/some_database -``` - -Параметр host может быть либо IP-адресом, либо именем хоста. Для указания IPv6-адреса поместите его в квадратные скобки: - -```text -clickhouse://[2001:db8::1234] -``` - -Если пользователь или/и пароль не указаны, будут использоваться значения по умолчанию. -Если host не указан, будет использован хост по умолчанию (localhost). -Если port не указан, будет использоваться порт по умолчанию (9000). -Если база данных не указана, будет использоваться база данных по умолчанию (default). - -Пользователь, пароль и база данных могут быть указаны в строке подключения либо в опциях командной строки `--user`, `--password`, `--database`. - -Строка подключения должна быть указана в первом аргументе clickhouse-client. Строка подключения может комбинироваться с другими [параметрами командной строки] (#command-line-options) кроме `--host (h)` и `--port`. - -### Несколько хостов {#connection_string_multiple_hosts} - -URI позволяет подключаться к нескольким хостам, и клиент будет пытаться подключиться к этим хостам, используя порядок из URI и опций командной строки. Хосты и порты в URI принимают списки значений, разделенные запятыми. - -Если указано более одного хоста или если одно имя хоста транслируется в несколько адресов, Клиент будет будет пытаться подключится к каждому хосту и адресу в порядке в котором они встречаются в URI И опциях клиента, пока не будет установлено соединение. Соединение разрывается, если соединение установлено и аутентификация прошла успешно, остальные хосты в списке игнорируются. - -### Кодирование URI {#connection_string_uri_percent_encoding} - -Хосты, имя пользователя, пароль, имя базы данных, и параметры запроса должны быть [закодированы](https://ru.wikipedia.org/wiki/URL#%D0%9A%D0%BE%D0%B4%D0%B8%D1%80%D0%BE%D0%B2%D0%B0%D0%BD%D0%B8%D0%B5_URL), если значения содержат невалидные символы URI. - -### Примеры {#connection_string_examples} - -Подключиться к localhost через порт 9000 и выполнить запрос "SELECT 1" - -``` bash -clickhouse-client "clickhouse://localhost:9000" --query "SELECT 1" -``` - -Подключиться к localhost через порт 9000 в интерактивном, многострочном режиме. - -``` bash -clickhouse-client "clickhouse://localhost:9000" -m -``` - -Подключиться к localhost через порт 9000 в интерактивном режиме с пользователем default, указанным в опции --user. - -``` bash -clickhouse-client "clickhouse://localhost:9000" --user default -``` - -Подключиться к localhost, используя порт 9000 в интерактивном режиме с базой данных `my_database`, указанной в опции командной строки. - -``` bash -clickhouse-client "clickhouse://localhost:9000" --database my_database -``` - -Подключиться к localhost через порт 9000 в интерактивном режиме с базой данных `my_database`, указанной в строке подключения. 
- -``` bash -clickhouse-client "clickhouse://localhost:9000/my_database" -``` - -Подключиться к localhost через порт 9000 в интерактивном режиме с базой данных `my_database`, указанной в строке подключения, и безопасным соединением, используя короткий вариант команды URI 's'. - -``` bash -clickhouse-client "clickhouse://localhost/my_database?s" -``` - -Подключиться к хосту по умолчанию с использованием порта по умолчанию, пользователя по умолчанию, и базы данных по умолчанию. - -``` bash -clickhouse-client "clickhouse:" -``` - -Подключиться к хосту по умолчанию через порт по умолчанию, используя имя пользователя user_name без пароля. - -``` bash -clickhouse-client "clickhouse://user_name@" -``` - -Подключиться к localhost, используя электронную почту, как имя пользователя. Символ `@` закодирован как `%40`. - -``` bash -clickhouse-client "clickhouse://some_user%40some_mail.com@localhost:9000" -``` - ## Конфигурирование {#interfaces_cli_configuration} В `clickhouse-client` можно передавать различные параметры (все параметры имеют значения по умолчанию) с помощью: @@ -253,7 +142,129 @@ clickhouse-client "clickhouse://some_user%40some_mail.com@localhost:9000" - `--history_file` - путь к файлу с историей команд. - `--param_` — значение параметра для [запроса с параметрами](#cli-queries-with-parameters). -Начиная с версии 20.5, в `clickhouse-client` есть автоматическая подсветка синтаксиса (включена всегда). +## Строка подключения {#connection_string} + +clickhouse-client также поддерживает подключение к серверу clickhouse с помощью строки подключения, аналогичной [MongoDB](https://www.mongodb.com/docs/manual/reference/connection-string/), [PostreSQL](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), [MySQL](https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html#connecting-using-uri). Она имеет следующий синтаксис: + +```text +clickhouse:[//[user_info@][hosts_and_ports]][/database][?query_parameters] +``` + +Где + +- `user_spec` - (необязательно) - это пользователь и необязательный пароль, +- `hostspec` - (необязательно) - список хостов и необязательных портов. `host[:port] [, host:[port]], ...`, +- `database` - (необязательно) - это имя базы данных, +- `paramspec` - (опционально) список пар ключ-значение `param1=value1[,¶m2=value2], ...`. Для некоторых параметров значение не требуется. Имена и значения параметров чувствительны к регистру. + +Параметр host может быть либо IP-адресом, либо именем хоста. Для указания IPv6-адреса поместите его в квадратные скобки: + +```text +clickhouse://[2001:db8::1234] +``` + +Если user не указан, будут использоваться имя пользователя `default`. +Если host не указан, будет использован хост `localhost`. +Если port не указан, будет использоваться порт `9000`. +Если база данных не указана, будет использоваться база данных `default`. + +Если имя пользователя, пароль или база данных были указаны в строке подключения, их нельзя указать с помощью `--user`, `--password` или `--database` (и наоборот). + +Строка подключения должна быть указана в первом аргументе clickhouse-client. Строка подключения может комбинироваться с другими [параметрами командной строки] (#command-line-options) кроме `--host (h)` и `--port`. + +### Несколько хостов {#connection_string_multiple_hosts} + +URI позволяет подключаться к нескольким хостам. Строки подключения могут содержать несколько хостов. ClickHouse-client будет пытаться подключиться к этим хостам по порядку (т.е. слева направо). 
После установления соединения попытки подключения к оставшимся хостам не предпринимаются.
+
+### Допустимые ключи query_parameters {#connection_string_query_parameters}
+
+- `secure` или сокращенно `s` - без значения. Если параметр указан, то соединение с сервером будет осуществляться по защищенному каналу (TLS). См. `secure` в [command-line-options](#command-line-options).
+
+### Кодирование URI {#connection_string_uri_percent_encoding}
+
+Символы, не входящие в US ASCII, в имени пользователя, пароле, хостах, базе данных или параметрах запроса должны быть [закодированы](https://ru.wikipedia.org/wiki/URL#%D0%9A%D0%BE%D0%B4%D0%B8%D1%80%D0%BE%D0%B2%D0%B0%D0%BD%D0%B8%D0%B5_URL).
+
+### Примеры {#connection_string_examples}
+
+Подключиться к localhost через порт 9000 и выполнить запрос "SELECT 1"
+
+``` bash
+clickhouse-client clickhouse://localhost:9000 --query "SELECT 1"
+```
+
+Подключиться к localhost, используя пользователя `john` с паролем `secret`, хост `127.0.0.1` и порт `9000`
+
+``` bash
+clickhouse-client clickhouse://john:secret@127.0.0.1:9000
+```
+
+Подключиться к localhost, используя пользователя по умолчанию, хост с IPv6-адресом `[::1]` и порт `9000`.
+
+``` bash
+clickhouse-client clickhouse://[::1]:9000
+```
+
+Подключиться к localhost, используя пользователя по умолчанию, хост с IPv6-адресом `[2001:db8:3333:4444:5555:6666:7777:8888]` и порт `9000`.
+
+``` bash
+clickhouse-client clickhouse://[2001:db8:3333:4444:5555:6666:7777:8888]:9000
+```
+
+Подключиться к localhost через порт 9000 в многострочном режиме.
+
+``` bash
+clickhouse-client clickhouse://localhost:9000 '-m'
+```
+
+Подключиться к localhost через порт 9000 с пользователем `default`.
+
+``` bash
+clickhouse-client clickhouse://default@localhost:9000
+
+# Эквивалентно:
+clickhouse-client clickhouse://localhost:9000 --user default
+```
+
+Подключиться к localhost через порт 9000 с базой данных `my_database`.
+
+``` bash
+clickhouse-client clickhouse://localhost:9000/my_database
+
+# Эквивалентно:
+clickhouse-client clickhouse://localhost:9000 --database my_database
+```
+
+Подключиться к localhost через порт 9000 с базой данных `my_database`, указанной в строке подключения, используя безопасное соединение при помощи короткого варианта параметра URI 's'.
+
+``` bash
+clickhouse-client clickhouse://localhost/my_database?s
+
+# Эквивалентно:
+clickhouse-client clickhouse://localhost/my_database -s
+```
+
+Подключиться к хосту по умолчанию с использованием порта по умолчанию, пользователя по умолчанию и базы данных по умолчанию.
+
+``` bash
+clickhouse-client clickhouse:
+```
+
+Подключиться к хосту по умолчанию через порт по умолчанию, используя имя пользователя user_name без пароля.
+
+``` bash
+clickhouse-client clickhouse://user_name@
+```
+
+Подключиться к localhost, используя электронную почту как имя пользователя. Символ `@` закодирован как `%40`.
+
+``` bash
+clickhouse-client clickhouse://some_user%40some_mail.com@localhost:9000
+```
+
+Подключиться к одному из хостов: `192.168.1.15`, `192.168.1.25`.
+ +``` bash +clickhouse-client clickhouse://192.168.1.15,192.168.1.25 +``` ### Конфигурационные файлы {#configuration_files} diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index e513314387f..1429853e333 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1326,7 +1326,7 @@ void Client::readArguments( else if (arg.starts_with("--host") || arg.starts_with("-h")) { if (has_connection_string) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Mixing connection string and --host/--port client arguments is prohibited"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Mixing a connection string and --host argument is prohibited"); std::string host_arg; /// --host host @@ -1360,7 +1360,7 @@ void Client::readArguments( else if (arg.starts_with("--port")) { if (has_connection_string) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Mixing connection string and --host/--port client arguments is prohibited"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Mixing a connection string and --port argument is prohibited"); auto port_arg = String{arg}; /// --port port diff --git a/src/Client/ConnectionString.cpp b/src/Client/ConnectionString.cpp index 95fec5b52ee..2e475a1f49d 100644 --- a/src/Client/ConnectionString.cpp +++ b/src/Client/ConnectionString.cpp @@ -26,21 +26,20 @@ using namespace std::literals::string_view_literals; constexpr auto CONNECTION_URI_SCHEME = "clickhouse:"sv; -void uriDecode(std::string & uri_encoded_string, bool plus_as_space) +std::string uriDecode(const std::string & uri_encoded_string, bool plus_as_space) { - std::string temp; - Poco::URI::decode(uri_encoded_string, temp, plus_as_space); - std::swap(temp, uri_encoded_string); + std::string decoded_string; + Poco::URI::decode(uri_encoded_string, decoded_string, plus_as_space); + return decoded_string; } void getHostAndPort(const Poco::URI & uri, std::vector> & hosts_and_ports_arguments) { - auto host = uri.getHost(); std::vector host_and_port; + auto host = uri.getHost(); if (!host.empty()) { - uriDecode(host, false); - host_and_port.push_back("--host="s + host); + host_and_port.push_back("--host="s + uriDecode(host, false)); } // Port can be written without host (":9000"). Empty host name equals to default host. 
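As a minimal sketch of the case mentioned in the comment above (an illustrative invocation, not an example shipped with the patch), a connection string may carry only a port; the empty host part then falls back to the default host:

```bash
# Illustrative only: an empty host before the port resolves to the default host (localhost).
clickhouse-client clickhouse://:9000 --query "SELECT 1"
```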
@@ -52,7 +51,7 @@ void getHostAndPort(const Poco::URI & uri, std::vector> hosts_and_ports_arguments.push_back(std::move(host_and_port)); } -void getHostAndPort( +void buildConnectionString( Poco::URI & uri, std::vector> & hosts_and_ports_arguments, std::string_view host_and_port, @@ -96,13 +95,13 @@ bool tryParseConnectionString( std::vector & common_arguments, std::vector> & hosts_and_ports_arguments) { + if (connection_string == CONNECTION_URI_SCHEME) + return true; + if (!connection_string.starts_with(CONNECTION_URI_SCHEME)) return false; - if (connection_string.size() == CONNECTION_URI_SCHEME.size()) - return true; - - auto offset = CONNECTION_URI_SCHEME.size(); + size_t offset = CONNECTION_URI_SCHEME.size(); if ((connection_string.substr(offset).starts_with("//"))) offset += 2; @@ -146,7 +145,7 @@ bool tryParseConnectionString( { if (*it == ',') { - getHostAndPort(uri, hosts_and_ports_arguments, {last_host_begin, it}, {hosts_end, connection_string.end()}); + buildConnectionString(uri, hosts_and_ports_arguments, {last_host_begin, it}, {hosts_end, connection_string.end()}); last_host_begin = it + 1; } } @@ -154,11 +153,11 @@ bool tryParseConnectionString( if (uri.empty()) { // URI has no host specified - uri = std::string{connection_string.begin(), connection_string.end()}; + uri = std::string(connection_string); getHostAndPort(uri, hosts_and_ports_arguments); } else - getHostAndPort(uri, hosts_and_ports_arguments, {last_host_begin, hosts_end}, {hosts_end, connection_string.end()}); + buildConnectionString(uri, hosts_and_ports_arguments, {last_host_begin, hosts_end}, {hosts_end, connection_string.end()}); Poco::URI::QueryParameters params = uri.getQueryParameters(); for (const auto & param : params) @@ -166,12 +165,12 @@ bool tryParseConnectionString( if (param.first == "secure" || param.first == "s") { if (!param.second.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "secure URI argument does not require value"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "secure URI query parameter does not require value"); common_arguments.push_back(makeArgument(param.first)); } else - throw Exception(ErrorCodes::BAD_ARGUMENTS, "URI argument {} is unknown", param.first); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "URI query parameter {} is unknown", param.first); } auto user_info = uri.getUserInfo(); @@ -180,21 +179,27 @@ bool tryParseConnectionString( // Poco::URI doesn't decode user name/password by default. // But ClickHouse allows to have users with email user name like: 'john@some_mail.com' // john@some_mail.com should be percent-encoded: 'john%40some_mail.com' - uriDecode(user_info, true); std::string::size_type pos = user_info.find(':'); if (pos != std::string::npos) { common_arguments.push_back("--user"); - common_arguments.push_back(user_info.substr(0, pos)); + common_arguments.push_back(uriDecode(user_info.substr(0, pos), true)); ++pos; // Skip ':' common_arguments.push_back("--password"); - common_arguments.push_back(user_info.substr(pos)); + if (user_info.size() > pos + 1) + common_arguments.push_back(uriDecode(user_info.substr(pos), true)); + else + { + // in case of user_info == 'user:', ':' is specified, but password is empty + // then add password argument "\n" which means: Ask user for a password. 
+ common_arguments.push_back("\n"); + } } else { common_arguments.push_back("--user"); - common_arguments.push_back(user_info); + common_arguments.push_back(uriDecode(user_info, true)); } } @@ -209,7 +214,7 @@ bool tryParseConnectionString( catch (const Poco::URISyntaxException & invalid_uri_exception) { throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, - "Invalid connection string {}: {}", connection_string, invalid_uri_exception.what()); + "Invalid connection string '{}': {}", connection_string, invalid_uri_exception.what()); } return true; From d15b7372942040863e6241f9d299b846c2513495 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Fri, 9 Jun 2023 03:54:29 +0000 Subject: [PATCH 266/722] Minor renaming --- docs/en/interfaces/cli.md | 2 +- docs/ru/interfaces/cli.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index 2126c538c5d..fc24bdcad68 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -199,7 +199,7 @@ Instead of --host, --port, --user and --password options, ClickHouse client also ## Connection string {#connection_string} -clickhouse-client alternatively supports connecting to clickhouse server using a connection string similar to [MongoDB](https://www.mongodb.com/docs/manual/reference/connection-string/), [PostreSQL](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), [MySQL](https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html#connecting-using-uri). It has the following syntax: +clickhouse-client alternatively supports connecting to clickhouse server using a connection string similar to [MongoDB](https://www.mongodb.com/docs/manual/reference/connection-string/), [PostgreSQL](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), [MySQL](https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html#connecting-using-uri). It has the following syntax: ```text clickhouse:[//[user_info@][hosts_and_ports]][/database][?query_parameters] diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index f86ccb42356..ee29b122afb 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -144,7 +144,7 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe ## Строка подключения {#connection_string} -clickhouse-client также поддерживает подключение к серверу clickhouse с помощью строки подключения, аналогичной [MongoDB](https://www.mongodb.com/docs/manual/reference/connection-string/), [PostreSQL](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), [MySQL](https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html#connecting-using-uri). Она имеет следующий синтаксис: +clickhouse-client также поддерживает подключение к серверу clickhouse с помощью строки подключения, аналогичной [MongoDB](https://www.mongodb.com/docs/manual/reference/connection-string/), [PostgreSQL](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), [MySQL](https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html#connecting-using-uri). 
Она имеет следующий синтаксис: ```text clickhouse:[//[user_info@][hosts_and_ports]][/database][?query_parameters] From 094d661701cc83d84ebcf4bbaffeff47e0f7c547 Mon Sep 17 00:00:00 2001 From: YalalovSM <39567223+YalalovSM@users.noreply.github.com> Date: Fri, 9 Jun 2023 09:26:00 +0500 Subject: [PATCH 267/722] Update projection.md Document using keywords IF EXISTS/IF NOT EXISTS with projections --- docs/en/sql-reference/statements/alter/projection.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/statements/alter/projection.md b/docs/en/sql-reference/statements/alter/projection.md index 030e9352a00..b7399442d41 100644 --- a/docs/en/sql-reference/statements/alter/projection.md +++ b/docs/en/sql-reference/statements/alter/projection.md @@ -142,11 +142,11 @@ The following operations with [projections](/docs/en/engines/table-engines/merge ## ADD PROJECTION -`ALTER TABLE [db].name ADD PROJECTION name ( SELECT [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata. +`ALTER TABLE [db].name ADD PROJECTION [IF NOT EXISTS] name ( SELECT [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata. ## DROP PROJECTION -`ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). +`ALTER TABLE [db].name DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). ## MATERIALIZE PROJECTION @@ -154,7 +154,7 @@ The following operations with [projections](/docs/en/engines/table-engines/merge ## CLEAR PROJECTION -`ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). +`ALTER TABLE [db.]table CLEAR PROJECTION [IF EXISTS] name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only change metadata or remove files. From 95c0b942c141493f8997fb807b4ff72c34b8292b Mon Sep 17 00:00:00 2001 From: YalalovSM <39567223+YalalovSM@users.noreply.github.com> Date: Fri, 9 Jun 2023 10:37:20 +0500 Subject: [PATCH 268/722] Update projection.md --- docs/ru/sql-reference/statements/alter/projection.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/ru/sql-reference/statements/alter/projection.md b/docs/ru/sql-reference/statements/alter/projection.md index 63b068611ec..33e52b93add 100644 --- a/docs/ru/sql-reference/statements/alter/projection.md +++ b/docs/ru/sql-reference/statements/alter/projection.md @@ -8,13 +8,13 @@ sidebar_label: PROJECTION Доступны следующие операции с [проекциями](../../../engines/table-engines/mergetree-family/mergetree.md#projections): -- `ALTER TABLE [db].name ADD PROJECTION name ( SELECT [GROUP BY] [ORDER BY] )` — добавляет описание проекции в метаданные. +- `ALTER TABLE [db].name ADD PROJECTION [IF NOT EXISTS] name ( SELECT [GROUP BY] [ORDER BY] )` — добавляет описание проекции в метаданные. 
-- `ALTER TABLE [db].name DROP PROJECTION name` — удаляет описание проекции из метаданных и удаляет файлы проекции с диска. +- `ALTER TABLE [db].name DROP PROJECTION [IF EXISTS] name` — удаляет описание проекции из метаданных и удаляет файлы проекции с диска. - `ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` — перестраивает проекцию в указанной партиции. Реализовано как [мутация](../../../sql-reference/statements/alter/index.md#mutations). -- `ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` — удаляет файлы проекции с диска без удаления описания. +- `ALTER TABLE [db.]table CLEAR PROJECTION [IF EXISTS] name IN PARTITION partition_name` — удаляет файлы проекции с диска без удаления описания. Команды `ADD`, `DROP` и `CLEAR` — легковесны, поскольку они только меняют метаданные или удаляют файлы. @@ -22,4 +22,4 @@ sidebar_label: PROJECTION :::note Манипуляции с проекциями поддерживаются только для таблиц с движком [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) (включая [replicated](../../../engines/table-engines/mergetree-family/replication.md) варианты). - ::: \ No newline at end of file + ::: From d0938e95e68551c51f54407abe30bbd35534bf2e Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Fri, 9 Jun 2023 06:40:57 +0000 Subject: [PATCH 269/722] prohibited to use --connection --- programs/client/Client.cpp | 9 +++------ src/Client/ConnectionString.cpp | 19 +++++++++++++++++++ src/Client/ConnectionString.h | 5 +++++ 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 1429853e333..a49447dff69 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1267,6 +1267,9 @@ void Client::readArguments( { std::string_view arg = argv[arg_num]; + if (has_connection_string) + validateConnectionStringClientOption(arg); + if (arg == "--external") { in_external_group = true; @@ -1325,9 +1328,6 @@ void Client::readArguments( } else if (arg.starts_with("--host") || arg.starts_with("-h")) { - if (has_connection_string) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Mixing a connection string and --host argument is prohibited"); - std::string host_arg; /// --host host if (arg == "--host" || arg == "-h") @@ -1359,9 +1359,6 @@ void Client::readArguments( } else if (arg.starts_with("--port")) { - if (has_connection_string) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Mixing a connection string and --port argument is prohibited"); - auto port_arg = String{arg}; /// --port port if (arg == "--port") diff --git a/src/Client/ConnectionString.cpp b/src/Client/ConnectionString.cpp index 2e475a1f49d..b9658772e2e 100644 --- a/src/Client/ConnectionString.cpp +++ b/src/Client/ConnectionString.cpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace DB { @@ -26,6 +27,15 @@ using namespace std::literals::string_view_literals; constexpr auto CONNECTION_URI_SCHEME = "clickhouse:"sv; +const std::unordered_map PROHIBITED_CLIENT_OPTIONS = +{ + /// Client option, client option long name + {"-h", "--host"}, + {"--host", "--host"}, + {"--port", "--port"}, + {"--connection", "--connection"}, +}; + std::string uriDecode(const std::string & uri_encoded_string, bool plus_as_space) { std::string decoded_string; @@ -220,4 +230,13 @@ bool tryParseConnectionString( return true; } +void validateConnectionStringClientOption(std::string_view command_line_option) +{ + const auto prohibited_option_iter = PROHIBITED_CLIENT_OPTIONS.find(command_line_option); + 
if (prohibited_option_iter != PROHIBITED_CLIENT_OPTIONS.end()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Mixing a connection string and {} option is prohibited", + prohibited_option_iter->second); +} + } diff --git a/src/Client/ConnectionString.h b/src/Client/ConnectionString.h index aafb1139b00..ce72de9edf6 100644 --- a/src/Client/ConnectionString.h +++ b/src/Client/ConnectionString.h @@ -19,4 +19,9 @@ bool tryParseConnectionString( std::string_view connection_string, std::vector & common_arguments, std::vector> & hosts_and_ports_arguments); + +// throws DB::Exception with BAD_ARGUMENTS if the given command line argument is allowed +// to be used with the connection string +void validateConnectionStringClientOption(std::string_view command_line_option); + } From b8fc25ab239fbf8c82f589b175e386ceed737c26 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Fri, 9 Jun 2023 06:51:34 +0000 Subject: [PATCH 270/722] minor update --- src/Client/ConnectionString.cpp | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/src/Client/ConnectionString.cpp b/src/Client/ConnectionString.cpp index b9658772e2e..e1f39369b2a 100644 --- a/src/Client/ConnectionString.cpp +++ b/src/Client/ConnectionString.cpp @@ -27,13 +27,12 @@ using namespace std::literals::string_view_literals; constexpr auto CONNECTION_URI_SCHEME = "clickhouse:"sv; -const std::unordered_map PROHIBITED_CLIENT_OPTIONS = -{ - /// Client option, client option long name - {"-h", "--host"}, - {"--host", "--host"}, - {"--port", "--port"}, - {"--connection", "--connection"}, +const std::unordered_map PROHIBITED_CLIENT_OPTIONS = { + /// Client option, client option long name + {"-h", "--host"}, + {"--host", "--host"}, + {"--port", "--port"}, + {"--connection", "--connection"}, }; std::string uriDecode(const std::string & uri_encoded_string, bool plus_as_space) @@ -234,9 +233,8 @@ void validateConnectionStringClientOption(std::string_view command_line_option) { const auto prohibited_option_iter = PROHIBITED_CLIENT_OPTIONS.find(command_line_option); if (prohibited_option_iter != PROHIBITED_CLIENT_OPTIONS.end()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Mixing a connection string and {} option is prohibited", - prohibited_option_iter->second); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Mixing a connection string and {} option is prohibited", prohibited_option_iter->second); } } From 5c76a8882ef644ca924c5653cdad14e7cfa94270 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Fri, 9 Jun 2023 09:12:07 +0000 Subject: [PATCH 271/722] Fix docs by pull request comments --- .../functions/type-conversion-functions.md | 177 +++++++++++----- .../functions/type-conversion-functions.md | 199 ++++++++++++++---- 2 files changed, 288 insertions(+), 88 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index a021fed195d..c634a3da27e 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -33,7 +33,7 @@ SELECT toTypeName(toNullable('') AS val) AS source_type, toTypeName(toString(val)) AS to_type_result_type, toTypeName(CAST(val, 'String')) AS cast_result_type - + ┌─source_type──────┬─to_type_result_type─┬─cast_result_type─┐ │ Nullable(String) │ Nullable(String) │ String │ └──────────────────┴─────────────────────┴──────────────────┘ @@ -203,7 +203,7 @@ Result: ## toDate -Converts the argument to 
[Date](/docs/en/sql-reference/data-types/date.md) data type. +Converts the argument to [Date](/docs/en/sql-reference/data-types/date.md) data type. If the argument is [DateTime](/docs/en/sql-reference/data-types/datetime.md) or [DateTime64](/docs/en/sql-reference/data-types/datetime64.md), it truncates it and leaves the date component of the DateTime: @@ -232,7 +232,7 @@ SELECT │ 2022-12-30 │ Date │ └────────────┴──────────────────────────────────┘ -1 row in set. Elapsed: 0.001 sec. +1 row in set. Elapsed: 0.001 sec. ``` ```sql @@ -314,14 +314,52 @@ SELECT └─────────────────────┴───────────────┴─────────────┴─────────────────────┘ ``` + ## toDateOrZero +The same as [toDate](#todate) but returns lower boundery of [Date](/docs/en/sql-reference/data-types/date.md) if an invalid argument is received. Only [String](/docs/en/sql-reference/data-types/string.md) argument is supported. + +**Example** + +Query: + +``` sql +SELECT toDateOrZero('2022-12-30'), toDateOrZero(''); +``` + +Result: + +```response +┌─toDateOrZero('2022-12-30')─┬─toDateOrZero('')─┐ +│ 2022-12-30 │ 1970-01-01 │ +└────────────────────────────┴──────────────────┘ +``` + + ## toDateOrNull +The same as [toDate](#todate) but returns `NULL` if an invalid argument is received. Only [String](/docs/en/sql-reference/data-types/string.md) argument is supported. + +**Example** + +Query: + +``` sql +SELECT toDateOrNull('2022-12-30'), toDateOrNull(''); +``` + +Result: + +```response +┌─toDateOrNull('2022-12-30')─┬─toDateOrNull('')─┐ +│ 2022-12-30 │ ᴺᵁᴸᴸ │ +└────────────────────────────┴──────────────────┘ +``` + + ## toDateOrDefault -Converts an input value to [Date](/docs/en/sql-reference/data-types/date.md) data type. -If unsuccessful, returns the lower border value supported by [Date](/docs/en/sql-reference/data-types/date.md). The default value can be specified as a second argument. -Similar to [toDate](#todate). + +Like [toDate](#todate) but if unsuccessful, returns a default value which is either the second argument (if specified), or otherwise the lower boundery of [Date](/docs/en/sql-reference/data-types/date.md). **Syntax** @@ -329,62 +367,37 @@ Similar to [toDate](#todate). toDateOrDefault(expr [, default_value]) ``` -**Arguments** - -- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [Int](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md). -- `default_value` — The default value. [Date](/docs/en/sql-reference/data-types/date.md) - -If `expr` is a number and looks like a UNIX timestamp (is greater than 65535), it is interpreted as a DateTime, then truncated to Date in the current timezone. If `expr` is a number and it is smaller than 65536, it is interpreted as the number of days since 1970-01-01. - -**Returned value** - -- A calendar date. 
[Date](/docs/en/sql-reference/data-types/date.md) - **Example** Query: ``` sql -SELECT - toDateOrDefault('2021-01-01', '2023-01-01'::Date), - toDateOrDefault('xx2021-01-01', '2023-01-01'::Date); +SELECT toDateOrDefault('2022-12-30'), toDateOrDefault('', '2023-01-01'::Date); ``` Result: ```response -┌─toDateOrDefault('2021-01-01', CAST('2023-01-01', 'Date'))─┬─toDateOrDefault('xx2021-01-01', CAST('2023-01-01', 'Date'))─┐ -│ 2021-01-01 │ 2023-01-01 │ -└───────────────────────────────────────────────────────────┴─────────────────────────────────────────────────────────────┘ +┌─toDateOrDefault('2022-12-30')─┬─toDateOrDefault('', CAST('2023-01-01', 'Date'))─┐ +│ 2022-12-30 │ 2023-01-01 │ +└───────────────────────────────┴─────────────────────────────────────────────────┘ ``` -**See Also** -- [toDate](#todate) -- [toDate32OrDefault](#todate32ordefault) - ## toDateTime -## toDateTimeOrZero - -## toDateTimeOrNull - -## toDateTimeOrDefault -Converts an input value to [DateTime](/docs/en/sql-reference/data-types/datetime.md) data type. -If unsuccessful, returns the lower border value supported by [DateTime](/docs/en/sql-reference/data-types/datetime.md). The default value can be specified as a third argument. -Similar to [toDateTime](#todatetime). +Converts an input value to [DateTime](/docs/en/sql-reference/data-types/datetime.md). **Syntax** ``` sql -toDateTimeOrDefault(expr, [, time_zone [, default_value]]) +toDateTime(expr[, time_zone ]) ``` **Arguments** -- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [Int](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md). +- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [Int](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md). - `time_zone` — Time zone. [String](/docs/en/sql-reference/data-types/string.md). -- `default_value` — The default value. [DateTime](/docs/en/sql-reference/data-types/datetime.md) If `expr` is a number, it is interpreted as the number of seconds since the beginning of the Unix Epoch (as Unix timestamp). @@ -397,21 +410,86 @@ If `expr` is a number, it is interpreted as the number of seconds since the begi Query: ``` sql -SELECT - toDateTimeOrDefault('2021-01-01', 'UTC', '2023-01-01'::DateTime('UTC')), - toDateTimeOrDefault('xx2021-01-01', 'UTC', '2023-01-01'::DateTime('UTC')); +SELECT toDateTime('2022-12-30 13:44:17'), toDateTime(1685457500, 'UTC'); ``` Result: ```response -┌─toDateTimeOrDefault('2021-01-01', 'UTC', CAST('2023-01-01', 'DateTime(\'UTC\')'))─┬─toDateTimeOrDefault('xx2021-01-01', 'UTC', CAST('2023-01-01', 'DateTime(\'UTC\')'))─┐ -│ 2021-01-01 00:00:00 │ 2023-01-01 00:00:00 │ -└───────────────────────────────────────────────────────────────────────────────────┴─────────────────────────────────────────────────────────────────────────────────────┘ +┌─toDateTime('2022-12-30 13:44:17')─┬─toDateTime(1685457500, 'UTC')─┐ +│ 2022-12-30 13:44:17 │ 2023-05-30 14:38:20 │ +└───────────────────────────────────┴───────────────────────────────┘ +``` + + +## toDateTimeOrZero + +The same as [toDateTime](#todate) but returns lower boundery of [Date](/docs/en/sql-reference/data-types/date.md) if an invalid argument is received. Only [String](/docs/en/sql-reference/data-types/string.md) argument is supported. 
+ +**Example** + +Query: + +``` sql +SELECT toDateTimeOrZero('2022-12-30 13:44:17'), toDateTimeOrZero(''); +``` + +Result: + +```response +┌─toDateTimeOrZero('2022-12-30 13:44:17')─┬─toDateTimeOrZero('')─┐ +│ 2022-12-30 13:44:17 │ 1970-01-01 00:00:00 │ +└─────────────────────────────────────────┴──────────────────────┘ +``` + + +## toDateTimeOrNull + +The same as [toDateTime](#todatetime) but returns `NULL` if an invalid argument is received. Only [String](/docs/en/sql-reference/data-types/string.md) argument is supported. + +**Example** + +Query: + +``` sql +SELECT toDateTimeOrNull('2022-12-30 13:44:17'), toDateTimeOrNull(''); +``` + +Result: + +```response +┌─toDateTimeOrNull('2022-12-30 13:44:17')─┬─toDateTimeOrNull('')─┐ +│ 2022-12-30 13:44:17 │ ᴺᵁᴸᴸ │ +└─────────────────────────────────────────┴──────────────────────┘ +``` + + +## toDateTimeOrDefault + +Like [toDateTime](#todatetime) but if unsuccessful, returns a default value which is either the third argument (if specified), or otherwise the lower boundery of [DateTime](/docs/en/sql-reference/data-types/datetime.md). + +**Syntax** + +``` sql +toDateTimeOrDefault(expr [, time_zone [, default_value]]) +``` + +**Example** + +Query: + +``` sql +SELECT toDateTimeOrDefault('2022-12-30 13:44:17'), toDateTimeOrDefault('', 'UTC', '2023-01-01'::DateTime('UTC')); +``` + +Result: + +```response +┌─toDateTimeOrDefault('2022-12-30 13:44:17')─┬─toDateTimeOrDefault('', 'UTC', CAST('2023-01-01', 'DateTime(\'UTC\')'))─┐ +│ 2022-12-30 13:44:17 │ 2023-01-01 00:00:00 │ +└────────────────────────────────────────────┴─────────────────────────────────────────────────────────────────────────┘ ``` -**See Also** -- [toDateTime](#todatetime) ## toDate32 @@ -604,6 +682,11 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN └─────────────────────────┴─────────────────────────────────────────────────────────────────────┘ ``` +## toDateTime64OrZero + +## toDateTime64OrNull + +## toDateTime64OrDefault ## toDecimal(32\|64\|128\|256) @@ -1332,7 +1415,7 @@ Returns DateTime values parsed from input string according to a MySQL style form **Supported format specifiers** All format specifiers listed in [formatDateTime](/docs/en/sql-reference/functions/date-time-functions.md#date_time_functions-formatDateTime) except: -- %Q: Quarter (1-4) +- %Q: Quarter (1-4) **Example** diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 67d1732d34e..d43b5415114 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -165,28 +165,17 @@ SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8); ## toDate {#todate} -Cиноним: `DATE`. - -## toDateOrZero {#todateorzero} - -## toDateOrNull {#todateornull} - -## toDateOrDefault {#todateordefault} - -Конвертирует аргумент в значение [Date](/docs/ru/sql-reference/data-types/date.md) data type. -Если получен недопустимый аргумент, то возвращает значение по умолчанию (нижняя граница [Date](/docs/ru/sql-reference/data-types/date.md). Значение по умолчанию может быть указано вторым аргументом. -Похожа на [toDate](#todate). +Конвертирует аргумент в значение [Date](/docs/ru/sql-reference/data-types/date.md). **Синтаксис** ``` sql -toDateOrDefault(expr [, default_value]) +toDate(expr) ``` **Аргументы** -- `expr` — Значение для преобразования. 
[String](/docs/ru/sql-reference/data-types/string.md), [Int](/docs/ru/sql-reference/data-types/int-uint.md), [Date](/docs/ru/sql-reference/data-types/date.md) или [DateTime](/docs/ru/sql-reference/data-types/datetime.md). -- `default_value` — Значение по умолчанию. [Date](/docs/ru/sql-reference/data-types/date.md) +- `expr` — Значение для преобразования. [String](/docs/ru/sql-reference/data-types/string.md), [Int](/docs/ru/sql-reference/data-types/int-uint.md), [Date](/docs/ru/sql-reference/data-types/date.md) или [DateTime](/docs/ru/sql-reference/data-types/datetime.md). Если `expr` является числом выглядит как UNIX timestamp (больше чем 65535), оно интерпретируется как DateTime, затем обрезается до Date учитывавая текущую часовой пояс. Если `expr` является числом и меньше чем 65536, оно интерпретируется как количество дней с 1970-01-01. @@ -199,46 +188,101 @@ toDateOrDefault(expr [, default_value]) Запрос: ``` sql -SELECT - toDateOrDefault('2021-01-01', '2023-01-01'::Date), - toDateOrDefault('xx2021-01-01', '2023-01-01'::Date); +SELECT toDate('2022-12-30'), toDate(1685457500); ``` Результат: ```response -┌─toDateOrDefault('2021-01-01', CAST('2023-01-01', 'Date'))─┬─toDateOrDefault('xx2021-01-01', CAST('2023-01-01', 'Date'))─┐ -│ 2021-01-01 │ 2023-01-01 │ -└───────────────────────────────────────────────────────────┴─────────────────────────────────────────────────────────────┘ +┌─toDate('2022-12-30')─┬─toDate(1685457500)─┐ +│ 2022-12-30 │ 2023-05-30 │ +└──────────────────────┴────────────────────┘ ``` -**Смотрите также** -- [toDate](#todate) -- [toDate32OrDefault](#todate32ordefault) -## toDateTime {#todatetime} +## toDateOrZero {#todateorzero} -## toDateTimeOrZero {#todatetimeorzero} +Как [toDate](#todate), но в случае неудачи возвращает нижнюю границу [Date](/docs/ru/sql-reference/data-types/date.md)). Поддерживается только аргумент типа [String](/docs/ru/sql-reference/data-types/string.md). -## toDateTimeOrNull {#todatetimeornull} +**Пример** -## toDateTimeOrDefault {#todatetimeordefault} +Запрос: -Конвертирует аргумент в значение [DateTime](/docs/ru/sql-reference/data-types/datetime.md). -Если получен недопустимый аргумент, то возвращает значение по умолчанию (нижняя граница [DateTime](/docs/ru/sql-reference/data-types/datetime.md)). Значение по умолчанию может быть указано третьим аргументом. -Похожа на [toDateTime](#todatetime). +``` sql +SELECT toDateOrZero('2022-12-30'), toDateOrZero(''); +``` + +Результат: + +```response +┌─toDateOrZero('2022-12-30')─┬─toDateOrZero('')─┐ +│ 2022-12-30 │ 1970-01-01 │ +└────────────────────────────┴──────────────────┘ +``` + + +## toDateOrNull {#todateornull} + +Как [toDate](#todate), но в случае неудачи возвращает `NULL`. Поддерживается только аргумент типа [String](/docs/ru/sql-reference/data-types/string.md). + +**Пример** + +Запрос: + +``` sql +SELECT toDateOrNull('2022-12-30'), toDateOrNull(''); +``` + +Результат: + +```response +┌─toDateOrNull('2022-12-30')─┬─toDateOrNull('')─┐ +│ 2022-12-30 │ ᴺᵁᴸᴸ │ +└────────────────────────────┴──────────────────┘ +``` + + +## toDateOrDefault {#todateordefault} + +Как [toDate](#todate), но в случае неудачи возвращает значение по умолчанию (или второй аргумент (если указан), или нижняя граница [Date](/docs/ru/sql-reference/data-types/date.md)). 
**Синтаксис** ``` sql -toDateTimeOrDefault(expr, [, time_zone [, default_value]]) +toDateOrDefault(expr [, default_value]) +``` + +**Пример** + +Запрос: + +``` sql +SELECT toDateOrDefault('2022-12-30'), toDateOrDefault('', '2023-01-01'::Date); +``` + +Результат: + +```response +┌─toDateOrDefault('2022-12-30')─┬─toDateOrDefault('', CAST('2023-01-01', 'Date'))─┐ +│ 2022-12-30 │ 2023-01-01 │ +└───────────────────────────────┴─────────────────────────────────────────────────┘ +``` + + +## toDateTime {#todatetime} + +Конвертирует аргумент в значение [DateTime](/docs/ru/sql-reference/data-types/datetime.md). + +**Синтаксис** + +``` sql +toDateTime(expr[, time_zone ]) ``` **Аргументы** -- `expr` — Значение для преобразования. [String](/docs/ru/sql-reference/data-types/string.md), [Int](/docs/ru/sql-reference/data-types/int-uint.md), [Date](/docs/ru/sql-reference/data-types/date.md) или [DateTime](/docs/ru/sql-reference/data-types/datetime.md). +- `expr` — Значение для преобразования. [String](/docs/ru/sql-reference/data-types/string.md), [Int](/docs/ru/sql-reference/data-types/int-uint.md), [Date](/docs/ru/sql-reference/data-types/date.md) или [DateTime](/docs/ru/sql-reference/data-types/datetime.md). - `time_zone` — Часовой пояс. [String](/docs/ru/sql-reference/data-types/string.md). -- `default_value` — Значение по умолчанию. [DateTime](/docs/ru/sql-reference/data-types/datetime.md) Если `expr` является числом, оно интерпретируется как количество секунд от начала unix эпохи. @@ -251,21 +295,86 @@ toDateTimeOrDefault(expr, [, time_zone [, default_value]]) Запрос: ``` sql -SELECT - toDateTimeOrDefault('2021-01-01', 'UTC', '2023-01-01'::DateTime('UTC')), - toDateTimeOrDefault('xx2021-01-01', 'UTC', '2023-01-01'::DateTime('UTC')); +SELECT toDateTime('2022-12-30 13:44:17'), toDateTime(1685457500, 'UTC'); ``` Результат: ```response -┌─toDateTimeOrDefault('2021-01-01', 'UTC', CAST('2023-01-01', 'DateTime(\'UTC\')'))─┬─toDateTimeOrDefault('xx2021-01-01', 'UTC', CAST('2023-01-01', 'DateTime(\'UTC\')'))─┐ -│ 2021-01-01 00:00:00 │ 2023-01-01 00:00:00 │ -└───────────────────────────────────────────────────────────────────────────────────┴─────────────────────────────────────────────────────────────────────────────────────┘ +┌─toDateTime('2022-12-30 13:44:17')─┬─toDateTime(1685457500, 'UTC')─┐ +│ 2022-12-30 13:44:17 │ 2023-05-30 14:38:20 │ +└───────────────────────────────────┴───────────────────────────────┘ +``` + + +## toDateTimeOrZero {#todatetimeorzero} + +Как [toDateTime](#todatetime), но в случае неудачи возвращает нижнюю границу [DateTime](/docs/ru/sql-reference/data-types/datetime.md)). Поддерживается только аргумент типа [String](/docs/ru/sql-reference/data-types/string.md). + +**Пример** + +Запрос: + +``` sql +SELECT toDateTimeOrZero('2022-12-30 13:44:17'), toDateTimeOrZero(''); +``` + +Результат: + +```response +┌─toDateTimeOrZero('2022-12-30 13:44:17')─┬─toDateTimeOrZero('')─┐ +│ 2022-12-30 13:44:17 │ 1970-01-01 00:00:00 │ +└─────────────────────────────────────────┴──────────────────────┘ +``` + + +## toDateTimeOrNull {#todatetimeornull} + +Как [toDateTime](#todatetime), но в случае неудачи возвращает `NULL`. Поддерживается только аргумент типа [String](/docs/ru/sql-reference/data-types/string.md). 
+ +**Example** + +Query: + +``` sql +SELECT toDateTimeOrNull('2022-12-30 13:44:17'), toDateTimeOrNull(''); +``` + +Result: + +```response +┌─toDateTimeOrNull('2022-12-30 13:44:17')─┬─toDateTimeOrNull('')─┐ +│ 2022-12-30 13:44:17 │ ᴺᵁᴸᴸ │ +└─────────────────────────────────────────┴──────────────────────┘ +``` + + +## toDateTimeOrDefault {#todatetimeordefault} + +Как [toDateTime](#todatetime), но в случае неудачи возвращает значение по умолчанию (или третий аргумент (если указан), или нижняя граница [DateTime](/docs/ru/sql-reference/data-types/datetime.md)). + +**Синтаксис** + +``` sql +toDateTimeOrDefault(expr, [, time_zone [, default_value]]) +``` + +**Пример** + +Запрос: + +``` sql +SELECT toDateTimeOrDefault('2022-12-30 13:44:17'), toDateTimeOrDefault('', 'UTC', '2023-01-01'::DateTime('UTC')); +``` + +Результат: + +```response +┌─toDateTimeOrDefault('2022-12-30 13:44:17')─┬─toDateTimeOrDefault('', 'UTC', CAST('2023-01-01', 'DateTime(\'UTC\')'))─┐ +│ 2022-12-30 13:44:17 │ 2023-01-01 00:00:00 │ +└────────────────────────────────────────────┴─────────────────────────────────────────────────────────────────────────┘ ``` -**Смотрите также** -- [toDateTime](#todatetime) ## toDate32 {#todate32} @@ -387,6 +496,14 @@ SELECT └─────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘ ``` +## toDateTime64 + +## toDateTime64OrZero + +## toDateTime64OrNull + +## toDateTime64OrDefault + ## toDecimal(32\|64\|128\|256) {#todecimal3264128} Преобразует `value` к типу данных [Decimal](../../sql-reference/functions/type-conversion-functions.md) с точностью `S`. `value` может быть числом или строкой. Параметр `S` (scale) задаёт число десятичных знаков. From ab8365630b3ee50120e67664ca4ecbab1afcc4c3 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 9 Jun 2023 09:19:21 +0000 Subject: [PATCH 272/722] Reject logs containing unknown operation --- contrib/NuRaft | 2 +- src/Coordination/Changelog.cpp | 3 +- src/Coordination/Changelog.h | 2 +- src/Coordination/KeeperServer.cpp | 39 +++++++++++++++++++++---- src/Coordination/KeeperStateMachine.cpp | 5 ++-- 5 files changed, 39 insertions(+), 12 deletions(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index b56784be1ae..f43d10dbc97 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit b56784be1aec568fb72aff47f281097c017623cb +Subproject commit f43d10dbc977a63f11dfb3afdd010fcf7ad89950 diff --git a/src/Coordination/Changelog.cpp b/src/Coordination/Changelog.cpp index 894fd93cfa7..c0dfbc2cbc3 100644 --- a/src/Coordination/Changelog.cpp +++ b/src/Coordination/Changelog.cpp @@ -13,6 +13,7 @@ #include #include #include +#include namespace DB @@ -479,7 +480,7 @@ public: continue; /// Create log entry for read data - auto log_entry = nuraft::cs_new(record.header.term, record.blob, record.header.value_type); + auto log_entry = nuraft::cs_new(record.header.term, record.blob, static_cast(record.header.value_type)); if (result.first_read_index == 0) result.first_read_index = record.header.index; diff --git a/src/Coordination/Changelog.h b/src/Coordination/Changelog.h index 56b0475ba8b..3c09370182d 100644 --- a/src/Coordination/Changelog.h +++ b/src/Coordination/Changelog.h @@ -39,7 +39,7 @@ struct ChangelogRecordHeader ChangelogVersion version = CURRENT_CHANGELOG_VERSION; uint64_t index = 0; /// entry log number uint64_t term = 0; - nuraft::log_val_type value_type{}; + int32_t value_type{}; uint64_t blob_size = 0; }; diff --git a/src/Coordination/KeeperServer.cpp 
b/src/Coordination/KeeperServer.cpp index 45db9e85fa5..6e47412cd3a 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -607,12 +608,30 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ } } + const auto follower_preappend = [&](const auto & entry) + { + if (entry->get_val_type() != nuraft::app_log) + return nuraft::cb_func::ReturnCode::Ok; + + try + { + state_machine->parseRequest(entry->get_buf(), /*final=*/false); + } + catch (...) + { + tryLogCurrentException(log, "Failed to parse request from log entry"); + throw; + } + return nuraft::cb_func::ReturnCode::Ok; + + }; + if (initialized_flag) { switch (type) { // This event is called before a single log is appended to the entry on the leader node - case nuraft::cb_func::PreAppendLog: + case nuraft::cb_func::PreAppendLogLeader: { // we are relying on the fact that request are being processed under a mutex // and not a RW lock @@ -665,7 +684,12 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ if (request_for_session->digest->version != KeeperStorage::NO_DIGEST) writeIntBinary(request_for_session->digest->value, write_buf); - break; + return nuraft::cb_func::ReturnCode::Ok; + } + case nuraft::cb_func::PreAppendLogFollower: + { + const auto & entry = *static_cast(param->ctx); + return follower_preappend(entry); } case nuraft::cb_func::AppendLogFailed: { @@ -678,13 +702,11 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ auto & entry_buf = entry->get_buf(); auto request_for_session = state_machine->parseRequest(entry_buf, true); state_machine->rollbackRequest(*request_for_session, true); - break; + return nuraft::cb_func::ReturnCode::Ok; } default: - break; + return nuraft::cb_func::ReturnCode::Ok; } - - return nuraft::cb_func::ReturnCode::Ok; } size_t last_commited = state_machine->last_commit_index(); @@ -737,6 +759,11 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ initial_batch_committed = true; return nuraft::cb_func::ReturnCode::Ok; } + case nuraft::cb_func::PreAppendLogFollower: + { + const auto & entry = *static_cast(param->ctx); + return follower_preappend(entry); + } default: /// ignore other events return nuraft::cb_func::ReturnCode::Ok; } diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp index 65abee44050..7d251ad48b9 100644 --- a/src/Coordination/KeeperStateMachine.cpp +++ b/src/Coordination/KeeperStateMachine.cpp @@ -272,9 +272,8 @@ bool KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & req } catch (...) 
{ - tryLogCurrentException(__PRETTY_FUNCTION__); - rollbackRequestNoLock(request_for_session, true); - throw; + tryLogCurrentException(__PRETTY_FUNCTION__, "Failed to preprocess stored log, aborting to avoid inconsistent state"); + std::abort(); } if (keeper_context->digest_enabled && request_for_session.digest) From de70e322cf93d5f10a01bbc7d7aa8f4798755214 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Fri, 9 Jun 2023 10:29:44 +0000 Subject: [PATCH 273/722] Fix by pull request comments --- src/Functions/DateTimeTransforms.h | 35 ++++++++--------- src/Functions/FunctionsConversion.h | 2 +- .../01556_accurate_cast_or_null.reference | 2 + .../01556_accurate_cast_or_null.sql | 2 + .../0_stateless/01601_accurate_cast.sql | 38 +++++++++---------- 5 files changed, 40 insertions(+), 39 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 09b0d71daf8..1d3ec1bd368 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -1436,7 +1436,7 @@ struct Transformer { template static void vector(const FromTypeVector & vec_from, ToTypeVector & vec_to, const DateLUTImpl & time_zone, const Transform & transform, - ColumnUInt8::Container * vec_null_map_to [[maybe_unused]]) + [[maybe_unused]] ColumnUInt8::Container * vec_null_map_to) { using ValueType = typename ToTypeVector::value_type; size_t size = vec_from.size(); @@ -1444,29 +1444,26 @@ struct Transformer for (size_t i = 0; i < size; ++i) { - if constexpr (std::is_same_v - || std::is_same_v) + if constexpr (std::is_same_v || std::is_same_v) { - bool check_range_result = true; - - if constexpr (std::is_same_v || std::is_same_v) + if constexpr (std::is_same_v + || std::is_same_v) { - check_range_result = vec_from[i] >= 0 && vec_from[i] <= 0xFFFFFFFFL; - } + bool is_valid_input = vec_from[i] >= 0 && vec_from[i] <= 0xFFFFFFFFL; - if (!check_range_result) - { - if constexpr (std::is_same_v) + if (!is_valid_input) { - vec_to[i] = 0; - if (vec_null_map_to) + if constexpr (std::is_same_v) + { + vec_to[i] = 0; (*vec_null_map_to)[i] = true; - continue; - } - else - { - throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value in column {} cannot be safely converted into type {}", - TypeName, TypeName); + continue; + } + else + { + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value in column {} cannot be safely converted into type {}", + TypeName, TypeName); + } } } } diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 3a8ddcc9094..ea8efada21d 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -2885,7 +2885,7 @@ private: if constexpr (IsDataTypeNumber && (std::is_same_v || std::is_same_v)) { - if (wrapper_cast_type == CastType::accurate) + if (wrapper_cast_type == CastType::accurate) { result_column = ConvertImpl::template execute( arguments, result_type, input_rows_count); diff --git a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference index 21faa830636..3bff125068a 100644 --- a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference @@ -35,6 +35,8 @@ \N 2023-05-30 14:38:20 1970-01-01 00:00:19 +1970-01-01 19:26:40 +\N \N \N 2023-05-30 diff --git a/tests/queries/0_stateless/01556_accurate_cast_or_null.sql b/tests/queries/0_stateless/01556_accurate_cast_or_null.sql index 3f57358576e..3266198d930 100644 --- 
a/tests/queries/0_stateless/01556_accurate_cast_or_null.sql +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.sql @@ -41,8 +41,10 @@ SELECT accurateCastOrNull(5000000000, 'DateTime'); SELECT accurateCastOrNull('1xxx', 'DateTime'); select toString(accurateCastOrNull('2023-05-30 14:38:20', 'DateTime'), timezone()); SELECT toString(accurateCastOrNull(19, 'DateTime'), 'UTC'); +SELECT toString(accurateCastOrNull(70000, 'DateTime'), 'UTC'); SELECT accurateCastOrNull(-1, 'Date'); +SELECT accurateCastOrNull(5000000000, 'Date'); SELECT accurateCastOrNull('1xxx', 'Date'); SELECT accurateCastOrNull('2023-05-30', 'Date'); SELECT accurateCastOrNull(19, 'Date'); diff --git a/tests/queries/0_stateless/01601_accurate_cast.sql b/tests/queries/0_stateless/01601_accurate_cast.sql index 5555129f0ad..2108e42df05 100644 --- a/tests/queries/0_stateless/01601_accurate_cast.sql +++ b/tests/queries/0_stateless/01601_accurate_cast.sql @@ -1,36 +1,36 @@ -SELECT accurateCast(-1, 'UInt8'); -- { serverError 70 } +SELECT accurateCast(-1, 'UInt8'); -- { serverError CANNOT_CONVERT_TYPE } SELECT accurateCast(5, 'UInt8'); -SELECT accurateCast(257, 'UInt8'); -- { serverError 70 } -SELECT accurateCast(-1, 'UInt16'); -- { serverError 70 } +SELECT accurateCast(257, 'UInt8'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(-1, 'UInt16'); -- { serverError CANNOT_CONVERT_TYPE } SELECT accurateCast(5, 'UInt16'); -SELECT accurateCast(65536, 'UInt16'); -- { serverError 70 } -SELECT accurateCast(-1, 'UInt32'); -- { serverError 70 } +SELECT accurateCast(65536, 'UInt16'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(-1, 'UInt32'); -- { serverError CANNOT_CONVERT_TYPE } SELECT accurateCast(5, 'UInt32'); -SELECT accurateCast(4294967296, 'UInt32'); -- { serverError 70 } -SELECT accurateCast(-1, 'UInt64'); -- { serverError 70 } +SELECT accurateCast(4294967296, 'UInt32'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(-1, 'UInt64'); -- { serverError CANNOT_CONVERT_TYPE } SELECT accurateCast(5, 'UInt64'); -SELECT accurateCast(-1, 'UInt256'); -- { serverError 70 } +SELECT accurateCast(-1, 'UInt256'); -- { serverError CANNOT_CONVERT_TYPE } SELECT accurateCast(5, 'UInt256'); -SELECT accurateCast(-129, 'Int8'); -- { serverError 70 } +SELECT accurateCast(-129, 'Int8'); -- { serverError CANNOT_CONVERT_TYPE } SELECT accurateCast(5, 'Int8'); -SELECT accurateCast(128, 'Int8'); -- { serverError 70 } +SELECT accurateCast(128, 'Int8'); -- { serverError CANNOT_CONVERT_TYPE } -SELECT accurateCast(10, 'Decimal32(9)'); -- { serverError 407 } +SELECT accurateCast(10, 'Decimal32(9)'); -- { serverError DECIMAL_OVERFLOW } SELECT accurateCast(1, 'Decimal32(9)'); -SELECT accurateCast(-10, 'Decimal32(9)'); -- { serverError 407 } +SELECT accurateCast(-10, 'Decimal32(9)'); -- { serverError DECIMAL_OVERFLOW } -SELECT accurateCast('123', 'FixedString(2)'); -- { serverError 131 } +SELECT accurateCast('123', 'FixedString(2)'); -- { serverError TOO_LARGE_STRING_SIZE } SELECT accurateCast('12', 'FixedString(2)'); -SELECT accurateCast(-1, 'DateTime'); -- { serverError 70 } -SELECT accurateCast(0xFFFFFFFF + 1, 'DateTime'); -- { serverError 70 } -SELECT accurateCast('1xxx', 'DateTime'); -- { serverError 41 } +SELECT accurateCast(-1, 'DateTime'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(0xFFFFFFFF + 1, 'DateTime'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast('1xxx', 'DateTime'); -- { serverError CANNOT_PARSE_DATETIME } SELECT accurateCast('2023-05-30 14:38:20', 'DateTime'); SELECT 
toString(accurateCast(19, 'DateTime'), 'UTC');

-SELECT accurateCast(-1, 'Date'); -- { serverError 70 }
-SELECT accurateCast(0xFFFFFFFF + 1, 'Date'); -- { serverError 70 }
-SELECT accurateCast('1xxx', 'Date'); -- { serverError 38 }
+SELECT accurateCast(-1, 'Date'); -- { serverError CANNOT_CONVERT_TYPE }
+SELECT accurateCast(0xFFFFFFFF + 1, 'Date'); -- { serverError CANNOT_CONVERT_TYPE }
+SELECT accurateCast('1xxx', 'Date'); -- { serverError CANNOT_PARSE_DATE }
 SELECT accurateCast('2023-05-30', 'Date');
 SELECT accurateCast(19, 'Date');

From 7a02a70ad4239e920c6e23b9bc2bcc0a5c5db58b Mon Sep 17 00:00:00 2001
From: Dmitry Kardymon
Date: Fri, 9 Jun 2023 11:11:49 +0000
Subject: [PATCH 274/722] Add value to exceptions text

---
 src/Functions/DateTimeTransforms.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h
index 1d3ec1bd368..019e0c42cde 100644
--- a/src/Functions/DateTimeTransforms.h
+++ b/src/Functions/DateTimeTransforms.h
@@ -1461,8 +1461,8 @@ struct Transformer
                     }
                     else
                     {
-                        throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value in column {} cannot be safely converted into type {}",
-                            TypeName, TypeName);
+                        throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Value {} cannot be safely converted into type {}",
+                            vec_from[i], TypeName);
                     }
                 }
             }

From f437d5d8b51b69347eecb79a0af878bfd2707d9d Mon Sep 17 00:00:00 2001
From: Dmitry Kardymon
Date: Fri, 9 Jun 2023 11:27:27 +0000
Subject: [PATCH 275/722] Fix toDateTimeOrZero description

---
 docs/en/sql-reference/functions/type-conversion-functions.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md
index c634a3da27e..28db7e6e677 100644
--- a/docs/en/sql-reference/functions/type-conversion-functions.md
+++ b/docs/en/sql-reference/functions/type-conversion-functions.md
@@ -424,7 +424,7 @@ Result:

 ## toDateTimeOrZero

-The same as [toDateTime](#todate) but returns lower boundery of [Date](/docs/en/sql-reference/data-types/date.md) if an invalid argument is received. Only [String](/docs/en/sql-reference/data-types/string.md) argument is supported.
+The same as [toDateTime](#todatetime) but returns lower boundary of [DateTime](/docs/en/sql-reference/data-types/datetime.md) if an invalid argument is received. Only [String](/docs/en/sql-reference/data-types/string.md) argument is supported.
**Example** From 2d220bd8411607521fb6a2767b568352a2cae127 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 9 Jun 2023 11:38:50 +0000 Subject: [PATCH 276/722] Fix NuRaft --- contrib/NuRaft | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index f43d10dbc97..8f267da1a91 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit f43d10dbc977a63f11dfb3afdd010fcf7ad89950 +Subproject commit 8f267da1a91310bd152af755b0178cfd38c646c7 From 32d781c058321b3c2ee0b21b17a2cbb87a9a3e23 Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Fri, 9 Jun 2023 14:51:26 +0300 Subject: [PATCH 277/722] Align the documentation with the new feature --- docs/en/sql-reference/functions/type-conversion-functions.md | 4 +++- docs/ru/sql-reference/functions/type-conversion-functions.md | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index f6c99b168ac..dad3cfb4cc5 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -1343,6 +1343,7 @@ parseDateTimeBestEffort(time_string [, time_zone]) - A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` etc. - A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` are substituted as `2000-01`. - A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`. +- A string that includes the date and time in a [syslog](https://datatracker.ietf.org/doc/html/rfc3164) datetime format. For example, `Jun 9 14:20:32`. For all of the formats with separator the function parses months names expressed by their full name or by the first three letters of a month name. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. @@ -1428,10 +1429,11 @@ Result: **See Also** -- [RFC 1123](https://tools.ietf.org/html/rfc1123) +- [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123) - [toDate](#todate) - [toDateTime](#todatetime) - [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/) +- [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164) ## parseDateTimeBestEffortUS diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 298b7bbc93e..03e3adfbdca 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -1022,6 +1022,7 @@ parseDateTimeBestEffort(time_string[, time_zone]) - Строка с датой, но без времени: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` и т.д. - Строка с временем, и с днём: `DD`, `DD hh`, `DD hh:mm`. В этом случае `YYYY-MM` принимается равным `2000-01`. - Строка, содержащая дату и время вместе с информацией о часовом поясе: `YYYY-MM-DD hh:mm:ss ±h:mm`, и т.д. Например, `2020-12-12 17:36:00 -5:00`. +- Строка, содержащая дату и время в формате [syslog](https://datatracker.ietf.org/doc/html/rfc3164). Например, `Jun 9 14:20:32`. Для всех форматов с разделителями функция распознаёт названия месяцев, выраженных в виде полного англоязычного имени месяца или в виде первых трёх символов имени месяца. Примеры: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. 
@@ -1108,9 +1109,10 @@ SELECT parseDateTimeBestEffort('10 20:19'); **Смотрите также** - [Информация о формате ISO 8601 от @xkcd](https://xkcd.com/1179/) -- [RFC 1123](https://tools.ietf.org/html/rfc1123) +- [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123) - [toDate](#todate) - [toDateTime](#todatetime) +- [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164) ## parseDateTimeBestEffortUS {#parsedatetimebesteffortUS} From b740a08b6e508ccee08efc7e2ca83e8d7192e3ef Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Fri, 9 Jun 2023 15:05:23 +0300 Subject: [PATCH 278/722] Fix the docs --- docs/en/sql-reference/functions/type-conversion-functions.md | 2 +- docs/ru/sql-reference/functions/type-conversion-functions.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index dad3cfb4cc5..8e186844c93 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -1343,7 +1343,7 @@ parseDateTimeBestEffort(time_string [, time_zone]) - A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` etc. - A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` are substituted as `2000-01`. - A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`. -- A string that includes the date and time in a [syslog](https://datatracker.ietf.org/doc/html/rfc3164) datetime format. For example, `Jun 9 14:20:32`. +- A string that includes the date and time in the [syslog timestamp](https://datatracker.ietf.org/doc/html/rfc3164) format. For example, `Jun 9 14:20:32`. For all of the formats with separator the function parses months names expressed by their full name or by the first three letters of a month name. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 03e3adfbdca..93ca6b410c8 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -1022,7 +1022,7 @@ parseDateTimeBestEffort(time_string[, time_zone]) - Строка с датой, но без времени: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` и т.д. - Строка с временем, и с днём: `DD`, `DD hh`, `DD hh:mm`. В этом случае `YYYY-MM` принимается равным `2000-01`. - Строка, содержащая дату и время вместе с информацией о часовом поясе: `YYYY-MM-DD hh:mm:ss ±h:mm`, и т.д. Например, `2020-12-12 17:36:00 -5:00`. -- Строка, содержащая дату и время в формате [syslog](https://datatracker.ietf.org/doc/html/rfc3164). Например, `Jun 9 14:20:32`. +- Строка, содержащая дату и время в формате [syslog timestamp](https://datatracker.ietf.org/doc/html/rfc3164). Например, `Jun 9 14:20:32`. Для всех форматов с разделителями функция распознаёт названия месяцев, выраженных в виде полного англоязычного имени месяца или в виде первых трёх символов имени месяца. Примеры: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. 
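
Editor's note (not part of the patch series): the syslog-style input documented in the two patches above can be sanity-checked with a query like the sketch below. The exact result depends on the current year and the server time zone, because the missing year is inferred according to the rule spelled out in PATCH 280 further down.

``` sql
-- Illustrative only: assumes a server build that already contains the
-- syslog-timestamp parsing change documented above.
-- 'Jun 9 14:20:32' carries no year, so the current (or previous) year is used.
SELECT parseDateTimeBestEffort('Jun 9 14:20:32') AS parsed;
```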
From d0c2c1dbad9da978246fe6c9105a62a972041cd8 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Fri, 9 Jun 2023 12:06:43 +0000 Subject: [PATCH 279/722] Add test and reorder if's --- src/Functions/FunctionsConversion.h | 49 ++++++++++--------- .../01556_accurate_cast_or_null.reference | 1 + .../01556_accurate_cast_or_null.sql | 1 + 3 files changed, 27 insertions(+), 24 deletions(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index ea8efada21d..e44a3bdaa2e 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -2866,36 +2866,37 @@ private: using LeftDataType = typename Types::LeftType; using RightDataType = typename Types::RightType; - if constexpr (IsDataTypeNumber && IsDataTypeNumber) + if constexpr (IsDataTypeNumber) { - if (wrapper_cast_type == CastType::accurate) + if constexpr (IsDataTypeNumber) { - result_column = ConvertImpl::execute( - arguments, result_type, input_rows_count, AccurateConvertStrategyAdditions()); - } - else - { - result_column = ConvertImpl::execute( - arguments, result_type, input_rows_count, AccurateOrNullConvertStrategyAdditions()); + if (wrapper_cast_type == CastType::accurate) + { + result_column = ConvertImpl::execute( + arguments, result_type, input_rows_count, AccurateConvertStrategyAdditions()); + } + else + { + result_column = ConvertImpl::execute( + arguments, result_type, input_rows_count, AccurateOrNullConvertStrategyAdditions()); + } + return true; } - return true; - } - - if constexpr (IsDataTypeNumber - && (std::is_same_v || std::is_same_v)) - { - if (wrapper_cast_type == CastType::accurate) + if constexpr (std::is_same_v || std::is_same_v) { - result_column = ConvertImpl::template execute( - arguments, result_type, input_rows_count); + if (wrapper_cast_type == CastType::accurate) + { + result_column = ConvertImpl::template execute( + arguments, result_type, input_rows_count); + } + else + { + result_column = ConvertImpl::template execute( + arguments, result_type, input_rows_count); + } + return true; } - else - { - result_column = ConvertImpl::template execute( - arguments, result_type, input_rows_count); - } - return true; } return false; diff --git a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference index 3bff125068a..31a9c37421e 100644 --- a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference @@ -40,4 +40,5 @@ \N \N 2023-05-30 +2149-06-06 1970-01-20 diff --git a/tests/queries/0_stateless/01556_accurate_cast_or_null.sql b/tests/queries/0_stateless/01556_accurate_cast_or_null.sql index 3266198d930..f00f6ef837f 100644 --- a/tests/queries/0_stateless/01556_accurate_cast_or_null.sql +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.sql @@ -47,4 +47,5 @@ SELECT accurateCastOrNull(-1, 'Date'); SELECT accurateCastOrNull(5000000000, 'Date'); SELECT accurateCastOrNull('1xxx', 'Date'); SELECT accurateCastOrNull('2023-05-30', 'Date'); +SELECT accurateCastOrNull('2180-01-01', 'Date'); SELECT accurateCastOrNull(19, 'Date'); From a21bd4ec62825172d859424be0687e127d36b4c0 Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Fri, 9 Jun 2023 15:33:51 +0300 Subject: [PATCH 280/722] Elucidate the syslog case in the documentation --- docs/en/sql-reference/functions/type-conversion-functions.md | 3 ++- docs/ru/sql-reference/functions/type-conversion-functions.md | 3 ++- 2 files changed, 4 insertions(+), 2 
deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 8e186844c93..e62cf89a6b2 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -1341,11 +1341,12 @@ parseDateTimeBestEffort(time_string [, time_zone]) - A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time). - A string with a date and a time component: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc. - A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` etc. -- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` are substituted as `2000-01`. +- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `MM` is substituted by `01`. - A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`. - A string that includes the date and time in the [syslog timestamp](https://datatracker.ietf.org/doc/html/rfc3164) format. For example, `Jun 9 14:20:32`. For all of the formats with separator the function parses months names expressed by their full name or by the first three letters of a month name. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. +If the year is not specified, it is considered to be equal to the current year. If the resulting date and time happens to be ahead of the current moment even by a second, the current year is substituted by the previous one. **Returned value** diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 93ca6b410c8..6de55757b64 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -1020,11 +1020,12 @@ parseDateTimeBestEffort(time_string[, time_zone]) - [Unix timestamp](https://ru.wikipedia.org/wiki/Unix-время) в строковом представлении. 9 или 10 символов. - Строка с датой и временем: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc. - Строка с датой, но без времени: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` и т.д. -- Строка с временем, и с днём: `DD`, `DD hh`, `DD hh:mm`. В этом случае `YYYY-MM` принимается равным `2000-01`. +- Строка с временем, и с днём: `DD`, `DD hh`, `DD hh:mm`. В этом случае `MM` принимается равным `01`. - Строка, содержащая дату и время вместе с информацией о часовом поясе: `YYYY-MM-DD hh:mm:ss ±h:mm`, и т.д. Например, `2020-12-12 17:36:00 -5:00`. - Строка, содержащая дату и время в формате [syslog timestamp](https://datatracker.ietf.org/doc/html/rfc3164). Например, `Jun 9 14:20:32`. Для всех форматов с разделителями функция распознаёт названия месяцев, выраженных в виде полного англоязычного имени месяца или в виде первых трёх символов имени месяца. Примеры: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. +Если год не указан, вместо него подставляется текущий год. Если в результате получается будущее время, хотя бы на одну секунду впереди, текущий год заменяется на прошлый. 
**Возвращаемое значение** From 056ca4f555fbbf4463de5be8642a2c01b6759192 Mon Sep 17 00:00:00 2001 From: jinjunzh Date: Wed, 24 May 2023 13:26:15 -0400 Subject: [PATCH 281/722] Add extensive testing cases for deflate qpl codec --- .../sql-reference/statements/create/table.md | 2 +- src/Client/Connection.cpp | 2 +- src/Compression/CompressionCodecDeflateQpl.h | 3 +- src/Compression/CompressionFactory.h | 4 +- .../CompressionFactoryAdditions.cpp | 14 ++-- src/Compression/ICompressionCodec.h | 3 + src/Core/Settings.h | 1 + src/Interpreters/InterpreterCreateQuery.cpp | 3 +- src/Server/TCPHandler.cpp | 2 +- src/Storages/AlterCommands.cpp | 8 +-- src/Storages/ColumnsDescription.cpp | 2 +- src/Storages/Distributed/DistributedSink.cpp | 2 +- src/Storages/TTLDescription.cpp | 2 +- .../deflateqpl_compression_by_default.xml | 11 ++++ .../configs/enable_deflateqpl_codec.xml | 7 ++ .../test_non_default_compression/test.py | 65 ++++++++++++++++++- ...04_test_alter_compression_codecs.reference | 31 ++++++--- .../00804_test_alter_compression_codecs.sql | 28 +++++--- 18 files changed, 153 insertions(+), 37 deletions(-) create mode 100644 tests/integration/test_non_default_compression/configs/deflateqpl_compression_by_default.xml create mode 100644 tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index de44a001472..b0865ad2896 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -380,7 +380,7 @@ High compression levels are useful for asymmetric scenarios, like compress once, `DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply: -- DEFLATE_QPL is experimental and can only be used after setting configuration parameter `allow_experimental_codecs=1`. +- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_qpl_deflate=1`. - DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details. - DEFLATE_QPL works best if the system has a Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details. - DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled. 
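
Editor's note (not part of the patch): the usage pattern implied by the documentation change above, and exercised by the tests added later in this patch, is roughly the following sketch. It assumes a ClickHouse build with QPL support; the table name is hypothetical.

``` sql
-- The DEFLATE_QPL codec is rejected unless the new setting is enabled first.
SET enable_qpl_deflate = 1;

CREATE TABLE qpl_example
(
    id UInt64 CODEC(DEFLATE_QPL),
    data String CODEC(DEFLATE_QPL)
)
ENGINE = MergeTree()
ORDER BY id;
```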
diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 2350a5039ab..68bc3b39a56 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -588,7 +588,7 @@ void Connection::sendQuery( if (method == "ZSTD") level = settings->network_zstd_compression_level; - CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs); + CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_qpl_deflate); compression_codec = CompressionCodecFactory::instance().get(method, level); } else diff --git a/src/Compression/CompressionCodecDeflateQpl.h b/src/Compression/CompressionCodecDeflateQpl.h index 7a1a764295d..13aa8733b54 100644 --- a/src/Compression/CompressionCodecDeflateQpl.h +++ b/src/Compression/CompressionCodecDeflateQpl.h @@ -98,7 +98,8 @@ public: protected: bool isCompression() const override { return true; } bool isGenericCompression() const override { return true; } - bool isExperimental() const override { return true; } + bool isExperimental() const override { return false; } + bool isDeflateQplCompression() const override { return true; } UInt32 doCompressData(const char * source, UInt32 source_size, char * dest) const override; void doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const override; diff --git a/src/Compression/CompressionFactory.h b/src/Compression/CompressionFactory.h index a4451f9ed2e..1fdaf4f1c71 100644 --- a/src/Compression/CompressionFactory.h +++ b/src/Compression/CompressionFactory.h @@ -40,10 +40,10 @@ public: CompressionCodecPtr getDefaultCodec() const; /// Validate codecs AST specified by user and parses codecs description (substitute default parameters) - ASTPtr validateCodecAndGetPreprocessedAST(const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs) const; + ASTPtr validateCodecAndGetPreprocessedAST(const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate) const; /// Validate codecs AST specified by user - void validateCodec(const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs) const; + void validateCodec(const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate) const; /// Get codec by AST and possible column_type. 
Some codecs can use /// information about type to improve inner settings, but every codec should diff --git a/src/Compression/CompressionFactoryAdditions.cpp b/src/Compression/CompressionFactoryAdditions.cpp index 978a0fe5069..2630326238a 100644 --- a/src/Compression/CompressionFactoryAdditions.cpp +++ b/src/Compression/CompressionFactoryAdditions.cpp @@ -34,7 +34,7 @@ namespace ErrorCodes void CompressionCodecFactory::validateCodec( - const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs) const + const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate) const { if (family_name.empty()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Compression codec name cannot be empty"); @@ -43,13 +43,13 @@ void CompressionCodecFactory::validateCodec( { auto literal = std::make_shared(static_cast(*level)); validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", makeASTFunction(Poco::toUpper(family_name), literal)), - {}, sanity_check, allow_experimental_codecs); + {}, sanity_check, allow_experimental_codecs, enable_qpl_deflate); } else { auto identifier = std::make_shared(Poco::toUpper(family_name)); validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", identifier), - {}, sanity_check, allow_experimental_codecs); + {}, sanity_check, allow_experimental_codecs, enable_qpl_deflate); } } @@ -77,7 +77,7 @@ bool innerDataTypeIsFloat(const DataTypePtr & type) } ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST( - const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs) const + const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate) const { if (const auto * func = ast->as()) { @@ -159,6 +159,12 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST( " You can enable it with the 'allow_experimental_codecs' setting.", codec_family_name); + if (!enable_qpl_deflate && result_codec->isDeflateQplCompression()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Codec {} is disabled by default." + " You can enable it with the 'enable_qpl_deflate' setting.", + codec_family_name); + codecs_descriptions->children.emplace_back(result_codec->getCodecDesc()); } diff --git a/src/Compression/ICompressionCodec.h b/src/Compression/ICompressionCodec.h index 44835ac19cb..d92ad3fc718 100644 --- a/src/Compression/ICompressionCodec.h +++ b/src/Compression/ICompressionCodec.h @@ -112,6 +112,9 @@ public: /// If it does nothing. virtual bool isNone() const { return false; } + /// This is a knob for Deflate QPL codec. 
+ virtual bool isDeflateQplCompression() const { return false; } + protected: /// This is used for fuzz testing friend int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 464b9168a4c..c6a2069e6ae 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -319,6 +319,7 @@ class IColumn; M(Bool, allow_distributed_ddl, true, "If it is set to true, then a user is allowed to executed distributed DDL queries.", 0) \ M(Bool, allow_suspicious_codecs, false, "If it is set to true, allow to specify meaningless compression codecs.", 0) \ M(Bool, allow_experimental_codecs, false, "If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).", 0) \ + M(Bool, enable_qpl_deflate, false, "If it is set to true, allow to use deflate_qpl for compression.", 0) \ M(UInt64, query_profiler_real_time_period_ns, QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS, "Period for real clock timer of query profiler (in nanoseconds). Set 0 value to turn off the real clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \ M(UInt64, query_profiler_cpu_time_period_ns, QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS, "Period for CPU clock timer of query profiler (in nanoseconds). Set 0 value to turn off the CPU clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \ M(Bool, metrics_perf_events_enabled, false, "If enabled, some of the perf events will be measured throughout queries' execution.", 0) \ diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index ab9e1fb04d6..5c22b46b360 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -571,6 +571,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( bool sanity_check_compression_codecs = !attach && !context_->getSettingsRef().allow_suspicious_codecs; bool allow_experimental_codecs = attach || context_->getSettingsRef().allow_experimental_codecs; + bool enable_qpl_deflate = attach || context_->getSettingsRef().enable_qpl_deflate; ColumnsDescription res; auto name_type_it = column_names_and_types.begin(); @@ -631,7 +632,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( if (col_decl.default_specifier == "ALIAS") throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot specify codec for column type ALIAS"); column.codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST( - col_decl.codec, column.type, sanity_check_compression_codecs, allow_experimental_codecs); + col_decl.codec, column.type, sanity_check_compression_codecs, allow_experimental_codecs, enable_qpl_deflate); } if (col_decl.ttl) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 1ded7d97248..96c585e7d16 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1775,7 +1775,7 @@ void TCPHandler::initBlockOutput(const Block & block) if (state.compression == Protocol::Compression::Enable) { - CompressionCodecFactory::instance().validateCodec(method, level, !query_settings.allow_suspicious_codecs, query_settings.allow_experimental_codecs); + CompressionCodecFactory::instance().validateCodec(method, level, !query_settings.allow_suspicious_codecs, query_settings.allow_experimental_codecs, 
query_settings.enable_qpl_deflate); state.maybe_compressed_out = std::make_shared( *out, CompressionCodecFactory::instance().get(method, level)); diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 5fd823b9e01..ecbddfc3e2a 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -388,7 +388,7 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context) column.comment = *comment; if (codec) - column.codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(codec, data_type, false, true); + column.codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(codec, data_type, false, true, true); column.ttl = ttl; @@ -429,7 +429,7 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context) else { if (codec) - column.codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(codec, data_type ? data_type : column.type, false, true); + column.codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(codec, data_type ? data_type : column.type, false, true, true); if (comment) column.comment = *comment; @@ -1067,7 +1067,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const "this column name is reserved for lightweight delete feature", backQuote(column_name)); if (command.codec) - CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs); + CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate); all_columns.add(ColumnDescription(column_name, command.data_type)); } @@ -1093,7 +1093,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const { if (all_columns.hasAlias(column_name)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot specify codec for column type ALIAS"); - CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs); + CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate); } auto column_default = all_columns.getDefault(column_name); if (column_default) diff --git a/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp index 8eabae7929c..045afd7e6e6 100644 --- a/src/Storages/ColumnsDescription.cpp +++ b/src/Storages/ColumnsDescription.cpp @@ -130,7 +130,7 @@ void ColumnDescription::readText(ReadBuffer & buf) comment = col_ast->comment->as().value.get(); if (col_ast->codec) - codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(col_ast->codec, type, false, true); + codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(col_ast->codec, type, false, true, true); if (col_ast->ttl) ttl = col_ast->ttl; diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index 720a951299a..ce1dbde8eae 100644 --- a/src/Storages/Distributed/DistributedSink.cpp 
+++ b/src/Storages/Distributed/DistributedSink.cpp @@ -733,7 +733,7 @@ void DistributedSink::writeToShard(const Cluster::ShardInfo & shard_info, const if (compression_method == "ZSTD") compression_level = settings.network_zstd_compression_level; - CompressionCodecFactory::instance().validateCodec(compression_method, compression_level, !settings.allow_suspicious_codecs, settings.allow_experimental_codecs); + CompressionCodecFactory::instance().validateCodec(compression_method, compression_level, !settings.allow_suspicious_codecs, settings.allow_experimental_codecs, settings.enable_qpl_deflate); CompressionCodecPtr compression_codec = CompressionCodecFactory::instance().get(compression_method, compression_level); /// tmp directory is used to ensure atomicity of transactions diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index e1a80800630..f5209cbdff6 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -285,7 +285,7 @@ TTLDescription TTLDescription::getTTLFromAST( { result.recompression_codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST( - ttl_element->recompression_codec, {}, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs); + ttl_element->recompression_codec, {}, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate); } } diff --git a/tests/integration/test_non_default_compression/configs/deflateqpl_compression_by_default.xml b/tests/integration/test_non_default_compression/configs/deflateqpl_compression_by_default.xml new file mode 100644 index 00000000000..2ad6a0f1eff --- /dev/null +++ b/tests/integration/test_non_default_compression/configs/deflateqpl_compression_by_default.xml @@ -0,0 +1,11 @@ + + + + + 0 + 0 + + deflate_qpl + + + diff --git a/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml b/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml new file mode 100644 index 00000000000..46e9e43ca27 --- /dev/null +++ b/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml @@ -0,0 +1,7 @@ + + + + 1 + + + diff --git a/tests/integration/test_non_default_compression/test.py b/tests/integration/test_non_default_compression/test.py index e0a67a5db95..e69b32daae0 100644 --- a/tests/integration/test_non_default_compression/test.py +++ b/tests/integration/test_non_default_compression/test.py @@ -41,7 +41,14 @@ node6 = cluster.add_instance( main_configs=["configs/allow_experimental_codecs.xml"], user_configs=["configs/allow_suspicious_codecs.xml"], ) - +node7 = cluster.add_instance( + "node7", + main_configs=["configs/deflateqpl_compression_by_default.xml"], + user_configs=[ + "configs/enable_deflateqpl_codec.xml", + "configs/allow_suspicious_codecs.xml", + ], +) @pytest.fixture(scope="module") def start_cluster(): @@ -244,3 +251,59 @@ def test_uncompressed_cache_plus_zstd_codec(start_cluster): ) == "10000\n" ) + +def test_preconfigured_deflateqpl_codec(start_cluster): + node7.query( + """ + CREATE TABLE compression_codec_multiple_with_key ( + somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), DEFLATE_QPL), + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, DEFLATE_QPL), + data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4, DEFLATE_QPL), + somecolumn Float64 + ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2; + """ + ) + node7.query( + 
"INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, 'hello', 88.88), (toDate('2018-10-12'), 100002, 'world', 99.99), (toDate('2018-10-12'), 1111, '!', 777.777)" + ) + assert ( + node7.query( + "SELECT COUNT(*) FROM compression_codec_multiple_with_key WHERE id % 2 == 0" + ) + == "2\n" + ) + assert ( + node7.query( + "SELECT DISTINCT somecolumn FROM compression_codec_multiple_with_key ORDER BY id" + ) + == "777.777\n88.88\n99.99\n" + ) + assert ( + node7.query( + "SELECT data FROM compression_codec_multiple_with_key WHERE id >= 1112 AND somedate = toDate('2018-10-12') AND somecolumn <= 100" + ) + == "hello\nworld\n" + ) + + node7.query( + "INSERT INTO compression_codec_multiple_with_key SELECT toDate('2018-10-12'), number, toString(number), 1.0 FROM system.numbers LIMIT 10000" + ) + + assert ( + node7.query( + "SELECT COUNT(id) FROM compression_codec_multiple_with_key WHERE id % 10 == 0" + ) + == "1001\n" + ) + assert ( + node7.query( + "SELECT SUM(somecolumn) FROM compression_codec_multiple_with_key" + ) + == str(777.777 + 88.88 + 99.99 + 1.0 * 10000) + "\n" + ) + assert ( + node7.query( + "SELECT count(*) FROM compression_codec_multiple_with_key GROUP BY somedate" + ) + == "10003\n" + ) diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference b/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference index cfbfadf1e67..a6afe11126c 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference @@ -12,13 +12,7 @@ CODEC(NONE) 2018-01-01 4 4 2018-01-01 5 5 2018-01-01 6 6 -2018-01-01 1 default_value -2018-01-01 2 default_value -2018-01-01 3 3 -2018-01-01 4 4 -2018-01-01 5 5 -2018-01-01 6 6 -CODEC(ZSTD(1), LZ4HC(0), LZ4, LZ4, NONE) +CODEC(DEFLATE_QPL) 2018-01-01 1 default_value 2018-01-01 2 default_value 2018-01-01 3 3 @@ -27,7 +21,26 @@ CODEC(ZSTD(1), LZ4HC(0), LZ4, LZ4, NONE) 2018-01-01 6 6 2018-01-01 7 7 2018-01-01 8 8 -CODEC(ZSTD(1), LZ4HC(0), LZ4, LZ4, NONE) -CODEC(NONE, LZ4, LZ4HC(0), ZSTD(1)) +2018-01-01 1 default_value +2018-01-01 2 default_value +2018-01-01 3 3 +2018-01-01 4 4 +2018-01-01 5 5 +2018-01-01 6 6 +2018-01-01 7 7 +2018-01-01 8 8 +CODEC(ZSTD(1), LZ4HC(0), LZ4, LZ4, DEFLATE_QPL, NONE) +2018-01-01 1 default_value +2018-01-01 2 default_value +2018-01-01 3 3 +2018-01-01 4 4 +2018-01-01 5 5 +2018-01-01 6 6 +2018-01-01 7 7 +2018-01-01 8 8 +2018-01-01 9 9 +2018-01-01 10 10 +CODEC(ZSTD(1), LZ4HC(0), LZ4, LZ4, DEFLATE_QPL, NONE) +CODEC(NONE, LZ4, LZ4HC(0), ZSTD(1), DEFLATE_QPL) 2 1 diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql index 85e5f8b63ad..40a8bb4c7cb 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql @@ -25,15 +25,23 @@ INSERT INTO alter_compression_codec VALUES('2018-01-01', 5, '5'); INSERT INTO alter_compression_codec VALUES('2018-01-01', 6, '6'); SELECT * FROM alter_compression_codec ORDER BY id; -OPTIMIZE TABLE alter_compression_codec FINAL; -SELECT * FROM alter_compression_codec ORDER BY id; - -SET allow_suspicious_codecs = 1; -ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(ZSTD, LZ4HC, LZ4, LZ4, NONE); +SET enable_qpl_deflate = 1; +ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(DEFLATE_QPL); SELECT compression_codec FROM system.columns WHERE database = 
currentDatabase() AND table = 'alter_compression_codec' AND name = 'alter_column'; INSERT INTO alter_compression_codec VALUES('2018-01-01', 7, '7'); INSERT INTO alter_compression_codec VALUES('2018-01-01', 8, '8'); +SELECT * FROM alter_compression_codec ORDER BY id; + +OPTIMIZE TABLE alter_compression_codec FINAL; +SELECT * FROM alter_compression_codec ORDER BY id; + +SET allow_suspicious_codecs = 1; +ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(ZSTD, LZ4HC, LZ4, LZ4, DEFLATE_QPL, NONE); +SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'alter_compression_codec' AND name = 'alter_column'; + +INSERT INTO alter_compression_codec VALUES('2018-01-01', 9, '9'); +INSERT INTO alter_compression_codec VALUES('2018-01-01', 10, '10'); OPTIMIZE TABLE alter_compression_codec FINAL; SELECT * FROM alter_compression_codec ORDER BY id; @@ -54,15 +62,17 @@ ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 00:00:00' CODEC(ZSTD(100)); -- { serverError 433 } +ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 00:00:00' CODEC(DEFLATE_QPL(100)); -- { serverError 378 } + DROP TABLE IF EXISTS alter_bad_codec; DROP TABLE IF EXISTS large_alter_table_00804; DROP TABLE IF EXISTS store_of_hash_00804; CREATE TABLE large_alter_table_00804 ( - somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), - id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), - data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4) + somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), DEFLATE_QPL), + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, DEFLATE_QPL), + data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4, DEFLATE_QPL) ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi', min_bytes_for_wide_part = 0; INSERT INTO large_alter_table_00804 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000; @@ -71,7 +81,7 @@ CREATE TABLE store_of_hash_00804 (hash UInt64) ENGINE = Memory(); INSERT INTO store_of_hash_00804 SELECT sum(cityHash64(*)) FROM large_alter_table_00804; -ALTER TABLE large_alter_table_00804 MODIFY COLUMN data CODEC(NONE, LZ4, LZ4HC, ZSTD); +ALTER TABLE large_alter_table_00804 MODIFY COLUMN data CODEC(NONE, LZ4, LZ4HC, ZSTD, DEFLATE_QPL); OPTIMIZE TABLE large_alter_table_00804; From d85bc02388317ed4b2743814bcc217baf1652971 Mon Sep 17 00:00:00 2001 From: jinjunzh Date: Wed, 24 May 2023 15:08:23 -0400 Subject: [PATCH 282/722] add function test for deflate_qpl --- ...4_test_custom_compression_codecs.reference | 8 ++-- .../00804_test_custom_compression_codecs.sql | 45 +++++++++++-------- ...m_compression_codes_log_storages.reference | 20 ++++----- ..._custom_compression_codes_log_storages.sql | 41 +++++++++-------- ...st_deflate_qpl_codec_compression.reference | 4 ++ ...804_test_deflate_qpl_codec_compression.sql | 32 +++++++++++++ ...804_test_delta_codec_compression.reference | 2 + .../00804_test_delta_codec_compression.sql | 38 ++++++++++++++++ 8 files changed, 140 insertions(+), 50 deletions(-) create mode 100644 tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference create mode 100644 tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference index 
7bd91e5a69b..a9cbe3d32d3 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference +++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference @@ -1,6 +1,6 @@ -1 hello 2018-12-14 1.1 aaa 5 -2 world 2018-12-15 2.2 bbb 6 -3 ! 2018-12-16 3.3 ccc 7 +1 hello 2018-12-14 1.1 aaa 5 qpl11 11 +2 world 2018-12-15 2.2 bbb 6 qpl22 22 +3 ! 2018-12-16 3.3 ccc 7 qpl33 33 2 1 world 2018-10-05 1.1 2 hello 2018-10-01 2.2 @@ -9,7 +9,7 @@ 10003 274972506.6 9175437371954010821 -CREATE TABLE default.compression_codec_multiple_more_types\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)),\n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), NONE, NONE, NONE, LZ4HC(0)),\n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)),\n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.compression_codec_multiple_more_types\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0), DEFLATE_QPL),\n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), NONE, NONE, NONE, LZ4HC(0), DEFLATE_QPL),\n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8), DEFLATE_QPL),\n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8), DEFLATE_QPL)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1.5555555555555 hello world! [77] ['John'] 7.1 xxxxxxxxxxxx [127] ['Henry'] ! diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql index c080c2fc98e..44a0daada27 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql @@ -1,5 +1,6 @@ SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; +SET enable_qpl_deflate = 1; DROP TABLE IF EXISTS compression_codec; @@ -9,18 +10,20 @@ CREATE TABLE compression_codec( ddd Date CODEC(NONE), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), - othernum Int64 CODEC(Delta) + othernum Int64 CODEC(Delta), + qplstr String CODEC(DEFLATE_QPL), + qplnum UInt32 CODEC(DEFLATE_QPL), ) ENGINE = MergeTree() ORDER BY tuple(); -INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); -INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6); -INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7); +INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5, 'qpl11', 11); +INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6,'qpl22', 22); +INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7, 'qpl33', 33); SELECT * FROM compression_codec ORDER BY id; OPTIMIZE TABLE compression_codec FINAL; -INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8); +INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8, 'qpl44', 44); DETACH TABLE compression_codec; ATTACH TABLE compression_codec; @@ -31,25 +34,31 @@ DROP TABLE IF EXISTS compression_codec; DROP TABLE IF EXISTS bad_codec; DROP TABLE IF EXISTS params_when_no_params; +DROP TABLE IF EXISTS params_when_no_params2; DROP TABLE IF EXISTS too_many_params; DROP TABLE IF EXISTS 
codec_multiple_direct_specification_1; DROP TABLE IF EXISTS codec_multiple_direct_specification_2; +DROP TABLE IF EXISTS codec_multiple_direct_specification_3; DROP TABLE IF EXISTS delta_bad_params1; DROP TABLE IF EXISTS delta_bad_params2; CREATE TABLE bad_codec(id UInt64 CODEC(adssadads)) ENGINE = MergeTree() order by tuple(); -- { serverError 432 } CREATE TABLE too_many_params(id UInt64 CODEC(ZSTD(2,3,4,5))) ENGINE = MergeTree() order by tuple(); -- { serverError 431 } CREATE TABLE params_when_no_params(id UInt64 CODEC(LZ4(1))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 378 } +CREATE TABLE params_when_no_params2(id UInt64 CODEC(DEFLATE_QPL(1))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 378 } CREATE TABLE codec_multiple_direct_specification_1(id UInt64 CODEC(MULTIPLE(LZ4, ZSTD))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 432 } CREATE TABLE codec_multiple_direct_specification_2(id UInt64 CODEC(multiple(LZ4, ZSTD))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 432 } +CREATE TABLE codec_multiple_direct_specification_3(id UInt64 CODEC(multiple(LZ4, DEFLATE_QPL))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 432 } CREATE TABLE delta_bad_params1(id UInt64 CODEC(Delta(3))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 433 } CREATE TABLE delta_bad_params2(id UInt64 CODEC(Delta(16))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 433 } DROP TABLE IF EXISTS bad_codec; DROP TABLE IF EXISTS params_when_no_params; +DROP TABLE IF EXISTS params_when_no_params2; DROP TABLE IF EXISTS too_many_params; DROP TABLE IF EXISTS codec_multiple_direct_specification_1; DROP TABLE IF EXISTS codec_multiple_direct_specification_2; +DROP TABLE IF EXISTS codec_multiple_direct_specification_3; DROP TABLE IF EXISTS delta_bad_params1; DROP TABLE IF EXISTS delta_bad_params2; @@ -58,10 +67,10 @@ DROP TABLE IF EXISTS compression_codec_multiple; SET network_compression_method = 'lz4hc'; CREATE TABLE compression_codec_multiple ( - id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), - data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), - ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), - somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4), DEFLATE_QPL), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8), DEFLATE_QPL), + ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC, DEFLATE_QPL), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD, DEFLATE_QPL) ) ENGINE = MergeTree() ORDER BY tuple(); INSERT INTO compression_codec_multiple VALUES (1, 'world', toDate('2018-10-05'), 1.1), (2, 'hello', toDate('2018-10-01'), 2.2), (3, 'buy', toDate('2018-10-11'), 3.3); @@ -85,15 +94,15 @@ SELECT sum(cityHash64(*)) FROM compression_codec_multiple; DROP TABLE IF EXISTS compression_codec_multiple_more_types; CREATE TABLE compression_codec_multiple_more_types ( - id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC), - data FixedString(12) CODEC(ZSTD, ZSTD, Delta, Delta, Delta, NONE, NONE, NONE, LZ4HC), - ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8)) + id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC, DEFLATE_QPL), + data FixedString(12) CODEC(ZSTD, ZSTD, Delta, Delta, Delta, NONE, NONE, NONE, LZ4HC, DEFLATE_QPL), + ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, 
Delta(8), DEFLATE_QPL) ) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 36 } CREATE TABLE compression_codec_multiple_more_types ( - id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC), - data FixedString(12) CODEC(ZSTD, ZSTD, NONE, NONE, NONE, LZ4HC), - ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8)) + id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC, DEFLATE_QPL), + data FixedString(12) CODEC(ZSTD, ZSTD, NONE, NONE, NONE, LZ4HC, DEFLATE_QPL), + ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8), DEFLATE_QPL) ) ENGINE = MergeTree() ORDER BY tuple(); SHOW CREATE TABLE compression_codec_multiple_more_types; @@ -109,9 +118,9 @@ SET network_compression_method = 'zstd'; SET network_zstd_compression_level = 5; CREATE TABLE compression_codec_multiple_with_key ( - somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), Delta, Delta), - id UInt64 CODEC(LZ4, ZSTD, Delta, NONE, LZ4HC, Delta), - data String CODEC(ZSTD(2), Delta(1), LZ4HC, NONE, LZ4, LZ4) + somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), Delta, Delta, DEFLATE_QPL), + id UInt64 CODEC(LZ4, ZSTD, Delta, NONE, LZ4HC, Delta, DEFLATE_QPL), + data String CODEC(ZSTD(2), Delta(1), LZ4HC, NONE, LZ4, LZ4, DEFLATE_QPL) ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference index 8145ca99829..d64b8a77eed 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference +++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference @@ -1,9 +1,9 @@ -CREATE TABLE default.compression_codec_log\n(\n `id` UInt64 CODEC(LZ4),\n `data` String CODEC(ZSTD(1)),\n `ddd` Date CODEC(NONE),\n `somenum` Float64 CODEC(ZSTD(2)),\n `somestr` FixedString(3) CODEC(LZ4HC(7)),\n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = Log -1 hello 2018-12-14 1.1 aaa 5 -2 world 2018-12-15 2.2 bbb 6 -3 ! 2018-12-16 3.3 ccc 7 +CREATE TABLE default.compression_codec_log\n(\n `id` UInt64 CODEC(LZ4),\n `data` String CODEC(ZSTD(1)),\n `ddd` Date CODEC(NONE),\n `somenum` Float64 CODEC(ZSTD(2)),\n `somestr` FixedString(3) CODEC(LZ4HC(7)),\n `othernum` Int64 CODEC(Delta(8)),\n `qplstr` String CODEC(DEFLATE_QPL),\n `qplnum` UInt32 CODEC(DEFLATE_QPL)\n)\nENGINE = Log +1 hello 2018-12-14 1.1 aaa 5 qpl11 11 +2 world 2018-12-15 2.2 bbb 6 qpl22 22 +3 ! 
2018-12-16 3.3 ccc 7 qpl33 33 2 -CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)),\n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)),\n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)),\n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = Log +CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4), DEFLATE_QPL),\n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8), DEFLATE_QPL),\n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0), DEFLATE_QPL),\n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1), DEFLATE_QPL)\n)\nENGINE = Log 1 world 2018-10-05 1.1 2 hello 2018-10-01 2.2 3 buy 2018-10-11 3.3 @@ -11,12 +11,12 @@ CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ 10003 274972506.6 9175437371954010821 -CREATE TABLE default.compression_codec_tiny_log\n(\n `id` UInt64 CODEC(LZ4),\n `data` String CODEC(ZSTD(1)),\n `ddd` Date CODEC(NONE),\n `somenum` Float64 CODEC(ZSTD(2)),\n `somestr` FixedString(3) CODEC(LZ4HC(7)),\n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = TinyLog -1 hello 2018-12-14 1.1 aaa 5 -2 world 2018-12-15 2.2 bbb 6 -3 ! 2018-12-16 3.3 ccc 7 +CREATE TABLE default.compression_codec_tiny_log\n(\n `id` UInt64 CODEC(LZ4),\n `data` String CODEC(ZSTD(1)),\n `ddd` Date CODEC(NONE),\n `somenum` Float64 CODEC(ZSTD(2)),\n `somestr` FixedString(3) CODEC(LZ4HC(7)),\n `othernum` Int64 CODEC(Delta(8)),\n `qplstr` String CODEC(DEFLATE_QPL),\n `qplnum` UInt32 CODEC(DEFLATE_QPL)\n)\nENGINE = TinyLog +1 hello 2018-12-14 1.1 aaa 5 qpl11 11 +2 world 2018-12-15 2.2 bbb 6 qpl22 22 +3 ! 
2018-12-16 3.3 ccc 7 qpl33 33 2 -CREATE TABLE default.compression_codec_multiple_tiny_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)),\n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)),\n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)),\n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = TinyLog +CREATE TABLE default.compression_codec_multiple_tiny_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4), DEFLATE_QPL),\n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8), DEFLATE_QPL),\n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0), DEFLATE_QPL),\n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1), DEFLATE_QPL)\n)\nENGINE = TinyLog 1 world 2018-10-05 1.1 2 hello 2018-10-01 2.2 3 buy 2018-10-11 3.3 diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql index fba6a216762..113f26732e7 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql @@ -1,5 +1,6 @@ SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; +SET enable_qpl_deflate = 1; -- copy-paste for storage log @@ -11,18 +12,20 @@ CREATE TABLE compression_codec_log( ddd Date CODEC(NONE), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), - othernum Int64 CODEC(Delta) + othernum Int64 CODEC(Delta), + qplstr String CODEC(DEFLATE_QPL), + qplnum UInt32 CODEC(DEFLATE_QPL), ) ENGINE = Log(); SHOW CREATE TABLE compression_codec_log; -INSERT INTO compression_codec_log VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); -INSERT INTO compression_codec_log VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6); -INSERT INTO compression_codec_log VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7); +INSERT INTO compression_codec_log VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5, 'qpl11', 11); +INSERT INTO compression_codec_log VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6,'qpl22', 22); +INSERT INTO compression_codec_log VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7, 'qpl33', 33); SELECT * FROM compression_codec_log ORDER BY id; -INSERT INTO compression_codec_log VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8); +INSERT INTO compression_codec_log VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8, 'qpl44', 44); DETACH TABLE compression_codec_log; ATTACH TABLE compression_codec_log; @@ -34,10 +37,10 @@ DROP TABLE IF EXISTS compression_codec_log; DROP TABLE IF EXISTS compression_codec_multiple_log; CREATE TABLE compression_codec_multiple_log ( - id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), - data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), - ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), - somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4), DEFLATE_QPL), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8), DEFLATE_QPL), + ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC, DEFLATE_QPL), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD, DEFLATE_QPL) ) ENGINE = Log(); SHOW CREATE TABLE compression_codec_multiple_log; @@ -69,18 +72,20 @@ CREATE TABLE 
compression_codec_tiny_log( ddd Date CODEC(NONE), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), - othernum Int64 CODEC(Delta) + othernum Int64 CODEC(Delta), + qplstr String CODEC(DEFLATE_QPL), + qplnum UInt32 CODEC(DEFLATE_QPL), ) ENGINE = TinyLog(); SHOW CREATE TABLE compression_codec_tiny_log; -INSERT INTO compression_codec_tiny_log VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); -INSERT INTO compression_codec_tiny_log VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6); -INSERT INTO compression_codec_tiny_log VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7); +INSERT INTO compression_codec_tiny_log VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5, 'qpl11', 11); +INSERT INTO compression_codec_tiny_log VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6, 'qpl22', 22); +INSERT INTO compression_codec_tiny_log VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7, 'qpl33', 33); SELECT * FROM compression_codec_tiny_log ORDER BY id; -INSERT INTO compression_codec_tiny_log VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8); +INSERT INTO compression_codec_tiny_log VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8, 'qpl44', 44); DETACH TABLE compression_codec_tiny_log; ATTACH TABLE compression_codec_tiny_log; @@ -92,10 +97,10 @@ DROP TABLE IF EXISTS compression_codec_tiny_log; DROP TABLE IF EXISTS compression_codec_multiple_tiny_log; CREATE TABLE compression_codec_multiple_tiny_log ( - id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), - data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), - ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), - somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4), DEFLATE_QPL), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8), DEFLATE_QPL), + ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC, DEFLATE_QPL), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD, DEFLATE_QPL) ) ENGINE = TinyLog(); SHOW CREATE TABLE compression_codec_multiple_tiny_log; diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference new file mode 100644 index 00000000000..88d274d9cba --- /dev/null +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference @@ -0,0 +1,4 @@ +1 hello 2018-12-14 1.1 aaa 5 qpl11 11 +2 world 2018-12-15 2.2 bbb 6 qpl22 22 +3 ! 
2018-12-16 3.3 ccc 7 qpl33 33 +2 diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql new file mode 100644 index 00000000000..fe23e49804d --- /dev/null +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql @@ -0,0 +1,32 @@ +SET send_logs_level = 'fatal'; +SET enable_qpl_deflate = 1; + +DROP TABLE IF EXISTS compression_codec; + +CREATE TABLE compression_codec( + id UInt64 CODEC(DEFLATE_QPL), + data String CODEC(DEFLATE_QPL), + ddd Date CODEC(DEFLATE_QPL), + somenum Float64 CODEC(DEFLATE_QPL), + somestr FixedString(3) CODEC(DEFLATE_QPL), + othernum Int64 CODEC(DEFLATE_QPL), + qplstr String CODEC(DEFLATE_QPL), + qplnum UInt32 CODEC(DEFLATE_QPL), +) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5, 'qpl11', 11); +INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6,'qpl22', 22); +INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7, 'qpl33', 33); + +SELECT * FROM compression_codec ORDER BY id; + +OPTIMIZE TABLE compression_codec FINAL; + +INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8, 'qpl44', 44); + +DETACH TABLE compression_codec; +ATTACH TABLE compression_codec; + +SELECT count(*) FROM compression_codec WHERE id = 2 GROUP BY id; + +DROP TABLE IF EXISTS compression_codec; diff --git a/tests/queries/0_stateless/00804_test_delta_codec_compression.reference b/tests/queries/0_stateless/00804_test_delta_codec_compression.reference index 949d37ed27a..37f9d4901b3 100644 --- a/tests/queries/0_stateless/00804_test_delta_codec_compression.reference +++ b/tests/queries/0_stateless/00804_test_delta_codec_compression.reference @@ -4,3 +4,5 @@ 1 32 1 +17 +1 diff --git a/tests/queries/0_stateless/00804_test_delta_codec_compression.sql b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql index 25988f6474b..f9805246662 100644 --- a/tests/queries/0_stateless/00804_test_delta_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql @@ -115,3 +115,41 @@ USING(key); DROP TABLE IF EXISTS delta_codec_string; DROP TABLE IF EXISTS default_codec_string; + +SET enable_qpl_deflate = 1; +DROP TABLE IF EXISTS delta_codec_string_qpl; +DROP TABLE IF EXISTS default_codec_string_qpl; + +CREATE TABLE delta_codec_string_qpl +( + id Float64 Codec(Delta, DEFLATE_QPL) +) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false; + +CREATE TABLE default_codec_string_qpl +( + id Float64 Codec(DEFLATE_QPL) +) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false; + +INSERT INTO delta_codec_string_qpl SELECT concat(toString(number), toString(number % 100)) FROM numbers(1547510400, 500000); +INSERT INTO default_codec_string_qpl SELECT * from delta_codec_string_qpl; + +OPTIMIZE TABLE delta_codec_string_qpl FINAL; +OPTIMIZE TABLE default_codec_string_qpl FINAL; + +SELECT + floor(big_size / small_size) as ratio +FROM + (SELECT 1 AS key, sum(bytes_on_disk) AS small_size FROM system.parts WHERE database = currentDatabase() and table = 'delta_codec_string_qpl' and active) +INNER JOIN + (SELECT 1 AS key, sum(bytes_on_disk) as big_size FROM system.parts WHERE database = currentDatabase() and table = 'default_codec_string_qpl' and active) USING(key); + +SELECT + 
small_hash == big_hash +FROM + (SELECT 1 AS key, sum(cityHash64(*)) AS small_hash FROM delta_codec_string_qpl) +INNER JOIN + (SELECT 1 AS key, sum(cityHash64(*)) AS big_hash FROM default_codec_string_qpl) +USING(key); + +DROP TABLE IF EXISTS delta_codec_string_qpl; +DROP TABLE IF EXISTS default_codec_string_qpl; From 31173ab55b0926f634c2fbfc06f7d2f34410a4ff Mon Sep 17 00:00:00 2001 From: jinjunzh Date: Wed, 24 May 2023 15:15:40 -0400 Subject: [PATCH 283/722] add sections of deflate_qpl for stress test and performance test --- tests/ci/stress.py | 1 + tests/performance/codecs_float_insert.xml | 2 ++ tests/performance/codecs_float_select.xml | 2 ++ tests/performance/codecs_int_insert.xml | 2 ++ tests/performance/codecs_int_select.xml | 2 ++ 5 files changed, 9 insertions(+) diff --git a/tests/ci/stress.py b/tests/ci/stress.py index b9044874071..b95cac9044e 100755 --- a/tests/ci/stress.py +++ b/tests/ci/stress.py @@ -20,6 +20,7 @@ def get_options(i, upgrade_check): '''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i) ) client_options.append("allow_experimental_database_replicated=1") + client_options.append("enable_qpl_deflate=1") # If database name is not specified, new database is created for each functional test. # Run some threads with one database for all tests. diff --git a/tests/performance/codecs_float_insert.xml b/tests/performance/codecs_float_insert.xml index 64325d30189..25291f7f499 100644 --- a/tests/performance/codecs_float_insert.xml +++ b/tests/performance/codecs_float_insert.xml @@ -1,6 +1,7 @@ 1 + 1 @@ -10,6 +11,7 @@ NONE LZ4 ZSTD + DEFLATE_QPL DoubleDelta Gorilla FPC diff --git a/tests/performance/codecs_float_select.xml b/tests/performance/codecs_float_select.xml index 325c140d9a0..bb67987c75e 100644 --- a/tests/performance/codecs_float_select.xml +++ b/tests/performance/codecs_float_select.xml @@ -1,6 +1,7 @@ 1 + 1 @@ -10,6 +11,7 @@ NONE LZ4 ZSTD + DEFLATE_QPL DoubleDelta Gorilla FPC diff --git a/tests/performance/codecs_int_insert.xml b/tests/performance/codecs_int_insert.xml index 618e20160f8..1db9ee8f746 100644 --- a/tests/performance/codecs_int_insert.xml +++ b/tests/performance/codecs_int_insert.xml @@ -1,6 +1,7 @@ 1 + 1 @@ -10,6 +11,7 @@ NONE LZ4 ZSTD + DEFLATE_QPL Delta T64 DoubleDelta diff --git a/tests/performance/codecs_int_select.xml b/tests/performance/codecs_int_select.xml index 62c1ee16e7b..5dc7ab48704 100644 --- a/tests/performance/codecs_int_select.xml +++ b/tests/performance/codecs_int_select.xml @@ -1,6 +1,7 @@ 1 + 1 @@ -10,6 +11,7 @@ NONE LZ4 ZSTD + DEFLATE_QPL Delta T64 DoubleDelta From cbdb408ec8330c8ce469c68e979ca208c76d0629 Mon Sep 17 00:00:00 2001 From: jinjunzh Date: Fri, 26 May 2023 12:15:34 -0400 Subject: [PATCH 284/722] add USE_QPL for buildoptions --- src/Storages/System/StorageSystemBuildOptions.cpp.in | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/System/StorageSystemBuildOptions.cpp.in b/src/Storages/System/StorageSystemBuildOptions.cpp.in index 3465e47449b..c2a188e7750 100644 --- a/src/Storages/System/StorageSystemBuildOptions.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.cpp.in @@ -68,6 +68,7 @@ const char * auto_config_build[] "GIT_BRANCH", R"IRjaNsZIL9Yh7FQ4(@GIT_BRANCH@)IRjaNsZIL9Yh7FQ4", "GIT_DATE", "@GIT_DATE@", "GIT_COMMIT_SUBJECT", R"Gi17KJMlbGCjErEN(@GIT_COMMIT_SUBJECT@)Gi17KJMlbGCjErEN", + "USE_QPL", "@ENABLE_QPL@", nullptr, nullptr }; From f1192d59afa7ee2271d7ee6b5cb9d98bb27254a0 Mon Sep 17 00:00:00 2001 From: jinjunzh Date: Thu, 1 Jun 2023 12:42:22 -0400 Subject: [PATCH 285/722] 
refine patch according to comments --- .../sql-reference/statements/create/table.md | 2 +- src/Client/Connection.cpp | 2 +- src/Compression/CompressionFactory.h | 4 +- .../CompressionFactoryAdditions.cpp | 12 +++--- src/Compression/ICompressionCodec.h | 6 +-- src/Core/Settings.h | 2 +- src/Interpreters/InterpreterCreateQuery.cpp | 4 +- src/Server/TCPHandler.cpp | 2 +- src/Storages/AlterCommands.cpp | 4 +- src/Storages/Distributed/DistributedSink.cpp | 2 +- src/Storages/TTLDescription.cpp | 2 +- tests/ci/stress.py | 2 +- .../configs/enable_deflateqpl_codec.xml | 2 +- .../test_non_default_compression/test.py | 32 ++++++++-------- tests/performance/codecs_float_insert.xml | 3 +- tests/performance/codecs_float_select.xml | 3 +- tests/performance/codecs_int_insert.xml | 3 +- tests/performance/codecs_int_select.xml | 3 +- ...04_test_alter_compression_codecs.reference | 4 +- .../00804_test_alter_compression_codecs.sql | 10 ++--- ...4_test_custom_compression_codecs.reference | 6 +-- .../00804_test_custom_compression_codecs.sql | 13 +++---- ..._custom_compression_codes_log_storages.sql | 2 +- ...st_deflate_qpl_codec_compression.reference | 6 +-- ...804_test_deflate_qpl_codec_compression.sql | 16 ++++---- ...804_test_delta_codec_compression.reference | 2 - .../00804_test_delta_codec_compression.sql | 38 ------------------- 27 files changed, 71 insertions(+), 116 deletions(-) diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index b0865ad2896..d0e17410791 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -380,7 +380,7 @@ High compression levels are useful for asymmetric scenarios, like compress once, `DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply: -- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_qpl_deflate=1`. +- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_qpl_deflate_codec=1`. - DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details. - DEFLATE_QPL works best if the system has a Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details. - DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled. 
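A minimal usage sketch of the codec as documented above, assuming the setting name in effect at this point in the series (`enable_qpl_deflate_codec`, renamed to `enable_deflate_qpl_codec` in a later patch); the table and column names are illustrative and mirror the 00804 stateless tests:

-- Illustrative sketch only, not part of the patch: enable the codec for the session, then declare it per column.
SET enable_qpl_deflate_codec = 1;

CREATE TABLE t_deflate_qpl_example
(
    id UInt64 CODEC(DEFLATE_QPL),
    data String CODEC(DEFLATE_QPL)
) ENGINE = MergeTree() ORDER BY id;

INSERT INTO t_deflate_qpl_example VALUES (1, 'hello'), (2, 'world');
SELECT * FROM t_deflate_qpl_example ORDER BY id;

-- Without the SET above, the CREATE TABLE is expected to fail with a BAD_ARGUMENTS error
-- ("Codec DEFLATE_QPL is disabled by default"), which is what the validateCodec /
-- validateCodecAndGetPreprocessedAST changes in the diffs below enforce.

DROP TABLE t_deflate_qpl_example;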
diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 68bc3b39a56..ac8e6654e84 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -588,7 +588,7 @@ void Connection::sendQuery( if (method == "ZSTD") level = settings->network_zstd_compression_level; - CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_qpl_deflate); + CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_qpl_deflate_codec); compression_codec = CompressionCodecFactory::instance().get(method, level); } else diff --git a/src/Compression/CompressionFactory.h b/src/Compression/CompressionFactory.h index 1fdaf4f1c71..e020e51bb09 100644 --- a/src/Compression/CompressionFactory.h +++ b/src/Compression/CompressionFactory.h @@ -40,10 +40,10 @@ public: CompressionCodecPtr getDefaultCodec() const; /// Validate codecs AST specified by user and parses codecs description (substitute default parameters) - ASTPtr validateCodecAndGetPreprocessedAST(const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate) const; + ASTPtr validateCodecAndGetPreprocessedAST(const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate_codec) const; /// Validate codecs AST specified by user - void validateCodec(const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate) const; + void validateCodec(const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate_codec) const; /// Get codec by AST and possible column_type. 
Some codecs can use /// information about type to improve inner settings, but every codec should diff --git a/src/Compression/CompressionFactoryAdditions.cpp b/src/Compression/CompressionFactoryAdditions.cpp index 2630326238a..b4a2d96cf39 100644 --- a/src/Compression/CompressionFactoryAdditions.cpp +++ b/src/Compression/CompressionFactoryAdditions.cpp @@ -34,7 +34,7 @@ namespace ErrorCodes void CompressionCodecFactory::validateCodec( - const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate) const + const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate_codec) const { if (family_name.empty()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Compression codec name cannot be empty"); @@ -43,13 +43,13 @@ void CompressionCodecFactory::validateCodec( { auto literal = std::make_shared(static_cast(*level)); validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", makeASTFunction(Poco::toUpper(family_name), literal)), - {}, sanity_check, allow_experimental_codecs, enable_qpl_deflate); + {}, sanity_check, allow_experimental_codecs, enable_qpl_deflate_codec); } else { auto identifier = std::make_shared(Poco::toUpper(family_name)); validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", identifier), - {}, sanity_check, allow_experimental_codecs, enable_qpl_deflate); + {}, sanity_check, allow_experimental_codecs, enable_qpl_deflate_codec); } } @@ -77,7 +77,7 @@ bool innerDataTypeIsFloat(const DataTypePtr & type) } ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST( - const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate) const + const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate_codec) const { if (const auto * func = ast->as()) { @@ -159,10 +159,10 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST( " You can enable it with the 'allow_experimental_codecs' setting.", codec_family_name); - if (!enable_qpl_deflate && result_codec->isDeflateQplCompression()) + if (!enable_qpl_deflate_codec && result_codec->isDeflateQplCompression()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Codec {} is disabled by default." - " You can enable it with the 'enable_qpl_deflate' setting.", + " You can enable it with the 'enable_qpl_deflate_codec' setting.", codec_family_name); codecs_descriptions->children.emplace_back(result_codec->getCodecDesc()); diff --git a/src/Compression/ICompressionCodec.h b/src/Compression/ICompressionCodec.h index d92ad3fc718..f7e8f4e43d2 100644 --- a/src/Compression/ICompressionCodec.h +++ b/src/Compression/ICompressionCodec.h @@ -109,12 +109,12 @@ public: /// It will not be allowed to use unless the user will turn off the safety switch. virtual bool isExperimental() const { return false; } - /// If it does nothing. - virtual bool isNone() const { return false; } - /// This is a knob for Deflate QPL codec. virtual bool isDeflateQplCompression() const { return false; } + /// If it does nothing. 
+ virtual bool isNone() const { return false; } + protected: /// This is used for fuzz testing friend int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index c6a2069e6ae..4aae8f5d572 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -319,7 +319,7 @@ class IColumn; M(Bool, allow_distributed_ddl, true, "If it is set to true, then a user is allowed to executed distributed DDL queries.", 0) \ M(Bool, allow_suspicious_codecs, false, "If it is set to true, allow to specify meaningless compression codecs.", 0) \ M(Bool, allow_experimental_codecs, false, "If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).", 0) \ - M(Bool, enable_qpl_deflate, false, "If it is set to true, allow to use deflate_qpl for compression.", 0) \ + M(Bool, enable_qpl_deflate_codec, false, "If it is set to true, allow usage of the DEFLATE_QPL codec.", 0) \ M(UInt64, query_profiler_real_time_period_ns, QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS, "Period for real clock timer of query profiler (in nanoseconds). Set 0 value to turn off the real clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \ M(UInt64, query_profiler_cpu_time_period_ns, QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS, "Period for CPU clock timer of query profiler (in nanoseconds). Set 0 value to turn off the CPU clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \ M(Bool, metrics_perf_events_enabled, false, "If enabled, some of the perf events will be measured throughout queries' execution.", 0) \ diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 5c22b46b360..ddb53bbbfaa 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -571,7 +571,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( bool sanity_check_compression_codecs = !attach && !context_->getSettingsRef().allow_suspicious_codecs; bool allow_experimental_codecs = attach || context_->getSettingsRef().allow_experimental_codecs; - bool enable_qpl_deflate = attach || context_->getSettingsRef().enable_qpl_deflate; + bool enable_qpl_deflate_codec = attach || context_->getSettingsRef().enable_qpl_deflate_codec; ColumnsDescription res; auto name_type_it = column_names_and_types.begin(); @@ -632,7 +632,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( if (col_decl.default_specifier == "ALIAS") throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot specify codec for column type ALIAS"); column.codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST( - col_decl.codec, column.type, sanity_check_compression_codecs, allow_experimental_codecs, enable_qpl_deflate); + col_decl.codec, column.type, sanity_check_compression_codecs, allow_experimental_codecs, enable_qpl_deflate_codec); } if (col_decl.ttl) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 96c585e7d16..b43fef9dd54 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1775,7 +1775,7 @@ void TCPHandler::initBlockOutput(const Block & block) if (state.compression == Protocol::Compression::Enable) { - CompressionCodecFactory::instance().validateCodec(method, level, 
!query_settings.allow_suspicious_codecs, query_settings.allow_experimental_codecs, query_settings.enable_qpl_deflate); + CompressionCodecFactory::instance().validateCodec(method, level, !query_settings.allow_suspicious_codecs, query_settings.allow_experimental_codecs, query_settings.enable_qpl_deflate_codec); state.maybe_compressed_out = std::make_shared( *out, CompressionCodecFactory::instance().get(method, level)); diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index ecbddfc3e2a..73d7be8dc56 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1067,7 +1067,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const "this column name is reserved for lightweight delete feature", backQuote(column_name)); if (command.codec) - CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate); + CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate_codec); all_columns.add(ColumnDescription(column_name, command.data_type)); } @@ -1093,7 +1093,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const { if (all_columns.hasAlias(column_name)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot specify codec for column type ALIAS"); - CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate); + CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate_codec); } auto column_default = all_columns.getDefault(column_name); if (column_default) diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index ce1dbde8eae..e383890d1f7 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -733,7 +733,7 @@ void DistributedSink::writeToShard(const Cluster::ShardInfo & shard_info, const if (compression_method == "ZSTD") compression_level = settings.network_zstd_compression_level; - CompressionCodecFactory::instance().validateCodec(compression_method, compression_level, !settings.allow_suspicious_codecs, settings.allow_experimental_codecs, settings.enable_qpl_deflate); + CompressionCodecFactory::instance().validateCodec(compression_method, compression_level, !settings.allow_suspicious_codecs, settings.allow_experimental_codecs, settings.enable_qpl_deflate_codec); CompressionCodecPtr compression_codec = CompressionCodecFactory::instance().get(compression_method, compression_level); /// tmp directory is used to ensure atomicity of transactions diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index f5209cbdff6..a437465b3fe 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -285,7 +285,7 @@ TTLDescription TTLDescription::getTTLFromAST( { result.recompression_codec = 
CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST( - ttl_element->recompression_codec, {}, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate); + ttl_element->recompression_codec, {}, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate_codec); } } diff --git a/tests/ci/stress.py b/tests/ci/stress.py index b95cac9044e..e5ceb251d0f 100755 --- a/tests/ci/stress.py +++ b/tests/ci/stress.py @@ -20,7 +20,7 @@ def get_options(i, upgrade_check): '''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i) ) client_options.append("allow_experimental_database_replicated=1") - client_options.append("enable_qpl_deflate=1") + client_options.append("enable_qpl_deflate_codec=1") # If database name is not specified, new database is created for each functional test. # Run some threads with one database for all tests. diff --git a/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml b/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml index 46e9e43ca27..521b0fd663c 100644 --- a/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml +++ b/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml @@ -1,7 +1,7 @@ - 1 + 1 diff --git a/tests/integration/test_non_default_compression/test.py b/tests/integration/test_non_default_compression/test.py index e69b32daae0..e1a9c1ae540 100644 --- a/tests/integration/test_non_default_compression/test.py +++ b/tests/integration/test_non_default_compression/test.py @@ -38,16 +38,16 @@ node5 = cluster.add_instance( ) node6 = cluster.add_instance( "node6", - main_configs=["configs/allow_experimental_codecs.xml"], - user_configs=["configs/allow_suspicious_codecs.xml"], + main_configs=["configs/deflateqpl_compression_by_default.xml"], + user_configs=[ + "configs/allow_suspicious_codecs.xml", + "configs/enable_deflateqpl_codec.xml", + ], ) node7 = cluster.add_instance( "node7", - main_configs=["configs/deflateqpl_compression_by_default.xml"], - user_configs=[ - "configs/enable_deflateqpl_codec.xml", - "configs/allow_suspicious_codecs.xml", - ], + main_configs=["configs/allow_experimental_codecs.xml"], + user_configs=["configs/allow_suspicious_codecs.xml"], ) @pytest.fixture(scope="module") @@ -253,7 +253,7 @@ def test_uncompressed_cache_plus_zstd_codec(start_cluster): ) def test_preconfigured_deflateqpl_codec(start_cluster): - node7.query( + node6.query( """ CREATE TABLE compression_codec_multiple_with_key ( somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), DEFLATE_QPL), @@ -263,46 +263,46 @@ def test_preconfigured_deflateqpl_codec(start_cluster): ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2; """ ) - node7.query( + node6.query( "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, 'hello', 88.88), (toDate('2018-10-12'), 100002, 'world', 99.99), (toDate('2018-10-12'), 1111, '!', 777.777)" ) assert ( - node7.query( + node6.query( "SELECT COUNT(*) FROM compression_codec_multiple_with_key WHERE id % 2 == 0" ) == "2\n" ) assert ( - node7.query( + node6.query( "SELECT DISTINCT somecolumn FROM compression_codec_multiple_with_key ORDER BY id" ) == "777.777\n88.88\n99.99\n" ) assert ( - node7.query( + node6.query( "SELECT data FROM compression_codec_multiple_with_key WHERE 
id >= 1112 AND somedate = toDate('2018-10-12') AND somecolumn <= 100" ) == "hello\nworld\n" ) - node7.query( + node6.query( "INSERT INTO compression_codec_multiple_with_key SELECT toDate('2018-10-12'), number, toString(number), 1.0 FROM system.numbers LIMIT 10000" ) assert ( - node7.query( + node6.query( "SELECT COUNT(id) FROM compression_codec_multiple_with_key WHERE id % 10 == 0" ) == "1001\n" ) assert ( - node7.query( + node6.query( "SELECT SUM(somecolumn) FROM compression_codec_multiple_with_key" ) == str(777.777 + 88.88 + 99.99 + 1.0 * 10000) + "\n" ) assert ( - node7.query( + node6.query( "SELECT count(*) FROM compression_codec_multiple_with_key GROUP BY somedate" ) == "10003\n" diff --git a/tests/performance/codecs_float_insert.xml b/tests/performance/codecs_float_insert.xml index 25291f7f499..be0935ad4cf 100644 --- a/tests/performance/codecs_float_insert.xml +++ b/tests/performance/codecs_float_insert.xml @@ -1,7 +1,7 @@ 1 - 1 + 1 @@ -11,7 +11,6 @@ NONE LZ4 ZSTD - DEFLATE_QPL DoubleDelta Gorilla FPC diff --git a/tests/performance/codecs_float_select.xml b/tests/performance/codecs_float_select.xml index bb67987c75e..844ab4508d8 100644 --- a/tests/performance/codecs_float_select.xml +++ b/tests/performance/codecs_float_select.xml @@ -1,7 +1,7 @@ 1 - 1 + 1 @@ -11,7 +11,6 @@ NONE LZ4 ZSTD - DEFLATE_QPL DoubleDelta Gorilla FPC diff --git a/tests/performance/codecs_int_insert.xml b/tests/performance/codecs_int_insert.xml index 1db9ee8f746..d5f12810118 100644 --- a/tests/performance/codecs_int_insert.xml +++ b/tests/performance/codecs_int_insert.xml @@ -1,7 +1,7 @@ 1 - 1 + 1 @@ -11,7 +11,6 @@ NONE LZ4 ZSTD - DEFLATE_QPL Delta T64 DoubleDelta diff --git a/tests/performance/codecs_int_select.xml b/tests/performance/codecs_int_select.xml index 5dc7ab48704..06b2c2a73f3 100644 --- a/tests/performance/codecs_int_select.xml +++ b/tests/performance/codecs_int_select.xml @@ -1,7 +1,7 @@ 1 - 1 + 1 @@ -11,7 +11,6 @@ NONE LZ4 ZSTD - DEFLATE_QPL Delta T64 DoubleDelta diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference b/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference index a6afe11126c..5c77a102740 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference @@ -12,15 +12,13 @@ CODEC(NONE) 2018-01-01 4 4 2018-01-01 5 5 2018-01-01 6 6 -CODEC(DEFLATE_QPL) 2018-01-01 1 default_value 2018-01-01 2 default_value 2018-01-01 3 3 2018-01-01 4 4 2018-01-01 5 5 2018-01-01 6 6 -2018-01-01 7 7 -2018-01-01 8 8 +CODEC(DEFLATE_QPL) 2018-01-01 1 default_value 2018-01-01 2 default_value 2018-01-01 3 3 diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql index 40a8bb4c7cb..5b8b73270a2 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql @@ -25,7 +25,10 @@ INSERT INTO alter_compression_codec VALUES('2018-01-01', 5, '5'); INSERT INTO alter_compression_codec VALUES('2018-01-01', 6, '6'); SELECT * FROM alter_compression_codec ORDER BY id; -SET enable_qpl_deflate = 1; +OPTIMIZE TABLE alter_compression_codec FINAL; +SELECT * FROM alter_compression_codec ORDER BY id; + +SET enable_qpl_deflate_codec = 1; ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(DEFLATE_QPL); SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 
'alter_compression_codec' AND name = 'alter_column'; @@ -33,9 +36,6 @@ INSERT INTO alter_compression_codec VALUES('2018-01-01', 7, '7'); INSERT INTO alter_compression_codec VALUES('2018-01-01', 8, '8'); SELECT * FROM alter_compression_codec ORDER BY id; -OPTIMIZE TABLE alter_compression_codec FINAL; -SELECT * FROM alter_compression_codec ORDER BY id; - SET allow_suspicious_codecs = 1; ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(ZSTD, LZ4HC, LZ4, LZ4, DEFLATE_QPL, NONE); SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'alter_compression_codec' AND name = 'alter_column'; @@ -62,7 +62,7 @@ ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 00:00:00' CODEC(ZSTD(100)); -- { serverError 433 } -ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 00:00:00' CODEC(DEFLATE_QPL(100)); -- { serverError 378 } +ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 00:00:00' CODEC(DEFLATE_QPL(100)); -- { serverError DATA_TYPE_CANNOT_HAVE_ARGUMENTS } DROP TABLE IF EXISTS alter_bad_codec; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference index a9cbe3d32d3..8b51d65004a 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference +++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference @@ -1,6 +1,6 @@ -1 hello 2018-12-14 1.1 aaa 5 qpl11 11 -2 world 2018-12-15 2.2 bbb 6 qpl22 22 -3 ! 2018-12-16 3.3 ccc 7 qpl33 33 +1 hello 2018-12-14 2018-12-14 1.1 aaa 5 +2 world 2018-12-15 2018-12-15 2.2 bbb 6 +3 ! 
2018-12-16 2018-12-16 3.3 ccc 7 2 1 world 2018-10-05 1.1 2 hello 2018-10-01 2.2 diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql index 44a0daada27..47ec268bfec 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql @@ -1,6 +1,6 @@ SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; -SET enable_qpl_deflate = 1; +SET enable_qpl_deflate_codec = 1; DROP TABLE IF EXISTS compression_codec; @@ -8,22 +8,21 @@ CREATE TABLE compression_codec( id UInt64 CODEC(LZ4), data String CODEC(ZSTD), ddd Date CODEC(NONE), + ddd32 Date32 CODEC(DEFLATE_QPL), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), othernum Int64 CODEC(Delta), - qplstr String CODEC(DEFLATE_QPL), - qplnum UInt32 CODEC(DEFLATE_QPL), ) ENGINE = MergeTree() ORDER BY tuple(); -INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5, 'qpl11', 11); -INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6,'qpl22', 22); -INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7, 'qpl33', 33); +INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), toDate32('2018-12-14'), 1.1, 'aaa', 5); +INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), toDate32('2018-12-15'), 2.2, 'bbb', 6); +INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7); SELECT * FROM compression_codec ORDER BY id; OPTIMIZE TABLE compression_codec FINAL; -INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8, 'qpl44', 44); +INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), toDate32('2018-12-13'), 4.4, 'ddd', 8); DETACH TABLE compression_codec; ATTACH TABLE compression_codec; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql index 113f26732e7..bcd09277824 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql @@ -1,6 +1,6 @@ SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; -SET enable_qpl_deflate = 1; +SET enable_qpl_deflate_codec = 1; -- copy-paste for storage log diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference index 88d274d9cba..276747f8233 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference @@ -1,4 +1,4 @@ -1 hello 2018-12-14 1.1 aaa 5 qpl11 11 -2 world 2018-12-15 2.2 bbb 6 qpl22 22 -3 ! 2018-12-16 3.3 ccc 7 qpl33 33 +1 hello 2018-12-14 2018-12-14 1.1 aaa 5 [1,2,3] {'k1':1,'k2':2} (1,2) +2 world 2018-12-15 2018-12-15 2.2 bbb 6 [4,5,6] {'k3':3,'k4':4} (3,4) +3 ! 
2018-12-16 2018-12-16 3.3 ccc 7 [7,8,9] {'k5':5,'k6':6} (5,6) 2 diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql index fe23e49804d..64e66d47522 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql @@ -1,5 +1,5 @@ SET send_logs_level = 'fatal'; -SET enable_qpl_deflate = 1; +SET enable_qpl_deflate_codec = 1; DROP TABLE IF EXISTS compression_codec; @@ -7,22 +7,24 @@ CREATE TABLE compression_codec( id UInt64 CODEC(DEFLATE_QPL), data String CODEC(DEFLATE_QPL), ddd Date CODEC(DEFLATE_QPL), + ddd32 Date32 CODEC(DEFLATE_QPL), somenum Float64 CODEC(DEFLATE_QPL), somestr FixedString(3) CODEC(DEFLATE_QPL), othernum Int64 CODEC(DEFLATE_QPL), - qplstr String CODEC(DEFLATE_QPL), - qplnum UInt32 CODEC(DEFLATE_QPL), + somearray Array(UInt8) CODEC(DEFLATE_QPL), + somemap Map(String, UInt32) CODEC(DEFLATE_QPL), + sometuple Tuple(UInt16, UInt64) CODEC(DEFLATE_QPL), ) ENGINE = MergeTree() ORDER BY tuple(); -INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5, 'qpl11', 11); -INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6,'qpl22', 22); -INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7, 'qpl33', 33); +INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), toDate32('2018-12-14'), 1.1, 'aaa', 5, [1,2,3], map('k1',1,'k2',2), tuple(1,2)); +INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), toDate32('2018-12-15'), 2.2, 'bbb', 6, [4,5,6], map('k3',3,'k4',4), tuple(3,4)); +INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7, [7,8,9], map('k5',5,'k6',6), tuple(5,6)); SELECT * FROM compression_codec ORDER BY id; OPTIMIZE TABLE compression_codec FINAL; -INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8, 'qpl44', 44); +INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), toDate32('2018-12-13'), 4.4, 'ddd', 8, [10,11,12], map('k7',7,'k8',8), tuple(7,8)); DETACH TABLE compression_codec; ATTACH TABLE compression_codec; diff --git a/tests/queries/0_stateless/00804_test_delta_codec_compression.reference b/tests/queries/0_stateless/00804_test_delta_codec_compression.reference index 37f9d4901b3..949d37ed27a 100644 --- a/tests/queries/0_stateless/00804_test_delta_codec_compression.reference +++ b/tests/queries/0_stateless/00804_test_delta_codec_compression.reference @@ -4,5 +4,3 @@ 1 32 1 -17 -1 diff --git a/tests/queries/0_stateless/00804_test_delta_codec_compression.sql b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql index f9805246662..25988f6474b 100644 --- a/tests/queries/0_stateless/00804_test_delta_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql @@ -115,41 +115,3 @@ USING(key); DROP TABLE IF EXISTS delta_codec_string; DROP TABLE IF EXISTS default_codec_string; - -SET enable_qpl_deflate = 1; -DROP TABLE IF EXISTS delta_codec_string_qpl; -DROP TABLE IF EXISTS default_codec_string_qpl; - -CREATE TABLE delta_codec_string_qpl -( - id Float64 Codec(Delta, DEFLATE_QPL) -) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false; - -CREATE TABLE default_codec_string_qpl -( - id Float64 Codec(DEFLATE_QPL) -) ENGINE MergeTree() ORDER BY tuple() SETTINGS 
min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false; - -INSERT INTO delta_codec_string_qpl SELECT concat(toString(number), toString(number % 100)) FROM numbers(1547510400, 500000); -INSERT INTO default_codec_string_qpl SELECT * from delta_codec_string_qpl; - -OPTIMIZE TABLE delta_codec_string_qpl FINAL; -OPTIMIZE TABLE default_codec_string_qpl FINAL; - -SELECT - floor(big_size / small_size) as ratio -FROM - (SELECT 1 AS key, sum(bytes_on_disk) AS small_size FROM system.parts WHERE database = currentDatabase() and table = 'delta_codec_string_qpl' and active) -INNER JOIN - (SELECT 1 AS key, sum(bytes_on_disk) as big_size FROM system.parts WHERE database = currentDatabase() and table = 'default_codec_string_qpl' and active) USING(key); - -SELECT - small_hash == big_hash -FROM - (SELECT 1 AS key, sum(cityHash64(*)) AS small_hash FROM delta_codec_string_qpl) -INNER JOIN - (SELECT 1 AS key, sum(cityHash64(*)) AS big_hash FROM default_codec_string_qpl) -USING(key); - -DROP TABLE IF EXISTS delta_codec_string_qpl; -DROP TABLE IF EXISTS default_codec_string_qpl; From aae281eb7df6ce8e00d872d3ef0d0558781a5f1a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 1 Jun 2023 15:49:52 +0200 Subject: [PATCH 286/722] Update codecs_float_insert.xml --- tests/performance/codecs_float_insert.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/performance/codecs_float_insert.xml b/tests/performance/codecs_float_insert.xml index be0935ad4cf..64325d30189 100644 --- a/tests/performance/codecs_float_insert.xml +++ b/tests/performance/codecs_float_insert.xml @@ -1,7 +1,6 @@ 1 - 1 From dc93b6324ee505228b96791db629b7437f6db931 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 1 Jun 2023 15:50:28 +0200 Subject: [PATCH 287/722] Update codecs_float_select.xml --- tests/performance/codecs_float_select.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/performance/codecs_float_select.xml b/tests/performance/codecs_float_select.xml index 844ab4508d8..325c140d9a0 100644 --- a/tests/performance/codecs_float_select.xml +++ b/tests/performance/codecs_float_select.xml @@ -1,7 +1,6 @@ 1 - 1 From 7043db669e4e445764d99cd749cfef99d3f437cf Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 1 Jun 2023 15:50:40 +0200 Subject: [PATCH 288/722] Update codecs_int_insert.xml --- tests/performance/codecs_int_insert.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/performance/codecs_int_insert.xml b/tests/performance/codecs_int_insert.xml index d5f12810118..618e20160f8 100644 --- a/tests/performance/codecs_int_insert.xml +++ b/tests/performance/codecs_int_insert.xml @@ -1,7 +1,6 @@ 1 - 1 From 4d7364af97893c4457a86a064628ff478d900c05 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 1 Jun 2023 15:50:49 +0200 Subject: [PATCH 289/722] Update codecs_int_select.xml --- tests/performance/codecs_int_select.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/performance/codecs_int_select.xml b/tests/performance/codecs_int_select.xml index 06b2c2a73f3..62c1ee16e7b 100644 --- a/tests/performance/codecs_int_select.xml +++ b/tests/performance/codecs_int_select.xml @@ -1,7 +1,6 @@ 1 - 1 From 1f928f2d3d0eea55ff1743cea386162fd87fed92 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 1 Jun 2023 15:53:48 +0200 Subject: [PATCH 290/722] Update StorageSystemBuildOptions.cpp.in --- src/Storages/System/StorageSystemBuildOptions.cpp.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemBuildOptions.cpp.in 
b/src/Storages/System/StorageSystemBuildOptions.cpp.in index c2a188e7750..c2d35c96ce5 100644 --- a/src/Storages/System/StorageSystemBuildOptions.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.cpp.in @@ -64,11 +64,11 @@ const char * auto_config_build[] "USE_ARROW", "@USE_ARROW@", "USE_ORC", "@USE_ORC@", "USE_MSGPACK", "@USE_MSGPACK@", + "USE_QPL", "@ENABLE_QPL@", "GIT_HASH", "@GIT_HASH@", "GIT_BRANCH", R"IRjaNsZIL9Yh7FQ4(@GIT_BRANCH@)IRjaNsZIL9Yh7FQ4", "GIT_DATE", "@GIT_DATE@", "GIT_COMMIT_SUBJECT", R"Gi17KJMlbGCjErEN(@GIT_COMMIT_SUBJECT@)Gi17KJMlbGCjErEN", - "USE_QPL", "@ENABLE_QPL@", nullptr, nullptr }; From 1aa158909e434438733504d2dbcd9ea9d113e41b Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 9 Jun 2023 12:38:38 +0000 Subject: [PATCH 291/722] enable_qpl_deflate_codec --> enable_deflate_qpl_codec --- docs/en/sql-reference/statements/create/table.md | 2 +- src/Client/Connection.cpp | 2 +- src/Compression/CompressionFactory.h | 4 ++-- src/Compression/CompressionFactoryAdditions.cpp | 12 ++++++------ src/Core/Settings.h | 2 +- src/Interpreters/InterpreterCreateQuery.cpp | 4 ++-- src/Server/TCPHandler.cpp | 2 +- src/Storages/AlterCommands.cpp | 4 ++-- src/Storages/Distributed/DistributedSink.cpp | 2 +- src/Storages/TTLDescription.cpp | 2 +- tests/ci/stress.py | 2 +- .../configs/enable_deflateqpl_codec.xml | 2 +- .../00804_test_alter_compression_codecs.sql | 2 +- .../00804_test_custom_compression_codecs.sql | 2 +- ...04_test_custom_compression_codes_log_storages.sql | 2 +- .../00804_test_deflate_qpl_codec_compression.sql | 2 +- 16 files changed, 24 insertions(+), 24 deletions(-) diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index d0e17410791..496ecdbda7b 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -380,7 +380,7 @@ High compression levels are useful for asymmetric scenarios, like compress once, `DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply: -- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_qpl_deflate_codec=1`. +- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_deflate_qpl_codec = 1`. - DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details. - DEFLATE_QPL works best if the system has a Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details. - DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled. 
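For context while reading the setting rename in this commit: a minimal usage sketch of the DEFLATE_QPL codec under the renamed setting, modelled on the `00804_test_deflate_qpl_codec_compression.sql` stateless test later in this series. The table and column names below are illustrative only and do not appear in the patches.

```sql
-- Illustrative sketch; table and column names are hypothetical.
-- Uses the post-rename setting spelling introduced by this commit.
SET enable_deflate_qpl_codec = 1;

CREATE TABLE qpl_example
(
    id   UInt64 CODEC(DEFLATE_QPL),
    data String CODEC(DEFLATE_QPL)
)
ENGINE = MergeTree()
ORDER BY id;

INSERT INTO qpl_example VALUES (1, 'hello');
SELECT * FROM qpl_example ORDER BY id;
```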
diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index ac8e6654e84..636532ade4b 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -588,7 +588,7 @@ void Connection::sendQuery( if (method == "ZSTD") level = settings->network_zstd_compression_level; - CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_qpl_deflate_codec); + CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_deflate_qpl_codec); compression_codec = CompressionCodecFactory::instance().get(method, level); } else diff --git a/src/Compression/CompressionFactory.h b/src/Compression/CompressionFactory.h index e020e51bb09..4f2627587a3 100644 --- a/src/Compression/CompressionFactory.h +++ b/src/Compression/CompressionFactory.h @@ -40,10 +40,10 @@ public: CompressionCodecPtr getDefaultCodec() const; /// Validate codecs AST specified by user and parses codecs description (substitute default parameters) - ASTPtr validateCodecAndGetPreprocessedAST(const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate_codec) const; + ASTPtr validateCodecAndGetPreprocessedAST(const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const; /// Validate codecs AST specified by user - void validateCodec(const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate_codec) const; + void validateCodec(const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const; /// Get codec by AST and possible column_type. 
Some codecs can use /// information about type to improve inner settings, but every codec should diff --git a/src/Compression/CompressionFactoryAdditions.cpp b/src/Compression/CompressionFactoryAdditions.cpp index b4a2d96cf39..46f7e2653c2 100644 --- a/src/Compression/CompressionFactoryAdditions.cpp +++ b/src/Compression/CompressionFactoryAdditions.cpp @@ -34,7 +34,7 @@ namespace ErrorCodes void CompressionCodecFactory::validateCodec( - const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate_codec) const + const String & family_name, std::optional level, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const { if (family_name.empty()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Compression codec name cannot be empty"); @@ -43,13 +43,13 @@ void CompressionCodecFactory::validateCodec( { auto literal = std::make_shared(static_cast(*level)); validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", makeASTFunction(Poco::toUpper(family_name), literal)), - {}, sanity_check, allow_experimental_codecs, enable_qpl_deflate_codec); + {}, sanity_check, allow_experimental_codecs, enable_deflate_qpl_codec); } else { auto identifier = std::make_shared(Poco::toUpper(family_name)); validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", identifier), - {}, sanity_check, allow_experimental_codecs, enable_qpl_deflate_codec); + {}, sanity_check, allow_experimental_codecs, enable_deflate_qpl_codec); } } @@ -77,7 +77,7 @@ bool innerDataTypeIsFloat(const DataTypePtr & type) } ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST( - const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_qpl_deflate_codec) const + const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const { if (const auto * func = ast->as()) { @@ -159,10 +159,10 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST( " You can enable it with the 'allow_experimental_codecs' setting.", codec_family_name); - if (!enable_qpl_deflate_codec && result_codec->isDeflateQplCompression()) + if (!enable_deflate_qpl_codec && result_codec->isDeflateQplCompression()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Codec {} is disabled by default." 
- " You can enable it with the 'enable_qpl_deflate_codec' setting.", + " You can enable it with the 'enable_deflate_qpl_codec' setting.", codec_family_name); codecs_descriptions->children.emplace_back(result_codec->getCodecDesc()); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 4aae8f5d572..e0034174597 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -319,7 +319,7 @@ class IColumn; M(Bool, allow_distributed_ddl, true, "If it is set to true, then a user is allowed to executed distributed DDL queries.", 0) \ M(Bool, allow_suspicious_codecs, false, "If it is set to true, allow to specify meaningless compression codecs.", 0) \ M(Bool, allow_experimental_codecs, false, "If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).", 0) \ - M(Bool, enable_qpl_deflate_codec, false, "If it is set to true, allow usage of the DEFLATE_QPL codec.", 0) \ + M(Bool, enable_deflate_qpl_codec, false, "Enable/disable the DEFLATE_QPL codec.", 0) \ M(UInt64, query_profiler_real_time_period_ns, QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS, "Period for real clock timer of query profiler (in nanoseconds). Set 0 value to turn off the real clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \ M(UInt64, query_profiler_cpu_time_period_ns, QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS, "Period for CPU clock timer of query profiler (in nanoseconds). Set 0 value to turn off the CPU clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \ M(Bool, metrics_perf_events_enabled, false, "If enabled, some of the perf events will be measured throughout queries' execution.", 0) \ diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index ddb53bbbfaa..d0bb3dd389f 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -571,7 +571,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( bool sanity_check_compression_codecs = !attach && !context_->getSettingsRef().allow_suspicious_codecs; bool allow_experimental_codecs = attach || context_->getSettingsRef().allow_experimental_codecs; - bool enable_qpl_deflate_codec = attach || context_->getSettingsRef().enable_qpl_deflate_codec; + bool enable_deflate_qpl_codec = attach || context_->getSettingsRef().enable_deflate_qpl_codec; ColumnsDescription res; auto name_type_it = column_names_and_types.begin(); @@ -632,7 +632,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( if (col_decl.default_specifier == "ALIAS") throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot specify codec for column type ALIAS"); column.codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST( - col_decl.codec, column.type, sanity_check_compression_codecs, allow_experimental_codecs, enable_qpl_deflate_codec); + col_decl.codec, column.type, sanity_check_compression_codecs, allow_experimental_codecs, enable_deflate_qpl_codec); } if (col_decl.ttl) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index b43fef9dd54..50e9d50e2f6 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1775,7 +1775,7 @@ void TCPHandler::initBlockOutput(const Block & block) if (state.compression == Protocol::Compression::Enable) { - 
CompressionCodecFactory::instance().validateCodec(method, level, !query_settings.allow_suspicious_codecs, query_settings.allow_experimental_codecs, query_settings.enable_qpl_deflate_codec); + CompressionCodecFactory::instance().validateCodec(method, level, !query_settings.allow_suspicious_codecs, query_settings.allow_experimental_codecs, query_settings.enable_deflate_qpl_codec); state.maybe_compressed_out = std::make_shared( *out, CompressionCodecFactory::instance().get(method, level)); diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 73d7be8dc56..a9247f9b898 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1067,7 +1067,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const "this column name is reserved for lightweight delete feature", backQuote(column_name)); if (command.codec) - CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate_codec); + CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_deflate_qpl_codec); all_columns.add(ColumnDescription(column_name, command.data_type)); } @@ -1093,7 +1093,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const { if (all_columns.hasAlias(column_name)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot specify codec for column type ALIAS"); - CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate_codec); + CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_deflate_qpl_codec); } auto column_default = all_columns.getDefault(column_name); if (column_default) diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index e383890d1f7..1e1c911920e 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -733,7 +733,7 @@ void DistributedSink::writeToShard(const Cluster::ShardInfo & shard_info, const if (compression_method == "ZSTD") compression_level = settings.network_zstd_compression_level; - CompressionCodecFactory::instance().validateCodec(compression_method, compression_level, !settings.allow_suspicious_codecs, settings.allow_experimental_codecs, settings.enable_qpl_deflate_codec); + CompressionCodecFactory::instance().validateCodec(compression_method, compression_level, !settings.allow_suspicious_codecs, settings.allow_experimental_codecs, settings.enale_deflate_qpl_codec); CompressionCodecPtr compression_codec = CompressionCodecFactory::instance().get(compression_method, compression_level); /// tmp directory is used to ensure atomicity of transactions diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index a437465b3fe..f601fed06ac 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ 
-285,7 +285,7 @@ TTLDescription TTLDescription::getTTLFromAST( { result.recompression_codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST( - ttl_element->recompression_codec, {}, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_qpl_deflate_codec); + ttl_element->recompression_codec, {}, !context->getSettingsRef().allow_suspicious_codecs, context->getSettingsRef().allow_experimental_codecs, context->getSettingsRef().enable_deflate_qpl_codec); } } diff --git a/tests/ci/stress.py b/tests/ci/stress.py index e5ceb251d0f..6d17384c63f 100755 --- a/tests/ci/stress.py +++ b/tests/ci/stress.py @@ -20,7 +20,7 @@ def get_options(i, upgrade_check): '''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i) ) client_options.append("allow_experimental_database_replicated=1") - client_options.append("enable_qpl_deflate_codec=1") + client_options.append("enable_deflate_qpl_codec=1") # If database name is not specified, new database is created for each functional test. # Run some threads with one database for all tests. diff --git a/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml b/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml index 521b0fd663c..24e101e0e3f 100644 --- a/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml +++ b/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml @@ -1,7 +1,7 @@ - 1 + 1 diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql index 5b8b73270a2..fd9855e82d3 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql @@ -28,7 +28,7 @@ SELECT * FROM alter_compression_codec ORDER BY id; OPTIMIZE TABLE alter_compression_codec FINAL; SELECT * FROM alter_compression_codec ORDER BY id; -SET enable_qpl_deflate_codec = 1; +SET enable_deflate_qpl_codec = 1; ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(DEFLATE_QPL); SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'alter_compression_codec' AND name = 'alter_column'; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql index 47ec268bfec..89e77f758a7 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql @@ -1,6 +1,6 @@ SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; -SET enable_qpl_deflate_codec = 1; +SET enable_deflate_qpl_codec = 1; DROP TABLE IF EXISTS compression_codec; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql index bcd09277824..a629df2666d 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql @@ -1,6 +1,6 @@ SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; -SET enable_qpl_deflate_codec = 1; +SET enable_deflate_qpl_codec = 1; -- copy-paste for storage log diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql 
b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql index 64e66d47522..5a56fc0d576 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql @@ -1,5 +1,5 @@ SET send_logs_level = 'fatal'; -SET enable_qpl_deflate_codec = 1; +SET enable_deflate_qpl_codec = 1; DROP TABLE IF EXISTS compression_codec; From 3c1b02a37bfa349fbf3af86277c9ad3ae0eadc1c Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Fri, 9 Jun 2023 15:43:36 +0300 Subject: [PATCH 292/722] Rectify the existing example of the year omission --- .../sql-reference/functions/type-conversion-functions.md | 8 ++++---- .../sql-reference/functions/type-conversion-functions.md | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index e62cf89a6b2..f1e2785285c 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -1417,15 +1417,15 @@ Result: Query: ``` sql -SELECT parseDateTimeBestEffort('10 20:19'); +SELECT toYear(now()) as year, parseDateTimeBestEffort('10 20:19'); ``` Result: ```response -┌─parseDateTimeBestEffort('10 20:19')─┐ -│ 2000-01-10 20:19:00 │ -└─────────────────────────────────────┘ +┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐ +│ 2023 │ 2023-01-10 20:19:00 │ +└──────┴─────────────────────────────────────┘ ``` **See Also** diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 6de55757b64..b763ee2b3ac 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -1096,15 +1096,15 @@ AS parseDateTimeBestEffort; Запрос: ``` sql -SELECT parseDateTimeBestEffort('10 20:19'); +SELECT toYear(now()) as year, parseDateTimeBestEffort('10 20:19'); ``` Результат: ``` text -┌─parseDateTimeBestEffort('10 20:19')─┐ -│ 2000-01-10 20:19:00 │ -└─────────────────────────────────────┘ +┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐ +│ 2023 │ 2023-01-10 20:19:00 │ +└──────┴─────────────────────────────────────┘ ``` **Смотрите также** From 47b0c2a862c282b642a81ce0f0287c5059d717dc Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 9 Jun 2023 13:01:36 +0000 Subject: [PATCH 293/722] Make better --- src/Formats/CapnProtoSerializer.cpp | 117 +++++++++++++----- .../0_stateless/02030_capnp_format.reference | 11 ++ .../queries/0_stateless/02030_capnp_format.sh | 13 +- ...case_insensitive_names_matching.reference} | 0 ..._capnp_case_insensitive_names_matching.sh} | 0 5 files changed, 110 insertions(+), 31 deletions(-) rename tests/queries/0_stateless/{02735_capnp_case_insensitive_names_matcing.reference => 02735_capnp_case_insensitive_names_matching.reference} (100%) rename tests/queries/0_stateless/{02735_capnp_case_insensitive_names_matcing.sh => 02735_capnp_case_insensitive_names_matching.sh} (100%) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index e99db23bb5e..f51f8c4b933 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -110,7 +110,7 @@ namespace /// Write row as struct field. 
virtual void writeRow( const ColumnPtr & column, - std::unique_ptr & builder, + std::unique_ptr & builder, /// Maybe unused for simple types, needed to initialize structs and lists. capnp::DynamicStruct::Builder & parent_struct_builder, UInt32 slot_offset, size_t row_num) = 0; @@ -118,7 +118,7 @@ namespace /// Write row as list element. virtual void writeRow( const ColumnPtr & column, - std::unique_ptr & builder, + std::unique_ptr & builder, /// Maybe unused for simple types, needed to initialize structs and lists. capnp::DynamicList::Builder & parent_list_builder, UInt32 array_index, size_t row_num) = 0; @@ -262,54 +262,93 @@ namespace if (!capnp_type.isEnum()) throwCannotConvert(data_type, column_name, capnp_type); - bool to_lower = enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE; const auto * enum_type = assert_cast *>(data_type.get()); const auto & enum_values = dynamic_cast &>(*enum_type); enum_schema = capnp_type.asEnum(); auto enumerants = enum_schema.getEnumerants(); - constexpr auto max_value = std::is_same_v ? INT8_MAX : INT16_MAX; if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) { - /// In CapnProto Enum fields are numbered sequentially starting from zero. - if (enumerants.size() > max_value) - throw Exception( - ErrorCodes::CAPN_PROTO_BAD_CAST, - "Enum from CapnProto schema contains values that are out of range for Clickhouse enum type {}", - data_type->getName()); - - auto values = enum_values.getSetOfAllValues(); - std::unordered_set capn_enum_values; + auto ch_enum_values = enum_values.getSetOfAllValues(); + std::unordered_set capn_enum_values; for (auto enumerant : enumerants) - capn_enum_values.insert(EnumType(enumerant.getOrdinal())); - if (values != capn_enum_values) - throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "The set of values in Enum from CapnProto schema is different from the set of values in ClickHouse Enum"); + capn_enum_values.insert(enumerant.getOrdinal()); + + /// Check if ClickHouse values is a superset of CapnProto values. + ch_enum_is_superset = true; + /// In CapnProto Enum fields are numbered sequentially starting from zero. + /// Check if max CapnProto value exceeds max ClickHouse value. + constexpr auto max_value = std::is_same_v ? INT8_MAX : INT16_MAX; + if (enumerants.size() > max_value) + { + ch_enum_is_superset = false; + } + else + { + for (auto capnp_value : capn_enum_values) + { + if (!ch_enum_values.contains(static_cast(capnp_value))) + { + ch_enum_is_superset = false; + break; + } + } + } + + /// Check if CapnProto values is a superset of ClickHouse values. + capnp_enum_is_superset = true; + for (auto ch_value : ch_enum_values) + { + /// Capnp doesn't support negative enum values. + if (ch_value < 0 || !capn_enum_values.contains(static_cast(ch_value))) + { + capnp_enum_is_superset = false; + break; + } + } } else { - auto all_values = enum_values.getValues(); - if (all_values.size() != enumerants.size()) - throw Exception( - ErrorCodes::CAPN_PROTO_BAD_CAST, - "The set of names in Enum from CapnProto schema is different from the set of names in ClickHouse Enum"); + bool to_lower = enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE; + auto all_values = enum_values.getValues(); std::unordered_map ch_name_to_value; for (auto & [name, value] : all_values) ch_name_to_value[to_lower ? 
boost::algorithm::to_lower_copy(name) : name] = value; + std::unordered_map capnp_name_to_value; for (auto enumerant : enumerants) { String capnp_name = enumerant.getProto().getName(); - UInt16 capnp_value = enumerant.getOrdinal(); - auto it = ch_name_to_value.find(to_lower ? boost::algorithm::to_lower_copy(capnp_name) : capnp_name); - if (it == ch_name_to_value.end()) - throw Exception( - ErrorCodes::CAPN_PROTO_BAD_CAST, - "The set of names in Enum from CapnProto schema is different from the set of names in ClickHouse Enum"); + capnp_name_to_value[to_lower ? boost::algorithm::to_lower_copy(capnp_name) : capnp_name] = enumerant.getOrdinal(); + } - ch_to_capnp_values[it->second] = capnp_value; + /// Check if ClickHouse names is a superset of CapnProto names. + ch_enum_is_superset = true; + for (auto & [capnp_name, capnp_value] : capnp_name_to_value) + { + auto it = ch_name_to_value.find(capnp_name); + if (it == ch_name_to_value.end()) + { + ch_enum_is_superset = false; + break; + } capnp_to_ch_values[capnp_value] = it->second; } + + /// Check if CapnProto names is a superset of ClickHouse names. + capnp_enum_is_superset = true; + + for (auto & [ch_name, ch_value] : ch_name_to_value) + { + auto it = capnp_name_to_value.find(ch_name); + if (it == capnp_name_to_value.end()) + { + capnp_enum_is_superset = false; + break; + } + ch_to_capnp_values[ch_value] = it->second; + } } } @@ -336,23 +375,43 @@ namespace private: UInt16 getValue(const ColumnPtr & column, size_t row_num) { + if (!capnp_enum_is_superset) + throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Cannot convert ClickHouse enum to CapnProto enum: CapnProto enum values/names is not a superset of ClickHouse enum values/names"); + EnumType enum_value = assert_cast &>(*column).getElement(row_num); if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) return static_cast(enum_value); - return ch_to_capnp_values[enum_value]; + auto it = ch_to_capnp_values.find(enum_value); + if (it == ch_to_capnp_values.end()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected value {} in ClickHouse enum", enum_value); + + return it->second; } void insertValue(IColumn & column, UInt16 capnp_enum_value) { + if (!ch_enum_is_superset) + throw Exception(ErrorCodes::CAPN_PROTO_BAD_CAST, "Cannot convert CapnProto enum to ClickHouse enum: ClickHouse enum values/names is not a superset of CapnProto enum values/names"); + if (enum_comparing_mode == FormatSettings::CapnProtoEnumComparingMode::BY_VALUES) + { assert_cast &>(column).insertValue(static_cast(capnp_enum_value)); + } else + { + auto it = capnp_to_ch_values.find(capnp_enum_value); + if (it == capnp_to_ch_values.end()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected value {} in CapnProto enum", capnp_enum_value); + assert_cast &>(column).insertValue(capnp_to_ch_values[capnp_enum_value]); + } } DataTypePtr data_type; capnp::EnumSchema enum_schema; const FormatSettings::CapnProtoEnumComparingMode enum_comparing_mode; + bool ch_enum_is_superset; + bool capnp_enum_is_superset; std::unordered_map ch_to_capnp_values; std::unordered_map capnp_to_ch_values; }; diff --git a/tests/queries/0_stateless/02030_capnp_format.reference b/tests/queries/0_stateless/02030_capnp_format.reference index 2b2307bfc6a..e08b1eb1271 100644 --- a/tests/queries/0_stateless/02030_capnp_format.reference +++ b/tests/queries/0_stateless/02030_capnp_format.reference @@ -12,6 +12,9 @@ \N [NULL,NULL,42] (NULL) 1 [1,NULL,2] (1) \N [NULL,NULL,42] (NULL) +OK +OK +OK one two tHrEe @@ -21,6 +24,14 @@ threE 
first second third +first +second +third +OK +one +two +tHrEe +OK OK OK OK diff --git a/tests/queries/0_stateless/02030_capnp_format.sh b/tests/queries/0_stateless/02030_capnp_format.sh index 625104fb590..b4484ca3766 100755 --- a/tests/queries/0_stateless/02030_capnp_format.sh +++ b/tests/queries/0_stateless/02030_capnp_format.sh @@ -71,16 +71,25 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_nullable" $CLICKHOUSE_CLIENT --query="SELECT CAST(number, 'Enum(\'one\' = 0, \'two\' = 1, \'tHrEe\' = 2)') AS value FROM numbers(3) FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_enum:Message'" > $CAPN_PROTO_FILE +$CLICKHOUSE_CLIENT --query="SELECT CAST(number % 2, 'Enum(\'one\' = 0, \'two\' = 1, \'tHrEe\' = 2, \'four\' = 4)') AS value FROM numbers(3) FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_names'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; +$CLICKHOUSE_CLIENT --query="SELECT CAST(number % 2, 'Enum(\'one\' = 0, \'two\' = 1, \'tHrEe\' = 2, \'four\' = 4)') AS value FROM numbers(3) FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_names_case_insensitive'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; +$CLICKHOUSE_CLIENT --query="SELECT CAST(number % 2, 'Enum(\'one\' = 0, \'two\' = 1, \'tHrEe\' = 2, \'four\' = 4)') AS value FROM numbers(3) FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_values'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'one\' = 1, \'two\' = 2, \'tHrEe\' = 3)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_names'" $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'oNe\' = 1, \'tWo\' = 2, \'threE\' = 3)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_names_case_insensitive'" $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'first\' = 0, \'second\' = 1, \'third\' = 2)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_values'" - +$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'first\' = 0, \'second\' = 1, \'third\' = 2)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_values'" $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'one\' = 0, \'two\' = 1, \'three\' = 2)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_names'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; -$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'one\' = 0, \'two\' = 1, \'tHrEe\' = 2, \'four\' = 3)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_names'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; +$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'one\' = 0, \'two\' = 1, \'tHrEe\' = 2, \'four\' = 3)') SETTINGS 
format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_names'" $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'one\' = 1, \'two\' = 2, \'tHrEe\' = 3)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_values'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'first\' = 1, \'two\' = 2, \'three\' = 3)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_names_case_insensitive'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; +$CLICKHOUSE_CLIENT --query="SELECT CAST(number % 2, 'Enum(\'one\' = 0, \'two\' = 1)') AS value FROM numbers(3) FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_enum:Message'" > $CAPN_PROTO_FILE +$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'first\' = 0, \'two\' = 1)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_values'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; +$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'first\' = 0, \'two\' = 1)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_names'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; +$CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value Enum(\'first\' = 0, \'two\' = 1)') SETTINGS format_schema='$SERVER_SCHEMADIR/02030_capnp_enum:Message', format_capn_proto_enum_comparising_mode='by_names_case_insensitive'" 2>&1 | grep -F -q "CAPN_PROTO_BAD_CAST" && echo 'OK' || echo 'FAIL'; + + $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS capnp_low_cardinality" $CLICKHOUSE_CLIENT --query="CREATE TABLE capnp_low_cardinality (lc1 LowCardinality(String), lc2 LowCardinality(Nullable(String)), lc3 Array(LowCardinality(Nullable(String)))) ENGINE=Memory" $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_low_cardinality VALUES ('one', 'two', ['one', Null, 'two', Null]), ('two', Null, [Null])" diff --git a/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.reference b/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matching.reference similarity index 100% rename from tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.reference rename to tests/queries/0_stateless/02735_capnp_case_insensitive_names_matching.reference diff --git a/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.sh b/tests/queries/0_stateless/02735_capnp_case_insensitive_names_matching.sh similarity index 100% rename from tests/queries/0_stateless/02735_capnp_case_insensitive_names_matcing.sh rename to tests/queries/0_stateless/02735_capnp_case_insensitive_names_matching.sh From 9b70836b6c2b7f90ed04c41f90ad9ba3473dbe59 Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Fri, 9 Jun 2023 16:07:07 +0300 Subject: [PATCH 294/722] Add a syslog format example to the documentation --- .../functions/type-conversion-functions.md | 22 +++++++++++++++++++ .../functions/type-conversion-functions.md | 22 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md 
b/docs/en/sql-reference/functions/type-conversion-functions.md index f1e2785285c..9c079bd9515 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -1428,6 +1428,28 @@ Result: └──────┴─────────────────────────────────────┘ ``` +Query: + +``` sql +WITH + now() AS ts_now, + formatDateTime(ts_around, '%b %e %T') AS syslog_arg +SELECT + ts_now, + syslog_arg, + parseDateTimeBestEffort(syslog_arg) +FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around); +``` + +Result: + +```response +┌──────────────ts_now─┬─syslog_arg──────┬─parseDateTimeBestEffort(syslog_arg)─┐ +│ 2023-06-09 16:04:30 │ Jun 9 16:04:00 │ 2023-06-09 16:04:00 │ +│ 2023-06-09 16:04:30 │ Jun 9 16:05:00 │ 2022-06-09 16:05:00 │ +└─────────────────────┴─────────────────┴─────────────────────────────────────┘ +``` + **See Also** - [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123) diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index b763ee2b3ac..6e93a5e0acf 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -1107,6 +1107,28 @@ SELECT toYear(now()) as year, parseDateTimeBestEffort('10 20:19'); └──────┴─────────────────────────────────────┘ ``` +Запрос: + +``` sql +WITH + now() AS ts_now, + formatDateTime(ts_around, '%b %e %T') AS syslog_arg +SELECT + ts_now, + syslog_arg, + parseDateTimeBestEffort(syslog_arg) +FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around); +``` + +Результат: + +``` text +┌──────────────ts_now─┬─syslog_arg──────┬─parseDateTimeBestEffort(syslog_arg)─┐ +│ 2023-06-09 16:04:30 │ Jun 9 16:04:00 │ 2023-06-09 16:04:00 │ +│ 2023-06-09 16:04:30 │ Jun 9 16:05:00 │ 2022-06-09 16:05:00 │ +└─────────────────────┴─────────────────┴─────────────────────────────────────┘ +``` + **Смотрите также** - [Информация о формате ISO 8601 от @xkcd](https://xkcd.com/1179/) From 5af06f8c08a70298e217d3a8909a5ed8d412f474 Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Fri, 9 Jun 2023 16:11:52 +0300 Subject: [PATCH 295/722] Amend the test infinitesimally --- .../0_stateless/02783_parseDateTimeBestEffort_syslog.reference | 2 +- .../0_stateless/02783_parseDateTimeBestEffort_syslog.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference index 63e7e367941..3ec93143e0e 100644 --- a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference +++ b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference @@ -1,5 +1,5 @@ parseDateTimeBestEffort - dt_ref res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc res_us res_us_sam res_us_auc res_us_null res_us_null_sam res_us_null_auc res_us_zero res_us_zero_sam res_us_zero_auc res64 res64_sam res64_auc res64_null res64_null_sam res64_null_auc res64_zero res64_zero_sam res64_zero_auc res64_us res64_us_sam res64_us_auc res64_us_null res64_us_null_sam res64_us_null_auc res64_us_zero res64_us_zero_sam res64_us_zero_auc + around_June_7 res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc res_us res_us_sam res_us_auc res_us_null res_us_null_sam res_us_null_auc res_us_zero res_us_zero_sam res_us_zero_auc res64 res64_sam res64_auc res64_null 
res64_null_sam res64_null_auc res64_zero res64_zero_sam res64_zero_auc res64_us res64_us_sam res64_us_auc res64_us_null res64_us_null_sam res64_us_null_auc res64_us_zero res64_us_zero_sam res64_us_zero_auc Jun 6 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 Jun 8 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 2022-06-08 00:00:00.000 diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql index 59211d3e6a0..52975cb5bbf 100644 --- a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql +++ b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql @@ -7,7 +7,7 @@ WITH dateDiff('second', toDateTime(ref_point), ts_now) AS impedimenta, formatDateTime(ts_around, '%b %e %T') AS dt_curr SELECT - formatDateTime(ts_around - impedimenta, '%b %e %H:%i:%s') AS dt_ref, + formatDateTime(ts_around - impedimenta, '%b %e %H:%i:%s') AS around_June_7, parseDateTimeBestEffort(dt_curr) - impedimenta AS res, parseDateTimeBestEffort(dt_curr, 'US/Samoa') - impedimenta AS res_sam, parseDateTimeBestEffort(dt_curr, 'Pacific/Auckland') - impedimenta AS res_auc, From f8791a0ea393120dbfba8eec8627edbc8d00deb8 Mon Sep 17 00:00:00 2001 From: Jordi Villar Date: Fri, 9 Jun 2023 15:36:48 +0200 Subject: [PATCH 296/722] SummingMergeTree support for DateTime64 --- src/DataTypes/DataTypeDateTime64.h | 2 ++ .../02785_summing_merge_tree_datetime64.reference | 1 + .../02785_summing_merge_tree_datetime64.sql | 12 ++++++++++++ 3 files changed, 15 insertions(+) create mode 100644 tests/queries/0_stateless/02785_summing_merge_tree_datetime64.reference create mode 100644 tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql diff --git a/src/DataTypes/DataTypeDateTime64.h b/src/DataTypes/DataTypeDateTime64.h index aaa99485040..64cedd798d1 100644 --- a/src/DataTypes/DataTypeDateTime64.h +++ b/src/DataTypes/DataTypeDateTime64.h @@ -37,6 +37,8 @@ public: bool canBeUsedAsVersion() const override { return true; } + bool isSummable() const override { return 
false; } + protected: SerializationPtr doGetDefaultSerialization() const override; }; diff --git a/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.reference b/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.reference new file mode 100644 index 00000000000..d395c4d6a0f --- /dev/null +++ b/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.reference @@ -0,0 +1 @@ +1 2023-05-01 23:55:55.100 15 diff --git a/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql b/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql new file mode 100644 index 00000000000..1ed930ebbc7 --- /dev/null +++ b/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS summing_merge_tree_datetime64; + +CREATE TABLE summing_merge_tree_datetime64 ( `pk` UInt64, `timestamp` DateTime64(3), `value` UInt64 ) +ENGINE = SummingMergeTree() ORDER BY pk; + +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 1 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 2 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 3 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 4 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 5 value; + +SELECT * FROM summing_merge_tree_datetime64 FINAL; From 13798f8b07f2550ec73323ce8596a276155aa367 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 9 Jun 2023 19:52:49 +0300 Subject: [PATCH 297/722] Update MergeTreeData.cpp --- src/Storages/MergeTree/MergeTreeData.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 9cca471fddb..23351423d49 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1252,6 +1252,10 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( mark_broken(); return res; } + catch (const Poco::TimeoutException &) + { + throw; + } catch (...) { mark_broken(); From a8b579a85618f57f0fd6316d16d28677dfeb0d8b Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Fri, 9 Jun 2023 19:28:06 +0200 Subject: [PATCH 298/722] Rename azure_blob_storage to azureBlobStorage --- .../table-functions/azure_blob_storage.md | 2 +- .../TableFunctionAzureBlobStorage.cpp | 2 +- .../TableFunctionAzureBlobStorage.h | 2 +- .../test_storage_azure_blob_storage/test.py | 22 +++++++++---------- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/en/sql-reference/table-functions/azure_blob_storage.md b/docs/en/sql-reference/table-functions/azure_blob_storage.md index f86307b3b85..6091aab5f9d 100644 --- a/docs/en/sql-reference/table-functions/azure_blob_storage.md +++ b/docs/en/sql-reference/table-functions/azure_blob_storage.md @@ -5,7 +5,7 @@ sidebar_label: azure_blob_storage keywords: [azure blob storage] --- -# azure\_blob\_storage Table Function +# azureBlobStorage Table Function Provides a table-like interface to select/insert files in [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs). This table function is similar to the [s3 function](../../sql-reference/table-functions/s3.md). 
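For context on the `azure_blob_storage` → `azureBlobStorage` rename in this commit: a hedged sketch of the renamed table function, assembled from the integration test changes below. The Azurite endpoint and account key are the development defaults used by that test (not real secrets), and the blob path is illustrative.

```sql
-- Sketch only; endpoint/credentials are the Azurite development defaults
-- from the integration test, and 'example.csv' is a hypothetical blob path.
INSERT INTO TABLE FUNCTION azureBlobStorage(
    'http://azurite1:10000/devstoreaccount1', 'cont', 'example.csv',
    'devstoreaccount1',
    'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
    'CSV', 'auto', 'key UInt64, data String')
VALUES (1, 'a');

SELECT key, data
FROM azureBlobStorage(
    'http://azurite1:10000/devstoreaccount1', 'cont', 'example.csv',
    'devstoreaccount1',
    'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
    'CSV', 'auto', 'key UInt64, data String');
```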
diff --git a/src/TableFunctions/TableFunctionAzureBlobStorage.cpp b/src/TableFunctions/TableFunctionAzureBlobStorage.cpp index 38d9362894a..d2a96173491 100644 --- a/src/TableFunctions/TableFunctionAzureBlobStorage.cpp +++ b/src/TableFunctions/TableFunctionAzureBlobStorage.cpp @@ -246,7 +246,7 @@ void registerTableFunctionAzureBlobStorage(TableFunctionFactory & factory) factory.registerFunction( {.documentation = {.description=R"(The table function can be used to read the data stored on Azure Blob Storage.)", - .examples{{"azure_blob_storage", "SELECT * FROM azure_blob_storage(connection, container, blob_path, format, structure)", ""}}}, + .examples{{"azureBlobStorage", "SELECT * FROM azureBlobStorage(connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure])", ""}}}, .allow_readonly = false}); } diff --git a/src/TableFunctions/TableFunctionAzureBlobStorage.h b/src/TableFunctions/TableFunctionAzureBlobStorage.h index 0bb872de3f3..0ac3f9771c7 100644 --- a/src/TableFunctions/TableFunctionAzureBlobStorage.h +++ b/src/TableFunctions/TableFunctionAzureBlobStorage.h @@ -18,7 +18,7 @@ class Context; class TableFunctionAzureBlobStorage : public ITableFunction { public: - static constexpr auto name = "azure_blob_storage"; + static constexpr auto name = "azureBlobStorage"; static constexpr auto signature = "- connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure]\n"; static size_t getMaxNumberOfArguments() { return 8; } diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index f0934d3aa80..f9d337b6d86 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -466,7 +466,7 @@ def test_simple_write_account_string_table_function(cluster): node = cluster.instances["node"] azure_query( node, - "INSERT INTO TABLE FUNCTION azure_blob_storage('http://azurite1:10000/devstoreaccount1', 'cont', 'test_simple_write_tf.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', 'key UInt64, data String') VALUES (1, 'a')", + "INSERT INTO TABLE FUNCTION azureBlobStorage('http://azurite1:10000/devstoreaccount1', 'cont', 'test_simple_write_tf.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', 'key UInt64, data String') VALUES (1, 'a')", ) print(get_azure_file_content("test_simple_write_tf.csv")) assert get_azure_file_content("test_simple_write_tf.csv") == '1,"a"\n' @@ -476,7 +476,7 @@ def test_simple_write_connection_string_table_function(cluster): node = cluster.instances["node"] azure_query( node, - "INSERT INTO TABLE FUNCTION azure_blob_storage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1;', 'cont', 'test_simple_write_connection_tf.csv', 'CSV', 'auto', 'key UInt64, data String') VALUES (1, 'a')", + "INSERT INTO TABLE FUNCTION azureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1;', 'cont', 'test_simple_write_connection_tf.csv', 'CSV', 
'auto', 'key UInt64, data String') VALUES (1, 'a')", ) print(get_azure_file_content("test_simple_write_connection_tf.csv")) assert get_azure_file_content("test_simple_write_connection_tf.csv") == '1,"a"\n' @@ -486,7 +486,7 @@ def test_simple_write_named_collection_1_table_function(cluster): node = cluster.instances["node"] azure_query( node, - "INSERT INTO TABLE FUNCTION azure_blob_storage(azure_conf1) VALUES (1, 'a')", + "INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf1) VALUES (1, 'a')", ) print(get_azure_file_content("test_simple_write_named.csv")) assert get_azure_file_content("test_simple_write_named.csv") == '1,"a"\n' @@ -507,7 +507,7 @@ def test_simple_write_named_collection_2_table_function(cluster): azure_query( node, - "INSERT INTO TABLE FUNCTION azure_blob_storage(azure_conf2, container='cont', blob_path='test_simple_write_named_2_tf.csv', format='CSV', structure='key UInt64, data String') VALUES (1, 'a')", + "INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, container='cont', blob_path='test_simple_write_named_2_tf.csv', format='CSV', structure='key UInt64, data String') VALUES (1, 'a')", ) print(get_azure_file_content("test_simple_write_named_2_tf.csv")) assert get_azure_file_content("test_simple_write_named_2_tf.csv") == '1,"a"\n' @@ -529,9 +529,9 @@ def test_put_get_with_globs_tf(cluster): azure_query( node, - f"INSERT INTO TABLE FUNCTION azure_blob_storage(azure_conf2, container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}", + f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}", ) - query = f"select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from azure_blob_storage(azure_conf2, container='cont', blob_path='{unique_prefix}/*_{{a,b,c,d}}/?.csv', format='CSV', structure='{table_format}')" + query = f"select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from azureBlobStorage(azure_conf2, container='cont', blob_path='{unique_prefix}/*_{{a,b,c,d}}/?.csv', format='CSV', structure='{table_format}')" assert azure_query(node, query).splitlines() == [ "450\t450\t900\t0.csv\t{bucket}/{max_path}".format( bucket="cont", max_path=max_path @@ -543,10 +543,10 @@ def test_schema_inference_no_globs_tf(cluster): node = cluster.instances["node"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 String, column3 UInt32" - query = f"insert into table function azure_blob_storage(azure_conf2, container='cont', blob_path='test_schema_inference_no_globs_tf.csv', format='CSVWithNames', structure='{table_format}') SELECT number, toString(number), number * number FROM numbers(1000)" + query = f"insert into table function azureBlobStorage(azure_conf2, container='cont', blob_path='test_schema_inference_no_globs_tf.csv', format='CSVWithNames', structure='{table_format}') SELECT number, toString(number), number * number FROM numbers(1000)" azure_query(node, query) - query = "select sum(column1), sum(length(column2)), sum(column3), min(_file), max(_path) from azure_blob_storage(azure_conf2, container='cont', blob_path='test_schema_inference_no_globs_tf.csv')" + query = "select sum(column1), sum(length(column2)), sum(column3), min(_file), max(_path) from azureBlobStorage(azure_conf2, container='cont', blob_path='test_schema_inference_no_globs_tf.csv')" assert azure_query(node, query).splitlines() == [ 
"499500\t2890\t332833500\ttest_schema_inference_no_globs_tf.csv\tcont/test_schema_inference_no_globs_tf.csv" ] @@ -566,10 +566,10 @@ def test_schema_inference_from_globs_tf(cluster): max_path = max(path, max_path) values = f"({i},{j},{i + j})" - query = f"insert into table function azure_blob_storage(azure_conf2, container='cont', blob_path='{path}', format='CSVWithNames', structure='{table_format}') VALUES {values}" + query = f"insert into table function azureBlobStorage(azure_conf2, container='cont', blob_path='{path}', format='CSVWithNames', structure='{table_format}') VALUES {values}" azure_query(node, query) - query = f"select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from azure_blob_storage(azure_conf2, container='cont', blob_path='{unique_prefix}/*_{{a,b,c,d}}/?.csv')" + query = f"select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from azureBlobStorage(azure_conf2, container='cont', blob_path='{unique_prefix}/*_{{a,b,c,d}}/?.csv')" assert azure_query(node, query).splitlines() == [ "450\t450\t900\t0.csv\t{bucket}/{max_path}".format( bucket="cont", max_path=max_path @@ -586,7 +586,7 @@ def test_partition_by_tf(cluster): azure_query( node, - f"INSERT INTO TABLE FUNCTION azure_blob_storage('http://azurite1:10000/devstoreaccount1', 'cont', '{filename}', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', '{table_format}') PARTITION BY {partition_by} VALUES {values}", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('http://azurite1:10000/devstoreaccount1', 'cont', '{filename}', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', '{table_format}') PARTITION BY {partition_by} VALUES {values}", ) assert "1,2,3\n" == get_azure_file_content("test_tf_3.csv") From e662fa01d0a1455aa2a625fd342ef8e7e998f34f Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Fri, 9 Jun 2023 20:21:57 +0200 Subject: [PATCH 299/722] Added azureBlobStorage to aspell-dict.txt --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index d6cef1883f4..a01b67b26b1 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1081,6 +1081,7 @@ avgweighted avro avx aws +azureBlobStorage backend backoff backticks From f0d4ce4770a00a8c0cd9857a485fc8bbc124a95b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Jun 2023 22:05:21 +0300 Subject: [PATCH 300/722] Update 02785_summing_merge_tree_datetime64.sql --- .../queries/0_stateless/02785_summing_merge_tree_datetime64.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql b/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql index 1ed930ebbc7..db00f189330 100644 --- a/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql +++ b/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql @@ -10,3 +10,4 @@ INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 5 value; SELECT * FROM summing_merge_tree_datetime64 FINAL; +DROP TABLE summing_merge_tree_datetime64; From c538506f2e3ba0716dcc2f13f63bb4edc1f6f33e Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: 
Fri, 9 Jun 2023 20:50:17 +0000 Subject: [PATCH 301/722] More fixes --- docs/en/sql-reference/statements/create/table.md | 4 ++++ src/Compression/CompressionCodecDeflateQpl.h | 3 +-- src/Compression/CompressionFactoryAdditions.cpp | 2 +- src/Compression/ICompressionCodec.h | 4 ++-- src/Storages/Distributed/DistributedSink.cpp | 2 +- 5 files changed, 9 insertions(+), 6 deletions(-) diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index 496ecdbda7b..1a72f89fb1f 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -385,6 +385,10 @@ High compression levels are useful for asymmetric scenarios, like compress once, - DEFLATE_QPL works best if the system has a Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details. - DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled. +:::note +DEFLATE_QPL is not available in ClickHouse Cloud. +::: + ### Specialized Codecs These codecs are designed to make compression more effective by using specific features of data. Some of these codecs do not compress data themself. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation. diff --git a/src/Compression/CompressionCodecDeflateQpl.h b/src/Compression/CompressionCodecDeflateQpl.h index 13aa8733b54..8d73568707e 100644 --- a/src/Compression/CompressionCodecDeflateQpl.h +++ b/src/Compression/CompressionCodecDeflateQpl.h @@ -98,8 +98,7 @@ public: protected: bool isCompression() const override { return true; } bool isGenericCompression() const override { return true; } - bool isExperimental() const override { return false; } - bool isDeflateQplCompression() const override { return true; } + bool isDeflateQpl() const override { return true; } UInt32 doCompressData(const char * source, UInt32 source_size, char * dest) const override; void doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const override; diff --git a/src/Compression/CompressionFactoryAdditions.cpp b/src/Compression/CompressionFactoryAdditions.cpp index 46f7e2653c2..98e9e7480da 100644 --- a/src/Compression/CompressionFactoryAdditions.cpp +++ b/src/Compression/CompressionFactoryAdditions.cpp @@ -159,7 +159,7 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST( " You can enable it with the 'allow_experimental_codecs' setting.", codec_family_name); - if (!enable_deflate_qpl_codec && result_codec->isDeflateQplCompression()) + if (!enable_deflate_qpl_codec && result_codec->isDeflateQpl()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Codec {} is disabled by default." " You can enable it with the 'enable_deflate_qpl_codec' setting.", diff --git a/src/Compression/ICompressionCodec.h b/src/Compression/ICompressionCodec.h index f7e8f4e43d2..6630838fa64 100644 --- a/src/Compression/ICompressionCodec.h +++ b/src/Compression/ICompressionCodec.h @@ -109,8 +109,8 @@ public: /// It will not be allowed to use unless the user will turn off the safety switch. virtual bool isExperimental() const { return false; } - /// This is a knob for Deflate QPL codec. 
- virtual bool isDeflateQplCompression() const { return false; } + /// Is this the DEFLATE_QPL codec? + virtual bool isDeflateQpl() const { return false; } /// If it does nothing. virtual bool isNone() const { return false; } diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index 1e1c911920e..875764f7633 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -733,7 +733,7 @@ void DistributedSink::writeToShard(const Cluster::ShardInfo & shard_info, const if (compression_method == "ZSTD") compression_level = settings.network_zstd_compression_level; - CompressionCodecFactory::instance().validateCodec(compression_method, compression_level, !settings.allow_suspicious_codecs, settings.allow_experimental_codecs, settings.enale_deflate_qpl_codec); + CompressionCodecFactory::instance().validateCodec(compression_method, compression_level, !settings.allow_suspicious_codecs, settings.allow_experimental_codecs, settings.enable_deflate_qpl_codec); CompressionCodecPtr compression_codec = CompressionCodecFactory::instance().get(compression_method, compression_level); /// tmp directory is used to ensure atomicity of transactions From c71edb6c798163ad50a077a19a3bf74eb57ba212 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Fri, 9 Jun 2023 17:29:42 +0000 Subject: [PATCH 302/722] Fix style --- src/Processors/Sources/MongoDBSource.cpp | 10 +++++----- src/Processors/Sources/MongoDBSource.h | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Processors/Sources/MongoDBSource.cpp b/src/Processors/Sources/MongoDBSource.cpp index 74dfa13158c..cd4db416a29 100644 --- a/src/Processors/Sources/MongoDBSource.cpp +++ b/src/Processors/Sources/MongoDBSource.cpp @@ -371,8 +371,8 @@ bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_) { Poco::MongoDB::Database db("config"); Poco::MongoDB::Document::Ptr doc = db.queryServerHello(connection_); - auto _wireVersion = doc->getInteger("maxWireVersion"); - return _wireVersion < Poco::MongoDB::Database::WireVersion::VER_36; + auto wire_version = doc->getInteger("maxWireVersion"); + return wire_version < Poco::MongoDB::Database::WireVersion::VER_36; } @@ -413,20 +413,20 @@ Poco::MongoDB::Document::Vector MongoDBCursor::nextDocuments(Poco::MongoDB::Conn if (is_wire_protocol_old) { auto response = old_cursor->next(connection); - cursorID_ = response.cursorID(); + cursor_id = response.cursorID(); return std::move(response.documents()); } else { auto response = new_cursor->next(connection); - cursorID_ = new_cursor->cursorID(); + cursor_id = new_cursor->cursorID(); return std::move(response.documents()); } } Int64 MongoDBCursor::cursorID() const { - return cursorID_; + return cursor_id; } diff --git a/src/Processors/Sources/MongoDBSource.h b/src/Processors/Sources/MongoDBSource.h index 2bc5481e20b..0e95d42c028 100644 --- a/src/Processors/Sources/MongoDBSource.h +++ b/src/Processors/Sources/MongoDBSource.h @@ -53,7 +53,7 @@ private: const bool is_wire_protocol_old; std::unique_ptr old_cursor; std::unique_ptr new_cursor; - Int64 cursorID_ = 0; + Int64 cursor_id = 0; }; /// Converts MongoDB Cursor to a stream of Blocks From 96d7b2efc9c0d4f40b919c5036fcfbe7445d10a1 Mon Sep 17 00:00:00 2001 From: tpanetti Date: Fri, 9 Jun 2023 13:50:30 -0700 Subject: [PATCH 303/722] Disable fasttest for MySQL Compatibility Type Conversion and refactor style for DataTypeNumberBase --- src/DataTypes/DataTypeNumberBase.cpp | 22 ------------------- 
.../02775_show_columns_mysql_compatibility.sh | 1 + 2 files changed, 1 insertion(+), 22 deletions(-) diff --git a/src/DataTypes/DataTypeNumberBase.cpp b/src/DataTypes/DataTypeNumberBase.cpp index e4c0fb96483..4cefc4945c6 100644 --- a/src/DataTypes/DataTypeNumberBase.cpp +++ b/src/DataTypes/DataTypeNumberBase.cpp @@ -15,50 +15,28 @@ template String DataTypeNumberBase::getSQLCompatibleName() const { if constexpr (std::is_same_v) - { return "TINYINT"; - } else if constexpr (std::is_same_v) - { return "SMALLINT"; - } else if constexpr (std::is_same_v) - { return "INTEGER"; - } else if constexpr (std::is_same_v) - { return "BIGINT"; - } else if constexpr (std::is_same_v) - { return "TINYINT UNSIGNED"; - } else if constexpr (std::is_same_v) - { return "SMALLINT UNSIGNED"; - } else if constexpr (std::is_same_v) - { return "INTEGER UNSIGNED"; - } else if constexpr (std::is_same_v) - { return "BIGINT UNSIGNED"; - } else if constexpr (std::is_same_v) - { return "FLOAT"; - } else if constexpr (std::is_same_v) - { return "DOUBLE"; - } /// Unsupported types are converted to TEXT else - { return "TEXT"; - } } template diff --git a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh index 938102cb5fc..6a546c47a38 100755 --- a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-fasttest CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 20b66689f3c912757540c4e91589b7ffd6fe3593 Mon Sep 17 00:00:00 2001 From: alesapin Date: Sat, 10 Jun 2023 13:34:51 +0200 Subject: [PATCH 304/722] Fix test --- docker/test/upgrade/run.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh index bd0c59a12cd..8353d03fc69 100644 --- a/docker/test/upgrade/run.sh +++ b/docker/test/upgrade/run.sh @@ -59,12 +59,6 @@ install_packages previous_release_package_folder # available for dump via clickhouse-local configure -# local_blob_storage disk type does not exist in older versions -sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \ - | sed "s|local_blob_storage|local|" \ - > /etc/clickhouse-server/config.d/storage_conf.xml.tmp -sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml - # it contains some new settings, but we can safely remove it rm /etc/clickhouse-server/config.d/merge_tree.xml From ff96c4c0d8898c15e1aea876267c65ec8b0c69f0 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sat, 10 Jun 2023 12:09:47 +0000 Subject: [PATCH 305/722] Fix black --- tests/integration/test_non_default_compression/test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_non_default_compression/test.py b/tests/integration/test_non_default_compression/test.py index e1a9c1ae540..18e2eb43813 100644 --- a/tests/integration/test_non_default_compression/test.py +++ b/tests/integration/test_non_default_compression/test.py @@ -50,6 +50,7 @@ node7 = cluster.add_instance( user_configs=["configs/allow_suspicious_codecs.xml"], ) + @pytest.fixture(scope="module") def start_cluster(): try: @@ -252,6 +253,7 @@ def test_uncompressed_cache_plus_zstd_codec(start_cluster): == "10000\n" ) + def test_preconfigured_deflateqpl_codec(start_cluster): node6.query( """ @@ -268,7 +270,7 @@ def 
test_preconfigured_deflateqpl_codec(start_cluster): ) assert ( node6.query( - "SELECT COUNT(*) FROM compression_codec_multiple_with_key WHERE id % 2 == 0" + "SELECT COUNT(*) FROM compression_codec_multiple_with_key WHERE id % 2 == 0" ) == "2\n" ) @@ -296,9 +298,7 @@ def test_preconfigured_deflateqpl_codec(start_cluster): == "1001\n" ) assert ( - node6.query( - "SELECT SUM(somecolumn) FROM compression_codec_multiple_with_key" - ) + node6.query("SELECT SUM(somecolumn) FROM compression_codec_multiple_with_key") == str(777.777 + 88.88 + 99.99 + 1.0 * 10000) + "\n" ) assert ( From 1f3e923528c25ddab273243fa80a6d0838c568c7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jun 2023 15:23:41 +0300 Subject: [PATCH 306/722] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb08a8ec100..7142ad26e15 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,7 +31,7 @@ * Introduces new keyword `INTO OUTFILE 'file.txt' APPEND`. [#48880](https://github.com/ClickHouse/ClickHouse/pull/48880) ([alekar](https://github.com/alekar)). * Added `system.zookeeper_connection` table that shows information about Keeper connections. [#45245](https://github.com/ClickHouse/ClickHouse/pull/45245) ([mateng915](https://github.com/mateng0915)). * Add new function `generateRandomStructure` that generates random table structure. It can be used in combination with table function `generateRandom`. [#47409](https://github.com/ClickHouse/ClickHouse/pull/47409) ([Kruglov Pavel](https://github.com/Avogar)). -* Allow the use of `CASE` without an `ELSE` branch and extended `transform` to deal with more types. Also fix some issues that made transform() return incorrect results when decimal types were mixed with other numeric types. [#48300](https://github.com/ClickHouse/ClickHouse/pull/48300) ([Salvatore Mesoraca](https://github.com/aiven-sal)). +* Allow the use of `CASE` without an `ELSE` branch and extended `transform` to deal with more types. Also fix some issues that made transform() return incorrect results when decimal types were mixed with other numeric types. [#48300](https://github.com/ClickHouse/ClickHouse/pull/48300) ([Salvatore Mesoraca](https://github.com/aiven-sal)). This closes #2655. This closes #9596. This closes #38666. * Added [server-side encryption using KMS keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) with S3 tables, and the `header` setting with S3 disks. Closes [#48723](https://github.com/ClickHouse/ClickHouse/issues/48723). [#48724](https://github.com/ClickHouse/ClickHouse/pull/48724) ([Johann Gan](https://github.com/johanngan)). * Add MemoryTracker for the background tasks (merges and mutation). Introduces `merges_mutations_memory_usage_soft_limit` and `merges_mutations_memory_usage_to_ram_ratio` settings that represent the soft memory limit for merges and mutations. If this limit is reached ClickHouse won't schedule new merge or mutation tasks. Also `MergesMutationsMemoryTracking` metric is introduced to allow observing current memory usage of background tasks. Resubmit [#46089](https://github.com/ClickHouse/ClickHouse/issues/46089). Closes [#48774](https://github.com/ClickHouse/ClickHouse/issues/48774). [#48787](https://github.com/ClickHouse/ClickHouse/pull/48787) ([Dmitry Novik](https://github.com/novikd)). * Function `dotProduct` work for array. [#49050](https://github.com/ClickHouse/ClickHouse/pull/49050) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)). 
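For the changelog entry touched above about `CASE` without an `ELSE` branch and the extended `transform`, a small sketch of the resulting behavior (illustrative only; the statements mirror the tests added in a later patch of this series, 02786_transform_float.sql and 02787_transform_null.sql): a matching branch returns its value, a non-matching value with no `ELSE` is expected to fall through to NULL, and `transform` keeps honoring an explicit default.

    -- CASE without ELSE: matching value is returned.
    SELECT CASE 1 WHEN 1 THEN 'a' END;                        -- 'a'
    -- No match and no ELSE: expected to yield NULL.
    SELECT CASE 2 WHEN 1 THEN 'a' END;                        -- NULL
    -- transform() with an explicit default, as before:
    SELECT transform(0, [0, 1], ['ZERO', 'ONE'], 'DEFAULT');  -- 'ZERO'
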
From 818e081162e77045468d8349ad5c438b261b02d5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jun 2023 14:30:36 +0200 Subject: [PATCH 307/722] Fill gaps on the dashboard --- programs/server/dashboard.html | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index 97b35ec97c4..951b7db3aa3 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -449,7 +449,7 @@ let queries = [ FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "CPU Usage (cores)", @@ -457,7 +457,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Queries Running", @@ -465,7 +465,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Merges Running", @@ -473,7 +473,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Selected Bytes/second", @@ -481,7 +481,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "IO Wait", @@ -489,7 +489,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "CPU Wait", @@ -497,7 +497,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "OS CPU Usage (Userspace)", @@ -506,7 +506,7 @@ FROM system.asynchronous_metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'OSUserTimeNormalized' GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "OS CPU Usage (Kernel)", @@ -515,7 +515,7 @@ FROM system.asynchronous_metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'OSSystemTimeNormalized' GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Read From Disk", @@ -523,7 +523,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Read From Filesystem", @@ -531,7 +531,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Memory (tracked)", @@ -539,7 +539,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER 
BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Load Average (15 minutes)", @@ -548,7 +548,7 @@ FROM system.asynchronous_metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'LoadAverage15' GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Selected Rows/second", @@ -556,7 +556,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Inserted Rows/second", @@ -564,7 +564,7 @@ ORDER BY t` FROM system.metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Total MergeTree Parts", @@ -573,7 +573,7 @@ FROM system.asynchronous_metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'TotalPartsOfMergeTreeTables' GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` }, { "title": "Max Parts For Partition", @@ -582,7 +582,7 @@ FROM system.asynchronous_metric_log WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'MaxPartCountForPartition' GROUP BY t -ORDER BY t` +ORDER BY t WITH FILL STEP {rounding:UInt32}` } ]; From dc4a2fb07d29a4fe5818a42aa6c17be9bd59ccb7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jun 2023 14:47:24 +0200 Subject: [PATCH 308/722] Add tests for function "transform" --- .../02786_transform_float.reference | 10 +++++ .../0_stateless/02786_transform_float.sql | 3 ++ .../02787_transform_null.reference | 9 +++++ .../0_stateless/02787_transform_null.sql | 40 +++++++++++++++++++ 4 files changed, 62 insertions(+) create mode 100644 tests/queries/0_stateless/02786_transform_float.reference create mode 100644 tests/queries/0_stateless/02786_transform_float.sql create mode 100644 tests/queries/0_stateless/02787_transform_null.reference create mode 100644 tests/queries/0_stateless/02787_transform_null.sql diff --git a/tests/queries/0_stateless/02786_transform_float.reference b/tests/queries/0_stateless/02786_transform_float.reference new file mode 100644 index 00000000000..3fbb2492f2e --- /dev/null +++ b/tests/queries/0_stateless/02786_transform_float.reference @@ -0,0 +1,10 @@ +1 +1 +1 +--- +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/02786_transform_float.sql b/tests/queries/0_stateless/02786_transform_float.sql new file mode 100644 index 00000000000..4229425b084 --- /dev/null +++ b/tests/queries/0_stateless/02786_transform_float.sql @@ -0,0 +1,3 @@ +select transform(number, [1], [toFloat32(1)], toFloat32(1)) from numbers(3); +SELECT '---'; +select transform(number, [3], [toFloat32(1)], toFloat32(1)) from numbers(6); diff --git a/tests/queries/0_stateless/02787_transform_null.reference b/tests/queries/0_stateless/02787_transform_null.reference new file mode 100644 index 00000000000..a650dbbd173 --- /dev/null +++ b/tests/queries/0_stateless/02787_transform_null.reference @@ -0,0 +1,9 @@ +ZERO +ZERO +ONE +ONE +a +a + \N 0 \N 0 \N +1 1 1 \N 1 1 +a \N 3 3 3 3 diff --git a/tests/queries/0_stateless/02787_transform_null.sql b/tests/queries/0_stateless/02787_transform_null.sql new file mode 100644 index 00000000000..64a771f0f4b --- /dev/null +++ b/tests/queries/0_stateless/02787_transform_null.sql @@ -0,0 +1,40 @@ 
+SELECT transform(0, [0, 1], ['ZERO', 'ONE'], 'DEFAULT') AS result; +SELECT transform(0, [0, 1], ['ZERO', 'ONE'], NULL) AS result; + +SELECT CASE 1 + WHEN 0 THEN 'ZERO' + WHEN 1 THEN 'ONE' + ELSE 'NONE' +END AS result; + +SELECT CASE 1 + WHEN 0 THEN NULL + WHEN 1 THEN 'ONE' + ELSE 'NONE' +END AS result; + +select + case 1 + when 1 then 'a' + else 'b' + end value; + +select + case 1 + when 1 then 'a' + end value; + +SELECT + d, + toInt16OrNull(d), + caseWithExpression(d, 'a', 3, toInt16OrZero(d)) AS case_zero, + caseWithExpression(d, 'a', 3, toInt16OrNull(d)) AS case_null, + if(d = 'a', 3, toInt16OrZero(d)) AS if_zero, + if(d = 'a', 3, toInt16OrNull(d)) AS if_null +FROM +( + SELECT arrayJoin(['', '1', 'a']) AS d +) +ORDER BY + case_zero ASC, + d ASC; From a614aa3b03b365c769f955f22b788181a8cf36ab Mon Sep 17 00:00:00 2001 From: alesapin Date: Sat, 10 Jun 2023 16:06:37 +0200 Subject: [PATCH 309/722] More leftovers --- docker/test/upgrade/run.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh index 8353d03fc69..6f7d3999f1d 100644 --- a/docker/test/upgrade/run.sh +++ b/docker/test/upgrade/run.sh @@ -86,11 +86,6 @@ export USE_S3_STORAGE_FOR_MERGE_TREE=1 export ZOOKEEPER_FAULT_INJECTION=0 configure -sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \ - | sed "s|local_blob_storage|local|" \ - > /etc/clickhouse-server/config.d/storage_conf.xml.tmp -sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml - # it contains some new settings, but we can safely remove it rm /etc/clickhouse-server/config.d/merge_tree.xml From cb8c20722b8976fe0bc402498667b02c2585cc02 Mon Sep 17 00:00:00 2001 From: tpanetti Date: Sat, 10 Jun 2023 08:35:51 -0700 Subject: [PATCH 310/722] Rename setting and description for MySQL compatible types This renames the setting for MySQL compatible types from output_format_mysql_types to use_mysql_types_in_show_columns --- src/Core/Settings.h | 2 +- src/Storages/System/StorageSystemColumns.cpp | 4 ++-- .../0_stateless/02775_show_columns_mysql_compatibility.sh | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index b72fc037fbb..d47015ebb39 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -196,7 +196,7 @@ class IColumn; M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental inverted index.", 0) \ \ M(UInt64, mysql_max_rows_to_insert, 65536, "The maximum number of rows in MySQL batch insertion of the MySQL storage engine", 0) \ - M(Bool, output_format_mysql_types, false, "Use MySQL converted types when connected via MySQL compatibility", 0) \ + M(Bool, use_mysql_types_in_show_columns, false, "Use MySQL converted types when connected via MySQL compatibility for show columns query", 0) \ \ M(UInt64, optimize_min_equality_disjunction_chain_length, 3, "The minimum length of the expression `expr = x1 OR ... 
expr = xN` for optimization ", 0) \ \ diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index 684c35709a4..e4ca6a15138 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -75,7 +75,7 @@ public: , columns_mask(std::move(columns_mask_)), max_block_size(max_block_size_) , databases(std::move(databases_)), tables(std::move(tables_)), storages(std::move(storages_)) , client_info_interface(context->getClientInfo().interface) - , use_mysql_types(context->getSettingsRef().output_format_mysql_types) + , use_mysql_types(context->getSettingsRef().use_mysql_types_in_show_columns) , total_tables(tables->size()), access(context->getAccess()) , query_id(context->getCurrentQueryId()), lock_acquire_timeout(context->getSettingsRef().lock_acquire_timeout) { @@ -133,7 +133,7 @@ protected: auto get_type_name = [this](const IDataType& type) -> std::string { - // Check if the output_format_mysql_types setting is enabled and client is connected via MySQL protocol + // Check if the use_mysql_types_in_show_columns setting is enabled and client is connected via MySQL protocol if (use_mysql_types && client_info_interface == DB::ClientInfo::Interface::MYSQL) { return type.getSQLCompatibleName(); diff --git a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh index 6a546c47a38..51c9da2a842 100755 --- a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh @@ -127,7 +127,7 @@ TEMP_FILE=$(mktemp) cat < $TEMP_FILE SHOW COLUMNS FROM tab; -SET output_format_mysql_types=1; +SET use_mysql_types_in_show_columns=1; SHOW COLUMNS FROM tab; SHOW EXTENDED COLUMNS FROM tab; SHOW FULL COLUMNS FROM tab; From 7cb6f3c72279878db1ad65c5ea9670287cc42d16 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jun 2023 17:43:42 +0200 Subject: [PATCH 311/722] Rename Cpu to CPU and Cfs to CFS --- src/Common/AsynchronousMetrics.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index 6821647a180..531f0b04aa1 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -962,8 +962,8 @@ void AsynchronousMetrics::update(TimePoint update_time) period = std::stoull(field2); } - new_values["CGroupCpuCfsPeriod"] = { period, "The CFS period of CPU cgroup."}; - new_values["CGroupCpuCfsQuota"] = { quota, "The CFS quota of CPU cgroup. If stated zero, the quota is max."}; + new_values["CGroupCPUCFSPeriod"] = { period, "The CFS period of CPU cgroup."}; + new_values["CGroupCPUCFSQuota"] = { quota, "The CFS quota of CPU cgroup. If stated zero, the quota is max."}; } catch (...) { @@ -982,8 +982,8 @@ void AsynchronousMetrics::update(TimePoint update_time) tryReadText(quota, *cgroupcpu_cfs_quota); tryReadText(period, *cgroupcpu_cfs_period); - new_values["CGroupCpuCfsPeriod"] = { period, "The CFS period of CPU cgroup."}; - new_values["CGroupCpuCfsQuota"] = { quota, "The CFS quota of CPU cgroup. If stated zero, the quota is max."}; + new_values["CGroupCPUCFSPeriod"] = { period, "The CFS period of CPU cgroup."}; + new_values["CGroupCPUCFSQuota"] = { quota, "The CFS quota of CPU cgroup. If stated zero, the quota is max."}; } catch (...) 
{ From e785c6796d8c1694e0f820e15772fa59bf31cbf0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jun 2023 17:54:46 +0200 Subject: [PATCH 312/722] Replace CGroups CPU metrics to one --- src/Common/AsynchronousMetrics.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index 531f0b04aa1..c610034a6b0 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -939,7 +939,8 @@ void AsynchronousMetrics::update(TimePoint update_time) if (cgroupcpu_max) { - try { + try + { cgroupcpu_max->rewind(); uint64_t quota = 0; @@ -962,8 +963,7 @@ void AsynchronousMetrics::update(TimePoint update_time) period = std::stoull(field2); } - new_values["CGroupCPUCFSPeriod"] = { period, "The CFS period of CPU cgroup."}; - new_values["CGroupCPUCFSQuota"] = { quota, "The CFS quota of CPU cgroup. If stated zero, the quota is max."}; + new_values["CGroupMaxCPU"] = { static_cast(quota) / period, "The maximum number of CPU cores according to CGroups."}; } catch (...) { @@ -972,7 +972,8 @@ void AsynchronousMetrics::update(TimePoint update_time) } else if (cgroupcpu_cfs_quota && cgroupcpu_cfs_period) { - try { + try + { cgroupcpu_cfs_quota->rewind(); cgroupcpu_cfs_period->rewind(); @@ -982,8 +983,7 @@ void AsynchronousMetrics::update(TimePoint update_time) tryReadText(quota, *cgroupcpu_cfs_quota); tryReadText(period, *cgroupcpu_cfs_period); - new_values["CGroupCPUCFSPeriod"] = { period, "The CFS period of CPU cgroup."}; - new_values["CGroupCPUCFSQuota"] = { quota, "The CFS quota of CPU cgroup. If stated zero, the quota is max."}; + new_values["CGroupMaxCPU"] = { static_cast(quota) / period, "The maximum number of CPU cores according to CGroups."}; } catch (...) { From a91b84e60c1904bdd6fe7bdfd82869ad07e6bb94 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jun 2023 18:00:47 +0200 Subject: [PATCH 313/722] Slightly better --- src/Common/AsynchronousMetrics.cpp | 116 +++++++++++++++-------------- 1 file changed, 62 insertions(+), 54 deletions(-) diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index c610034a6b0..de9800aa896 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -727,6 +727,68 @@ void AsynchronousMetrics::update(TimePoint update_time) } } + Float64 max_cpu_cgroups = 0; + if (cgroupcpu_max) + { + try + { + cgroupcpu_max->rewind(); + + uint64_t quota = 0; + uint64_t period = 0; + + std::string line; + readText(line, *cgroupcpu_max); + + auto space = line.find(' '); + + if (line.rfind("max", space) == std::string::npos) + { + auto field1 = line.substr(0, space); + quota = std::stoull(field1); + } + + if (space != std::string::npos) + { + auto field2 = line.substr(space + 1); + period = std::stoull(field2); + } + + if (quota > 0 && period > 0) + max_cpu_cgroups = static_cast(quota) / period; + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } + else if (cgroupcpu_cfs_quota && cgroupcpu_cfs_period) + { + try + { + cgroupcpu_cfs_quota->rewind(); + cgroupcpu_cfs_period->rewind(); + + uint64_t quota = 0; + uint64_t period = 0; + + tryReadText(quota, *cgroupcpu_cfs_quota); + tryReadText(period, *cgroupcpu_cfs_period); + + if (quota > 0 && period > 0) + max_cpu_cgroups = static_cast(quota) / period; + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } + + if (max_cpu_cgroups > 0) + { + new_values["CGroupMaxCPU"] = { max_cpu_cgroups, "The maximum number of CPU cores according to CGroups."}; + } + if (proc_stat) { try @@ -937,60 +999,6 @@ void AsynchronousMetrics::update(TimePoint update_time) } } - if (cgroupcpu_max) - { - try - { - cgroupcpu_max->rewind(); - - uint64_t quota = 0; - uint64_t period = 0; - - std::string line; - readText(line, *cgroupcpu_max); - - auto space = line.find(' '); - - if (line.rfind("max", space) == std::string::npos) - { - auto field1 = line.substr(0, space); - quota = std::stoull(field1); - } - - if (space != std::string::npos) - { - auto field2 = line.substr(space + 1); - period = std::stoull(field2); - } - - new_values["CGroupMaxCPU"] = { static_cast(quota) / period, "The maximum number of CPU cores according to CGroups."}; - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } - } - else if (cgroupcpu_cfs_quota && cgroupcpu_cfs_period) - { - try - { - cgroupcpu_cfs_quota->rewind(); - cgroupcpu_cfs_period->rewind(); - - uint64_t quota = 0; - uint64_t period = 0; - - tryReadText(quota, *cgroupcpu_cfs_quota); - tryReadText(period, *cgroupcpu_cfs_period); - - new_values["CGroupMaxCPU"] = { static_cast(quota) / period, "The maximum number of CPU cores according to CGroups."}; - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } - } - if (meminfo) { try From ddd2257cf51edf0cf0fb264b1d010e7436d7e94b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jun 2023 18:03:17 +0200 Subject: [PATCH 314/722] Normalize with respect to CGroups --- src/Common/AsynchronousMetrics.cpp | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index de9800aa896..36c87010fa5 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -933,36 +933,38 @@ void AsynchronousMetrics::update(TimePoint update_time) /// Also write values normalized to 0..1 by diving to the number of CPUs. /// These values are good to be averaged across the cluster of non-uniform servers. - if (num_cpus) + Float64 num_cpus_to_normalize = max_cpu_cgroups > 0 ? max_cpu_cgroups : num_cpus; + + if (num_cpus_to_normalize > 0) { - new_values["OSUserTimeNormalized"] = { delta_values_all_cpus.user * multiplier / num_cpus, + new_values["OSUserTimeNormalized"] = { delta_values_all_cpus.user * multiplier / num_cpus_to_normalize, "The value is similar to `OSUserTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSNiceTimeNormalized"] = { delta_values_all_cpus.nice * multiplier / num_cpus, + new_values["OSNiceTimeNormalized"] = { delta_values_all_cpus.nice * multiplier / num_cpus_to_normalize, "The value is similar to `OSNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." 
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSSystemTimeNormalized"] = { delta_values_all_cpus.system * multiplier / num_cpus, + new_values["OSSystemTimeNormalized"] = { delta_values_all_cpus.system * multiplier / num_cpus_to_normalize, "The value is similar to `OSSystemTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIdleTimeNormalized"] = { delta_values_all_cpus.idle * multiplier / num_cpus, + new_values["OSIdleTimeNormalized"] = { delta_values_all_cpus.idle * multiplier / num_cpus_to_normalize, "The value is similar to `OSIdleTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIOWaitTimeNormalized"] = { delta_values_all_cpus.iowait * multiplier / num_cpus, + new_values["OSIOWaitTimeNormalized"] = { delta_values_all_cpus.iowait * multiplier / num_cpus_to_normalize, "The value is similar to `OSIOWaitTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIrqTimeNormalized"] = { delta_values_all_cpus.irq * multiplier / num_cpus, + new_values["OSIrqTimeNormalized"] = { delta_values_all_cpus.irq * multiplier / num_cpus_to_normalize, "The value is similar to `OSIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSSoftIrqTimeNormalized"] = { delta_values_all_cpus.softirq * multiplier / num_cpus, + new_values["OSSoftIrqTimeNormalized"] = { delta_values_all_cpus.softirq * multiplier / num_cpus_to_normalize, "The value is similar to `OSSoftIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSStealTimeNormalized"] = { delta_values_all_cpus.steal * multiplier / num_cpus, + new_values["OSStealTimeNormalized"] = { delta_values_all_cpus.steal * multiplier / num_cpus_to_normalize, "The value is similar to `OSStealTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." 
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSGuestTimeNormalized"] = { delta_values_all_cpus.guest * multiplier / num_cpus, + new_values["OSGuestTimeNormalized"] = { delta_values_all_cpus.guest * multiplier / num_cpus_to_normalize, "The value is similar to `OSGuestTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSGuestNiceTimeNormalized"] = { delta_values_all_cpus.guest_nice * multiplier / num_cpus, + new_values["OSGuestNiceTimeNormalized"] = { delta_values_all_cpus.guest_nice * multiplier / num_cpus_to_normalize, "The value is similar to `OSGuestNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; } From 72b9d75a84476d7ee402de8a160f8a22b9ccdb59 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sat, 10 Jun 2023 12:53:02 +0000 Subject: [PATCH 315/722] Add compat setting for non-const timezones SQL function toTimezone() converts a Date or DateTime into another timezone. The problem is that the timezone is part of the Date / DateTime type but not part of the internal representation (value). This led to the fact that toTimeZone() wqith non-const timezones produced wrong and misleading results until #48471 (shipped with v23.4) enforced a const timezone. Unfortunately, this PR also broke existing table definitions with non-const timezones, e.g. in ALIAS expressions. So while #48471 addressed the issue appropriately, it is really backwards-incompatible. This PR adds a setting to toggle the behavior and makes it also part of the compatibility profile. 
--- src/Core/Settings.h | 1 + src/Core/SettingsChangesHistory.h | 1 + .../FunctionDateOrDateTimeAddInterval.h | 4 +- .../FunctionDateOrDateTimeToDateOrDate32.h | 2 +- ...tionDateOrDateTimeToDateTimeOrDateTime64.h | 2 +- .../FunctionDateOrDateTimeToSomething.h | 4 +- src/Functions/FunctionSnowflake.h | 17 ++++-- src/Functions/FunctionUnixTimestamp64.h | 13 +++-- src/Functions/FunctionsConversion.h | 10 ++-- src/Functions/FunctionsTimeWindow.cpp | 4 +- src/Functions/date_trunc.cpp | 2 +- .../extractTimeZoneFromFunctionArguments.cpp | 5 +- .../extractTimeZoneFromFunctionArguments.h | 10 +++- src/Functions/fromUnixTimestamp64Micro.cpp | 4 +- src/Functions/fromUnixTimestamp64Milli.cpp | 4 +- src/Functions/fromUnixTimestamp64Nano.cpp | 4 +- src/Functions/now.cpp | 12 +++-- src/Functions/now64.cpp | 10 +++- src/Functions/nowInBlock.cpp | 12 +++-- src/Functions/snowflake.cpp | 8 +-- src/Functions/timeSlots.cpp | 4 +- src/Functions/toStartOfInterval.cpp | 4 +- src/Functions/toTimezone.cpp | 11 +++- .../00515_enhanced_time_zones.reference | 7 +++ .../0_stateless/00515_enhanced_time_zones.sql | 54 ++++++++++++++----- 25 files changed, 147 insertions(+), 62 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 464b9168a4c..d2e6a470a91 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -147,6 +147,7 @@ class IColumn; M(Bool, enable_memory_bound_merging_of_aggregation_results, true, "Enable memory bound merging strategy for aggregation.", 0) \ M(Bool, enable_positional_arguments, true, "Enable positional arguments in ORDER BY, GROUP BY and LIMIT BY", 0) \ M(Bool, enable_extended_results_for_datetime_functions, false, "Enable date functions like toLastDayOfMonth return Date32 results (instead of Date results) for Date32/DateTime64 arguments.", 0) \ + M(Bool, allow_nonconst_timezone_arguments, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()", 0) \ \ M(Bool, group_by_use_nulls, false, "Treat columns mentioned in ROLLUP, CUBE or GROUPING SETS as Nullable", 0) \ \ diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index c0f10b13282..9fd45ac16d6 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -85,6 +85,7 @@ static std::map sett {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, + {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}}}, diff --git a/src/Functions/FunctionDateOrDateTimeAddInterval.h b/src/Functions/FunctionDateOrDateTimeAddInterval.h index 507dc37e266..1546c24d30c 100644 --- a/src/Functions/FunctionDateOrDateTimeAddInterval.h +++ b/src/Functions/FunctionDateOrDateTimeAddInterval.h @@ -679,7 +679,7 @@ public: } else if constexpr (std::is_same_v) { - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0)); + return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false)); } else if constexpr (std::is_same_v) { @@ -696,7 +696,7 @@ public: return {}; }); - auto timezone = extractTimeZoneNameFromFunctionArguments(arguments, 2, 0); + auto timezone = extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false); if (const auto* datetime64_type = typeid_cast(arguments[0].type.get())) { const auto from_scale = datetime64_type->getScale(); diff --git a/src/Functions/FunctionDateOrDateTimeToDateOrDate32.h b/src/Functions/FunctionDateOrDateTimeToDateOrDate32.h index 8e006b93b98..6eb3e534b62 100644 --- a/src/Functions/FunctionDateOrDateTimeToDateOrDate32.h +++ b/src/Functions/FunctionDateOrDateTimeToDateOrDate32.h @@ -36,7 +36,7 @@ public: /// If the time zone is specified but empty, throw an exception. /// only validate the time_zone part if the number of arguments is 2. if ((which.isDateTime() || which.isDateTime64()) && arguments.size() == 2 - && extractTimeZoneNameFromFunctionArguments(arguments, 1, 0).empty()) + && extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, false).empty()) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function {} supports a 2nd argument (optional) that must be a valid time zone", this->getName()); diff --git a/src/Functions/FunctionDateOrDateTimeToDateTimeOrDateTime64.h b/src/Functions/FunctionDateOrDateTimeToDateTimeOrDateTime64.h index 3d1f0f192cf..9f1066fd687 100644 --- a/src/Functions/FunctionDateOrDateTimeToDateTimeOrDateTime64.h +++ b/src/Functions/FunctionDateOrDateTimeToDateTimeOrDateTime64.h @@ -34,7 +34,7 @@ public: WhichDataType which(from_type); - std::string time_zone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0); + std::string time_zone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, false); /// If the time zone is specified but empty, throw an exception. /// only validate the time_zone part if the number of arguments is 2. 
diff --git a/src/Functions/FunctionDateOrDateTimeToSomething.h b/src/Functions/FunctionDateOrDateTimeToSomething.h index 47433d13e0b..82818cc3d2b 100644 --- a/src/Functions/FunctionDateOrDateTimeToSomething.h +++ b/src/Functions/FunctionDateOrDateTimeToSomething.h @@ -24,7 +24,7 @@ public: /// If the time zone is specified but empty, throw an exception. if constexpr (std::is_same_v) { - std::string time_zone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0); + std::string time_zone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, false); /// only validate the time_zone part if the number of arguments is 2. This is mainly /// to accommodate functions like toStartOfDay(today()), toStartOfDay(yesterday()) etc. if (arguments.size() == 2 && time_zone.empty()) @@ -53,7 +53,7 @@ public: scale = std::max(source_scale, static_cast(9)); } - return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, 1, 0)); + return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, false)); } else return std::make_shared(); diff --git a/src/Functions/FunctionSnowflake.h b/src/Functions/FunctionSnowflake.h index 998db98890a..ce3a48269b4 100644 --- a/src/Functions/FunctionSnowflake.h +++ b/src/Functions/FunctionSnowflake.h @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -72,9 +73,13 @@ class FunctionSnowflakeToDateTime : public IFunction { private: const char * name; + const bool allow_nonconst_timezone_arguments; public: - explicit FunctionSnowflakeToDateTime(const char * name_) : name(name_) { } + explicit FunctionSnowflakeToDateTime(const char * name_, ContextPtr context) + : name(name_) + , allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + {} String getName() const override { return name; } size_t getNumberOfArguments() const override { return 0; } @@ -92,7 +97,7 @@ public: std::string timezone; if (arguments.size() == 2) - timezone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0); + timezone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, allow_nonconst_timezone_arguments); return std::make_shared(timezone); } @@ -162,9 +167,13 @@ class FunctionSnowflakeToDateTime64 : public IFunction { private: const char * name; + const bool allow_nonconst_timezone_arguments; public: - explicit FunctionSnowflakeToDateTime64(const char * name_) : name(name_) { } + explicit FunctionSnowflakeToDateTime64(const char * name_, ContextPtr context) + : name(name_) + , allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + {} String getName() const override { return name; } size_t getNumberOfArguments() const override { return 0; } @@ -182,7 +191,7 @@ public: std::string timezone; if (arguments.size() == 2) - timezone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0); + timezone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, allow_nonconst_timezone_arguments); return std::make_shared(3, timezone); } diff --git a/src/Functions/FunctionUnixTimestamp64.h b/src/Functions/FunctionUnixTimestamp64.h index 7519e46f9dc..58a23f7266e 100644 --- a/src/Functions/FunctionUnixTimestamp64.h +++ b/src/Functions/FunctionUnixTimestamp64.h @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -99,11 +100,13 @@ class FunctionFromUnixTimestamp64 : public IFunction private: size_t target_scale; const char * name; + const bool allow_nonconst_timezone_arguments; public: - FunctionFromUnixTimestamp64(size_t target_scale_, const char 
* name_) - : target_scale(target_scale_), name(name_) - { - } + FunctionFromUnixTimestamp64(size_t target_scale_, const char * name_, ContextPtr context) + : target_scale(target_scale_) + , name(name_) + , allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + {} String getName() const override { return name; } size_t getNumberOfArguments() const override { return 0; } @@ -121,7 +124,7 @@ public: std::string timezone; if (arguments.size() == 2) - timezone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0); + timezone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, allow_nonconst_timezone_arguments); return std::make_shared(target_scale, timezone); } diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 940585d6d57..87229b8ad04 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -1796,13 +1796,13 @@ public: if (to_datetime64 || scale != 0) /// toDateTime('xxxx-xx-xx xx:xx:xx', 0) return DateTime return std::make_shared(scale, - extractTimeZoneNameFromFunctionArguments(arguments, timezone_arg_position, 0)); + extractTimeZoneNameFromFunctionArguments(arguments, timezone_arg_position, 0, false)); - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, timezone_arg_position, 0)); + return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, timezone_arg_position, 0, false)); } if constexpr (std::is_same_v) - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, timezone_arg_position, 0)); + return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, timezone_arg_position, 0, false)); else if constexpr (std::is_same_v) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected branch in code of conversion function: it is a bug."); else @@ -2067,7 +2067,7 @@ public: UInt64 scale = to_datetime64 ? DataTypeDateTime64::default_scale : 0; if (arguments.size() > 1) scale = extractToDecimalScale(arguments[1]); - const auto timezone = extractTimeZoneNameFromFunctionArguments(arguments, 2, 0); + const auto timezone = extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false); res = scale == 0 ? 
res = std::make_shared(timezone) : std::make_shared(scale, timezone); } @@ -2117,7 +2117,7 @@ public: } if constexpr (std::is_same_v) - res = std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 1, 0)); + res = std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, false)); else if constexpr (std::is_same_v) throw Exception(ErrorCodes::LOGICAL_ERROR, "LOGICAL ERROR: It is a bug."); else if constexpr (to_decimal) diff --git a/src/Functions/FunctionsTimeWindow.cpp b/src/Functions/FunctionsTimeWindow.cpp index 8a57a4da692..231e8b6fa77 100644 --- a/src/Functions/FunctionsTimeWindow.cpp +++ b/src/Functions/FunctionsTimeWindow.cpp @@ -138,7 +138,7 @@ struct TimeWindowImpl if (result_type_is_date) data_type = std::make_shared(); else - data_type = std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0)); + data_type = std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false)); return std::make_shared(DataTypes{data_type, data_type}); } @@ -322,7 +322,7 @@ struct TimeWindowImpl if (result_type_is_date) data_type = std::make_shared(); else - data_type = std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 3, 0)); + data_type = std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 3, 0, false)); return std::make_shared(DataTypes{data_type, data_type}); } diff --git a/src/Functions/date_trunc.cpp b/src/Functions/date_trunc.cpp index 016b8f4da5e..414512fc4f8 100644 --- a/src/Functions/date_trunc.cpp +++ b/src/Functions/date_trunc.cpp @@ -107,7 +107,7 @@ public: if (result_type_is_date) return std::make_shared(); else - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 1)); + return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 1, false)); } bool useDefaultImplementationForConstants() const override { return true; } diff --git a/src/Functions/extractTimeZoneFromFunctionArguments.cpp b/src/Functions/extractTimeZoneFromFunctionArguments.cpp index 7ed240fdbcf..7168c68c9c9 100644 --- a/src/Functions/extractTimeZoneFromFunctionArguments.cpp +++ b/src/Functions/extractTimeZoneFromFunctionArguments.cpp @@ -30,10 +30,11 @@ std::string extractTimeZoneNameFromColumn(const IColumn * column, const String & } -std::string extractTimeZoneNameFromFunctionArguments(const ColumnsWithTypeAndName & arguments, size_t time_zone_arg_num, size_t datetime_arg_num) +std::string extractTimeZoneNameFromFunctionArguments(const ColumnsWithTypeAndName & arguments, size_t time_zone_arg_num, size_t datetime_arg_num, bool allow_nonconst_timezone_arguments) { /// Explicit time zone may be passed in last argument. - if (arguments.size() == time_zone_arg_num + 1) + if ((arguments.size() == time_zone_arg_num + 1) + && (!allow_nonconst_timezone_arguments || arguments[time_zone_arg_num].column)) { return extractTimeZoneNameFromColumn(arguments[time_zone_arg_num].column.get(), arguments[time_zone_arg_num].name); } diff --git a/src/Functions/extractTimeZoneFromFunctionArguments.h b/src/Functions/extractTimeZoneFromFunctionArguments.h index 858be40def7..3c012c64c49 100644 --- a/src/Functions/extractTimeZoneFromFunctionArguments.h +++ b/src/Functions/extractTimeZoneFromFunctionArguments.h @@ -16,8 +16,16 @@ std::string extractTimeZoneNameFromColumn(const IColumn * column, const String & /// Determine working timezone either from optional argument with time zone name or from time zone in DateTime type of argument. /// Returns empty string if default time zone should be used. 
+/// +/// Parameter allow_nonconst_timezone_arguments toggles if non-const timezone function arguments are accepted (legacy behavior) or not. The +/// problem with the old behavior is that the timezone is part of the type, and not part of the value. This lead to confusion and unexpected +/// results. +/// - For new functions, set allow_nonconst_timezone_arguments = false. +/// - For existing functions +/// - which disallow non-const timezone arguments anyways (e.g. getArgumentsThatAreAlwaysConstant()), set allow_nonconst_timezone_arguments = false, +/// - which allow non-const timezone arguments, set allow_nonconst_timezone_arguments according to the corresponding setting. std::string extractTimeZoneNameFromFunctionArguments( - const ColumnsWithTypeAndName & arguments, size_t time_zone_arg_num, size_t datetime_arg_num); + const ColumnsWithTypeAndName & arguments, size_t time_zone_arg_num, size_t datetime_arg_num, bool allow_nonconst_timezone_arguments); const DateLUTImpl & extractTimeZoneFromFunctionArguments( const ColumnsWithTypeAndName & arguments, size_t time_zone_arg_num, size_t datetime_arg_num); diff --git a/src/Functions/fromUnixTimestamp64Micro.cpp b/src/Functions/fromUnixTimestamp64Micro.cpp index 70dcbcd1d4b..191e2137a0d 100644 --- a/src/Functions/fromUnixTimestamp64Micro.cpp +++ b/src/Functions/fromUnixTimestamp64Micro.cpp @@ -7,8 +7,8 @@ namespace DB REGISTER_FUNCTION(FromUnixTimestamp64Micro) { factory.registerFunction("fromUnixTimestamp64Micro", - [](ContextPtr){ return std::make_unique( - std::make_shared(6, "fromUnixTimestamp64Micro")); }); + [](ContextPtr context){ return std::make_unique( + std::make_shared(6, "fromUnixTimestamp64Micro", context)); }); } } diff --git a/src/Functions/fromUnixTimestamp64Milli.cpp b/src/Functions/fromUnixTimestamp64Milli.cpp index 532013dfe5f..c6d4fcd30a2 100644 --- a/src/Functions/fromUnixTimestamp64Milli.cpp +++ b/src/Functions/fromUnixTimestamp64Milli.cpp @@ -7,8 +7,8 @@ namespace DB REGISTER_FUNCTION(FromUnixTimestamp64Milli) { factory.registerFunction("fromUnixTimestamp64Milli", - [](ContextPtr){ return std::make_unique( - std::make_shared(3, "fromUnixTimestamp64Milli")); }); + [](ContextPtr context){ return std::make_unique( + std::make_shared(3, "fromUnixTimestamp64Milli", context)); }); } } diff --git a/src/Functions/fromUnixTimestamp64Nano.cpp b/src/Functions/fromUnixTimestamp64Nano.cpp index 96afdda0fa8..2b5a7addbfc 100644 --- a/src/Functions/fromUnixTimestamp64Nano.cpp +++ b/src/Functions/fromUnixTimestamp64Nano.cpp @@ -7,8 +7,8 @@ namespace DB REGISTER_FUNCTION(FromUnixTimestamp64Nano) { factory.registerFunction("fromUnixTimestamp64Nano", - [](ContextPtr){ return std::make_unique( - std::make_shared(9, "fromUnixTimestamp64Nano")); }); + [](ContextPtr context){ return std::make_unique( + std::make_shared(9, "fromUnixTimestamp64Nano", context)); }); } } diff --git a/src/Functions/now.cpp b/src/Functions/now.cpp index 3c3bff1524f..d3a94379a61 100644 --- a/src/Functions/now.cpp +++ b/src/Functions/now.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { @@ -87,7 +88,10 @@ public: bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } - static FunctionOverloadResolverPtr create(ContextPtr) { return std::make_unique(); } + static FunctionOverloadResolverPtr create(ContextPtr context) { return std::make_unique(context); } + explicit NowOverloadResolver(ContextPtr context) + : 
allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + {} DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { @@ -102,7 +106,7 @@ public: } if (arguments.size() == 1) { - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 0, 0)); + return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 0, 0, allow_nonconst_timezone_arguments)); } return std::make_shared(); } @@ -121,10 +125,12 @@ public: if (arguments.size() == 1) return std::make_unique( time(nullptr), DataTypes{arguments.front().type}, - std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 0, 0))); + std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 0, 0, allow_nonconst_timezone_arguments))); return std::make_unique(time(nullptr), DataTypes(), std::make_shared()); } +private: + const bool allow_nonconst_timezone_arguments; }; } diff --git a/src/Functions/now64.cpp b/src/Functions/now64.cpp index f29b73061d9..349b8c71145 100644 --- a/src/Functions/now64.cpp +++ b/src/Functions/now64.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -115,7 +116,10 @@ public: bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } - static FunctionOverloadResolverPtr create(ContextPtr) { return std::make_unique(); } + static FunctionOverloadResolverPtr create(ContextPtr context) { return std::make_unique(context); } + explicit Now64OverloadResolver(ContextPtr context) + : allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + {} DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { @@ -137,7 +141,7 @@ public: } if (arguments.size() == 2) { - timezone_name = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0); + timezone_name = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, allow_nonconst_timezone_arguments); } return std::make_shared(scale, timezone_name); @@ -157,6 +161,8 @@ public: return std::make_unique(nowSubsecond(scale), std::move(arg_types), result_type); } +private: + const bool allow_nonconst_timezone_arguments; }; } diff --git a/src/Functions/nowInBlock.cpp b/src/Functions/nowInBlock.cpp index dfb3ed7c34a..0d5f9c45780 100644 --- a/src/Functions/nowInBlock.cpp +++ b/src/Functions/nowInBlock.cpp @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB @@ -25,10 +26,13 @@ class FunctionNowInBlock : public IFunction { public: static constexpr auto name = "nowInBlock"; - static FunctionPtr create(ContextPtr) + static FunctionPtr create(ContextPtr context) { - return std::make_shared(); + return std::make_shared(context); } + explicit FunctionNowInBlock(ContextPtr context) + : allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + {} String getName() const override { @@ -68,7 +72,7 @@ public: } if (arguments.size() == 1) { - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 0, 0)); + return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 0, 0, allow_nonconst_timezone_arguments)); } return std::make_shared(); } @@ -77,6 +81,8 @@ public: { return ColumnDateTime::create(input_rows_count, static_cast(time(nullptr))); } +private: + const bool allow_nonconst_timezone_arguments; }; } diff --git a/src/Functions/snowflake.cpp b/src/Functions/snowflake.cpp index 4849d6512ca..ca78945acb9 100644 --- a/src/Functions/snowflake.cpp +++ 
b/src/Functions/snowflake.cpp @@ -21,14 +21,14 @@ REGISTER_FUNCTION(DateTime64ToSnowflake) REGISTER_FUNCTION(SnowflakeToDateTime) { factory.registerFunction("snowflakeToDateTime", - [](ContextPtr){ return std::make_unique( - std::make_shared("snowflakeToDateTime")); }); + [](ContextPtr context ){ return std::make_unique( + std::make_shared("snowflakeToDateTime", context)); }); } REGISTER_FUNCTION(SnowflakeToDateTime64) { factory.registerFunction("snowflakeToDateTime64", - [](ContextPtr){ return std::make_unique( - std::make_shared("snowflakeToDateTime64")); }); + [](ContextPtr context){ return std::make_unique( + std::make_shared("snowflakeToDateTime64", context)); }); } } diff --git a/src/Functions/timeSlots.cpp b/src/Functions/timeSlots.cpp index 568ab5e5a47..040495ab023 100644 --- a/src/Functions/timeSlots.cpp +++ b/src/Functions/timeSlots.cpp @@ -270,14 +270,14 @@ public: /// Note that there is no explicit time zone argument for this function (we specify 2 as an argument number with explicit time zone). if (WhichDataType(arguments[0].type).isDateTime()) { - return std::make_shared(std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 3, 0))); + return std::make_shared(std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 3, 0, false))); } else { auto start_time_scale = assert_cast(*arguments[0].type).getScale(); auto duration_scale = assert_cast(*arguments[1].type).getScale(); return std::make_shared( - std::make_shared(std::max(start_time_scale, duration_scale), extractTimeZoneNameFromFunctionArguments(arguments, 3, 0))); + std::make_shared(std::max(start_time_scale, duration_scale), extractTimeZoneNameFromFunctionArguments(arguments, 3, 0, false))); } } diff --git a/src/Functions/toStartOfInterval.cpp b/src/Functions/toStartOfInterval.cpp index c0220f1aed2..649242d0d86 100644 --- a/src/Functions/toStartOfInterval.cpp +++ b/src/Functions/toStartOfInterval.cpp @@ -384,7 +384,7 @@ public: if (result_type_is_date) return std::make_shared(); else if (result_type_is_datetime) - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0)); + return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false)); else { auto scale = 0; @@ -396,7 +396,7 @@ public: else if (interval_type->getKind() == IntervalKind::Millisecond) scale = 3; - return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0)); + return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false)); } } diff --git a/src/Functions/toTimezone.cpp b/src/Functions/toTimezone.cpp index 0a54e5a86b7..a0d90351898 100644 --- a/src/Functions/toTimezone.cpp +++ b/src/Functions/toTimezone.cpp @@ -5,6 +5,8 @@ #include #include +#include + #include #include @@ -84,7 +86,10 @@ public: String getName() const override { return name; } size_t getNumberOfArguments() const override { return 2; } - static FunctionOverloadResolverPtr create(ContextPtr) { return std::make_unique(); } + static FunctionOverloadResolverPtr create(ContextPtr context) { return std::make_unique(context); } + explicit ToTimeZoneOverloadResolver(ContextPtr context) + : allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + {} DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { @@ -98,7 +103,7 @@ public: throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}. 
" "Should be DateTime or DateTime64", arguments[0].type->getName(), getName()); - String time_zone_name = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0); + String time_zone_name = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0, allow_nonconst_timezone_arguments); if (which_type.isDateTime()) return std::make_shared(time_zone_name); @@ -119,6 +124,8 @@ public: return std::make_unique(is_constant_timezone, data_types, result_type); } +private: + const bool allow_nonconst_timezone_arguments; }; } diff --git a/tests/queries/0_stateless/00515_enhanced_time_zones.reference b/tests/queries/0_stateless/00515_enhanced_time_zones.reference index ad0dae35c45..2ee2c3eac81 100644 --- a/tests/queries/0_stateless/00515_enhanced_time_zones.reference +++ b/tests/queries/0_stateless/00515_enhanced_time_zones.reference @@ -16,3 +16,10 @@ 2017-11-05 08:07:47 2017-11-05 10:37:47 2017-11-05 10:37:47 +-- Test const/non-const timezone arguments -- +Asia/Kolkata 2017-11-05 08:07:47 +42 Asia/Kolkata 1970-01-01 00:00:00.042 +42 Asia/Kolkata 1970-01-01 00:00:00.000042 +42 Asia/Kolkata 1970-01-01 00:00:00.000000042 +42 Asia/Kolkata 2010-11-04 01:42:54 +42 Asia/Kolkata 2010-11-04 01:42:54.657 diff --git a/tests/queries/0_stateless/00515_enhanced_time_zones.sql b/tests/queries/0_stateless/00515_enhanced_time_zones.sql index f719ff70d7a..7659b6e4603 100644 --- a/tests/queries/0_stateless/00515_enhanced_time_zones.sql +++ b/tests/queries/0_stateless/00515_enhanced_time_zones.sql @@ -21,16 +21,46 @@ SELECT toString(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul')); SELECT toString(toTimeZone(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), 'Asia/Kolkata')); SELECT toString(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), 'Asia/Kolkata'); -SELECT toTimeZone(dt, tz) FROM ( - SELECT toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul') AS dt, arrayJoin(['Asia/Kolkata', 'UTC']) AS tz -); -- { serverError ILLEGAL_COLUMN } -SELECT materialize('Asia/Kolkata') t, toTimeZone(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), t); -- { serverError ILLEGAL_COLUMN } +SELECT '-- Test const/non-const timezone arguments --'; -CREATE TEMPORARY TABLE tmp AS SELECT arrayJoin(['Europe/Istanbul', 'Asia/Istanbul']); -SELECT toTimeZone(now(), (*,).1) FROM tmp; -- { serverError ILLEGAL_COLUMN } -SELECT now((*,).1) FROM tmp; -- { serverError ILLEGAL_COLUMN } -SELECT now64(1, (*,).1) FROM tmp; -- { serverError ILLEGAL_COLUMN } -SELECT toStartOfInterval(now(), INTERVAL 3 HOUR, (*,).1) FROM tmp; -- { serverError ILLEGAL_COLUMN } -SELECT snowflakeToDateTime(toInt64(123), (*,).1) FROM tmp; -- { serverError ILLEGAL_COLUMN } -SELECT toUnixTimestamp(now(), (*,).1) FROM tmp; -- { serverError ILLEGAL_COLUMN } -SELECT toDateTimeOrDefault('2023-04-12 16:43:32', (*,).1, now()) FROM tmp; -- { serverError ILLEGAL_COLUMN } +SELECT materialize('Asia/Kolkata') tz, toTimeZone(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT materialize('Asia/Kolkata') tz, toTimeZone(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), tz) SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT materialize('Asia/Kolkata') tz, now(tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +-- SELECT materialize('Asia/Kolkata') tz, now(tz) SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT materialize('Asia/Kolkata') tz, now64(9, tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +-- SELECT 
materialize('Asia/Kolkata') tz, now64(9, tz) SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT materialize('Asia/Kolkata') tz, nowInBlock(tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +-- SELECT materialize('Asia/Kolkata') tz, nowInBlock(tz) SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Milli(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Milli(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Micro(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Micro(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Nano(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Nano(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, snowflakeToDateTime(ts, tz) settings allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, snowflakeToDateTime(ts, tz) settings allow_nonconst_timezone_arguments = 1; + +SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, snowflakeToDateTime64(ts, tz) settings allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, snowflakeToDateTime64(ts, tz) settings allow_nonconst_timezone_arguments = 1; + +-- test for a related bug: + +DROP TABLE IF EXISTS tab; + +SET allow_nonconst_timezone_arguments = 1; + +CREATE TABLE tab (`country` LowCardinality(FixedString(7)) DEFAULT 'unknown', `city` LowCardinality(String) DEFAULT 'unknown', `region` LowCardinality(String) DEFAULT 'unknown', `continent` LowCardinality(FixedString(7)) DEFAULT 'unknown', `is_eu_country` Bool, `date` DateTime CODEC(DoubleDelta, LZ4), `viewer_date` DateTime ALIAS toTimezone(date, timezone), `device_browser` LowCardinality(String) DEFAULT 'unknown', `metro_code` LowCardinality(String) DEFAULT 'unknown', `domain` String DEFAULT 'unknown', `device_platform` LowCardinality(String) DEFAULT 'unknown', `device_type` LowCardinality(String) DEFAULT 'unknown', `device_vendor` LowCardinality(String) DEFAULT 'unknown', `ip` FixedString(39) DEFAULT 'unknown', `lat` Decimal(8, 6) CODEC(T64), `lng` Decimal(9, 6) CODEC(T64), `asset_id` String DEFAULT 'unknown', `is_personalized` Bool, `metric` String, `origin` String DEFAULT 'unknown', `product_id` UInt64 CODEC(T64), `referer` String DEFAULT 'unknown', `server_side` Int8 CODEC(T64), `third_party_id` String DEFAULT 'unknown', `partner_slug` LowCardinality(FixedString(10)) DEFAULT 'unknown', `user_agent` String DEFAULT 'unknown', `user_id` UUID, `zip` FixedString(10) DEFAULT 'unknown', `timezone` LowCardinality(String), `as_organization` LowCardinality(String) DEFAULT 'unknown', `content_cat` Array(String), `playback_method` LowCardinality(String) DEFAULT 'unknown', `store_id` LowCardinality(String) DEFAULT 'unknown', `store_url` String 
DEFAULT 'unknown', `timestamp` Nullable(DateTime), `ad_count` Int8 CODEC(T64), `ad_type` LowCardinality(FixedString(10)) DEFAULT 'unknown', `ad_categories` Array(FixedString(8)), `blocked_ad_categories` Array(FixedString(8)), `break_max_ad_length` Int8 CODEC(T64), `break_max_ads` Int8 CODEC(T64), `break_max_duration` Int8 CODEC(T64), `break_min_ad_length` Int8 CODEC(T64), `break_position` LowCardinality(FixedString(18)) DEFAULT 'unknown', `media_playhead` String DEFAULT 'unknown', `placement_type` Int8 CODEC(T64), `transaction_id` String, `universal_ad_id` Array(String), `client_ua` LowCardinality(String) DEFAULT 'unknown', `device_ip` FixedString(39) DEFAULT 'unknown', `device_ua` LowCardinality(String) DEFAULT 'unknown', `ifa` String, `ifa_type` LowCardinality(String) DEFAULT 'unknown', `vast_lat` Decimal(8, 6) CODEC(T64), `vast_long` Decimal(9, 6) CODEC(T64), `server_ua` String DEFAULT 'unknown', `app_bundle` String DEFAULT 'unknown', `page_url` String DEFAULT 'unknown', `api_framework` Array(UInt8), `click_type` LowCardinality(String), `extensions` Array(String), `media_mime` Array(String), `om_id_partner` LowCardinality(String) DEFAULT 'unknown', `player_capabilities` Array(FixedString(12)), `vast_versions` Array(UInt8), `verification_vendors` Array(String), `ad_play_head` String DEFAULT 'unknown', `ad_serving_id` String DEFAULT 'unknown', `asset_uri` String DEFAULT 'unknown', `content_id` String DEFAULT 'unknown', `content_uri` String DEFAULT 'unknown', `inventory_state` Array(FixedString(14)), `player_size` Array(UInt8), `player_state` Array(FixedString(12)), `pod_sequence` Int8 CODEC(T64), `click_position` Array(UInt32), `error_code` Int16 CODEC(T64), `error_reason` Int8 CODEC(T64), `gdpr_consent` String DEFAULT 'unknown', `limited_tracking` Bool, `regulations` String DEFAULT 'unknown', `content_category` Array(String), PROJECTION projection_TPAG_VAST_date (SELECT * ORDER BY toYYYYMMDD(date), metric, product_id, asset_id)) ENGINE = MergeTree ORDER BY (product_id, metric, asset_id, toYYYYMMDD(date)); + +DETACH TABLE tab; + +ATTACH TABLE tab SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +ATTACH TABLE tab SETTINGS allow_nonconst_timezone_arguments = 1; + +DROP TABLE tab; From ffb941624bc971886212e0745716e79688a154a1 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sat, 10 Jun 2023 17:01:44 +0000 Subject: [PATCH 316/722] Exclude some tests with QPL from fasttest --- .../0_stateless/00804_test_alter_compression_codecs.sql | 3 +++ .../0_stateless/00804_test_custom_compression_codecs.sql | 3 +++ .../00804_test_custom_compression_codes_log_storages.sql | 3 +++ .../0_stateless/00804_test_deflate_qpl_codec_compression.sql | 3 +++ 4 files changed, 12 insertions(+) diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql index fd9855e82d3..eb1abda9a21 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql @@ -1,3 +1,6 @@ +--Tags: no-fasttest +-- no-fasttest because DEFLATE_QPL isn't available in fasttest + SET send_logs_level = 'fatal'; DROP TABLE IF EXISTS alter_compression_codec; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql index 89e77f758a7..df74620a201 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql +++ 
b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql @@ -1,3 +1,6 @@ +--Tags: no-fasttest +-- no-fasttest because DEFLATE_QPL isn't available in fasttest + SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; SET enable_deflate_qpl_codec = 1; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql index a629df2666d..67c0074c58f 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql @@ -1,3 +1,6 @@ +--Tags: no-fasttest +-- no-fasttest because DEFLATE_QPL isn't available in fasttest + SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; SET enable_deflate_qpl_codec = 1; diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql index 5a56fc0d576..a46272112a9 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql @@ -1,3 +1,6 @@ +--Tags: no-fasttest +-- no-fasttest because DEFLATE_QPL isn't available in fasttest + SET send_logs_level = 'fatal'; SET enable_deflate_qpl_codec = 1; From a3da7c8ebe33977cacf6f67f5b1a75833de9aa64 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Sat, 10 Jun 2023 17:20:29 +0000 Subject: [PATCH 317/722] Merged NuRaft --- contrib/NuRaft | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index 8f267da1a91..491eaf592d9 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit 8f267da1a91310bd152af755b0178cfd38c646c7 +Subproject commit 491eaf592d950e0e37accbe8b3f217e068c9fecf From 296b11a1aeaa069b56fb2befd3b933c984b20a1b Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 10 Jun 2023 20:56:54 +0000 Subject: [PATCH 318/722] Update version_date.tsv and changelogs after v23.5.2.7-stable --- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v23.5.2.7-stable.md | 18 ++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 5 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 docs/changelogs/v23.5.2.7-stable.md diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 7190ef4d649..44967af4b32 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ esac ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release" -ARG VERSION="23.5.1.3174" +ARG VERSION="23.5.2.7" ARG PACKAGES="clickhouse-keeper" # user/group precreated explicitly with fixed uid/gid on purpose. diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index ca966b16a2d..8ab9bf7b077 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.5.1.3174" +ARG VERSION="23.5.2.7" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. 
diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index c82ac592120..b3b0cfe1510 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="23.5.1.3174" +ARG VERSION="23.5.2.7" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docs/changelogs/v23.5.2.7-stable.md b/docs/changelogs/v23.5.2.7-stable.md new file mode 100644 index 00000000000..2e4931c64e0 --- /dev/null +++ b/docs/changelogs/v23.5.2.7-stable.md @@ -0,0 +1,18 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.5.2.7-stable (5751aa1ab9f) FIXME as compared to v23.5.1.3174-stable (2fec796e73e) + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Do not read all the columns from right GLOBAL JOIN table. [#50721](https://github.com/ClickHouse/ClickHouse/pull/50721) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Fix build for aarch64 (temporary disable azure) [#50770](https://github.com/ClickHouse/ClickHouse/pull/50770) ([alesapin](https://github.com/alesapin)). +* Rename azure_blob_storage to azureBlobStorage [#50812](https://github.com/ClickHouse/ClickHouse/pull/50812) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 2aeeb5db35c..4647bcb4af1 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v23.5.2.7-stable 2023-06-10 v23.5.1.3174-stable 2023-06-09 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 From 78c32a204ce656c278433fed92fb535584b8ee3b Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Sat, 10 Jun 2023 23:00:59 +0200 Subject: [PATCH 319/722] Updated docs for azureBlobStorage Table function & engine --- .../integrations/azureBlobStorage.md | 28 ++++++++ .../table-functions/azureBlobStorage.md | 70 +++++++++++++++++++ .../table-functions/azure_blob_storage.md | 11 --- 3 files changed, 98 insertions(+), 11 deletions(-) create mode 100644 docs/en/engines/table-engines/integrations/azureBlobStorage.md create mode 100644 docs/en/sql-reference/table-functions/azureBlobStorage.md delete mode 100644 docs/en/sql-reference/table-functions/azure_blob_storage.md diff --git a/docs/en/engines/table-engines/integrations/azureBlobStorage.md b/docs/en/engines/table-engines/integrations/azureBlobStorage.md new file mode 100644 index 00000000000..b1c6169592b --- /dev/null +++ b/docs/en/engines/table-engines/integrations/azureBlobStorage.md @@ -0,0 +1,28 @@ +--- +slug: /en/engines/table-engines/integrations/azureBlobStorage +sidebar_position: 7 +sidebar_label: AzureBlobStorage +--- + +# AzureBlobStorage Table Engine + +This engine provides integration with [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) ecosystem. + +## Create Table {#creating-a-table} + +``` sql +CREATE TABLE azure_blob_storage_table (name String, value UInt32) + ENGINE = AzureBlobStorage(connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression]) + [PARTITION BY expr] + [SETTINGS ...] 
+``` + +**Engine parameters** + +- `connection_string|storage_account_url` — connection_string includes account name & key ([Create connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)) or you could also provide the storage account url here and account name & account key as separate parameters (see parameters account_name & account_key) +- `container_name` - Container name +- `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. +- `account_name` - if storage_account_url is used, then account name can be specified here +- `account_key` - if storage_account_url is used, then account key can be specified here +- `format` — The [format](../../interfaces/formats.md#formats) of the file. +- `compression` — Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension. (same as setting to `auto`). diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md new file mode 100644 index 00000000000..f8a9016bd15 --- /dev/null +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -0,0 +1,70 @@ +--- +slug: /en/sql-reference/table-functions/azure_blob_storage +sidebar_position: 45 +sidebar_label: azure_blob_storage +keywords: [azure blob storage] +--- + +# azure\_blob\_storage Table Function + +Provides a table-like interface to select/insert files in [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs). This table function is similar to the [s3 function](../../sql-reference/table-functions/s3.md). + +**Syntax** + +``` sql +azureBlobStorage(- connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure]) +``` + +**Arguments** + +- `connection_string|storage_account_url` — connection_string includes account name & key ([Create connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)) or you could also provide the storage account url here and account name & account key as separate parameters (see parameters account_name & account_key) +- `container_name` - Container name +- `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. +- `account_name` - if storage_account_url is used, then account name can be specified here +- `account_key` - if storage_account_url is used, then account key can be specified here +- `format` — The [format](../../interfaces/formats.md#formats) of the file. +- `compression` — Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension. (same as setting to `auto`). +- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. + +**Returned value** + +A table with the specified structure for reading or writing data in the specified file. 
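
As an illustrative sketch (an assumption, not an example from the original file: it presumes several blobs matching `test_*.csv` already exist in `test_container`, e.g. written by the partitioned `INSERT` shown in the examples below, and it reuses the same Azurite endpoint and account key), the wildcards supported in `blobpath` let a single query read many blobs at once:

```sql
-- Illustrative sketch: assumes blobs matching test_*.csv exist in test_container.
-- The '*' wildcard in blobpath selects every matching blob (reads only).
SELECT count(*)
FROM azureBlobStorage('http://azurite1:10000/devstoreaccount1',
    'test_container', 'test_*.csv', 'devstoreaccount1',
    'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
    'CSV', 'auto', 'column1 UInt32, column2 UInt32, column3 UInt32');
```

The same pattern applies to the `?`, `{abc,def}` and `{N..M}` wildcards listed for `blobpath` above.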
+ +**Examples** + +Write data into azure blob storage using the following : + +```sql +INSERT INTO TABLE FUNCTION azureBlobStorage('http://azurite1:10000/devstoreaccount1', + 'test_container', 'test_{_partition_id}.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + 'CSV', 'auto', 'column1 UInt32, column2 UInt32, column3 UInt32') PARTITION BY column3 VALUES (1, 2, 3), (3, 2, 1), (78, 43, 3); +``` + +And then it can be read using + +```sql +SELECT * FROM azureBlobStorage('http://azurite1:10000/devstoreaccount1', + 'test_container', 'test_1.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + 'CSV', 'auto', 'column1 UInt32, column2 UInt32, column3 UInt32'); +``` + +```response +┌───column1─┬────column2─┬───column3─┐ +│ 3 │ 2 │ 1 │ +└───────────┴────────────┴───────────┘ +``` + +or with storage_account_url + +```sql +SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;EndPointSuffix=core.windows.net', + 'test_container', 'test_3.csv', 'CSV', 'auto' , 'column1 UInt32, column2 UInt32, column3 UInt32'); +``` + +``` text +┌─count()─┐ +│ 2 │ +└─────────┘ +``` + + \ No newline at end of file diff --git a/docs/en/sql-reference/table-functions/azure_blob_storage.md b/docs/en/sql-reference/table-functions/azure_blob_storage.md deleted file mode 100644 index 6091aab5f9d..00000000000 --- a/docs/en/sql-reference/table-functions/azure_blob_storage.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -slug: /en/sql-reference/table-functions/azure_blob_storage -sidebar_position: 45 -sidebar_label: azure_blob_storage -keywords: [azure blob storage] ---- - -# azureBlobStorage Table Function - -Provides a table-like interface to select/insert files in [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs). This table function is similar to the [s3 function](../../sql-reference/table-functions/s3.md). - From 05d4baf1e7186c902c9e44fe0f16e9cbbc18e5c0 Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Sat, 10 Jun 2023 18:20:39 -0400 Subject: [PATCH 320/722] edits --- .../integrations/azureBlobStorage.md | 15 +++++++------ .../table-engines/integrations/index.md | 21 +------------------ .../table-functions/azureBlobStorage.md | 5 +++-- 3 files changed, 13 insertions(+), 28 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/azureBlobStorage.md b/docs/en/engines/table-engines/integrations/azureBlobStorage.md index b1c6169592b..82b26e79579 100644 --- a/docs/en/engines/table-engines/integrations/azureBlobStorage.md +++ b/docs/en/engines/table-engines/integrations/azureBlobStorage.md @@ -1,14 +1,13 @@ --- slug: /en/engines/table-engines/integrations/azureBlobStorage -sidebar_position: 7 -sidebar_label: AzureBlobStorage +sidebar_label: Azure Blob Storage --- # AzureBlobStorage Table Engine -This engine provides integration with [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) ecosystem. +This engine provides an integration with [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) ecosystem. -## Create Table {#creating-a-table} +## Create Table ``` sql CREATE TABLE azure_blob_storage_table (name String, value UInt32) @@ -17,12 +16,16 @@ CREATE TABLE azure_blob_storage_table (name String, value UInt32) [SETTINGS ...] 
``` -**Engine parameters** +### Engine parameters - `connection_string|storage_account_url` — connection_string includes account name & key ([Create connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)) or you could also provide the storage account url here and account name & account key as separate parameters (see parameters account_name & account_key) - `container_name` - Container name - `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. - `account_name` - if storage_account_url is used, then account name can be specified here - `account_key` - if storage_account_url is used, then account key can be specified here -- `format` — The [format](../../interfaces/formats.md#formats) of the file. +- `format` — The [format](/docs/en/interfaces/formats.md) of the file. - `compression` — Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension. (same as setting to `auto`). + +## See also + +[Azure Blob Storage Table Function](/docs/en/sql-reference/table-functions/azureBlobStorage.md) diff --git a/docs/en/engines/table-engines/integrations/index.md b/docs/en/engines/table-engines/integrations/index.md index b321a644d32..b9171d9033b 100644 --- a/docs/en/engines/table-engines/integrations/index.md +++ b/docs/en/engines/table-engines/integrations/index.md @@ -6,24 +6,5 @@ sidebar_label: Integrations # Table Engines for Integrations -ClickHouse provides various means for integrating with external systems, including table engines. Like with all other table engines, the configuration is done using `CREATE TABLE` or `ALTER TABLE` queries. Then from a user perspective, the configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods, like dictionaries or table functions, which require to use custom query methods on each use. +ClickHouse provides various means for integrating with external systems, including table engines. Like with all other table engines, the configuration is done using `CREATE TABLE` or `ALTER TABLE` queries. Then from a user perspective, the configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods, like dictionaries or table functions, which require the use of custom query methods on each use. 
-List of supported integrations: - -- [ODBC](../../../engines/table-engines/integrations/odbc.md) -- [JDBC](../../../engines/table-engines/integrations/jdbc.md) -- [MySQL](../../../engines/table-engines/integrations/mysql.md) -- [MongoDB](../../../engines/table-engines/integrations/mongodb.md) -- [HDFS](../../../engines/table-engines/integrations/hdfs.md) -- [S3](../../../engines/table-engines/integrations/s3.md) -- [Kafka](../../../engines/table-engines/integrations/kafka.md) -- [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md) -- [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) -- [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) -- [SQLite](../../../engines/table-engines/integrations/sqlite.md) -- [Hive](../../../engines/table-engines/integrations/hive.md) -- [ExternalDistributed](../../../engines/table-engines/integrations/ExternalDistributed.md) -- [MaterializedPostgreSQL](../../../engines/table-engines/integrations/materialized-postgresql.md) -- [NATS](../../../engines/table-engines/integrations/nats.md) -- [DeltaLake](../../../engines/table-engines/integrations/deltalake.md) -- [Hudi](../../../engines/table-engines/integrations/hudi.md) diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index f8a9016bd15..b79137cb786 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -1,6 +1,5 @@ --- slug: /en/sql-reference/table-functions/azure_blob_storage -sidebar_position: 45 sidebar_label: azure_blob_storage keywords: [azure blob storage] --- @@ -67,4 +66,6 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam └─────────┘ ``` - \ No newline at end of file +**See Also** + +- [AzureBlogStorage Table Engine](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) From 2146c356081fbe1b43da41ad4c739262f1db60c1 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 11 Jun 2023 07:40:39 +0000 Subject: [PATCH 321/722] Fix style --- src/Functions/snowflake.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/snowflake.cpp b/src/Functions/snowflake.cpp index ca78945acb9..c18f1c03332 100644 --- a/src/Functions/snowflake.cpp +++ b/src/Functions/snowflake.cpp @@ -21,7 +21,7 @@ REGISTER_FUNCTION(DateTime64ToSnowflake) REGISTER_FUNCTION(SnowflakeToDateTime) { factory.registerFunction("snowflakeToDateTime", - [](ContextPtr context ){ return std::make_unique( + [](ContextPtr context){ return std::make_unique( std::make_shared("snowflakeToDateTime", context)); }); } REGISTER_FUNCTION(SnowflakeToDateTime64) From e6c2a6d13db4604442f84e8e2cacf6bc617fc42e Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Sun, 11 Jun 2023 09:55:20 +0200 Subject: [PATCH 322/722] Added example for table engine and fixed typos --- .../integrations/azureBlobStorage.md | 20 +++++++++++++++++++ .../table-functions/azureBlobStorage.md | 4 ++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/azureBlobStorage.md b/docs/en/engines/table-engines/integrations/azureBlobStorage.md index 82b26e79579..b8e621fd513 100644 --- a/docs/en/engines/table-engines/integrations/azureBlobStorage.md +++ b/docs/en/engines/table-engines/integrations/azureBlobStorage.md @@ -26,6 +26,26 @@ CREATE TABLE azure_blob_storage_table (name String, value UInt32) - `format` — The 
[format](/docs/en/interfaces/formats.md) of the file. - `compression` — Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension. (same as setting to `auto`). +**Example** + +``` sql +CREATE TABLE test_table (key UInt64, data String) + ENGINE = AzureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;', + 'test_container', 'test_table', 'CSV'); + +INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c'); + +SELECT * FROM test_table; +``` + +```text +┌─key──┬─data──┐ +│ 1 │ a │ +│ 2 │ b │ +│ 3 │ c │ +└──────┴───────┘ +``` + ## See also [Azure Blob Storage Table Function](/docs/en/sql-reference/table-functions/azureBlobStorage.md) diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index b79137cb786..369bf7a964d 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -53,7 +53,7 @@ SELECT * FROM azureBlobStorage('http://azurite1:10000/devstoreaccount1', └───────────┴────────────┴───────────┘ ``` -or with storage_account_url +or using connection_string ```sql SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;EndPointSuffix=core.windows.net', @@ -68,4 +68,4 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam **See Also** -- [AzureBlogStorage Table Engine](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) +- [AzureBlobStorage Table Engine](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) From e9d539f4bd72d94cef27ed7f1a8a34cd6fa08322 Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Sun, 11 Jun 2023 10:05:52 +0200 Subject: [PATCH 323/722] Updated changelog with azureBlobStorage table function & engine entry --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7142ad26e15..72372c8fac4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ * Setting `enable_memory_bound_merging_of_aggregation_results` is enabled by default. If you update from version prior to 22.12, we recommend to set this flag to `false` until update is finished. [#50319](https://github.com/ClickHouse/ClickHouse/pull/50319) ([Nikita Taranov](https://github.com/nickitat)). #### New Feature +* Added storage engine AzureBlobStorage and azureBlobStorage table function. The supported set of features is very similar to storage/table function S3 [#50604] (https://github.com/ClickHouse/ClickHouse/pull/50604) ([alesapin](https://github.com/alesapin)) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni). * Added native ClickHouse Keeper CLI Client, it is available as `clickhouse keeper-client` [#47414](https://github.com/ClickHouse/ClickHouse/pull/47414) ([pufit](https://github.com/pufit)). * Add `urlCluster` table function. Refactor all *Cluster table functions to reduce code duplication. Make schema inference work for all possible *Cluster function signatures and for named collections. Closes [#38499](https://github.com/ClickHouse/ClickHouse/issues/38499). 
[#45427](https://github.com/ClickHouse/ClickHouse/pull/45427) ([attack204](https://github.com/attack204)), Pavel Kruglov. * The query cache can now be used for production workloads. [#47977](https://github.com/ClickHouse/ClickHouse/pull/47977) ([Robert Schulze](https://github.com/rschu1ze)). The query cache can now support queries with totals and extremes modifier. [#48853](https://github.com/ClickHouse/ClickHouse/pull/48853) ([Robert Schulze](https://github.com/rschu1ze)). Make `allow_experimental_query_cache` setting as obsolete for backward-compatibility. It was removed in https://github.com/ClickHouse/ClickHouse/pull/47977. [#49934](https://github.com/ClickHouse/ClickHouse/pull/49934) ([Timur Solodovnikov](https://github.com/tsolodov)). From b4bc28b6de86c569bcdfe8d2de0e21ce1717a8c7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Jun 2023 16:48:29 +0300 Subject: [PATCH 324/722] Update easy_tasks_sorted_ru.md --- tests/instructions/easy_tasks_sorted_ru.md | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/tests/instructions/easy_tasks_sorted_ru.md b/tests/instructions/easy_tasks_sorted_ru.md index a98a5766ffe..09ea48d0bd9 100644 --- a/tests/instructions/easy_tasks_sorted_ru.md +++ b/tests/instructions/easy_tasks_sorted_ru.md @@ -6,7 +6,7 @@ Делаем `chmod 000 /etc/clickhouse-client/config.xml` и смотрим, что получится. -## Уменьшать max_memory_usage, если на сервере мало оперативки. +## + Уменьшать max_memory_usage, если на сервере мало оперативки. Смотрим, сколько на сервере оперативки. Если `max_memory_usage`, `max_memory_usage_for_all_queries` ограничены, но больше 90% (настройка) от имеющейся оперативки, то уменьшать их и выводить предупреждение в лог.. @@ -42,10 +42,12 @@ void memoryBitAnd(const char * a, const char * b, char * result, size_t size); В ClickHouse есть возможность указать collation для сортировки строк. Это не работает для `Nullable(String)`. -## Запретить чтение значений типа AggregateFunction по-умолчанию и добавить настройку. +## + Запретить чтение значений типа AggregateFunction по-умолчанию и добавить настройку. Состояния агрегатных функций могут быть записаны в дамп и считаны из него. Но десериализация состояний агрегатных функций небезопасна. Аккуратно выбранные пользовательские данные могут привести к segfault или порче памяти. Поэтому нужно просто сделать настройку, которая запрещает читать AggregateFunction из пользовательских данных. +Upd: сделали по-другому: теперь всё безопасно. + ## + В статистику jemalloc добавить информацию по arenas. В `system.asynchronous_metrics` - суммарный размер арен. @@ -56,9 +58,9 @@ void memoryBitAnd(const char * a, const char * b, char * result, size_t size); Как cache, но без кэша — всегда прямой запрос в источник. -## Функции randomFixedString, randomBinaryString, fuzzBits, fuzzBytes. +## + Функции randomFixedString, randomBinaryString, fuzzBits, fuzzBytes. -## Агрегатные функции для статистических тестов (e.g. тест нормальности распределения) и статистик. +## + Агрегатные функции для статистических тестов (e.g. тест нормальности распределения) и статистик. ## + Функции создания и обновления состояния агрегатной функции по одному кортежу аргументов. @@ -119,11 +121,11 @@ position с конца строки. Добавляем счётчики всех ошибок (ErrorCodes) по аналогии с ProfileEvents. Кроме количества запоминаем также время последней ошибки, стек трейс, сообщение. Добавляем системную таблицу system.errors. Отправка в Graphite. 
-## Добавить Lizard, LZSSE и density в качестве вариантов алгоритмов сжатия. +## + Добавить Lizard, LZSSE и density в качестве вариантов алгоритмов сжатия. Экспериментальные алгоритмы сжатия. Сейчас ClickHouse поддерживает только lz4 и zstd. -## Запрос CREATE OR REPLACE TABLE +## + Запрос CREATE OR REPLACE TABLE Атомарно удаляет таблицу перед созданием новой, если такая была. @@ -149,12 +151,16 @@ https://clickhouse.com/docs/en/query_language/create/#create-table Запретить модификацию данных в партиции. На партицию ставится флаг, что она заблокирована. В неё нельзя делать INSERT и ALTER. С файлов снимается доступ на запись. +Upd: не нужно. + ## Настройка join_use_nulls: поддержка для LEFT ARRAY JOIN. -## Внешние словари из Aerospike/Couchbase/Cassandra (на выбор). +## + Внешние словари из Aerospike/Couchbase/Cassandra (на выбор). Подключить одну из key-value БД как источник. +Upd: сделали Redis, Cassandra, MongoDB. + ## + Движок таблиц Mongo, табличная функция mongo. Возможность легко импортировать данные из MongoDB. @@ -181,7 +187,7 @@ https://clickhouse.com/docs/en/operations/table_engines/external_data/ Не работает, если открыть clickhouse-client в интерактивном режиме и делать несколько запросов. -## Настройка для возможности получить частичный результат при cancel-е. +## + Настройка для возможности получить частичный результат при cancel-е. Хотим по Ctrl+C получить те данные, которые успели обработаться. From 6bdbcd3f436f7f55e0ab71e24d0c96df072d0003 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 11 Jun 2023 14:26:13 +0000 Subject: [PATCH 325/722] Stabilize tests --- .../00515_enhanced_time_zones.reference | 26 ++++++++--- .../0_stateless/00515_enhanced_time_zones.sql | 45 ++++++++++--------- 2 files changed, 44 insertions(+), 27 deletions(-) diff --git a/tests/queries/0_stateless/00515_enhanced_time_zones.reference b/tests/queries/0_stateless/00515_enhanced_time_zones.reference index 2ee2c3eac81..2555c885558 100644 --- a/tests/queries/0_stateless/00515_enhanced_time_zones.reference +++ b/tests/queries/0_stateless/00515_enhanced_time_zones.reference @@ -16,10 +16,22 @@ 2017-11-05 08:07:47 2017-11-05 10:37:47 2017-11-05 10:37:47 --- Test const/non-const timezone arguments -- -Asia/Kolkata 2017-11-05 08:07:47 -42 Asia/Kolkata 1970-01-01 00:00:00.042 -42 Asia/Kolkata 1970-01-01 00:00:00.000042 -42 Asia/Kolkata 1970-01-01 00:00:00.000000042 -42 Asia/Kolkata 2010-11-04 01:42:54 -42 Asia/Kolkata 2010-11-04 01:42:54.657 +-- Test const timezone arguments -- +42 +43 +42 +43 +42 +43 +42 +43 +42 +43 +42 +43 +42 +43 +42 +43 +42 +43 diff --git a/tests/queries/0_stateless/00515_enhanced_time_zones.sql b/tests/queries/0_stateless/00515_enhanced_time_zones.sql index 7659b6e4603..5f40cfb53c1 100644 --- a/tests/queries/0_stateless/00515_enhanced_time_zones.sql +++ b/tests/queries/0_stateless/00515_enhanced_time_zones.sql @@ -21,38 +21,43 @@ SELECT toString(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul')); SELECT toString(toTimeZone(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), 'Asia/Kolkata')); SELECT toString(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), 'Asia/Kolkata'); -SELECT '-- Test const/non-const timezone arguments --'; +SELECT '-- Test const timezone arguments --'; -SELECT materialize('Asia/Kolkata') tz, toTimeZone(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } -SELECT materialize('Asia/Kolkata') tz, toTimeZone(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), tz) SETTINGS 
allow_nonconst_timezone_arguments = 1; +DROP TABLE IF EXISTS tab; -SELECT materialize('Asia/Kolkata') tz, now(tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } --- SELECT materialize('Asia/Kolkata') tz, now(tz) SETTINGS allow_nonconst_timezone_arguments = 1; +CREATE TABLE tab (val Int64, tz String) engine=Log; +INSERT INTO tab VALUES (42, 'Asia/Singapore') (43, 'Asia/Tokyo'); -SELECT materialize('Asia/Kolkata') tz, now64(9, tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } --- SELECT materialize('Asia/Kolkata') tz, now64(9, tz) SETTINGS allow_nonconst_timezone_arguments = 1; +SELECT val FROM tab WHERE now(tz) != toDateTime('2000-01-01 00:00:00') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE now(tz) != toDateTime('2000-01-01 00:00:00') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; -SELECT materialize('Asia/Kolkata') tz, nowInBlock(tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } --- SELECT materialize('Asia/Kolkata') tz, nowInBlock(tz) SETTINGS allow_nonconst_timezone_arguments = 1; +SELECT val FROM tab WHERE now64(9, tz) != toDateTime64('2000-01-01 00:00:00', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE now64(9, tz) != toDateTime64('2000-01-01 00:00:00', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; -SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Milli(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } -SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Milli(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 1; +SELECT val FROM tab WHERE nowInBlock(tz) != toDateTime('2000-01-01 00:00:00') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE nowInBlock(tz) != toDateTime('2000-01-01 00:00:00') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; -SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Micro(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } -SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Micro(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 1; +SELECT val FROM tab WHERE toTimeZone(toDateTime(val), tz) != toDateTime('2023-06-11 14:14:14') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE toTimeZone(toDateTime(val), tz) != toDateTime('2023-06-11 14:14:14') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; -SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Nano(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } -SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, fromUnixTimestamp64Nano(ts, tz) SETTINGS allow_nonconst_timezone_arguments = 1; +SELECT val FROM tab WHERE fromUnixTimestamp64Milli(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE fromUnixTimestamp64Milli(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; -SELECT 
materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, snowflakeToDateTime(ts, tz) settings allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } -SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, snowflakeToDateTime(ts, tz) settings allow_nonconst_timezone_arguments = 1; +SELECT val FROM tab WHERE fromUnixTimestamp64Micro(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE fromUnixTimestamp64Micro(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; -SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, snowflakeToDateTime64(ts, tz) settings allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } -SELECT materialize(42::Int64) ts, materialize('Asia/Kolkata') tz, snowflakeToDateTime64(ts, tz) settings allow_nonconst_timezone_arguments = 1; +SELECT val FROM tab WHERE fromUnixTimestamp64Nano(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE fromUnixTimestamp64Nano(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT val FROM tab WHERE snowflakeToDateTime(val, tz) != toDateTime('2023-06-11 14:14:14') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE snowflakeToDateTime(val, tz) != toDateTime('2023-06-11 14:14:14') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT val FROM tab WHERE snowflakeToDateTime64(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE snowflakeToDateTime64(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; -- test for a related bug: -DROP TABLE IF EXISTS tab; +DROP TABLE tab; SET allow_nonconst_timezone_arguments = 1; From 48e03ac92a457d612dd8b2e4838dce1e47e51109 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 11 Jun 2023 14:33:21 +0000 Subject: [PATCH 326/722] Revert "Exclude some tests with QPL from fasttest" This reverts commit ffb941624bc971886212e0745716e79688a154a1. 
--- .../0_stateless/00804_test_alter_compression_codecs.sql | 3 --- .../0_stateless/00804_test_custom_compression_codecs.sql | 3 --- .../00804_test_custom_compression_codes_log_storages.sql | 3 --- .../0_stateless/00804_test_deflate_qpl_codec_compression.sql | 3 --- 4 files changed, 12 deletions(-) diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql index eb1abda9a21..fd9855e82d3 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql @@ -1,6 +1,3 @@ ---Tags: no-fasttest --- no-fasttest because DEFLATE_QPL isn't available in fasttest - SET send_logs_level = 'fatal'; DROP TABLE IF EXISTS alter_compression_codec; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql index df74620a201..89e77f758a7 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql @@ -1,6 +1,3 @@ ---Tags: no-fasttest --- no-fasttest because DEFLATE_QPL isn't available in fasttest - SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; SET enable_deflate_qpl_codec = 1; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql index 67c0074c58f..a629df2666d 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql @@ -1,6 +1,3 @@ ---Tags: no-fasttest --- no-fasttest because DEFLATE_QPL isn't available in fasttest - SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; SET enable_deflate_qpl_codec = 1; diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql index a46272112a9..5a56fc0d576 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql @@ -1,6 +1,3 @@ ---Tags: no-fasttest --- no-fasttest because DEFLATE_QPL isn't available in fasttest - SET send_logs_level = 'fatal'; SET enable_deflate_qpl_codec = 1; From d228411f41eabf7e443fbbb2f4148880a3da78fa Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 11 Jun 2023 14:39:15 +0000 Subject: [PATCH 327/722] Reset modified tests --- ...04_test_alter_compression_codecs.reference | 17 ++----- .../00804_test_alter_compression_codecs.sql | 22 +++------- ...4_test_custom_compression_codecs.reference | 8 ++-- .../00804_test_custom_compression_codecs.sql | 44 ++++++++----------- ...m_compression_codes_log_storages.reference | 20 ++++----- ..._custom_compression_codes_log_storages.sql | 41 ++++++++--------- ...804_test_deflate_qpl_codec_compression.sql | 4 ++ 7 files changed, 63 insertions(+), 93 deletions(-) diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference b/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference index 5c77a102740..cfbfadf1e67 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference @@ -18,7 +18,7 @@ CODEC(NONE) 2018-01-01 4 4 2018-01-01 5 5 2018-01-01 6 6 -CODEC(DEFLATE_QPL) 
+CODEC(ZSTD(1), LZ4HC(0), LZ4, LZ4, NONE) 2018-01-01 1 default_value 2018-01-01 2 default_value 2018-01-01 3 3 @@ -27,18 +27,7 @@ CODEC(DEFLATE_QPL) 2018-01-01 6 6 2018-01-01 7 7 2018-01-01 8 8 -CODEC(ZSTD(1), LZ4HC(0), LZ4, LZ4, DEFLATE_QPL, NONE) -2018-01-01 1 default_value -2018-01-01 2 default_value -2018-01-01 3 3 -2018-01-01 4 4 -2018-01-01 5 5 -2018-01-01 6 6 -2018-01-01 7 7 -2018-01-01 8 8 -2018-01-01 9 9 -2018-01-01 10 10 -CODEC(ZSTD(1), LZ4HC(0), LZ4, LZ4, DEFLATE_QPL, NONE) -CODEC(NONE, LZ4, LZ4HC(0), ZSTD(1), DEFLATE_QPL) +CODEC(ZSTD(1), LZ4HC(0), LZ4, LZ4, NONE) +CODEC(NONE, LZ4, LZ4HC(0), ZSTD(1)) 2 1 diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql index fd9855e82d3..85e5f8b63ad 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql @@ -28,20 +28,12 @@ SELECT * FROM alter_compression_codec ORDER BY id; OPTIMIZE TABLE alter_compression_codec FINAL; SELECT * FROM alter_compression_codec ORDER BY id; -SET enable_deflate_qpl_codec = 1; -ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(DEFLATE_QPL); +SET allow_suspicious_codecs = 1; +ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(ZSTD, LZ4HC, LZ4, LZ4, NONE); SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'alter_compression_codec' AND name = 'alter_column'; INSERT INTO alter_compression_codec VALUES('2018-01-01', 7, '7'); INSERT INTO alter_compression_codec VALUES('2018-01-01', 8, '8'); -SELECT * FROM alter_compression_codec ORDER BY id; - -SET allow_suspicious_codecs = 1; -ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(ZSTD, LZ4HC, LZ4, LZ4, DEFLATE_QPL, NONE); -SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'alter_compression_codec' AND name = 'alter_column'; - -INSERT INTO alter_compression_codec VALUES('2018-01-01', 9, '9'); -INSERT INTO alter_compression_codec VALUES('2018-01-01', 10, '10'); OPTIMIZE TABLE alter_compression_codec FINAL; SELECT * FROM alter_compression_codec ORDER BY id; @@ -62,17 +54,15 @@ ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 00:00:00' CODEC(ZSTD(100)); -- { serverError 433 } -ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 00:00:00' CODEC(DEFLATE_QPL(100)); -- { serverError DATA_TYPE_CANNOT_HAVE_ARGUMENTS } - DROP TABLE IF EXISTS alter_bad_codec; DROP TABLE IF EXISTS large_alter_table_00804; DROP TABLE IF EXISTS store_of_hash_00804; CREATE TABLE large_alter_table_00804 ( - somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), DEFLATE_QPL), - id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, DEFLATE_QPL), - data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4, DEFLATE_QPL) + somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), + data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4) ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi', min_bytes_for_wide_part = 0; INSERT INTO large_alter_table_00804 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000; @@ -81,7 +71,7 @@ CREATE TABLE store_of_hash_00804 (hash UInt64) ENGINE = Memory(); INSERT INTO store_of_hash_00804 SELECT 
sum(cityHash64(*)) FROM large_alter_table_00804; -ALTER TABLE large_alter_table_00804 MODIFY COLUMN data CODEC(NONE, LZ4, LZ4HC, ZSTD, DEFLATE_QPL); +ALTER TABLE large_alter_table_00804 MODIFY COLUMN data CODEC(NONE, LZ4, LZ4HC, ZSTD); OPTIMIZE TABLE large_alter_table_00804; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference index 8b51d65004a..7bd91e5a69b 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference +++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference @@ -1,6 +1,6 @@ -1 hello 2018-12-14 2018-12-14 1.1 aaa 5 -2 world 2018-12-15 2018-12-15 2.2 bbb 6 -3 ! 2018-12-16 2018-12-16 3.3 ccc 7 +1 hello 2018-12-14 1.1 aaa 5 +2 world 2018-12-15 2.2 bbb 6 +3 ! 2018-12-16 3.3 ccc 7 2 1 world 2018-10-05 1.1 2 hello 2018-10-01 2.2 @@ -9,7 +9,7 @@ 10003 274972506.6 9175437371954010821 -CREATE TABLE default.compression_codec_multiple_more_types\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0), DEFLATE_QPL),\n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), NONE, NONE, NONE, LZ4HC(0), DEFLATE_QPL),\n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8), DEFLATE_QPL),\n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8), DEFLATE_QPL)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.compression_codec_multiple_more_types\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)),\n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), NONE, NONE, NONE, LZ4HC(0)),\n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)),\n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1.5555555555555 hello world! [77] ['John'] 7.1 xxxxxxxxxxxx [127] ['Henry'] ! 
diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql index 89e77f758a7..c080c2fc98e 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql @@ -1,6 +1,5 @@ SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; -SET enable_deflate_qpl_codec = 1; DROP TABLE IF EXISTS compression_codec; @@ -8,21 +7,20 @@ CREATE TABLE compression_codec( id UInt64 CODEC(LZ4), data String CODEC(ZSTD), ddd Date CODEC(NONE), - ddd32 Date32 CODEC(DEFLATE_QPL), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), - othernum Int64 CODEC(Delta), + othernum Int64 CODEC(Delta) ) ENGINE = MergeTree() ORDER BY tuple(); -INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), toDate32('2018-12-14'), 1.1, 'aaa', 5); -INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), toDate32('2018-12-15'), 2.2, 'bbb', 6); -INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7); +INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); +INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6); +INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7); SELECT * FROM compression_codec ORDER BY id; OPTIMIZE TABLE compression_codec FINAL; -INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), toDate32('2018-12-13'), 4.4, 'ddd', 8); +INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8); DETACH TABLE compression_codec; ATTACH TABLE compression_codec; @@ -33,31 +31,25 @@ DROP TABLE IF EXISTS compression_codec; DROP TABLE IF EXISTS bad_codec; DROP TABLE IF EXISTS params_when_no_params; -DROP TABLE IF EXISTS params_when_no_params2; DROP TABLE IF EXISTS too_many_params; DROP TABLE IF EXISTS codec_multiple_direct_specification_1; DROP TABLE IF EXISTS codec_multiple_direct_specification_2; -DROP TABLE IF EXISTS codec_multiple_direct_specification_3; DROP TABLE IF EXISTS delta_bad_params1; DROP TABLE IF EXISTS delta_bad_params2; CREATE TABLE bad_codec(id UInt64 CODEC(adssadads)) ENGINE = MergeTree() order by tuple(); -- { serverError 432 } CREATE TABLE too_many_params(id UInt64 CODEC(ZSTD(2,3,4,5))) ENGINE = MergeTree() order by tuple(); -- { serverError 431 } CREATE TABLE params_when_no_params(id UInt64 CODEC(LZ4(1))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 378 } -CREATE TABLE params_when_no_params2(id UInt64 CODEC(DEFLATE_QPL(1))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 378 } CREATE TABLE codec_multiple_direct_specification_1(id UInt64 CODEC(MULTIPLE(LZ4, ZSTD))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 432 } CREATE TABLE codec_multiple_direct_specification_2(id UInt64 CODEC(multiple(LZ4, ZSTD))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 432 } -CREATE TABLE codec_multiple_direct_specification_3(id UInt64 CODEC(multiple(LZ4, DEFLATE_QPL))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 432 } CREATE TABLE delta_bad_params1(id UInt64 CODEC(Delta(3))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 433 } CREATE TABLE delta_bad_params2(id UInt64 CODEC(Delta(16))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 433 } DROP TABLE IF EXISTS bad_codec; DROP TABLE IF EXISTS params_when_no_params; -DROP TABLE IF EXISTS params_when_no_params2; DROP TABLE 
IF EXISTS too_many_params; DROP TABLE IF EXISTS codec_multiple_direct_specification_1; DROP TABLE IF EXISTS codec_multiple_direct_specification_2; -DROP TABLE IF EXISTS codec_multiple_direct_specification_3; DROP TABLE IF EXISTS delta_bad_params1; DROP TABLE IF EXISTS delta_bad_params2; @@ -66,10 +58,10 @@ DROP TABLE IF EXISTS compression_codec_multiple; SET network_compression_method = 'lz4hc'; CREATE TABLE compression_codec_multiple ( - id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4), DEFLATE_QPL), - data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8), DEFLATE_QPL), - ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC, DEFLATE_QPL), - somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD, DEFLATE_QPL) + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), + ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) ) ENGINE = MergeTree() ORDER BY tuple(); INSERT INTO compression_codec_multiple VALUES (1, 'world', toDate('2018-10-05'), 1.1), (2, 'hello', toDate('2018-10-01'), 2.2), (3, 'buy', toDate('2018-10-11'), 3.3); @@ -93,15 +85,15 @@ SELECT sum(cityHash64(*)) FROM compression_codec_multiple; DROP TABLE IF EXISTS compression_codec_multiple_more_types; CREATE TABLE compression_codec_multiple_more_types ( - id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC, DEFLATE_QPL), - data FixedString(12) CODEC(ZSTD, ZSTD, Delta, Delta, Delta, NONE, NONE, NONE, LZ4HC, DEFLATE_QPL), - ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8), DEFLATE_QPL) + id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC), + data FixedString(12) CODEC(ZSTD, ZSTD, Delta, Delta, Delta, NONE, NONE, NONE, LZ4HC), + ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8)) ) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError 36 } CREATE TABLE compression_codec_multiple_more_types ( - id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC, DEFLATE_QPL), - data FixedString(12) CODEC(ZSTD, ZSTD, NONE, NONE, NONE, LZ4HC, DEFLATE_QPL), - ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8), DEFLATE_QPL) + id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC), + data FixedString(12) CODEC(ZSTD, ZSTD, NONE, NONE, NONE, LZ4HC), + ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8)) ) ENGINE = MergeTree() ORDER BY tuple(); SHOW CREATE TABLE compression_codec_multiple_more_types; @@ -117,9 +109,9 @@ SET network_compression_method = 'zstd'; SET network_zstd_compression_level = 5; CREATE TABLE compression_codec_multiple_with_key ( - somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), Delta, Delta, DEFLATE_QPL), - id UInt64 CODEC(LZ4, ZSTD, Delta, NONE, LZ4HC, Delta, DEFLATE_QPL), - data String CODEC(ZSTD(2), Delta(1), LZ4HC, NONE, LZ4, LZ4, DEFLATE_QPL) + somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), Delta, Delta), + id UInt64 CODEC(LZ4, ZSTD, Delta, NONE, LZ4HC, Delta), + data String CODEC(ZSTD(2), Delta(1), LZ4HC, NONE, LZ4, LZ4) ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference 
b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference index d64b8a77eed..8145ca99829 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference +++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference @@ -1,9 +1,9 @@ -CREATE TABLE default.compression_codec_log\n(\n `id` UInt64 CODEC(LZ4),\n `data` String CODEC(ZSTD(1)),\n `ddd` Date CODEC(NONE),\n `somenum` Float64 CODEC(ZSTD(2)),\n `somestr` FixedString(3) CODEC(LZ4HC(7)),\n `othernum` Int64 CODEC(Delta(8)),\n `qplstr` String CODEC(DEFLATE_QPL),\n `qplnum` UInt32 CODEC(DEFLATE_QPL)\n)\nENGINE = Log -1 hello 2018-12-14 1.1 aaa 5 qpl11 11 -2 world 2018-12-15 2.2 bbb 6 qpl22 22 -3 ! 2018-12-16 3.3 ccc 7 qpl33 33 +CREATE TABLE default.compression_codec_log\n(\n `id` UInt64 CODEC(LZ4),\n `data` String CODEC(ZSTD(1)),\n `ddd` Date CODEC(NONE),\n `somenum` Float64 CODEC(ZSTD(2)),\n `somestr` FixedString(3) CODEC(LZ4HC(7)),\n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = Log +1 hello 2018-12-14 1.1 aaa 5 +2 world 2018-12-15 2.2 bbb 6 +3 ! 2018-12-16 3.3 ccc 7 2 -CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4), DEFLATE_QPL),\n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8), DEFLATE_QPL),\n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0), DEFLATE_QPL),\n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1), DEFLATE_QPL)\n)\nENGINE = Log +CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)),\n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)),\n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)),\n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = Log 1 world 2018-10-05 1.1 2 hello 2018-10-01 2.2 3 buy 2018-10-11 3.3 @@ -11,12 +11,12 @@ CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ 10003 274972506.6 9175437371954010821 -CREATE TABLE default.compression_codec_tiny_log\n(\n `id` UInt64 CODEC(LZ4),\n `data` String CODEC(ZSTD(1)),\n `ddd` Date CODEC(NONE),\n `somenum` Float64 CODEC(ZSTD(2)),\n `somestr` FixedString(3) CODEC(LZ4HC(7)),\n `othernum` Int64 CODEC(Delta(8)),\n `qplstr` String CODEC(DEFLATE_QPL),\n `qplnum` UInt32 CODEC(DEFLATE_QPL)\n)\nENGINE = TinyLog -1 hello 2018-12-14 1.1 aaa 5 qpl11 11 -2 world 2018-12-15 2.2 bbb 6 qpl22 22 -3 ! 2018-12-16 3.3 ccc 7 qpl33 33 +CREATE TABLE default.compression_codec_tiny_log\n(\n `id` UInt64 CODEC(LZ4),\n `data` String CODEC(ZSTD(1)),\n `ddd` Date CODEC(NONE),\n `somenum` Float64 CODEC(ZSTD(2)),\n `somestr` FixedString(3) CODEC(LZ4HC(7)),\n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = TinyLog +1 hello 2018-12-14 1.1 aaa 5 +2 world 2018-12-15 2.2 bbb 6 +3 ! 
2018-12-16 3.3 ccc 7 2 -CREATE TABLE default.compression_codec_multiple_tiny_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4), DEFLATE_QPL),\n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8), DEFLATE_QPL),\n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0), DEFLATE_QPL),\n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1), DEFLATE_QPL)\n)\nENGINE = TinyLog +CREATE TABLE default.compression_codec_multiple_tiny_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)),\n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)),\n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)),\n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = TinyLog 1 world 2018-10-05 1.1 2 hello 2018-10-01 2.2 3 buy 2018-10-11 3.3 diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql index a629df2666d..fba6a216762 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql +++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql @@ -1,6 +1,5 @@ SET send_logs_level = 'fatal'; SET allow_suspicious_codecs = 1; -SET enable_deflate_qpl_codec = 1; -- copy-paste for storage log @@ -12,20 +11,18 @@ CREATE TABLE compression_codec_log( ddd Date CODEC(NONE), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), - othernum Int64 CODEC(Delta), - qplstr String CODEC(DEFLATE_QPL), - qplnum UInt32 CODEC(DEFLATE_QPL), + othernum Int64 CODEC(Delta) ) ENGINE = Log(); SHOW CREATE TABLE compression_codec_log; -INSERT INTO compression_codec_log VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5, 'qpl11', 11); -INSERT INTO compression_codec_log VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6,'qpl22', 22); -INSERT INTO compression_codec_log VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7, 'qpl33', 33); +INSERT INTO compression_codec_log VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); +INSERT INTO compression_codec_log VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6); +INSERT INTO compression_codec_log VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7); SELECT * FROM compression_codec_log ORDER BY id; -INSERT INTO compression_codec_log VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8, 'qpl44', 44); +INSERT INTO compression_codec_log VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8); DETACH TABLE compression_codec_log; ATTACH TABLE compression_codec_log; @@ -37,10 +34,10 @@ DROP TABLE IF EXISTS compression_codec_log; DROP TABLE IF EXISTS compression_codec_multiple_log; CREATE TABLE compression_codec_multiple_log ( - id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4), DEFLATE_QPL), - data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8), DEFLATE_QPL), - ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC, DEFLATE_QPL), - somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD, DEFLATE_QPL) + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), + ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) ) ENGINE = Log(); SHOW CREATE TABLE compression_codec_multiple_log; @@ -72,20 +69,18 @@ CREATE TABLE 
compression_codec_tiny_log( ddd Date CODEC(NONE), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), - othernum Int64 CODEC(Delta), - qplstr String CODEC(DEFLATE_QPL), - qplnum UInt32 CODEC(DEFLATE_QPL), + othernum Int64 CODEC(Delta) ) ENGINE = TinyLog(); SHOW CREATE TABLE compression_codec_tiny_log; -INSERT INTO compression_codec_tiny_log VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5, 'qpl11', 11); -INSERT INTO compression_codec_tiny_log VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6, 'qpl22', 22); -INSERT INTO compression_codec_tiny_log VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7, 'qpl33', 33); +INSERT INTO compression_codec_tiny_log VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); +INSERT INTO compression_codec_tiny_log VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6); +INSERT INTO compression_codec_tiny_log VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7); SELECT * FROM compression_codec_tiny_log ORDER BY id; -INSERT INTO compression_codec_tiny_log VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8, 'qpl44', 44); +INSERT INTO compression_codec_tiny_log VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8); DETACH TABLE compression_codec_tiny_log; ATTACH TABLE compression_codec_tiny_log; @@ -97,10 +92,10 @@ DROP TABLE IF EXISTS compression_codec_tiny_log; DROP TABLE IF EXISTS compression_codec_multiple_tiny_log; CREATE TABLE compression_codec_multiple_tiny_log ( - id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4), DEFLATE_QPL), - data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8), DEFLATE_QPL), - ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC, DEFLATE_QPL), - somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD, DEFLATE_QPL) + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), + ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) ) ENGINE = TinyLog(); SHOW CREATE TABLE compression_codec_multiple_tiny_log; diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql index 5a56fc0d576..78c57013eeb 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql @@ -1,3 +1,7 @@ +--Tags: no-fasttest, no-cpu-aarch64 +-- no-fasttest because DEFLATE_QPL isn't available in fasttest +-- no-cpu-aarch64 because DEFLATE_QPL is x86-only + SET send_logs_level = 'fatal'; SET enable_deflate_qpl_codec = 1; From d72751be27ba5f69337a0039f41e577c05a3ae7f Mon Sep 17 00:00:00 2001 From: alekseygolub Date: Sun, 11 Jun 2023 15:01:45 +0000 Subject: [PATCH 328/722] Added cache invalidation; Fix issues --- src/Databases/DatabaseFactory.cpp | 5 +-- src/Databases/DatabaseFilesystem.cpp | 40 ++++++++++++++----- src/Databases/DatabaseFilesystem.h | 8 +++- src/Databases/DatabaseHDFS.cpp | 6 +++ src/Databases/DatabaseHDFS.h | 5 ++- src/Databases/DatabaseS3.cpp | 14 +++---- src/Databases/DatabaseS3.h | 5 ++- .../0_stateless/02724_database_s3.reference | 4 -- .../queries/0_stateless/02724_database_s3.sh | 6 --- 9 files changed, 59 insertions(+), 34 deletions(-) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 26952cc574e..9d90c61bb41 100644 --- a/src/Databases/DatabaseFactory.cpp +++ 
b/src/Databases/DatabaseFactory.cpp @@ -15,10 +15,9 @@ #include #include #include -#include -#include -#include #include +#include +#include #include "config.h" diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp index 8de609f0ca2..cf45240a5f0 100644 --- a/src/Databases/DatabaseFilesystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -94,14 +94,32 @@ bool DatabaseFilesystem::checkTableFilePath(const std::string & table_path, Cont return true; } -bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr context_) const +StoragePtr DatabaseFilesystem::tryGetTableFromCache(const std::string & name) const { + StoragePtr table = nullptr; { std::lock_guard lock(mutex); - if (loaded_tables.find(name) != loaded_tables.end()) - return true; + auto it = loaded_tables.find(name); + if (it != loaded_tables.end()) + table = it->second; } + // invalidate cache if file no longer exists + if (table && !fs::exists(getTablePath(name))) + { + std::lock_guard lock(mutex); + loaded_tables.erase(name); + return nullptr; + } + + return table; +} + +bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr context_) const +{ + if (tryGetTableFromCache(name)) + return true; + fs::path table_file_path(getTablePath(name)); return checkTableFilePath(table_file_path, context_, false); @@ -109,13 +127,9 @@ bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr context_) StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr context_) const { - // Check if the table exists in the loaded tables map - { - std::lock_guard lock(mutex); - auto it = loaded_tables.find(name); - if (it != loaded_tables.end()) - return it->second; - } + // Check if table exists in loaded tables map + if (auto table = tryGetTableFromCache(name)) + return table; auto table_path = getTablePath(name); @@ -165,6 +179,12 @@ StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr conte } } +bool DatabaseFilesystem::empty() const +{ + std::lock_guard lock(mutex); + return loaded_tables.empty(); +} + ASTPtr DatabaseFilesystem::getCreateDatabaseQuery() const { auto settings = getContext()->getSettingsRef(); diff --git a/src/Databases/DatabaseFilesystem.h b/src/Databases/DatabaseFilesystem.h index 3d2ad695cc6..350ebfe37a3 100644 --- a/src/Databases/DatabaseFilesystem.h +++ b/src/Databases/DatabaseFilesystem.h @@ -31,7 +31,10 @@ public: StoragePtr tryGetTable(const String & name, ContextPtr context) const override; - bool empty() const override { return true; } + // Contains only temporary tables + bool shouldBeEmptyOnDetach() const override { return false; } + + bool empty() const override; bool isReadOnly() const override { return true; } @@ -45,13 +48,14 @@ public: protected: StoragePtr getTableImpl(const String & name, ContextPtr context) const; + StoragePtr tryGetTableFromCache(const std::string & name) const; + std::string getTablePath(const std::string & table_name) const; void addTable(const std::string & table_name, StoragePtr table_storage) const; bool checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const; - private: String path; mutable Tables loaded_tables TSA_GUARDED_BY(mutex); diff --git a/src/Databases/DatabaseHDFS.cpp b/src/Databases/DatabaseHDFS.cpp index 39c3f955bf5..34cb337cdbe 100644 --- a/src/Databases/DatabaseHDFS.cpp +++ b/src/Databases/DatabaseHDFS.cpp @@ -170,6 +170,12 @@ StoragePtr DatabaseHDFS::tryGetTable(const String & name, ContextPtr context_) c } } +bool 
DatabaseHDFS::empty() const +{ + std::lock_guard lock(mutex); + return loaded_tables.empty(); +} + ASTPtr DatabaseHDFS::getCreateDatabaseQuery() const { auto settings = getContext()->getSettingsRef(); diff --git a/src/Databases/DatabaseHDFS.h b/src/Databases/DatabaseHDFS.h index 9a506c5c8ac..c7071370b5e 100644 --- a/src/Databases/DatabaseHDFS.h +++ b/src/Databases/DatabaseHDFS.h @@ -33,7 +33,10 @@ public: StoragePtr tryGetTable(const String & name, ContextPtr context) const override; - bool empty() const override { return true; } + // Contains only temporary tables + bool shouldBeEmptyOnDetach() const override { return false; } + + bool empty() const override; bool isReadOnly() const override { return true; } diff --git a/src/Databases/DatabaseS3.cpp b/src/Databases/DatabaseS3.cpp index 96616426475..46f8a67687d 100644 --- a/src/Databases/DatabaseS3.cpp +++ b/src/Databases/DatabaseS3.cpp @@ -67,14 +67,8 @@ void DatabaseS3::addTable(const std::string & table_name, StoragePtr table_stora std::string DatabaseS3::getFullUrl(const std::string & name) const { - try - { - S3::URI uri(name); - } - catch (...) - { + if (!config.url_prefix.empty()) return (fs::path(config.url_prefix) / name).string(); - } return name; } @@ -181,6 +175,12 @@ StoragePtr DatabaseS3::tryGetTable(const String & name, ContextPtr context_) con } } +bool DatabaseS3::empty() const +{ + std::lock_guard lock(mutex); + return loaded_tables.empty(); +} + ASTPtr DatabaseS3::getCreateDatabaseQuery() const { auto settings = getContext()->getSettingsRef(); diff --git a/src/Databases/DatabaseS3.h b/src/Databases/DatabaseS3.h index 4e6910566df..f494925b09b 100644 --- a/src/Databases/DatabaseS3.h +++ b/src/Databases/DatabaseS3.h @@ -43,7 +43,10 @@ public: StoragePtr tryGetTable(const String & name, ContextPtr context) const override; - bool empty() const override { return true; } + // Contains only temporary tables + bool shouldBeEmptyOnDetach() const override { return false; } + + bool empty() const override; bool isReadOnly() const override { return true; } diff --git a/tests/queries/0_stateless/02724_database_s3.reference b/tests/queries/0_stateless/02724_database_s3.reference index 811e38b7f2b..425cca6a077 100644 --- a/tests/queries/0_stateless/02724_database_s3.reference +++ b/tests/queries/0_stateless/02724_database_s3.reference @@ -12,10 +12,6 @@ test1 13 14 15 16 17 18 0 0 0 -1 2 3 -4 5 6 -7 8 9 -0 0 0 10 11 12 13 14 15 16 17 18 diff --git a/tests/queries/0_stateless/02724_database_s3.sh b/tests/queries/0_stateless/02724_database_s3.sh index ac1b97beecf..79199b43571 100755 --- a/tests/queries/0_stateless/02724_database_s3.sh +++ b/tests/queries/0_stateless/02724_database_s3.sh @@ -32,12 +32,6 @@ USE test4; SELECT * FROM \"b.tsv\" """ -# check that database url_prefix is ignored if pass full url as table name -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ -USE test4; -SELECT * FROM \"http://localhost:11111/test/a.tsv\" -""" - # Check named collection loading ${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ DROP DATABASE IF EXISTS test5; From 2419a7b90fd1effd8ebf8b5b4741a0325447cdec Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 11 Jun 2023 15:16:52 +0000 Subject: [PATCH 329/722] Fix tests --- .../00804_test_deflate_qpl_codec_compression.reference | 2 ++ .../00804_test_deflate_qpl_codec_compression.sql | 9 ++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference 
b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference index 276747f8233..a2178f5eda7 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference @@ -1,4 +1,6 @@ +CREATE TABLE default.compression_codec\n(\n `id` UInt64 CODEC(DEFLATE_QPL),\n `data` String CODEC(DEFLATE_QPL),\n `ddd` Date CODEC(DEFLATE_QPL),\n `ddd32` Date32 CODEC(DEFLATE_QPL),\n `somenum` Float64 CODEC(DEFLATE_QPL),\n `somestr` FixedString(3) CODEC(DEFLATE_QPL),\n `othernum` Int64 CODEC(DEFLATE_QPL),\n `somearray` Array(UInt8) CODEC(DEFLATE_QPL),\n `somemap` Map(String, UInt32) CODEC(DEFLATE_QPL),\n `sometuple` Tuple(UInt16, UInt64) CODEC(DEFLATE_QPL)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 hello 2018-12-14 2018-12-14 1.1 aaa 5 [1,2,3] {'k1':1,'k2':2} (1,2) 2 world 2018-12-15 2018-12-15 2.2 bbb 6 [4,5,6] {'k3':3,'k4':4} (3,4) 3 ! 2018-12-16 2018-12-16 3.3 ccc 7 [7,8,9] {'k5':5,'k6':6} (5,6) 2 +10001 diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql index 78c57013eeb..ff3c1812c86 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql @@ -2,7 +2,8 @@ -- no-fasttest because DEFLATE_QPL isn't available in fasttest -- no-cpu-aarch64 because DEFLATE_QPL is x86-only -SET send_logs_level = 'fatal'; +-- A bunch of random DDLs to test the DEFLATE_QPL codec. + SET enable_deflate_qpl_codec = 1; DROP TABLE IF EXISTS compression_codec; @@ -20,6 +21,8 @@ CREATE TABLE compression_codec( sometuple Tuple(UInt16, UInt64) CODEC(DEFLATE_QPL), ) ENGINE = MergeTree() ORDER BY tuple(); +SHOW CREATE TABLE compression_codec; + INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), toDate32('2018-12-14'), 1.1, 'aaa', 5, [1,2,3], map('k1',1,'k2',2), tuple(1,2)); INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), toDate32('2018-12-15'), 2.2, 'bbb', 6, [4,5,6], map('k3',3,'k4',4), tuple(3,4)); INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7, [7,8,9], map('k5',5,'k6',6), tuple(5,6)); @@ -35,4 +38,8 @@ ATTACH TABLE compression_codec; SELECT count(*) FROM compression_codec WHERE id = 2 GROUP BY id; +INSERT INTO compression_codec SELECT 3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7, [7,8,9], map('k5',5,'k6',6), tuple(5,6) FROM system.numbers LIMIT 10000; + +SELECT count(*) FROM compression_codec WHERE id = 3 GROUP BY id; + DROP TABLE IF EXISTS compression_codec; From 598501011f5cbedb42188b2f828c055d44a0fcd8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Jun 2023 17:51:54 +0200 Subject: [PATCH 330/722] Relax "too many parts" further --- programs/server/Server.cpp | 1 - src/Loggers/OwnPatternFormatter.cpp | 1 - src/Storages/MergeTree/MergeTreeData.cpp | 10 +++++----- src/Storages/MergeTree/MergeTreeData.h | 2 +- src/Storages/MergeTree/MergeTreeSettings.h | 6 +++--- src/Storages/MergeTree/MergeTreeSink.cpp | 9 +++++++-- src/Storages/MergeTree/MergeTreeSink.h | 3 ++- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 9 +++++++-- src/Storages/MergeTree/ReplicatedMergeTreeSink.h | 1 + 9 files changed, 26 insertions(+), 16 deletions(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index d0fc8aca5e8..cfef7f0a94a 100644 --- 
a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1705,7 +1705,6 @@ try #endif /// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread. - async_metrics.start(); { diff --git a/src/Loggers/OwnPatternFormatter.cpp b/src/Loggers/OwnPatternFormatter.cpp index ccf6c479b80..0c2256aaa1b 100644 --- a/src/Loggers/OwnPatternFormatter.cpp +++ b/src/Loggers/OwnPatternFormatter.cpp @@ -4,7 +4,6 @@ #include #include #include -#include #include diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 9cca471fddb..b42d130bf62 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -4315,14 +4315,14 @@ std::optional MergeTreeData::getMinPartDataVersion() const } -void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until, const ContextPtr & query_context) const +void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until, const ContextPtr & query_context, bool allow_throw) const { const auto settings = getSettings(); const auto & query_settings = query_context->getSettingsRef(); const size_t parts_count_in_total = getActivePartsCount(); - /// check if have too many parts in total - if (parts_count_in_total >= settings->max_parts_in_total) + /// Check if we have too many parts in total + if (allow_throw && parts_count_in_total >= settings->max_parts_in_total) { ProfileEvents::increment(ProfileEvents::RejectedInserts); throw Exception( @@ -4338,7 +4338,7 @@ void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until, const Contex if (settings->inactive_parts_to_throw_insert > 0 || settings->inactive_parts_to_delay_insert > 0) outdated_parts_count_in_partition = getMaxOutdatedPartsCountForPartition(); - if (settings->inactive_parts_to_throw_insert > 0 && outdated_parts_count_in_partition >= settings->inactive_parts_to_throw_insert) + if (allow_throw && settings->inactive_parts_to_throw_insert > 0 && outdated_parts_count_in_partition >= settings->inactive_parts_to_throw_insert) { ProfileEvents::increment(ProfileEvents::RejectedInserts); throw Exception( @@ -4362,7 +4362,7 @@ void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until, const Contex bool parts_are_large_enough_in_average = settings->max_avg_part_size_for_too_many_parts && average_part_size > settings->max_avg_part_size_for_too_many_parts; - if (parts_count_in_partition >= active_parts_to_throw_insert && !parts_are_large_enough_in_average) + if (allow_throw && parts_count_in_partition >= active_parts_to_throw_insert && !parts_are_large_enough_in_average) { ProfileEvents::increment(ProfileEvents::RejectedInserts); throw Exception( diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index b1e1e43bd0b..ebda82eeaed 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -557,7 +557,7 @@ public: /// If the table contains too many active parts, sleep for a while to give them time to merge. /// If until is non-null, wake up from the sleep earlier if the event happened. /// The decision to delay or throw is made according to settings 'parts_to_delay_insert' and 'parts_to_throw_insert'. 
- void delayInsertOrThrowIfNeeded(Poco::Event * until, const ContextPtr & query_context) const; + void delayInsertOrThrowIfNeeded(Poco::Event * until, const ContextPtr & query_context, bool allow_throw) const; /// If the table contains too many unfinished mutations, sleep for a while to give them time to execute. /// If until is non-null, wake up from the sleep earlier if the event happened. diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 33aea358078..082b84be575 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -73,11 +73,11 @@ struct Settings; M(UInt64, max_delay_to_mutate_ms, 1000, "Max delay of mutating MergeTree table in milliseconds, if there are a lot of unfinished mutations", 0) \ \ /** Inserts settings. */ \ - M(UInt64, parts_to_delay_insert, 150, "If table contains at least that many active parts in single partition, artificially slow down insert into table. Disabled if set to 0", 0) \ + M(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. Disabled if set to 0", 0) \ M(UInt64, inactive_parts_to_delay_insert, 0, "If table contains at least that many inactive parts in single partition, artificially slow down insert into table.", 0) \ - M(UInt64, parts_to_throw_insert, 300, "If more than this number active parts in single partition, throw 'Too many parts ...' exception.", 0) \ + M(UInt64, parts_to_throw_insert, 3000, "If more than this number active parts in single partition, throw 'Too many parts ...' exception.", 0) \ M(UInt64, inactive_parts_to_throw_insert, 0, "If more than this number inactive parts in single partition, throw 'Too many inactive parts ...' exception.", 0) \ - M(UInt64, max_avg_part_size_for_too_many_parts, 10ULL * 1024 * 1024 * 1024, "The 'too many parts' check according to 'parts_to_delay_insert' and 'parts_to_throw_insert' will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed or rejected. This allows to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.", 0) \ + M(UInt64, max_avg_part_size_for_too_many_parts, 1ULL * 1024 * 1024 * 1024, "The 'too many parts' check according to 'parts_to_delay_insert' and 'parts_to_throw_insert' will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed or rejected. This allows to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.", 0) \ M(UInt64, max_delay_to_insert, 1, "Max delay of inserting data into MergeTree table in seconds, if there are a lot of unmerged parts in single partition.", 0) \ M(UInt64, min_delay_to_insert_ms, 10, "Min delay of inserting data into MergeTree table in milliseconds, if there are a lot of unmerged parts in single partition.", 0) \ M(UInt64, max_parts_in_total, 100000, "If more than this number active parts in all partitions in total, throw 'Too many parts ...' 
exception.", 0) \ diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index d62fe5024f4..36816904a81 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -45,9 +45,9 @@ MergeTreeSink::MergeTreeSink( void MergeTreeSink::onStart() { - /// Only check "too many parts" before write, + /// It's only allowed to throw "too many parts" before write, /// because interrupting long-running INSERT query in the middle is not convenient for users. - storage.delayInsertOrThrowIfNeeded(nullptr, context); + storage.delayInsertOrThrowIfNeeded(nullptr, context, true); } void MergeTreeSink::onFinish() @@ -57,6 +57,9 @@ void MergeTreeSink::onFinish() void MergeTreeSink::consume(Chunk chunk) { + if (num_blocks_processed > 0) + storage.delayInsertOrThrowIfNeeded(nullptr, context, false); + auto block = getHeader().cloneWithColumns(chunk.detachColumns()); if (!storage_snapshot->object_columns.empty()) convertDynamicColumnsToTuples(block, storage_snapshot); @@ -136,6 +139,8 @@ void MergeTreeSink::consume(Chunk chunk) finishDelayedChunk(); delayed_chunk = std::make_unique(); delayed_chunk->partitions = std::move(partitions); + + ++num_blocks_processed; } void MergeTreeSink::finishDelayedChunk() diff --git a/src/Storages/MergeTree/MergeTreeSink.h b/src/Storages/MergeTree/MergeTreeSink.h index 68f11d86a25..07ab3850df2 100644 --- a/src/Storages/MergeTree/MergeTreeSink.h +++ b/src/Storages/MergeTree/MergeTreeSink.h @@ -35,7 +35,8 @@ private: size_t max_parts_per_block; ContextPtr context; StorageSnapshotPtr storage_snapshot; - uint64_t chunk_dedup_seqnum = 0; /// input chunk ordinal number in case of dedup token + UInt64 chunk_dedup_seqnum = 0; /// input chunk ordinal number in case of dedup token + UInt64 num_blocks_processed = 0; /// We can delay processing for previous chunk and start writing a new one. struct DelayedChunk; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 28dad454afe..5fbd72ccddc 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -367,6 +367,9 @@ size_t ReplicatedMergeTreeSinkImpl::checkQuorumPrecondition(const template void ReplicatedMergeTreeSinkImpl::consume(Chunk chunk) { + if (num_blocks_processed > 0) + storage.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event, context, false); + auto block = getHeader().cloneWithColumns(chunk.detachColumns()); const auto & settings = context->getSettingsRef(); @@ -512,6 +515,8 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk chunk) /// TODO: we can also delay commit if there is no MVs. if (!settings.deduplicate_blocks_in_dependent_materialized_views) finishDelayedChunk(zookeeper); + + ++num_blocks_processed; } template<> @@ -1136,9 +1141,9 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: template void ReplicatedMergeTreeSinkImpl::onStart() { - /// Only check "too many parts" before write, + /// It's only allowed to throw "too many parts" before write, /// because interrupting long-running INSERT query in the middle is not convenient for users. 
- storage.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event, context); + storage.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event, context, true); } template diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h index 8d9e2e14129..868590efa25 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h @@ -123,6 +123,7 @@ private: bool quorum_parallel = false; const bool deduplicate = true; bool last_block_is_duplicate = false; + UInt64 num_blocks_processed = 0; using Logger = Poco::Logger; Poco::Logger * log; From 61fa6944145f851e156943db678e0657a0d6fb42 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Sun, 11 Jun 2023 19:03:57 +0000 Subject: [PATCH 331/722] Fix boundery -> boundary in docs --- .../sql-reference/functions/type-conversion-functions.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 28db7e6e677..9258b6d6026 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -317,7 +317,7 @@ SELECT ## toDateOrZero -The same as [toDate](#todate) but returns lower boundery of [Date](/docs/en/sql-reference/data-types/date.md) if an invalid argument is received. Only [String](/docs/en/sql-reference/data-types/string.md) argument is supported. +The same as [toDate](#todate) but returns lower boundary of [Date](/docs/en/sql-reference/data-types/date.md) if an invalid argument is received. Only [String](/docs/en/sql-reference/data-types/string.md) argument is supported. **Example** @@ -359,7 +359,7 @@ Result: ## toDateOrDefault -Like [toDate](#todate) but if unsuccessful, returns a default value which is either the second argument (if specified), or otherwise the lower boundery of [Date](/docs/en/sql-reference/data-types/date.md). +Like [toDate](#todate) but if unsuccessful, returns a default value which is either the second argument (if specified), or otherwise the lower boundary of [Date](/docs/en/sql-reference/data-types/date.md). **Syntax** @@ -424,7 +424,7 @@ Result: ## toDateTimeOrZero -The same as [toDateTime](#todatetime) but returns lower boundery of [DateTime](/docs/en/sql-reference/data-types/datetime.md) if an invalid argument is received. Only [String](/docs/en/sql-reference/data-types/string.md) argument is supported. +The same as [toDateTime](#todatetime) but returns lower boundary of [DateTime](/docs/en/sql-reference/data-types/datetime.md) if an invalid argument is received. Only [String](/docs/en/sql-reference/data-types/string.md) argument is supported. **Example** @@ -466,7 +466,7 @@ Result: ## toDateTimeOrDefault -Like [toDateTime](#todatetime) but if unsuccessful, returns a default value which is either the third argument (if specified), or otherwise the lower boundery of [DateTime](/docs/en/sql-reference/data-types/datetime.md). +Like [toDateTime](#todatetime) but if unsuccessful, returns a default value which is either the third argument (if specified), or otherwise the lower boundary of [DateTime](/docs/en/sql-reference/data-types/datetime.md). 
**Syntax**

From 3797a4202cb3da6bf20684149d24931e72fbd239 Mon Sep 17 00:00:00 2001
From: Alexander Tokmakov
Date: Sun, 11 Jun 2023 23:38:31 +0300
Subject: [PATCH 332/722] Update MergeTreeData.cpp

---
 src/Storages/MergeTree/MergeTreeData.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index 23351423d49..8a69c8ff75c 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -1252,6 +1252,10 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart(
         mark_broken();
         return res;
     }
+    catch (const Poco::NetException &)
+    {
+        throw;
+    }
     catch (const Poco::TimeoutException &)
     {
         throw;

From 7bcaf8b233e91a11fe0d82daaf265d20c8279906 Mon Sep 17 00:00:00 2001
From: kevinyhzou
Date: Mon, 12 Jun 2023 11:57:52 +0800
Subject: [PATCH 336/722] use whitespace or tab as field delimiter

---
 docs/en/interfaces/formats.md | 1 +
 .../operations/settings/settings-formats.md | 32 +++++++++++++++++
 src/Core/Settings.h | 2 +-
 src/Formats/FormatFactory.cpp | 2 +-
 src/Formats/FormatSettings.h | 2 +-
 .../Formats/Impl/CSVRowInputFormat.cpp | 34 +++++++++----------
 ...h_whitespace_tab_field_delimiter.reference | 4 +--
 ...ext_with_whitespace_tab_field_delimiter.sh | 4 +--
 8 files changed, 57 insertions(+), 24 deletions(-)

diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md
index f19fd94dcd8..57962c1d730 100644
--- a/docs/en/interfaces/formats.md
+++ b/docs/en/interfaces/formats.md
@@ -468,6 +468,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
 - [input_format_csv_skip_first_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_first_lines) - skip the specified number of lines at the beginning of data. Default value - `0`.
 - [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`.
 - [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
+- [input_format_csv_use_whitespace_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_use_whitespace_tab_as_delimiter) - use whitespace or tab as field delimiter in CSV strings. Default value - `false`.

 ## CSVWithNames {#csvwithnames}

diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md
index 025e9f889f3..0e30c8f319e 100644
--- a/docs/en/operations/settings/settings-formats.md
+++ b/docs/en/operations/settings/settings-formats.md
@@ -914,6 +914,38 @@ Result
 " string "
 ```
+### input_format_csv_use_whitespace_tab_as_delimiter {#input_format_csv_use_whitespace_tab_as_delimiter}
+
+Use whitespace or tab as field delimiter in CSV strings.
+
+Default value: `false`.
+ +**Examples** + +Query + +```bash +echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_use_whitespace_tab_as_delimiter=true --format_csv_delimiter=' ' +``` + +Result + +```text +a b +``` + +Query + +```bash +echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_use_whitespace_tab_as_delimiter=true --format_csv_delimiter='\t' +``` + +Result + +```text +a b +``` + ## Values format settings {#values-format-settings} ### input_format_values_interpret_expressions {#input_format_values_interpret_expressions} diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 45641e76689..4306ac855a3 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -850,7 +850,7 @@ class IColumn; M(Bool, input_format_csv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in CSV format", 0) \ M(Bool, input_format_tsv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in TSV format", 0) \ M(Bool, input_format_csv_detect_header, true, "Automatically detect header with names and types in CSV format", 0) \ - M(Bool, input_format_csv_skip_whitespaces_tabs, true, "Skips spaces and tabs(\\t) characters in the CSV strings", 0) \ + M(Bool, input_format_csv_use_whitespace_tab_as_delimiter, false, "Use spaces and tabs(\\t) as field delimiter in the CSV strings", 0) \ M(Bool, input_format_csv_trim_whitespaces, true, "Trims spaces and tabs (\\t) characters at the beginning and end in CSV strings", 0) \ M(Bool, input_format_tsv_detect_header, true, "Automatically detect header with names and types in TSV format", 0) \ M(Bool, input_format_custom_detect_header, true, "Automatically detect header with names and types in CustomSeparated format", 0) \ diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 73a7d4f73f2..33ecddfc223 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -70,7 +70,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.skip_first_lines = settings.input_format_csv_skip_first_lines; format_settings.csv.try_detect_header = settings.input_format_csv_detect_header; format_settings.csv.trim_whitespaces = settings.input_format_csv_trim_whitespaces; - format_settings.csv.skip_whitespaces_tabs = settings.input_format_csv_skip_whitespaces_tabs; + format_settings.csv.use_whitespace_tab_as_delimiter = settings.input_format_csv_use_whitespace_tab_as_delimiter; format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter; format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter; format_settings.hive_text.map_keys_delimiter = settings.input_format_hive_text_map_keys_delimiter; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 434389e31a1..72d60e8423e 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -137,7 +137,7 @@ struct FormatSettings String custom_delimiter; bool try_detect_header = true; bool trim_whitespaces = true; - bool skip_whitespaces_tabs = true; + bool use_whitespace_tab_as_delimiter = false; } csv; struct HiveText diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 4094285e1ad..b8d3413f863 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ 
b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -25,10 +25,10 @@ namespace ErrorCodes namespace { - void checkBadDelimiter(char delimiter, bool skip_whitespaces_tabs) + void checkBadDelimiter(char delimiter, bool use_whitespace_tab_as_delimiter) { constexpr std::string_view bad_delimiters = " \t\"'.UL"; - if (bad_delimiters.find(delimiter) != std::string_view::npos && skip_whitespaces_tabs) + if (bad_delimiters.find(delimiter) != std::string_view::npos && !use_whitespace_tab_as_delimiter) throw Exception( ErrorCodes::BAD_ARGUMENTS, "CSV format may not work correctly with delimiter '{}'. Try use CustomSeparated format instead", @@ -68,7 +68,7 @@ CSVRowInputFormat::CSVRowInputFormat( format_settings_.csv.try_detect_header), buf(std::move(in_)) { - checkBadDelimiter(format_settings_.csv.delimiter, format_settings_.csv.skip_whitespaces_tabs); + checkBadDelimiter(format_settings_.csv.delimiter, format_settings_.csv.use_whitespace_tab_as_delimiter); } CSVRowInputFormat::CSVRowInputFormat( @@ -90,7 +90,7 @@ CSVRowInputFormat::CSVRowInputFormat( format_settings_.csv.try_detect_header), buf(std::move(in_)) { - checkBadDelimiter(format_settings_.csv.delimiter, format_settings_.csv.skip_whitespaces_tabs); + checkBadDelimiter(format_settings_.csv.delimiter, format_settings_.csv.use_whitespace_tab_as_delimiter); } void CSVRowInputFormat::syncAfterError() @@ -134,9 +134,9 @@ static void skipEndOfLine(ReadBuffer & in) } /// Skip `whitespace` symbols allowed in CSV. -static inline void skipWhitespacesAndTabs(ReadBuffer & in, const bool & skip_whitespaces_tabs) +static inline void skipWhitespacesAndTabs(ReadBuffer & in, const bool & use_whitespace_tab_as_delimiter) { - if (!skip_whitespaces_tabs) + if (use_whitespace_tab_as_delimiter) { return; } @@ -150,7 +150,7 @@ CSVFormatReader::CSVFormatReader(PeekableReadBuffer & buf_, const FormatSettings void CSVFormatReader::skipFieldDelimiter() { - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); assertChar(format_settings.csv.delimiter, *buf); } @@ -158,7 +158,7 @@ template String CSVFormatReader::readCSVFieldIntoString() { if (format_settings.csv.trim_whitespaces) [[likely]] - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); String field; if constexpr (read_string) @@ -170,14 +170,14 @@ String CSVFormatReader::readCSVFieldIntoString() void CSVFormatReader::skipField() { - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); NullOutput out; readCSVStringInto(out, *buf, format_settings.csv); } void CSVFormatReader::skipRowEndDelimiter() { - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); if (buf->eof()) return; @@ -186,7 +186,7 @@ void CSVFormatReader::skipRowEndDelimiter() if (*buf->position() == format_settings.csv.delimiter) ++buf->position(); - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); if (buf->eof()) return; @@ -198,7 +198,7 @@ void CSVFormatReader::skipHeaderRow() do { skipField(); - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, 
format_settings.csv.use_whitespace_tab_as_delimiter); } while (checkChar(format_settings.csv.delimiter, *buf)); skipRowEndDelimiter(); @@ -211,7 +211,7 @@ std::vector CSVFormatReader::readRowImpl() do { fields.push_back(readCSVFieldIntoString()); - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); } while (checkChar(format_settings.csv.delimiter, *buf)); skipRowEndDelimiter(); @@ -224,7 +224,7 @@ bool CSVFormatReader::parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out) try { - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); assertChar(delimiter, *buf); } catch (const DB::Exception &) @@ -250,7 +250,7 @@ bool CSVFormatReader::parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out) bool CSVFormatReader::parseRowEndWithDiagnosticInfo(WriteBuffer & out) { - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); if (buf->eof()) return true; @@ -259,7 +259,7 @@ bool CSVFormatReader::parseRowEndWithDiagnosticInfo(WriteBuffer & out) if (*buf->position() == format_settings.csv.delimiter) { ++buf->position(); - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); if (buf->eof()) return true; } @@ -287,7 +287,7 @@ bool CSVFormatReader::readField( const String & /*column_name*/) { if (format_settings.csv.trim_whitespaces || !isStringOrFixedString(removeNullable(type))) [[likely]] - skipWhitespacesAndTabs(*buf, format_settings.csv.skip_whitespaces_tabs); + skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); const bool at_delimiter = !buf->eof() && *buf->position() == format_settings.csv.delimiter; const bool at_last_column_line_end = is_last_file_column && (buf->eof() || *buf->position() == '\n' || *buf->position() == '\r'); diff --git a/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.reference b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.reference index 531391394a7..228436130dc 100644 --- a/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.reference +++ b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.reference @@ -1,2 +1,2 @@ -1 a b -2 c d +1 a b +2 c d diff --git a/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh index e3f61262674..deb6e317aac 100755 --- a/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh +++ b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh @@ -10,8 +10,8 @@ $CLICKHOUSE_CLIENT -q "drop table if exists test_whitespace" $CLICKHOUSE_CLIENT -q "drop table if exists test_tab" $CLICKHOUSE_CLIENT -q "create table test_whitespace (x UInt32, y String, z String) engine=MergeTree order by x" $CLICKHOUSE_CLIENT -q "create table test_tab (x UInt32, y String, z String) engine=MergeTree order by x" -cat $CURDIR/data_csv/csv_with_space_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_whitespace SETTINGS format_csv_delimiter=' ', input_format_csv_skip_whitespaces_tabs=false FORMAT CSV" -cat $CURDIR/data_csv/csv_with_tab_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO 
test_tab SETTINGS format_csv_delimiter='\t', input_format_csv_skip_whitespaces_tabs=false FORMAT CSV" +cat $CURDIR/data_csv/csv_with_space_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_whitespace SETTINGS format_csv_delimiter=' ', input_format_csv_use_whitespace_tab_as_delimiter=true FORMAT CSV" +cat $CURDIR/data_csv/csv_with_tab_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_tab SETTINGS format_csv_delimiter='\t', input_format_csv_use_whitespace_tab_as_delimiter=true FORMAT CSV" $CLICKHOUSE_CLIENT -q "select * from test_whitespace" $CLICKHOUSE_CLIENT -q "select * from test_tab" $CLICKHOUSE_CLIENT -q "drop table test_whitespace" From 1af062a53214168345838f79cba53ecb11cbc41e Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 12 Jun 2023 08:04:55 +0000 Subject: [PATCH 337/722] Un-flake 00804_test_deflate_qpl_codec_compression --- .../0_stateless/00804_test_deflate_qpl_codec_compression.sql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql index ff3c1812c86..8a256567e80 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql @@ -6,6 +6,10 @@ SET enable_deflate_qpl_codec = 1; +-- Suppress test failures because stderr contains warning "Initialization of hardware-assisted DeflateQpl failed, falling +-- back to software DeflateQpl coded." +SET send_logs_level = 'fatal'; + DROP TABLE IF EXISTS compression_codec; CREATE TABLE compression_codec( From c378c3fcbbb678e96f5bc11958295f4dd1b4b6ba Mon Sep 17 00:00:00 2001 From: Julian Maicher Date: Mon, 12 Jun 2023 10:29:46 +0200 Subject: [PATCH 338/722] Fix type of LDAP server params hash in cache entry In 1ed7ad57d91db198ca94085a2c56372fb813543a, we switched from (`size_t`, usually 64bit) to SipHash (128bit) and forgot to change the type of the cache entry. This broke the caching of successful LDAP authentication requests (verification cooldown). Fixes #50864 --- src/Access/ExternalAuthenticators.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Access/ExternalAuthenticators.h b/src/Access/ExternalAuthenticators.h index bf928c18d5b..7b47c9351fd 100644 --- a/src/Access/ExternalAuthenticators.h +++ b/src/Access/ExternalAuthenticators.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -42,7 +43,7 @@ public: private: struct LDAPCacheEntry { - std::size_t last_successful_params_hash = 0; + UInt128 last_successful_params_hash = 0; std::chrono::steady_clock::time_point last_successful_authentication_timestamp; LDAPClient::SearchResultsList last_successful_role_search_results; }; From 6b2c33b1e478f175bec55e41a7ca8054807bb4fa Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 12 Jun 2023 09:16:22 +0000 Subject: [PATCH 339/722] Document x86 / ARM prerequisites for Docker image --- docker/server/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/server/README.md b/docker/server/README.md index e6c9ee51fa7..46d30f252b4 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -16,6 +16,10 @@ For more information and documentation see https://clickhouse.com/. - The tag `head` is built from the latest commit to the default branch. - Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`. 
+Compatibility +- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. +- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A). Most ARM CPUs after 2017 support ARMv8.2-A. A notable exception is Raspberry Pi 4 from 2019 whose CPU only supports ARMv8.0-A. + ## How to use this image ### start server instance From a7408170c8bce6be3d4849fc3614834d8f646298 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Mon, 12 Jun 2023 11:21:43 +0200 Subject: [PATCH 340/722] Use H3 --- docker/server/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/server/README.md b/docker/server/README.md index 46d30f252b4..67646a262f5 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -16,7 +16,8 @@ For more information and documentation see https://clickhouse.com/. - The tag `head` is built from the latest commit to the default branch. - Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`. -Compatibility +### Compatibility + - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. - The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A). Most ARM CPUs after 2017 support ARMv8.2-A. A notable exception is Raspberry Pi 4 from 2019 whose CPU only supports ARMv8.0-A. From 676ba2fbde78ec9ada09a45c0453e0cd96a3ab01 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 12 Jun 2023 12:30:38 +0300 Subject: [PATCH 341/722] Update MergeTreeData.cpp --- src/Storages/MergeTree/MergeTreeData.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 8a69c8ff75c..ee06056985a 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -99,6 +99,7 @@ #include #include +#include template <> struct fmt::formatter : fmt::formatter From c85344f83b8c1568e67ef45fdcb55b0ec0c07a8b Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Mon, 12 Jun 2023 10:02:44 +0000 Subject: [PATCH 342/722] Try to fix test (becouse timezone randomization) --- .../0_stateless/01746_convert_type_with_default.reference | 2 +- tests/queries/0_stateless/01746_convert_type_with_default.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.reference b/tests/queries/0_stateless/01746_convert_type_with_default.reference index 541580d67f5..e5aa42e6116 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.reference +++ b/tests/queries/0_stateless/01746_convert_type_with_default.reference @@ -40,7 +40,7 @@ 1970-01-20 1970-01-20 2149-06-06 -1970-01-01 +1970-01-02 2023-05-30 2023-05-30 2023-05-30 14:38:20 diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index 2620780cfb9..e6e420ae4c0 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -54,7 +54,7 @@ select toDateOrDefault(cast(19 as Int256)); select toDateOrDefault(cast(19 as UInt256)); select toDateOrDefault(65535); -select toDateOrDefault(65536); +select toDateOrDefault(122400); select toDateOrDefault(19507, '2000-01-01'::Date); select 
toDateOrDefault(-1, '2023-05-30'::Date); From 5cec4c3161b84e32341ef723dc8cea2b38343b69 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Jun 2023 11:34:40 +0000 Subject: [PATCH 343/722] Fallback to parsing big integer from String instead of exception in Parquet format --- src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp | 9 +++------ .../02786_parquet_big_integer_compatibility.reference | 1 + .../02786_parquet_big_integer_compatibility.sh | 9 +++++++++ 3 files changed, 13 insertions(+), 6 deletions(-) create mode 100644 tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference create mode 100755 tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh diff --git a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp index 0b4700c9d4c..5a7306111a5 100644 --- a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp +++ b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp @@ -202,13 +202,10 @@ static ColumnWithTypeAndName readColumnWithBigNumberFromBinaryData(std::shared_p for (size_t i = 0; i != chunk_length; ++i) { + /// If at least one value size is not equal to the size if big integer, fallback to reading String column and further cast to result type. if (!chunk.IsNull(i) && chunk.value_length(i) != sizeof(ValueType)) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Cannot insert data into {} column from binary value, expected data with size {}, got {}", - column_type->getName(), - sizeof(ValueType), - chunk.value_length(i)); + return readColumnWithStringData(arrow_column, column_name); + total_size += chunk_length; } } diff --git a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference new file mode 100644 index 00000000000..7764974255b --- /dev/null +++ b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference @@ -0,0 +1 @@ +424242424242424242424242424242424242424242424242424242 diff --git a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh new file mode 100755 index 00000000000..8865b2e7aab --- /dev/null +++ b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL -q "select toString(424242424242424242424242424242424242424242424242424242::UInt256) as x format Parquet" | $CLICKHOUSE_LOCAL --input-format=Parquet --structure='x UInt256' -q "select * from table" + From 24d70a2afd70a10a709fffe942b4e759d406f93b Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 12 Jun 2023 13:37:59 +0200 Subject: [PATCH 344/722] Fix --- src/Formats/CapnProtoSerializer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index f51f8c4b933..b306cca4f94 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -403,7 +403,7 @@ namespace if (it == capnp_to_ch_values.end()) throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected value {} in CapnProto enum", capnp_enum_value); - assert_cast &>(column).insertValue(capnp_to_ch_values[capnp_enum_value]); + assert_cast &>(column).insertValue(it->second); } } From 42393b51ee1747e838c71196a29eb305fca6257c Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 12 Jun 2023 13:45:00 +0200 Subject: [PATCH 345/722] Fix style --- tests/clickhouse-test | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 56cf2f0ce0f..9242ca8a0b0 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -349,10 +349,11 @@ def kill_gdb_if_any(): for i in range(5): code = subprocess.call("kill -TERM $(pidof gdb)", shell=True, timeout=30) if code != 0: - time.sleep(i) + sleep(i) else: break + # collect server stacktraces using gdb def get_stacktraces_from_gdb(server_pid): try: From 17a6512cdc80c0a14f4570c12df520deff05550c Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 12 Jun 2023 13:53:20 +0200 Subject: [PATCH 346/722] Delete bad test --- tests/queries/0_stateless/02782_bitmap_overflow.sql | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 tests/queries/0_stateless/02782_bitmap_overflow.sql diff --git a/tests/queries/0_stateless/02782_bitmap_overflow.sql b/tests/queries/0_stateless/02782_bitmap_overflow.sql deleted file mode 100644 index 71ddce5c3b9..00000000000 --- a/tests/queries/0_stateless/02782_bitmap_overflow.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Tags: no-msan, no-asan - -select unhex('0181808080908380808000')::AggregateFunction(groupBitmap, UInt64); -- {serverError TOO_LARGE_ARRAY_SIZE} - From d100a2031cd51f51e7320d62f683e7bf8520083c Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 12 Jun 2023 13:53:44 +0200 Subject: [PATCH 347/722] Delete bad test --- tests/queries/0_stateless/02782_bitmap_overflow.reference | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tests/queries/0_stateless/02782_bitmap_overflow.reference diff --git a/tests/queries/0_stateless/02782_bitmap_overflow.reference b/tests/queries/0_stateless/02782_bitmap_overflow.reference deleted file mode 100644 index e69de29bb2d..00000000000 From 1c8371e1db7c1bfbc9a98d2cf33b1450e5c3547a Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 12 Jun 2023 12:13:10 +0000 Subject: [PATCH 348/722] Update version_date.tsv and changelogs after v22.8.18.31-lts --- docs/changelogs/v22.8.18.31-lts.md | 32 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 33 insertions(+) create mode 100644 
docs/changelogs/v22.8.18.31-lts.md
diff --git a/docs/changelogs/v22.8.18.31-lts.md b/docs/changelogs/v22.8.18.31-lts.md
new file mode 100644
index 00000000000..709bb926f8a
--- /dev/null
+++ b/docs/changelogs/v22.8.18.31-lts.md
@@ -0,0 +1,32 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v22.8.18.31-lts (4de7a95a544) FIXME as compared to v22.8.17.17-lts (df7f2ef0b41)
+
+#### Performance Improvement
+* Backported in [#49214](https://github.com/ClickHouse/ClickHouse/issues/49214): Fixed excessive reading in queries with `FINAL`. [#47801](https://github.com/ClickHouse/ClickHouse/pull/47801) ([Nikita Taranov](https://github.com/nickitat)).
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#49079](https://github.com/ClickHouse/ClickHouse/issues/49079): Update time zones. The following were updated: Africa/Cairo, Africa/Casablanca, Africa/El_Aaiun, America/Bogota, America/Cambridge_Bay, America/Ciudad_Juarez, America/Godthab, America/Inuvik, America/Iqaluit, America/Nuuk, America/Ojinaga, America/Pangnirtung, America/Rankin_Inlet, America/Resolute, America/Whitehorse, America/Yellowknife, Asia/Gaza, Asia/Hebron, Asia/Kuala_Lumpur, Asia/Singapore, Canada/Yukon, Egypt, Europe/Kirov, Europe/Volgograd, Singapore. [#48572](https://github.com/ClickHouse/ClickHouse/pull/48572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix bad cast from LowCardinality column when using short circuit function execution [#43311](https://github.com/ClickHouse/ClickHouse/pull/43311) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix msan issue in randomStringUTF8() [#49750](https://github.com/ClickHouse/ClickHouse/pull/49750) ([Robert Schulze](https://github.com/rschu1ze)).
+* JIT compilation not equals NaN fix [#50056](https://github.com/ClickHouse/ClickHouse/pull/50056) ([Maksim Kita](https://github.com/kitaisreal)).
+* Fix crash with `multiIf` and constant condition and nullable arguments [#50123](https://github.com/ClickHouse/ClickHouse/pull/50123) ([Anton Popov](https://github.com/CurtizJ)).
+* Fixed type conversion from Date/Date32 to DateTime64 when querying with DateTime64 index [#50280](https://github.com/ClickHouse/ClickHouse/pull/50280) ([Lucas Chang](https://github.com/lucas-tubi)).
+* Fix Keeper deadlock on exception when preprocessing requests. [#50387](https://github.com/ClickHouse/ClickHouse/pull/50387) ([frinkr](https://github.com/frinkr)).
+* Fix Log family table return wrong rows count after truncate [#50585](https://github.com/ClickHouse/ClickHouse/pull/50585) ([flynn](https://github.com/ucasfl)).
+* Do not read all the columns from right GLOBAL JOIN table. [#50721](https://github.com/ClickHouse/ClickHouse/pull/50721) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Improve test reports [#49151](https://github.com/ClickHouse/ClickHouse/pull/49151) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Update github.com/distribution/distribution [#50114](https://github.com/ClickHouse/ClickHouse/pull/50114) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Catch issues with dockerd during the build [#50700](https://github.com/ClickHouse/ClickHouse/pull/50700) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 4647bcb4af1..dce6aadbff4 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -44,6 +44,7 @@ v22.9.4.32-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 +v22.8.18.31-lts 2023-06-12 v22.8.17.17-lts 2023-04-22 v22.8.16.32-lts 2023-04-04 v22.8.15.23-lts 2023-03-10 From d177cfceca3af796f7e5cab3adb421212d9856f0 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 12 Jun 2023 12:17:11 +0000 Subject: [PATCH 349/722] Update version_date.tsv and changelogs after v23.3.3.52-lts --- docs/changelogs/v23.3.3.52-lts.md | 45 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 2 ++ 2 files changed, 47 insertions(+) create mode 100644 docs/changelogs/v23.3.3.52-lts.md diff --git a/docs/changelogs/v23.3.3.52-lts.md b/docs/changelogs/v23.3.3.52-lts.md new file mode 100644 index 00000000000..f845e14eb78 --- /dev/null +++ b/docs/changelogs/v23.3.3.52-lts.md @@ -0,0 +1,45 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.3.3.52-lts (cb963c474db) FIXME as compared to v23.3.2.37-lts (1b144bcd101) + +#### Improvement +* Backported in [#49954](https://github.com/ClickHouse/ClickHouse/issues/49954): Add support for (an unusual) case where the arguments in the `IN` operator are single-element tuples. [#49844](https://github.com/ClickHouse/ClickHouse/pull/49844) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). + +#### Build/Testing/Packaging Improvement +* Backported in [#49210](https://github.com/ClickHouse/ClickHouse/issues/49210): Fix glibc compatibility check: replace `preadv` from musl. [#49144](https://github.com/ClickHouse/ClickHouse/pull/49144) ([alesapin](https://github.com/alesapin)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix aggregate empty string error [#48999](https://github.com/ClickHouse/ClickHouse/pull/48999) ([LiuNeng](https://github.com/liuneng1994)). +* Fix key not found error for queries with multiple StorageJoin [#49137](https://github.com/ClickHouse/ClickHouse/pull/49137) ([vdimir](https://github.com/vdimir)). +* Fix race on Outdated parts loading [#49223](https://github.com/ClickHouse/ClickHouse/pull/49223) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix bug in DISTINCT [#49628](https://github.com/ClickHouse/ClickHouse/pull/49628) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix msan issue in randomStringUTF8() [#49750](https://github.com/ClickHouse/ClickHouse/pull/49750) ([Robert Schulze](https://github.com/rschu1ze)). +* fix `is_prefix` in OptimizeRegularExpression [#49919](https://github.com/ClickHouse/ClickHouse/pull/49919) ([Han Fei](https://github.com/hanfei1991)). +* Fix IPv6 encoding in protobuf [#49933](https://github.com/ClickHouse/ClickHouse/pull/49933) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Avoid deadlock when starting table in attach thread of `ReplicatedMergeTree` [#50026](https://github.com/ClickHouse/ClickHouse/pull/50026) ([Antonio Andelic](https://github.com/antonio2368)). +* JIT compilation not equals NaN fix [#50056](https://github.com/ClickHouse/ClickHouse/pull/50056) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix crash with `multiIf` and constant condition and nullable arguments [#50123](https://github.com/ClickHouse/ClickHouse/pull/50123) ([Anton Popov](https://github.com/CurtizJ)). 
+* Fix reconnecting of HTTPS session when target host IP was changed [#50240](https://github.com/ClickHouse/ClickHouse/pull/50240) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Fixed type conversion from Date/Date32 to DateTime64 when querying with DateTime64 index [#50280](https://github.com/ClickHouse/ClickHouse/pull/50280) ([Lucas Chang](https://github.com/lucas-tubi)). +* Fix Keeper deadlock on exception when preprocessing requests. [#50387](https://github.com/ClickHouse/ClickHouse/pull/50387) ([frinkr](https://github.com/frinkr)). +* Fix incorrect constant folding [#50536](https://github.com/ClickHouse/ClickHouse/pull/50536) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix Log family table return wrong rows count after truncate [#50585](https://github.com/ClickHouse/ClickHouse/pull/50585) ([flynn](https://github.com/ucasfl)). +* Fix bug in `uniqExact` parallel merging [#50590](https://github.com/ClickHouse/ClickHouse/pull/50590) ([Nikita Taranov](https://github.com/nickitat)). +* Do not read all the columns from right GLOBAL JOIN table. [#50721](https://github.com/ClickHouse/ClickHouse/pull/50721) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Implement status comment [#48468](https://github.com/ClickHouse/ClickHouse/pull/48468) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Update curl to 8.0.1 (for CVEs) [#48765](https://github.com/ClickHouse/ClickHouse/pull/48765) ([Boris Kuschel](https://github.com/bkuschel)). +* Improve test reports [#49151](https://github.com/ClickHouse/ClickHouse/pull/49151) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fallback auth gh api [#49314](https://github.com/ClickHouse/ClickHouse/pull/49314) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Improve CI: status commit, auth for get_gh_api [#49388](https://github.com/ClickHouse/ClickHouse/pull/49388) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Update github.com/distribution/distribution [#50114](https://github.com/ClickHouse/ClickHouse/pull/50114) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Catch issues with dockerd during the build [#50700](https://github.com/ClickHouse/ClickHouse/pull/50700) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 4647bcb4af1..411dcd81957 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -2,6 +2,7 @@ v23.5.2.7-stable 2023-06-10 v23.5.1.3174-stable 2023-06-09 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.3.52-lts 2023-06-12 v23.3.2.37-lts 2023-04-22 v23.3.1.2823-lts 2023-03-31 v23.2.7.32-stable 2023-06-09 @@ -44,6 +45,7 @@ v22.9.4.32-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 +v22.8.18.31-lts 2023-06-12 v22.8.17.17-lts 2023-04-22 v22.8.16.32-lts 2023-04-04 v22.8.15.23-lts 2023-03-10 From 11fbc01de5bc5840f6d91c1e9b0d10bf25387bd9 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 12 Jun 2023 12:37:47 +0000 Subject: [PATCH 350/722] Update version_date.tsv and changelogs after v23.4.3.48-stable --- docs/changelogs/v23.4.3.48-stable.md | 42 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 3 ++ 2 files changed, 45 insertions(+) create mode 100644 docs/changelogs/v23.4.3.48-stable.md diff --git a/docs/changelogs/v23.4.3.48-stable.md b/docs/changelogs/v23.4.3.48-stable.md new file mode 100644 index 00000000000..8bafd22bfbd --- /dev/null +++ b/docs/changelogs/v23.4.3.48-stable.md @@ -0,0 +1,42 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.4.3.48-stable (d9199f8d3cc) FIXME as compared to v23.4.2.11-stable (b6442320f9d) + +#### Backward Incompatible Change +* Backported in [#49981](https://github.com/ClickHouse/ClickHouse/issues/49981): Revert "`groupArray` returns cannot be nullable" (due to binary compatibility breakage for `groupArray`/`groupArrayLast`/`groupArraySample` over `Nullable` types, which likely will lead to `TOO_LARGE_ARRAY_SIZE` or `CANNOT_READ_ALL_DATA`). [#49971](https://github.com/ClickHouse/ClickHouse/pull/49971) ([Azat Khuzhin](https://github.com/azat)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix key not found error for queries with multiple StorageJoin [#49137](https://github.com/ClickHouse/ClickHouse/pull/49137) ([vdimir](https://github.com/vdimir)). +* Fix fuzz bug when subquery set is not built when reading from remote() [#49425](https://github.com/ClickHouse/ClickHouse/pull/49425) ([Alexander Gololobov](https://github.com/davenger)). +* Fix postgres database setting [#49481](https://github.com/ClickHouse/ClickHouse/pull/49481) ([Mal Curtis](https://github.com/snikch)). +* Fix AsynchronousReadIndirectBufferFromRemoteFS breaking on short seeks [#49525](https://github.com/ClickHouse/ClickHouse/pull/49525) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix bug in DISTINCT [#49628](https://github.com/ClickHouse/ClickHouse/pull/49628) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix assert in SpanHolder::finish() with fibers [#49673](https://github.com/ClickHouse/ClickHouse/pull/49673) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix msan issue in randomStringUTF8() [#49750](https://github.com/ClickHouse/ClickHouse/pull/49750) ([Robert Schulze](https://github.com/rschu1ze)). +* fix `is_prefix` in OptimizeRegularExpression [#49919](https://github.com/ClickHouse/ClickHouse/pull/49919) ([Han Fei](https://github.com/hanfei1991)). +* Fix IPv6 encoding in protobuf [#49933](https://github.com/ClickHouse/ClickHouse/pull/49933) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). 
+* Avoid deadlock when starting table in attach thread of `ReplicatedMergeTree` [#50026](https://github.com/ClickHouse/ClickHouse/pull/50026) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix assert in SpanHolder::finish() with fibers attempt 2 [#50034](https://github.com/ClickHouse/ClickHouse/pull/50034) ([Kruglov Pavel](https://github.com/Avogar)). +* JIT compilation not equals NaN fix [#50056](https://github.com/ClickHouse/ClickHouse/pull/50056) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix crashing in case of Replicated database without arguments [#50058](https://github.com/ClickHouse/ClickHouse/pull/50058) ([Azat Khuzhin](https://github.com/azat)). +* Fix crash with `multiIf` and constant condition and nullable arguments [#50123](https://github.com/ClickHouse/ClickHouse/pull/50123) ([Anton Popov](https://github.com/CurtizJ)). +* Fix iceberg metadata parsing [#50232](https://github.com/ClickHouse/ClickHouse/pull/50232) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix bugs in Poco sockets in non-blocking mode, use true non-blocking sockets [#50252](https://github.com/ClickHouse/ClickHouse/pull/50252) ([Kruglov Pavel](https://github.com/Avogar)). +* Fixed type conversion from Date/Date32 to DateTime64 when querying with DateTime64 index [#50280](https://github.com/ClickHouse/ClickHouse/pull/50280) ([Lucas Chang](https://github.com/lucas-tubi)). +* Fix Keeper deadlock on exception when preprocessing requests. [#50387](https://github.com/ClickHouse/ClickHouse/pull/50387) ([frinkr](https://github.com/frinkr)). +* Fix Log family table return wrong rows count after truncate [#50585](https://github.com/ClickHouse/ClickHouse/pull/50585) ([flynn](https://github.com/ucasfl)). +* Fix bug in `uniqExact` parallel merging [#50590](https://github.com/ClickHouse/ClickHouse/pull/50590) ([Nikita Taranov](https://github.com/nickitat)). +* Do not read all the columns from right GLOBAL JOIN table. [#50721](https://github.com/ClickHouse/ClickHouse/pull/50721) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Improve CI: status commit, auth for get_gh_api [#49388](https://github.com/ClickHouse/ClickHouse/pull/49388) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Update github.com/distribution/distribution [#50114](https://github.com/ClickHouse/ClickHouse/pull/50114) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Catch issues with dockerd during the build [#50700](https://github.com/ClickHouse/ClickHouse/pull/50700) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 4647bcb4af1..6afce99612f 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,7 +1,9 @@ v23.5.2.7-stable 2023-06-10 v23.5.1.3174-stable 2023-06-09 +v23.4.3.48-stable 2023-06-12 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.3.52-lts 2023-06-12 v23.3.2.37-lts 2023-04-22 v23.3.1.2823-lts 2023-03-31 v23.2.7.32-stable 2023-06-09 @@ -44,6 +46,7 @@ v22.9.4.32-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 +v22.8.18.31-lts 2023-06-12 v22.8.17.17-lts 2023-04-22 v22.8.16.32-lts 2023-04-04 v22.8.15.23-lts 2023-03-10 From 5db3b393d825e5597f204b9ff2ca67abf89e4045 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 12 Jun 2023 16:22:33 +0300 Subject: [PATCH 351/722] Update MergeTreeData.cpp --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index ee06056985a..5833955726f 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1253,7 +1253,7 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( mark_broken(); return res; } - catch (const Poco::NetException &) + catch (const Poco::Net::NetException &) { throw; } From b5b8c7086b43fbf3de9293196bfb7097e3888b58 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Mon, 12 Jun 2023 13:43:53 +0000 Subject: [PATCH 352/722] Update broken tests list --- tests/broken_tests.txt | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/tests/broken_tests.txt b/tests/broken_tests.txt index b1fa18c44dd..d49b4f391e5 100644 --- a/tests/broken_tests.txt +++ b/tests/broken_tests.txt @@ -11,7 +11,6 @@ 00927_asof_joins 00940_order_by_read_in_order_query_plan 00945_bloom_filter_index -00952_input_function 00979_set_index_not 00981_in_subquery_with_tuple 01049_join_low_card_bug_long @@ -21,14 +20,12 @@ 01072_optimize_skip_unused_shards_const_expr_eval 01083_expressions_in_engine_arguments 01086_odbc_roundtrip -01152_cross_replication 01155_rename_move_materialized_view 01173_transaction_control_queries 01211_optimize_skip_unused_shards_type_mismatch 01213_optimize_skip_unused_shards_DISTINCT 01214_test_storage_merge_aliases_with_where 01231_distributed_aggregation_memory_efficient_mix_levels -01232_extremes 01244_optimize_distributed_group_by_sharding_key 01247_optimize_distributed_group_by_sharding_key_dist_on_dist 01268_mv_scalars @@ -50,7 +47,6 @@ 01585_use_index_for_global_in 01585_use_index_for_global_in_with_null 01586_columns_pruning -01615_random_one_shard_insertion 01624_soft_constraints 01651_bugs_from_15889 01655_plan_optimizations @@ -79,7 +75,6 @@ 01952_optimize_distributed_group_by_sharding_key 02000_join_on_const 02001_shard_num_shard_count -02024_join_on_or_long 02131_used_row_policies_in_query_log 02139_MV_with_scalar_subquery 02174_cte_scalar_cache_mv @@ -88,14 +83,11 @@ 02302_s3_file_pruning 02317_distinct_in_order_optimization_explain 02341_global_join_cte -02343_aggregation_pipeline 02345_implicit_transaction -02346_additional_filters_distr 02352_grouby_shadows_arg 02354_annoy 02366_union_decimal_conversion 02375_rocksdb_with_filters -02377_optimize_sorting_by_input_stream_properties_explain 02382_join_and_filtering_set 02402_merge_engine_with_view 02404_memory_bound_merging @@ -112,7 +104,6 @@ 
02575_merge_prewhere_different_default_kind 02713_array_low_cardinality_string 02707_skip_index_with_in -02324_map_combinator_bug 02241_join_rocksdb_bs 02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET 01115_join_with_dictionary @@ -120,7 +111,6 @@ 00917_multiple_joins_denny_crane 00725_join_on_bug_1 00636_partition_key_parts_pruning -00261_storage_aliases_and_array_join 01825_type_json_multiple_files 01281_group_by_limit_memory_tracking 02723_zookeeper_name From 01c7d2fe719f9b9ed59fce58d5e9dec44167e42f Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 12 Jun 2023 16:53:26 +0300 Subject: [PATCH 353/722] Prostpone check of outdated parts (#50676) * prostpone check of outdated parts * Update ReplicatedMergeTreePartCheckThread.cpp --- .../ReplicatedMergeTreePartCheckThread.cpp | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index 0882ff5a0bc..7bb8d9d758e 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -344,6 +344,22 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na LOG_TRACE(log, "Part {} in zookeeper: {}, locally: {}", part_name, exists_in_zookeeper, part != nullptr); + if (exists_in_zookeeper && !part) + { + auto outdated = storage.getPartIfExists(part_name, {MergeTreeDataPartState::Outdated, MergeTreeDataPartState::Deleting}); + if (outdated) + { + /// We cannot rely on exists_in_zookeeper, because the cleanup thread is probably going to remove it from ZooKeeper + /// Also, it will avoid "Cannot commit empty part: Part ... (state Outdated) already exists, but it will be deleted soon" + LOG_WARNING(log, "Part {} is Outdated, will wait for cleanup thread to handle it and check again later", part_name); + time_t lifetime = time(nullptr) - outdated->remove_time; + time_t max_lifetime = storage.getSettings()->old_parts_lifetime.totalSeconds(); + time_t delay = lifetime >= max_lifetime ? 0 : max_lifetime - lifetime; + enqueuePart(part_name, delay + 30); + return {part_name, true, "Part is Outdated, will recheck later"}; + } + } + /// We do not have this or a covering part. 
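    /// [Editorial note, not part of the original patch] A worked example of the recheck delay added above:
    /// with old_parts_lifetime = 480 seconds (an example value) and an Outdated part whose remove_time was 100 seconds ago,
    /// lifetime = 100 and max_lifetime = 480, so delay = 380 and the part is re-enqueued roughly 410 seconds later (delay + 30).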
if (!part) { From d45f07743c3f27276740b3bac7200f7cad90292e Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 12 Jun 2023 13:54:07 +0000 Subject: [PATCH 354/722] fix getting number of mutations --- src/Storages/StorageMergeTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index a2a46229660..233b37c74a9 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1358,7 +1358,7 @@ size_t StorageMergeTree::getNumberOfUnfinishedMutations() const size_t count = 0; for (const auto & [version, _] : current_mutations_by_version | std::views::reverse) { - auto status = getIncompleteMutationsStatusUnlocked(version, lock); + auto status = getIncompleteMutationsStatusUnlocked(version, lock, nullptr, true); if (!status) continue; From 26c9bda14410e6f589c843a664635db9ab02b15e Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Mon, 12 Jun 2023 13:51:46 +0000 Subject: [PATCH 355/722] Add a comment --- src/Interpreters/InterpreterSelectQueryAnalyzer.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp b/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp index 4f2f05dc7eb..8db1d27c073 100644 --- a/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp +++ b/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp @@ -135,6 +135,8 @@ QueryTreeNodePtr buildQueryTreeAndRunPasses(const ASTPtr & query, QueryTreePassManager query_tree_pass_manager(context); addQueryTreePasses(query_tree_pass_manager); + /// We should not apply any query tree level optimizations on shards + /// because it can lead to a changed header. if (select_query_options.ignore_ast_optimizations || context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY) query_tree_pass_manager.run(query_tree, 1 /*up_to_pass_index*/); From 41ece306cf02519e97697d1a65bd3003d8cbe898 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Mon, 12 Jun 2023 13:53:46 +0000 Subject: [PATCH 356/722] Update broken_tests.txt --- tests/broken_tests.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/broken_tests.txt b/tests/broken_tests.txt index b1fa18c44dd..3b888223b78 100644 --- a/tests/broken_tests.txt +++ b/tests/broken_tests.txt @@ -106,7 +106,6 @@ 02458_use_structure_from_insertion_table 02479_race_condition_between_insert_and_droppin_mv 02493_inconsistent_hex_and_binary_number -02494_optimize_group_by_function_keys_and_alias_columns 02521_aggregation_by_partitions 02554_fix_grouping_sets_predicate_push_down 02575_merge_prewhere_different_default_kind From 07eb7b7d664778342e0b44049041c160ac868d94 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Mon, 12 Jun 2023 11:03:50 -0300 Subject: [PATCH 357/722] Update settings.md --- docs/en/operations/settings/settings.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 2c9679c940d..8104478deff 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1957,6 +1957,10 @@ Default value: empty string (disabled) For the replicated tables by default the only 100 of the most recent inserts for each partition are deduplicated (see [replicated_deduplication_window](merge-tree-settings.md/#replicated-deduplication-window), [replicated_deduplication_window_seconds](merge-tree-settings.md/#replicated-deduplication-window-seconds)). 
For not replicated tables see [non_replicated_deduplication_window](merge-tree-settings.md/#non-replicated-deduplication-window). +:::note +`insert_deduplication_token` works on a partition level (the same as `insert_deduplication` checksum). Multiple partitions can have the same `insert_deduplication_token`. +::: + Example: ```sql From 252a10c670977c93b8808d8b98a8679714d6e9a3 Mon Sep 17 00:00:00 2001 From: tpanetti Date: Mon, 12 Jun 2023 08:19:06 -0700 Subject: [PATCH 358/722] Add "no-parallel" tag to MySQL Compatible Types test to fix test issue --- .../0_stateless/02775_show_columns_mysql_compatibility.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh index 51c9da2a842..e324926e2e7 100755 --- a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 5aa05667677669a17d6356fd884c7da35478d280 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 12 Jun 2023 17:24:34 +0200 Subject: [PATCH 359/722] Fix checking the lock file too often while writing a backup. --- src/Backups/BackupImpl.cpp | 6 ++---- src/Backups/BackupImpl.h | 1 + 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp index 306236534b6..82793f44739 100644 --- a/src/Backups/BackupImpl.cpp +++ b/src/Backups/BackupImpl.cpp @@ -144,6 +144,7 @@ void BackupImpl::open(const ContextPtr & context) if (!uuid) uuid = UUIDHelpers::generateV4(); lock_file_name = use_archive ? (archive_params.archive_name + ".lock") : ".lock"; + lock_file_before_first_file_checked = false; writing_finalized = false; /// Check that we can write a backup there and create the lock file to own this destination. @@ -833,13 +834,10 @@ void BackupImpl::writeFile(const BackupFileInfo & info, BackupEntryPtr entry) if (writing_finalized) throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is already finalized"); - bool should_check_lock_file = false; { std::lock_guard lock{mutex}; ++num_files; total_size += info.size; - if (!num_entries) - should_check_lock_file = true; } auto src_disk = entry->getDisk(); @@ -859,7 +857,7 @@ void BackupImpl::writeFile(const BackupFileInfo & info, BackupEntryPtr entry) return; } - if (!should_check_lock_file) + if (!lock_file_before_first_file_checked.exchange(true)) checkLockFile(true); /// NOTE: `mutex` must be unlocked during copying otherwise writing will be in one thread maximum and hence slow. 
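[Editorial aside, not part of the original patch] The writeFile() change above replaces a mutex-guarded local boolean with an atomic flag, so the lock file is verified exactly once, by whichever concurrent writeFile() call gets there first. A minimal, self-contained sketch of that do-once idiom follows; the names (already_checked, checkLockFile, writeOneFile) are illustrative and are not the actual BackupImpl members.

#include <atomic>
#include <iostream>
#include <thread>

std::atomic<bool> already_checked{false};

void checkLockFile() { std::cout << "lock file verified once\n"; }

void writeOneFile()
{
    // exchange(true) atomically sets the flag and returns its previous value,
    // so only the first caller across all threads observes `false` and performs the check.
    if (!already_checked.exchange(true))
        checkLockFile();
    // ... copy the entry's data to the backup destination ...
}

int main()
{
    std::thread t1(writeOneFile), t2(writeOneFile);
    t1.join();
    t2.join(); // "lock file verified once" is printed exactly one time
}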
diff --git a/src/Backups/BackupImpl.h b/src/Backups/BackupImpl.h index 7e95d156162..3ab11228892 100644 --- a/src/Backups/BackupImpl.h +++ b/src/Backups/BackupImpl.h @@ -141,6 +141,7 @@ private: std::shared_ptr archive_reader; std::shared_ptr archive_writer; String lock_file_name; + std::atomic lock_file_before_first_file_checked = false; bool writing_finalized = false; bool deduplicate_files = true; From 65d83e45cb177cc3abfec088e31da44fca357c95 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 12 Jun 2023 16:21:28 +0000 Subject: [PATCH 360/722] Fix crash in snowflakeToDateTime(), follow-up to #50834 --- src/Functions/FunctionSnowflake.h | 18 +++++++++++++----- .../01942_snowflakeToDateTime.reference | 1 + .../0_stateless/01942_snowflakeToDateTime.sql | 4 +++- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/src/Functions/FunctionSnowflake.h b/src/Functions/FunctionSnowflake.h index ce3a48269b4..0a47534c47d 100644 --- a/src/Functions/FunctionSnowflake.h +++ b/src/Functions/FunctionSnowflake.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -110,12 +111,19 @@ public: auto res_column = ColumnUInt32::create(input_rows_count); auto & result_data = res_column->getData(); - const auto & source_data = typeid_cast(col).getData(); - - for (size_t i = 0; i < input_rows_count; ++i) + if (const auto * src_non_const = typeid_cast(&col)) { - result_data[i] = static_cast( - ((source_data[i] >> time_shift) + snowflake_epoch) / 1000); + const auto & source_data = src_non_const->getData(); + for (size_t i = 0; i < input_rows_count; ++i) + result_data[i] = static_cast( + ((source_data[i] >> time_shift) + snowflake_epoch) / 1000); + } + else if (const auto * src_const = typeid_cast(&col)) + { + Int64 src_val = src_const->getValue(); + for (size_t i = 0; i < input_rows_count; ++i) + result_data[i] = static_cast( + ((src_val >> time_shift) + snowflake_epoch) / 1000); } return res_column; } diff --git a/tests/queries/0_stateless/01942_snowflakeToDateTime.reference b/tests/queries/0_stateless/01942_snowflakeToDateTime.reference index bed18023f6a..fa00a22bc63 100644 --- a/tests/queries/0_stateless/01942_snowflakeToDateTime.reference +++ b/tests/queries/0_stateless/01942_snowflakeToDateTime.reference @@ -1,3 +1,4 @@ const column UTC 1426860704886947840 2021-08-15 10:57:56 DateTime(\'UTC\') 2021-08-15 10:57:56.492 DateTime64(3, \'UTC\') Asia/Shanghai 1426860704886947840 2021-08-15 18:57:56 DateTime(\'Asia/Shanghai\') 2021-08-15 18:57:56.492 DateTime64(3, \'Asia/Shanghai\') +Asia/Singapore 2010-11-04 01:42:54 diff --git a/tests/queries/0_stateless/01942_snowflakeToDateTime.sql b/tests/queries/0_stateless/01942_snowflakeToDateTime.sql index f6f171afabf..3efccdddb2d 100644 --- a/tests/queries/0_stateless/01942_snowflakeToDateTime.sql +++ b/tests/queries/0_stateless/01942_snowflakeToDateTime.sql @@ -29,4 +29,6 @@ SELECT snowflakeToDateTime(i64, tz) as dt, toTypeName(dt), snowflakeToDateTime64(i64, tz) as dt64, - toTypeName(dt64); \ No newline at end of file + toTypeName(dt64); + +SELECT materialize('Asia/Singapore') a, snowflakeToDateTime(649::Int64, a) settings allow_nonconst_timezone_arguments = 1 From 326a3a3e8d719aebdc9ef9ee79f8b5fc8645183e Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Mon, 12 Jun 2023 16:46:10 +0000 Subject: [PATCH 361/722] Use query tree to rewrite the query --- src/Storages/StorageDistributed.cpp | 335 +--------------- src/Storages/StorageReplicatedMergeTree.cpp | 22 +- src/Storages/buildQueryTreeForShard.cpp | 372 ++++++++++++++++++ 
src/Storages/buildQueryTreeForShard.h | 15 + ...02771_parallel_replicas_analyzer.reference | 2 +- 5 files changed, 406 insertions(+), 340 deletions(-) create mode 100644 src/Storages/buildQueryTreeForShard.cpp create mode 100644 src/Storages/buildQueryTreeForShard.h diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index b91ad0b963a..1ec45ce3d57 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -81,6 +81,7 @@ #include #include +#include #include #include @@ -650,264 +651,6 @@ StorageSnapshotPtr StorageDistributed::getStorageSnapshotForQuery( namespace { -/// Visitor that collect column source to columns mapping from query and all subqueries -class CollectColumnSourceToColumnsVisitor : public InDepthQueryTreeVisitor -{ -public: - struct Columns - { - NameSet column_names; - NamesAndTypes columns; - - void addColumn(NameAndTypePair column) - { - if (column_names.contains(column.name)) - return; - - column_names.insert(column.name); - columns.push_back(std::move(column)); - } - }; - - const std::unordered_map & getColumnSourceToColumns() const - { - return column_source_to_columns; - } - - void visitImpl(QueryTreeNodePtr & node) - { - auto * column_node = node->as(); - if (!column_node) - return; - - auto column_source = column_node->getColumnSourceOrNull(); - if (!column_source) - return; - - auto it = column_source_to_columns.find(column_source); - if (it == column_source_to_columns.end()) - { - auto [insert_it, _] = column_source_to_columns.emplace(column_source, Columns()); - it = insert_it; - } - - it->second.addColumn(column_node->getColumn()); - } - -private: - std::unordered_map column_source_to_columns; -}; - -/** Visitor that rewrites IN and JOINs in query and all subqueries according to distributed_product_mode and - * prefer_global_in_and_join settings. - * - * Additionally collects GLOBAL JOIN and GLOBAL IN query nodes. - * - * If distributed_product_mode = deny, then visitor throws exception if there are multiple distributed tables. - * If distributed_product_mode = local, then visitor collects replacement map for tables that must be replaced - * with local tables. - * If distributed_product_mode = global or prefer_global_in_and_join setting is true, then visitor rewrites JOINs and IN functions that - * contain distributed tables to GLOBAL JOINs and GLOBAL IN functions. - * If distributed_product_mode = allow, then visitor does not rewrite query if there are multiple distributed tables. 
- */ -class DistributedProductModeRewriteInJoinVisitor : public InDepthQueryTreeVisitorWithContext -{ -public: - using Base = InDepthQueryTreeVisitorWithContext; - using Base::Base; - - explicit DistributedProductModeRewriteInJoinVisitor(const ContextPtr & context_) - : Base(context_) - {} - - struct InFunctionOrJoin - { - QueryTreeNodePtr query_node; - size_t subquery_depth = 0; - }; - - const std::unordered_map & getReplacementMap() const - { - return replacement_map; - } - - const std::vector & getGlobalInOrJoinNodes() const - { - return global_in_or_join_nodes; - } - - static bool needChildVisit(QueryTreeNodePtr & parent, QueryTreeNodePtr & child) - { - auto * function_node = parent->as(); - if (function_node && isNameOfGlobalInFunction(function_node->getFunctionName())) - return false; - - auto * join_node = parent->as(); - if (join_node && join_node->getLocality() == JoinLocality::Global && join_node->getRightTableExpression() == child) - return false; - - return true; - } - - void visitImpl(QueryTreeNodePtr & node) - { - auto * function_node = node->as(); - auto * join_node = node->as(); - - if ((function_node && isNameOfGlobalInFunction(function_node->getFunctionName())) || - (join_node && join_node->getLocality() == JoinLocality::Global)) - { - InFunctionOrJoin in_function_or_join_entry; - in_function_or_join_entry.query_node = node; - in_function_or_join_entry.subquery_depth = getSubqueryDepth(); - global_in_or_join_nodes.push_back(std::move(in_function_or_join_entry)); - return; - } - - if ((function_node && isNameOfLocalInFunction(function_node->getFunctionName())) || - (join_node && join_node->getLocality() != JoinLocality::Global)) - { - InFunctionOrJoin in_function_or_join_entry; - in_function_or_join_entry.query_node = node; - in_function_or_join_entry.subquery_depth = getSubqueryDepth(); - in_function_or_join_stack.push_back(in_function_or_join_entry); - return; - } - - if (node->getNodeType() == QueryTreeNodeType::TABLE) - tryRewriteTableNodeIfNeeded(node); - } - - void leaveImpl(QueryTreeNodePtr & node) - { - if (!in_function_or_join_stack.empty() && node.get() == in_function_or_join_stack.back().query_node.get()) - in_function_or_join_stack.pop_back(); - } - -private: - void tryRewriteTableNodeIfNeeded(const QueryTreeNodePtr & table_node) - { - const auto & table_node_typed = table_node->as(); - const auto * distributed_storage = typeid_cast(table_node_typed.getStorage().get()); - if (!distributed_storage) - return; - - bool distributed_valid_for_rewrite = distributed_storage->getShardCount() >= 2; - if (!distributed_valid_for_rewrite) - return; - - auto distributed_product_mode = getSettings().distributed_product_mode; - - if (distributed_product_mode == DistributedProductMode::LOCAL) - { - StorageID remote_storage_id = StorageID{distributed_storage->getRemoteDatabaseName(), - distributed_storage->getRemoteTableName()}; - auto resolved_remote_storage_id = getContext()->resolveStorageID(remote_storage_id); - const auto & distributed_storage_columns = table_node_typed.getStorageSnapshot()->metadata->getColumns(); - auto storage = std::make_shared(resolved_remote_storage_id, distributed_storage_columns); - auto replacement_table_expression = std::make_shared(std::move(storage), getContext()); - replacement_map.emplace(table_node.get(), std::move(replacement_table_expression)); - } - else if ((distributed_product_mode == DistributedProductMode::GLOBAL || getSettings().prefer_global_in_and_join) && - !in_function_or_join_stack.empty()) - { - auto * in_or_join_node_to_modify 
= in_function_or_join_stack.back().query_node.get(); - - if (auto * in_function_to_modify = in_or_join_node_to_modify->as()) - { - auto global_in_function_name = getGlobalInFunctionNameForLocalInFunctionName(in_function_to_modify->getFunctionName()); - auto global_in_function_resolver = FunctionFactory::instance().get(global_in_function_name, getContext()); - in_function_to_modify->resolveAsFunction(global_in_function_resolver->build(in_function_to_modify->getArgumentColumns())); - } - else if (auto * join_node_to_modify = in_or_join_node_to_modify->as()) - { - join_node_to_modify->setLocality(JoinLocality::Global); - } - - global_in_or_join_nodes.push_back(in_function_or_join_stack.back()); - } - else if (distributed_product_mode == DistributedProductMode::ALLOW) - { - return; - } - else if (distributed_product_mode == DistributedProductMode::DENY) - { - throw Exception(ErrorCodes::DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED, - "Double-distributed IN/JOIN subqueries is denied (distributed_product_mode = 'deny'). " - "You may rewrite query to use local tables " - "in subqueries, or use GLOBAL keyword, or set distributed_product_mode to suitable value."); - } - } - - std::vector in_function_or_join_stack; - std::unordered_map replacement_map; - std::vector global_in_or_join_nodes; -}; - -/** Execute subquery node and put result in mutable context temporary table. - * Returns table node that is initialized with temporary table storage. - */ -TableNodePtr executeSubqueryNode(const QueryTreeNodePtr & subquery_node, - ContextMutablePtr & mutable_context, - size_t subquery_depth) -{ - auto subquery_hash = subquery_node->getTreeHash(); - String temporary_table_name = fmt::format("_data_{}_{}", subquery_hash.first, subquery_hash.second); - - const auto & external_tables = mutable_context->getExternalTables(); - auto external_table_it = external_tables.find(temporary_table_name); - if (external_table_it != external_tables.end()) - { - auto temporary_table_expression_node = std::make_shared(external_table_it->second, mutable_context); - temporary_table_expression_node->setTemporaryTableName(temporary_table_name); - return temporary_table_expression_node; - } - - auto subquery_options = SelectQueryOptions(QueryProcessingStage::Complete, subquery_depth, true /*is_subquery*/); - auto context_copy = Context::createCopy(mutable_context); - updateContextForSubqueryExecution(context_copy); - - InterpreterSelectQueryAnalyzer interpreter(subquery_node, context_copy, subquery_options); - auto & query_plan = interpreter.getQueryPlan(); - - auto sample_block_with_unique_names = query_plan.getCurrentDataStream().header; - makeUniqueColumnNamesInBlock(sample_block_with_unique_names); - - if (!blocksHaveEqualStructure(sample_block_with_unique_names, query_plan.getCurrentDataStream().header)) - { - auto actions_dag = ActionsDAG::makeConvertingActions( - query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(), - sample_block_with_unique_names.getColumnsWithTypeAndName(), - ActionsDAG::MatchColumnsMode::Position); - auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(actions_dag)); - query_plan.addStep(std::move(converting_step)); - } - - Block sample = interpreter.getSampleBlock(); - NamesAndTypesList columns = sample.getNamesAndTypesList(); - - auto external_storage_holder = TemporaryTableHolder( - mutable_context, - ColumnsDescription{columns}, - ConstraintsDescription{}, - nullptr /*query*/, - true /*create_for_global_subquery*/); - - StoragePtr external_storage = 
external_storage_holder.getTable(); - auto temporary_table_expression_node = std::make_shared(external_storage, mutable_context); - temporary_table_expression_node->setTemporaryTableName(temporary_table_name); - - auto table_out = external_storage->write({}, external_storage->getInMemoryMetadataPtr(), mutable_context, /*async_insert=*/false); - auto io = interpreter.execute(); - io.pipeline.complete(std::move(table_out)); - CompletedPipelineExecutor executor(io.pipeline); - executor.execute(); - - mutable_context->addExternalTable(temporary_table_name, std::move(external_storage_holder)); - - return temporary_table_expression_node; -} - QueryTreeNodePtr buildQueryTreeDistributed(SelectQueryInfo & query_info, const StorageSnapshotPtr & distributed_storage_snapshot, const StorageID & remote_storage_id, @@ -963,81 +706,7 @@ QueryTreeNodePtr buildQueryTreeDistributed(SelectQueryInfo & query_info, auto query_tree_to_modify = query_info.query_tree->cloneAndReplace(query_info.table_expression, std::move(replacement_table_expression)); - CollectColumnSourceToColumnsVisitor collect_column_source_to_columns_visitor; - collect_column_source_to_columns_visitor.visit(query_tree_to_modify); - - const auto & column_source_to_columns = collect_column_source_to_columns_visitor.getColumnSourceToColumns(); - - DistributedProductModeRewriteInJoinVisitor visitor(query_info.planner_context->getQueryContext()); - visitor.visit(query_tree_to_modify); - - auto replacement_map = visitor.getReplacementMap(); - const auto & global_in_or_join_nodes = visitor.getGlobalInOrJoinNodes(); - - for (const auto & global_in_or_join_node : global_in_or_join_nodes) - { - if (auto * join_node = global_in_or_join_node.query_node->as()) - { - auto join_right_table_expression = join_node->getRightTableExpression(); - auto join_right_table_expression_node_type = join_right_table_expression->getNodeType(); - - QueryTreeNodePtr subquery_node; - - if (join_right_table_expression_node_type == QueryTreeNodeType::QUERY || - join_right_table_expression_node_type == QueryTreeNodeType::UNION) - { - subquery_node = join_right_table_expression; - } - else if (join_right_table_expression_node_type == QueryTreeNodeType::TABLE || - join_right_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION) - { - const auto & columns = column_source_to_columns.at(join_right_table_expression).columns; - subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns, - join_right_table_expression, - planner_context->getQueryContext()); - } - else - { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Expected JOIN right table expression to be table, table function, query or union node. 
Actual {}", - join_right_table_expression->formatASTForErrorMessage()); - } - - auto temporary_table_expression_node = executeSubqueryNode(subquery_node, - planner_context->getMutableQueryContext(), - global_in_or_join_node.subquery_depth); - temporary_table_expression_node->setAlias(join_right_table_expression->getAlias()); - - replacement_map.emplace(join_right_table_expression.get(), std::move(temporary_table_expression_node)); - continue; - } - else if (auto * in_function_node = global_in_or_join_node.query_node->as()) - { - auto & in_function_subquery_node = in_function_node->getArguments().getNodes().at(1); - auto in_function_node_type = in_function_subquery_node->getNodeType(); - if (in_function_node_type != QueryTreeNodeType::QUERY && in_function_node_type != QueryTreeNodeType::UNION) - continue; - - auto temporary_table_expression_node = executeSubqueryNode(in_function_subquery_node, - planner_context->getMutableQueryContext(), - global_in_or_join_node.subquery_depth); - - in_function_subquery_node = std::move(temporary_table_expression_node); - } - else - { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Expected global IN or JOIN query node. Actual {}", - global_in_or_join_node.query_node->formatASTForErrorMessage()); - } - } - - if (!replacement_map.empty()) - query_tree_to_modify = query_tree_to_modify->cloneAndReplace(replacement_map); - - removeGroupingFunctionSpecializations(query_tree_to_modify); - - return query_tree_to_modify; + return buildQueryTreeForShard(query_info, query_tree_to_modify); } } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 61d1442df92..fafb3b124f2 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -21,6 +21,7 @@ #include +#include #include #include #include @@ -74,6 +75,8 @@ #include #include +#include + #include #include #include @@ -4734,20 +4737,27 @@ void StorageReplicatedMergeTree::read( { auto table_id = getStorageID(); - const auto & modified_query_ast = ClusterProxy::rewriteSelectQuery( - local_context, query_info.query, - table_id.database_name, table_id.table_name, /*remote_table_function_ptr*/nullptr); - - auto cluster = local_context->getCluster(local_context->getSettingsRef().cluster_for_parallel_replicas); + ASTPtr modified_query_ast; Block header; if (local_context->getSettingsRef().allow_experimental_analyzer) + { + auto modified_query_tree = buildQueryTreeForShard(query_info, query_info.query_tree); + header = InterpreterSelectQueryAnalyzer::getSampleBlock( - modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()); + modified_query_tree, local_context, SelectQueryOptions(processed_stage).analyze()); + modified_query_ast = queryNodeToSelectQuery(modified_query_tree); + } else + { header = InterpreterSelectQuery(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock(); + modified_query_ast = ClusterProxy::rewriteSelectQuery(local_context, query_info.query, + table_id.database_name, table_id.table_name, /*remote_table_function_ptr*/nullptr); + } + + auto cluster = local_context->getCluster(local_context->getSettingsRef().cluster_for_parallel_replicas); ClusterProxy::SelectStreamFactory select_stream_factory = ClusterProxy::SelectStreamFactory( diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp new file mode 100644 index 00000000000..a42d67d9aa7 --- /dev/null +++ b/src/Storages/buildQueryTreeForShard.cpp @@ 
-0,0 +1,372 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED; +} + +namespace +{ + +/// Visitor that collect column source to columns mapping from query and all subqueries +class CollectColumnSourceToColumnsVisitor : public InDepthQueryTreeVisitor +{ +public: + struct Columns + { + NameSet column_names; + NamesAndTypes columns; + + void addColumn(NameAndTypePair column) + { + if (column_names.contains(column.name)) + return; + + column_names.insert(column.name); + columns.push_back(std::move(column)); + } + }; + + const std::unordered_map & getColumnSourceToColumns() const + { + return column_source_to_columns; + } + + void visitImpl(QueryTreeNodePtr & node) + { + auto * column_node = node->as(); + if (!column_node) + return; + + auto column_source = column_node->getColumnSourceOrNull(); + if (!column_source) + return; + + auto it = column_source_to_columns.find(column_source); + if (it == column_source_to_columns.end()) + { + auto [insert_it, _] = column_source_to_columns.emplace(column_source, Columns()); + it = insert_it; + } + + it->second.addColumn(column_node->getColumn()); + } + +private: + std::unordered_map column_source_to_columns; +}; + +/** Visitor that rewrites IN and JOINs in query and all subqueries according to distributed_product_mode and + * prefer_global_in_and_join settings. + * + * Additionally collects GLOBAL JOIN and GLOBAL IN query nodes. + * + * If distributed_product_mode = deny, then visitor throws exception if there are multiple distributed tables. + * If distributed_product_mode = local, then visitor collects replacement map for tables that must be replaced + * with local tables. + * If distributed_product_mode = global or prefer_global_in_and_join setting is true, then visitor rewrites JOINs and IN functions that + * contain distributed tables to GLOBAL JOINs and GLOBAL IN functions. + * If distributed_product_mode = allow, then visitor does not rewrite query if there are multiple distributed tables. 
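Editor's aside, not part of the patch: the comment above describes the four `distributed_product_mode` behaviours; the sketch below only illustrates the user-visible effect on a double-distributed IN subquery. `dist_a` and `dist_b` are hypothetical Distributed tables spanning two or more shards.

```sql
-- Hypothetical tables. With the default distributed_product_mode = 'deny'
-- this query is rejected with DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED.
-- With 'global' (or prefer_global_in_and_join = 1) it behaves as if GLOBAL IN
-- had been written: the subquery runs once and is shipped to the shards
-- as a temporary table.
SELECT count()
FROM dist_a
WHERE id IN (SELECT id FROM dist_b)
SETTINGS distributed_product_mode = 'global';
```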
+ */ +class DistributedProductModeRewriteInJoinVisitor : public InDepthQueryTreeVisitorWithContext +{ +public: + using Base = InDepthQueryTreeVisitorWithContext; + using Base::Base; + + explicit DistributedProductModeRewriteInJoinVisitor(const ContextPtr & context_) + : Base(context_) + {} + + struct InFunctionOrJoin + { + QueryTreeNodePtr query_node; + size_t subquery_depth = 0; + }; + + const std::unordered_map & getReplacementMap() const + { + return replacement_map; + } + + const std::vector & getGlobalInOrJoinNodes() const + { + return global_in_or_join_nodes; + } + + static bool needChildVisit(QueryTreeNodePtr & parent, QueryTreeNodePtr & child) + { + auto * function_node = parent->as(); + if (function_node && isNameOfGlobalInFunction(function_node->getFunctionName())) + return false; + + auto * join_node = parent->as(); + if (join_node && join_node->getLocality() == JoinLocality::Global && join_node->getRightTableExpression() == child) + return false; + + return true; + } + + void visitImpl(QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + auto * join_node = node->as(); + + if ((function_node && isNameOfGlobalInFunction(function_node->getFunctionName())) || + (join_node && join_node->getLocality() == JoinLocality::Global)) + { + InFunctionOrJoin in_function_or_join_entry; + in_function_or_join_entry.query_node = node; + in_function_or_join_entry.subquery_depth = getSubqueryDepth(); + global_in_or_join_nodes.push_back(std::move(in_function_or_join_entry)); + return; + } + + if ((function_node && isNameOfLocalInFunction(function_node->getFunctionName())) || + (join_node && join_node->getLocality() != JoinLocality::Global)) + { + InFunctionOrJoin in_function_or_join_entry; + in_function_or_join_entry.query_node = node; + in_function_or_join_entry.subquery_depth = getSubqueryDepth(); + in_function_or_join_stack.push_back(in_function_or_join_entry); + return; + } + + if (node->getNodeType() == QueryTreeNodeType::TABLE) + tryRewriteTableNodeIfNeeded(node); + } + + void leaveImpl(QueryTreeNodePtr & node) + { + if (!in_function_or_join_stack.empty() && node.get() == in_function_or_join_stack.back().query_node.get()) + in_function_or_join_stack.pop_back(); + } + +private: + void tryRewriteTableNodeIfNeeded(const QueryTreeNodePtr & table_node) + { + const auto & table_node_typed = table_node->as(); + const auto * distributed_storage = typeid_cast(table_node_typed.getStorage().get()); + if (!distributed_storage) + return; + + bool distributed_valid_for_rewrite = distributed_storage->getShardCount() >= 2; + if (!distributed_valid_for_rewrite) + return; + + auto distributed_product_mode = getSettings().distributed_product_mode; + + if (distributed_product_mode == DistributedProductMode::LOCAL) + { + StorageID remote_storage_id = StorageID{distributed_storage->getRemoteDatabaseName(), + distributed_storage->getRemoteTableName()}; + auto resolved_remote_storage_id = getContext()->resolveStorageID(remote_storage_id); + const auto & distributed_storage_columns = table_node_typed.getStorageSnapshot()->metadata->getColumns(); + auto storage = std::make_shared(resolved_remote_storage_id, distributed_storage_columns); + auto replacement_table_expression = std::make_shared(std::move(storage), getContext()); + replacement_map.emplace(table_node.get(), std::move(replacement_table_expression)); + } + else if ((distributed_product_mode == DistributedProductMode::GLOBAL || getSettings().prefer_global_in_and_join) && + !in_function_or_join_stack.empty()) + { + auto * in_or_join_node_to_modify 
= in_function_or_join_stack.back().query_node.get(); + + if (auto * in_function_to_modify = in_or_join_node_to_modify->as()) + { + auto global_in_function_name = getGlobalInFunctionNameForLocalInFunctionName(in_function_to_modify->getFunctionName()); + auto global_in_function_resolver = FunctionFactory::instance().get(global_in_function_name, getContext()); + in_function_to_modify->resolveAsFunction(global_in_function_resolver->build(in_function_to_modify->getArgumentColumns())); + } + else if (auto * join_node_to_modify = in_or_join_node_to_modify->as()) + { + join_node_to_modify->setLocality(JoinLocality::Global); + } + + global_in_or_join_nodes.push_back(in_function_or_join_stack.back()); + } + else if (distributed_product_mode == DistributedProductMode::ALLOW) + { + return; + } + else if (distributed_product_mode == DistributedProductMode::DENY) + { + throw Exception(ErrorCodes::DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED, + "Double-distributed IN/JOIN subqueries is denied (distributed_product_mode = 'deny'). " + "You may rewrite query to use local tables " + "in subqueries, or use GLOBAL keyword, or set distributed_product_mode to suitable value."); + } + } + + std::vector in_function_or_join_stack; + std::unordered_map replacement_map; + std::vector global_in_or_join_nodes; +}; + +/** Execute subquery node and put result in mutable context temporary table. + * Returns table node that is initialized with temporary table storage. + */ +TableNodePtr executeSubqueryNode(const QueryTreeNodePtr & subquery_node, + ContextMutablePtr & mutable_context, + size_t subquery_depth) +{ + auto subquery_hash = subquery_node->getTreeHash(); + String temporary_table_name = fmt::format("_data_{}_{}", subquery_hash.first, subquery_hash.second); + + const auto & external_tables = mutable_context->getExternalTables(); + auto external_table_it = external_tables.find(temporary_table_name); + if (external_table_it != external_tables.end()) + { + auto temporary_table_expression_node = std::make_shared(external_table_it->second, mutable_context); + temporary_table_expression_node->setTemporaryTableName(temporary_table_name); + return temporary_table_expression_node; + } + + auto subquery_options = SelectQueryOptions(QueryProcessingStage::Complete, subquery_depth, true /*is_subquery*/); + auto context_copy = Context::createCopy(mutable_context); + updateContextForSubqueryExecution(context_copy); + + InterpreterSelectQueryAnalyzer interpreter(subquery_node, context_copy, subquery_options); + auto & query_plan = interpreter.getQueryPlan(); + + auto sample_block_with_unique_names = query_plan.getCurrentDataStream().header; + makeUniqueColumnNamesInBlock(sample_block_with_unique_names); + + if (!blocksHaveEqualStructure(sample_block_with_unique_names, query_plan.getCurrentDataStream().header)) + { + auto actions_dag = ActionsDAG::makeConvertingActions( + query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(), + sample_block_with_unique_names.getColumnsWithTypeAndName(), + ActionsDAG::MatchColumnsMode::Position); + auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(actions_dag)); + query_plan.addStep(std::move(converting_step)); + } + + Block sample = interpreter.getSampleBlock(); + NamesAndTypesList columns = sample.getNamesAndTypesList(); + + auto external_storage_holder = TemporaryTableHolder( + mutable_context, + ColumnsDescription{columns}, + ConstraintsDescription{}, + nullptr /*query*/, + true /*create_for_global_subquery*/); + + StoragePtr external_storage = 
external_storage_holder.getTable(); + auto temporary_table_expression_node = std::make_shared(external_storage, mutable_context); + temporary_table_expression_node->setTemporaryTableName(temporary_table_name); + + auto table_out = external_storage->write({}, external_storage->getInMemoryMetadataPtr(), mutable_context, /*async_insert=*/false); + auto io = interpreter.execute(); + io.pipeline.complete(std::move(table_out)); + CompletedPipelineExecutor executor(io.pipeline); + executor.execute(); + + mutable_context->addExternalTable(temporary_table_name, std::move(external_storage_holder)); + + return temporary_table_expression_node; +} + +} + +QueryTreeNodePtr buildQueryTreeForShard(SelectQueryInfo & query_info, QueryTreeNodePtr query_tree_to_modify) +{ + auto & planner_context = query_info.planner_context; + const auto & query_context = planner_context->getQueryContext(); + + CollectColumnSourceToColumnsVisitor collect_column_source_to_columns_visitor; + collect_column_source_to_columns_visitor.visit(query_tree_to_modify); + + const auto & column_source_to_columns = collect_column_source_to_columns_visitor.getColumnSourceToColumns(); + + DistributedProductModeRewriteInJoinVisitor visitor(query_info.planner_context->getQueryContext()); + visitor.visit(query_tree_to_modify); + + auto replacement_map = visitor.getReplacementMap(); + const auto & global_in_or_join_nodes = visitor.getGlobalInOrJoinNodes(); + + for (const auto & global_in_or_join_node : global_in_or_join_nodes) + { + if (auto * join_node = global_in_or_join_node.query_node->as()) + { + auto join_right_table_expression = join_node->getRightTableExpression(); + auto join_right_table_expression_node_type = join_right_table_expression->getNodeType(); + + QueryTreeNodePtr subquery_node; + + if (join_right_table_expression_node_type == QueryTreeNodeType::QUERY || + join_right_table_expression_node_type == QueryTreeNodeType::UNION) + { + subquery_node = join_right_table_expression; + } + else if (join_right_table_expression_node_type == QueryTreeNodeType::TABLE || + join_right_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION) + { + const auto & columns = column_source_to_columns.at(join_right_table_expression).columns; + subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns, + join_right_table_expression, + planner_context->getQueryContext()); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Expected JOIN right table expression to be table, table function, query or union node. 
Actual {}", + join_right_table_expression->formatASTForErrorMessage()); + } + + auto temporary_table_expression_node = executeSubqueryNode(subquery_node, + planner_context->getMutableQueryContext(), + global_in_or_join_node.subquery_depth); + temporary_table_expression_node->setAlias(join_right_table_expression->getAlias()); + + replacement_map.emplace(join_right_table_expression.get(), std::move(temporary_table_expression_node)); + continue; + } + else if (auto * in_function_node = global_in_or_join_node.query_node->as()) + { + auto & in_function_subquery_node = in_function_node->getArguments().getNodes().at(1); + auto in_function_node_type = in_function_subquery_node->getNodeType(); + if (in_function_node_type != QueryTreeNodeType::QUERY && in_function_node_type != QueryTreeNodeType::UNION) + continue; + + auto temporary_table_expression_node = executeSubqueryNode(in_function_subquery_node, + planner_context->getMutableQueryContext(), + global_in_or_join_node.subquery_depth); + + in_function_subquery_node = std::move(temporary_table_expression_node); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Expected global IN or JOIN query node. Actual {}", + global_in_or_join_node.query_node->formatASTForErrorMessage()); + } + } + + if (!replacement_map.empty()) + query_tree_to_modify = query_tree_to_modify->cloneAndReplace(replacement_map); + + removeGroupingFunctionSpecializations(query_tree_to_modify); + + return query_tree_to_modify; +} + +} diff --git a/src/Storages/buildQueryTreeForShard.h b/src/Storages/buildQueryTreeForShard.h new file mode 100644 index 00000000000..05d63faeb9f --- /dev/null +++ b/src/Storages/buildQueryTreeForShard.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace DB +{ + +struct SelectQueryInfo; + +class IQueryTreeNode; +using QueryTreeNodePtr = std::shared_ptr; + +QueryTreeNodePtr buildQueryTreeForShard(SelectQueryInfo & query_info, QueryTreeNodePtr query_tree_to_modify); + +} diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference index 4e93c530f7b..f688db940d9 100644 --- a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference @@ -9,4 +9,4 @@ 7885388429666205427 8124171311239967992 1 1 -- Simple query with analyzer and pure parallel replicas\nSELECT number\nFROM join_inner_table__fuzz_146_replicated\n SETTINGS\n allow_experimental_analyzer = 1,\n max_parallel_replicas = 2,\n cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\',\n allow_experimental_parallel_reading_from_replicas = 1,\n use_hedged_requests = 0; -0 2 SELECT `default`.`join_inner_table__fuzz_146_replicated`.`number` AS `number` FROM `default`.`join_inner_table__fuzz_146_replicated` +0 2 SELECT `join_inner_table__fuzz_146_replicated`.`number` AS `number` FROM `default`.`join_inner_table__fuzz_146_replicated` SETTINGS allow_experimental_analyzer = 1, max_parallel_replicas = 2, cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\', allow_experimental_parallel_reading_from_replicas = 1, use_hedged_requests = 0 From d05f89f8f5ec3793256cae1557e2af60650290cf Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Mon, 12 Jun 2023 17:33:15 +0000 Subject: [PATCH 362/722] Fix style --- src/Storages/StorageDistributed.cpp | 1 - src/Storages/buildQueryTreeForShard.cpp | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 1ec45ce3d57..0472ce6f832 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -154,7 +154,6 @@ namespace ErrorCodes extern const int DISTRIBUTED_TOO_MANY_PENDING_BYTES; extern const int ARGUMENT_OUT_OF_BOUND; extern const int TOO_LARGE_DISTRIBUTED_DEPTH; - extern const int DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED; } namespace ActionLocks diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index a42d67d9aa7..fa4730cbe84 100644 --- a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -21,6 +21,7 @@ namespace DB namespace ErrorCodes { + extern const int LOGICAL_ERROR; extern const int DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED; } From a4285d56b22aafe453309ce728c7380666626576 Mon Sep 17 00:00:00 2001 From: ltrk2 <107155950+ltrk2@users.noreply.github.com> Date: Wed, 31 May 2023 12:37:46 -0700 Subject: [PATCH 363/722] Fix compilation error on big-endian platforms --- src/Functions/FunctionsCodingIP.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/FunctionsCodingIP.cpp b/src/Functions/FunctionsCodingIP.cpp index 2671418fc7b..897b24d90e0 100644 --- a/src/Functions/FunctionsCodingIP.cpp +++ b/src/Functions/FunctionsCodingIP.cpp @@ -580,7 +580,7 @@ private: #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ unalignedStoreLittleEndian(buf + 8, 0x00000000FFFF0000ull | (static_cast(ntohl(in)) << 32)); #else - unalignedStoreLittleEndian(buf + 8, 0x00000000FFFF0000ull | (static_cast(__builtin_bswap32(in))) << 32)); + unalignedStoreLittleEndian(buf + 8, 0x00000000FFFF0000ull | (static_cast(std::byteswap(in))) << 32); #endif } }; From edb4a644b1ef9fdcba7f53c60ed37438d610ae9a Mon Sep 17 00:00:00 2001 From: ltrk2 <107155950+ltrk2@users.noreply.github.com> Date: Thu, 8 Jun 2023 10:21:24 -0400 Subject: [PATCH 364/722] Update FunctionsCodingIP.cpp --- src/Functions/FunctionsCodingIP.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/FunctionsCodingIP.cpp b/src/Functions/FunctionsCodingIP.cpp index 897b24d90e0..7bdbac6531d 100644 --- a/src/Functions/FunctionsCodingIP.cpp +++ b/src/Functions/FunctionsCodingIP.cpp @@ -580,7 +580,7 @@ private: #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ unalignedStoreLittleEndian(buf + 8, 0x00000000FFFF0000ull | (static_cast(ntohl(in)) << 32)); #else - unalignedStoreLittleEndian(buf + 8, 0x00000000FFFF0000ull | (static_cast(std::byteswap(in))) << 32); + unalignedStoreLittleEndian(buf + 8, 0x00000000FFFF0000ull | (static_cast(std::byteswap(in)) << 32)); #endif } }; From e6e8576864421bd2db043d269dfe65d1bf4f85aa Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Mon, 12 Jun 2023 17:04:33 -0300 Subject: [PATCH 365/722] Update mergetree.md --- docs/en/engines/table-engines/mergetree-family/mergetree.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 61276110138..dbde1a90f67 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -1138,7 +1138,7 @@ These parameters define the cache layer: Cache parameters: - `path` — The path where metadata for the cache is stored. -- `max_size` — The size (amount of memory) that the cache can grow to. +- `max_size` — The size (amount of disk space) that the cache can grow to. 
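Editor's aside, not part of the patch: a hedged way to see how much of the configured `max_size` a filesystem cache currently uses. The `system.filesystem_cache` table exists in this generation of ClickHouse, but its exact column set varies between versions, so treat the column names below as assumptions.

```sql
-- Rough per-cache usage, to compare against the configured max_size.
-- cache_base_path and size are assumed column names; adjust for your version.
SELECT
    cache_base_path,
    count() AS file_segments,
    formatReadableSize(sum(size)) AS used_space
FROM system.filesystem_cache
GROUP BY cache_base_path;
```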
:::tip There are several other cache parameters that you can use to tune your storage, see [using local cache](/docs/en/operations/storing-data.md/#using-local-cache) for the details. From 4f39ee51ae867b219735290125f8dc91d461abf6 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 12 Jun 2023 20:06:57 +0000 Subject: [PATCH 366/722] Update Annoy docs --- .../mergetree-family/annindexes.md | 20 +++++++++---------- src/Parsers/ASTIndexDeclaration.h | 3 +++ src/Parsers/ParserCreateIndexQuery.cpp | 4 ++-- src/Parsers/ParserCreateQuery.cpp | 4 ++-- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index 2b0b77a0735..16e244077a7 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -100,7 +100,7 @@ ANN indexes support two types of queries: :::tip To avoid writing out large vectors, you can use [query -parameters](/docs/en//interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters), e.g. +parameters](/docs/en/interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters), e.g. ```bash clickhouse-client --param_vec='hello' --query="SELECT * FROM table WHERE L2Distance(vectors, {vec: Array(Float32)}) < 1.0" @@ -128,14 +128,14 @@ granularity of granules, sub-indexes extrapolate matching rows to granule granul skip data at the granularity of index blocks. The `GRANULARITY` parameter determines how many ANN sub-indexes are created. Bigger `GRANULARITY` values mean fewer but larger ANN -sub-indexes, up to the point where a column (or a column part) has only a single sub-index. In that case, the sub-index has a "global" view of -all column rows and can directly return all granules of the column (part) with relevant rows (there are at at most `LIMIT `-many -such granules). In a second step, ClickHouse will load these granules and identify the actually best rows by performing a brute-force distance -calculation over all rows of the granules. With a small `GRANULARITY` value, each of the sub-indexes returns up to `LIMIT N`-many granules. -As a result, more granules need to be loaded and post-filtered. Note that the search accuracy is with both cases equally good, only the -processing performance differs. It is generally recommended to use a large `GRANULARITY` for ANN indexes and fall back to a smaller -`GRANULARITY` values only in case of problems like excessive memory consumption of the ANN structures. If no `GRANULARITY` was specified for -ANN indexes, the default value is 100 million. +sub-indexes, up to the point where a column (or a column's data part) has only a single sub-index. In that case, the sub-index has a +"global" view of all column rows and can directly return all granules of the column (part) with relevant rows (there are at most `LIMIT +`-many such granules). In a second step, ClickHouse will load these granules and identify the actually best rows by performing a +brute-force distance calculation over all rows of the granules. With a small `GRANULARITY` value, each of the sub-indexes returns up to +`LIMIT N`-many granules. As a result, more granules need to be loaded and post-filtered. Note that the search accuracy is with both cases +equally good, only the processing performance differs. 
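Editor's aside, not part of the patch: a minimal SQL sketch of the `GRANULARITY` parameter discussed in the surrounding paragraph. Table, column and index names are made up, and `allow_experimental_annoy_index` is assumed to be required in this version; see the full annindexes.md for the authoritative syntax.

```sql
SET allow_experimental_annoy_index = 1;

-- GRANULARITY is measured in index granules; 100000000 is the default mentioned above,
-- i.e. effectively a single "global" Annoy sub-index per column part.
CREATE TABLE ann_demo
(
    id UInt64,
    vectors Array(Float32),
    INDEX ann_idx vectors TYPE annoy GRANULARITY 100000000
)
ENGINE = MergeTree
ORDER BY id;

-- The sub-indexes pre-select candidate granules; exact distances are then
-- computed only over the rows of those granules.
SELECT id
FROM ann_demo
ORDER BY L2Distance(vectors, [0.1, 0.2, 0.3])
LIMIT 10;
```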
It is generally recommended to use a large `GRANULARITY` for ANN indexes and fall +back to a smaller `GRANULARITY` values only in case of problems like excessive memory consumption of the ANN structures. If no `GRANULARITY` +was specified for ANN indexes, the default value is 100 million. # Available ANN Indexes @@ -204,7 +204,7 @@ values mean more accurate results at the cost of longer query runtime: ``` sql SELECT * -FROM table_name [WHERE ...] +FROM table_name ORDER BY L2Distance(vectors, Point) LIMIT N SETTINGS annoy_index_search_k_nodes=100 diff --git a/src/Parsers/ASTIndexDeclaration.h b/src/Parsers/ASTIndexDeclaration.h index bd52a611f3f..6ed241f75ab 100644 --- a/src/Parsers/ASTIndexDeclaration.h +++ b/src/Parsers/ASTIndexDeclaration.h @@ -12,6 +12,9 @@ class ASTFunction; class ASTIndexDeclaration : public IAST { public: + static const auto DEFAULT_INDEX_GRANULARITY = 1uz; + static const auto DEFAULT_ANNOY_INDEX_GRANULARITY = 100'000'000uz; + String name; IAST * expr; ASTFunction * type; diff --git a/src/Parsers/ParserCreateIndexQuery.cpp b/src/Parsers/ParserCreateIndexQuery.cpp index 57afd3fb99e..f231573b920 100644 --- a/src/Parsers/ParserCreateIndexQuery.cpp +++ b/src/Parsers/ParserCreateIndexQuery.cpp @@ -52,9 +52,9 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected else { if (index->type->name == "annoy") - index->granularity = 100'000'000; + index->granularity = ASTIndexDeclaration::DEFAULT_ANNOY_INDEX_GRANULARITY; else - index->granularity = 1; + index->granularity = ASTIndexDeclaration::DEFAULT_INDEX_GRANULARITY; } node = index; diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index c6273f369b1..adf3513ba40 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -147,9 +147,9 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe else { if (index->type->name == "annoy") - index->granularity = 100'000'000; + index->granularity = ASTIndexDeclaration::DEFAULT_ANNOY_INDEX_GRANULARITY; else - index->granularity = 1; + index->granularity = ASTIndexDeclaration::DEFAULT_INDEX_GRANULARITY; } node = index; From 002c15823c26c3e0c577a4dd8ec8f319b3120a78 Mon Sep 17 00:00:00 2001 From: ltrk2 <107155950+ltrk2@users.noreply.github.com> Date: Mon, 12 Jun 2023 16:44:46 -0700 Subject: [PATCH 367/722] Perform in-place endianness transform because of padding --- src/AggregateFunctions/ReservoirSamplerDeterministic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h index b1a39a5dcc5..25d3b182654 100644 --- a/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ b/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -190,12 +190,12 @@ public: /// Here we ensure that padding is zero without changing the protocol. /// TODO: After implementation of "versioning aggregate function state", /// change the serialization format. 
-        Element elem; memset(&elem, 0, sizeof(elem)); elem = samples[i]; -        writeBinaryLittleEndian(elem, buf); +        DB::transformEndianness(elem); +        DB::writeString(reinterpret_cast(&elem), sizeof(elem), buf); } } From 959fde4491e33586916efcf689ba1a4b361e6865 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Tue, 13 Jun 2023 09:33:38 +0800 Subject: [PATCH 368/722] add notifications in docs --- docs/en/engines/table-engines/integrations/redis.md | 9 ++++++++- src/Storages/StorageRedis.cpp | 7 +++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/redis.md b/docs/en/engines/table-engines/integrations/redis.md index 6cfc60c836c..68235a89d33 100644 --- a/docs/en/engines/table-engines/integrations/redis.md +++ b/docs/en/engines/table-engines/integrations/redis.md @@ -6,7 +6,7 @@ sidebar_label: Redis # Redis -This engine allows integrating ClickHouse with [Redis](https://redis.io/). +This engine allows integrating ClickHouse with [Redis](https://redis.io/). Since Redis uses a key-value model, we strongly recommend querying it only with point queries, such as `where k=xx` or `where k in (xx, xx)`. ## Creating a Table {#creating-a-table} @@ -110,3 +110,10 @@ Flush Redis db asynchronously. Also `Truncate` support SYNC mode. ```sql TRUNCATE TABLE redis_table SYNC; ``` + + +## Limitations {#limitations} + +The Redis engine also supports scanning queries, such as `where k > xx`, but they have some limitations: +1. A scanning query may produce duplicated keys in a very rare case while Redis is rehashing; for details see [Redis Scan](https://github.com/redis/redis/blob/e4d183afd33e0b2e6e8d1c79a832f678a04a7886/src/dict.c#L1186-L1269) +2. Keys may be created and deleted during the scan, so the resulting dataset cannot represent a valid point in time. diff --git a/src/Storages/StorageRedis.cpp b/src/Storages/StorageRedis.cpp index 71c84443d8e..ddb1b62c7b0 100644 --- a/src/Storages/StorageRedis.cpp +++ b/src/Storages/StorageRedis.cpp @@ -3,12 +3,12 @@ #include #include #include +#include #include #include #include #include #include -#include #include #include @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -87,9 +88,11 @@ public: return storage.getBySerializedKeys(raw_keys, nullptr); } -    /// TODO scan may get duplicated keys +    /// TODO scan may get duplicated keys when Redis is rehashing; it is a very rare case. 
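Editor's aside, not part of the patch: a short sketch of the point-style access recommended in the redis.md change above. The `Redis('host:port') PRIMARY KEY(...)` form follows the Creating-a-Table section of that document; the address, key names and values here are placeholders.

```sql
-- Placeholder address; extra engine arguments (db_index, password, pool_size) omitted.
CREATE TABLE redis_kv (k String, v UInt32)
ENGINE = Redis('redis1:6379') PRIMARY KEY (k);

-- Recommended: point lookups by key.
SELECT * FROM redis_kv WHERE k = 'user:42';
SELECT * FROM redis_kv WHERE k IN ('user:42', 'user:43');

-- Possible, but executed via SCAN and subject to the limitations listed above.
SELECT count() FROM redis_kv WHERE k > 'user:';
```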
Chunk generateFullScan() { + checkStackSize(); + /// redis scan ending if (iterator == 0) return {}; From 2395b25f9e8828ea1adbf6d303832a1ea7ee97a8 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Tue, 13 Jun 2023 01:55:34 +0000 Subject: [PATCH 369/722] Changes after review --- docs/en/interfaces/cli.md | 18 +++++++++++------- docs/ru/interfaces/cli.md | 14 +++++++++----- src/Client/ConnectionString.cpp | 6 +++--- .../02784_connection_string.reference | 1 + .../0_stateless/02784_connection_string.sh | 3 +++ 5 files changed, 27 insertions(+), 15 deletions(-) diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index fc24bdcad68..6736d05e65f 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -202,15 +202,16 @@ Instead of --host, --port, --user and --password options, ClickHouse client also clickhouse-client alternatively supports connecting to clickhouse server using a connection string similar to [MongoDB](https://www.mongodb.com/docs/manual/reference/connection-string/), [PostgreSQL](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), [MySQL](https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html#connecting-using-uri). It has the following syntax: ```text -clickhouse:[//[user_info@][hosts_and_ports]][/database][?query_parameters] +clickhouse:[//[user[:password]@][hosts_and_ports]][/database][?query_parameters] ``` Where -- `user_spec` - (optional) is a user and an optional password, -- `hostspec` - (optional) is a list of hosts and optional ports `host[:port] [, host:[port]], ...`, +- `user` - (optional) is a user name, +- `password` - (optional) is a user password. If `:` is specified and the password is blank, the client will prompt for the user's password. +- `hosts_and_ports` - (optional) is a list of hosts and optional ports `host[:port] [, host:[port]], ...`, - `database` - (optional) is the database name, -- `paramspec` - (optional) is a list of key-value pairs `param1=value1[,¶m2=value2], ...`. For some parameters, no value is required. Parameter names and values are case-sensitive. +- `query_parameters` - (optional) is a list of key-value pairs `param1=value1[,¶m2=value2], ...`. For some parameters, no value is required. Parameter names and values are case-sensitive. @@ -239,7 +240,7 @@ URI allows multiple hosts to be connected to. Connection strings can contain mul ### Percent encoding {#connection_string_uri_percent_encoding} -Non-US ASCII characters in the user name, password, hosts, database or query parameters must be [percent-encoded](https://en.wikipedia.org/wiki/URL_encoding). +Non-US ASCII, spaces and special characters, and special characters in the `user`, `password`, `hosts`, `database` and `query parameters` must be [percent-encoded](https://en.wikipedia.org/wiki/URL_encoding). ### Examples {#connection_string_examples} @@ -306,10 +307,13 @@ Connect to default host using default port, default user, and default database. clickhouse-client clickhouse: ``` -Connect to the default host using the default port, using user user_name and no password. +Connect to the default host using the default port, using user `my_user` and no password. ``` bash -clickhouse-client clickhouse://user_name@ +clickhouse-client clickhouse://my_user@ + +# Using a blank password between : and @ means to asking user to enter the password before starting the connection. +clickhouse-client clickhouse://my_user:@ ``` Connect to localhost using email as the user name. 
`@` symbol is percent encoded to `%40`. diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index ee29b122afb..794ac60ec83 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -147,12 +147,13 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe clickhouse-client также поддерживает подключение к серверу clickhouse с помощью строки подключения, аналогичной [MongoDB](https://www.mongodb.com/docs/manual/reference/connection-string/), [PostgreSQL](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), [MySQL](https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html#connecting-using-uri). Она имеет следующий синтаксис: ```text -clickhouse:[//[user_info@][hosts_and_ports]][/database][?query_parameters] +clickhouse:[//[user[:password]@][hosts_and_ports]][/database][?query_parameters] ``` Где -- `user_spec` - (необязательно) - это пользователь и необязательный пароль, +- `user` - (необязательно) - это имя пользователя, +- `password` - (необязательно) - Пароль пользователя. Если символ `:` укаказан, и пароль пуст, то клиент запросит ввести пользователя пароль. - `hostspec` - (необязательно) - список хостов и необязательных портов. `host[:port] [, host:[port]], ...`, - `database` - (необязательно) - это имя базы данных, - `paramspec` - (опционально) список пар ключ-значение `param1=value1[,¶m2=value2], ...`. Для некоторых параметров значение не требуется. Имена и значения параметров чувствительны к регистру. @@ -182,7 +183,7 @@ URI позволяет подключаться к нескольким хост ### Кодирование URI {#connection_string_uri_percent_encoding} -Не US ASCII символы в имени пользователя, пароле, хостах, базе данных или параметрах запроса должны быть [закодированы](https://ru.wikipedia.org/wiki/URL#%D0%9A%D0%BE%D0%B4%D0%B8%D1%80%D0%BE%D0%B2%D0%B0%D0%BD%D0%B8%D0%B5_URL). +Не US ASCII и специальные символы в имени пользователя, пароле, хостах, базе данных и параметрах запроса должны быть [закодированы](https://ru.wikipedia.org/wiki/URL#%D0%9A%D0%BE%D0%B4%D0%B8%D1%80%D0%BE%D0%B2%D0%B0%D0%BD%D0%B8%D0%B5_URL). ### Примеры {#connection_string_examples} @@ -248,10 +249,13 @@ clickhouse-client clickhouse://localhost/my_database -s clickhouse-client clickhouse: ``` -Подключиться к хосту по умолчанию через порт по умолчанию, используя имя пользователя user_name без пароля. +Подключиться к хосту по умолчанию через порт по умолчанию, используя имя пользователя `my_user` без пароля. ``` bash -clickhouse-client clickhouse://user_name@ +clickhouse-client clickhouse://my_user@ + +# Использование пустого пароля между : и @ означает, что пользователь должен ввести пароль перед началом соединения. +clickhouse-client clickhouse://my_user:@ ``` Подключиться к localhost, используя электронную почту, как имя пользователя. Символ `@` закодирован как `%40`. diff --git a/src/Client/ConnectionString.cpp b/src/Client/ConnectionString.cpp index e1f39369b2a..8f0a0980f51 100644 --- a/src/Client/ConnectionString.cpp +++ b/src/Client/ConnectionString.cpp @@ -142,11 +142,11 @@ bool tryParseConnectionString( try { /** Poco::URI doesn't support several hosts in URI. - * Split string clickhouse:[user_info]host1:port1, ... , hostN:portN[database]?[query_parameters] + * Split string clickhouse:[user[:password]@]host1:port1, ... 
, hostN:portN[database]?[query_parameters] * into multiple string for each host: - * clickhouse:[user_info]host1:port1[database]?[query_parameters] + * clickhouse:[user[:password]@]host1:port1[database]?[query_parameters] * ... - * clickhouse:[user_info]hostN:portN[database]?[query_parameters] + * clickhouse:[user[:password]@]hostN:portN[database]?[query_parameters] */ Poco::URI uri; const auto * last_host_begin = connection_string.begin() + offset; diff --git a/tests/queries/0_stateless/02784_connection_string.reference b/tests/queries/0_stateless/02784_connection_string.reference index 6a36abae8e0..9d58d485a14 100644 --- a/tests/queries/0_stateless/02784_connection_string.reference +++ b/tests/queries/0_stateless/02784_connection_string.reference @@ -121,5 +121,6 @@ BAD_ARGUMENTS BAD_ARGUMENTS BAD_ARGUMENTS BAD_ARGUMENTS +BAD_ARGUMENTS Authentication failed Authentication failed diff --git a/tests/queries/0_stateless/02784_connection_string.sh b/tests/queries/0_stateless/02784_connection_string.sh index fce93fdad74..042f5b2108d 100755 --- a/tests/queries/0_stateless/02784_connection_string.sh +++ b/tests/queries/0_stateless/02784_connection_string.sh @@ -116,6 +116,9 @@ runClient "clickhouse:///?" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HO runClient "clickhouse://:/?" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse:" --database "$CLICKHOUSE_DATABASE" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' +# Using clickhouse-client and connection is prohibited +runClient "clickhouse:" --connection "connection" 2>&1 | grep -o 'BAD_ARGUMENTS' + # Space is used in connection string (This is prohibited). runClient " clickhouse:" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse: " 2>&1 | grep -o 'BAD_ARGUMENTS' From 6839a1318c8a656c20e7a1ed8e256fd51408820e Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Tue, 13 Jun 2023 04:03:30 +0000 Subject: [PATCH 370/722] minor changes in docs --- docs/en/interfaces/cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index 6736d05e65f..b5134ea30c0 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -240,7 +240,7 @@ URI allows multiple hosts to be connected to. Connection strings can contain mul ### Percent encoding {#connection_string_uri_percent_encoding} -Non-US ASCII, spaces and special characters, and special characters in the `user`, `password`, `hosts`, `database` and `query parameters` must be [percent-encoded](https://en.wikipedia.org/wiki/URL_encoding). +Non-US ASCII, spaces and special characters in the `user`, `password`, `hosts`, `database` and `query parameters` must be [percent-encoded](https://en.wikipedia.org/wiki/URL_encoding). 
### Examples {#connection_string_examples} From a3ff5df205ae8353395023ee4ef0bf83bee31458 Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Tue, 13 Jun 2023 09:16:06 +0300 Subject: [PATCH 371/722] Remove reduntant header SELECT from the test --- .../0_stateless/02783_parseDateTimeBestEffort_syslog.reference | 1 - .../0_stateless/02783_parseDateTimeBestEffort_syslog.sql | 2 -- 2 files changed, 3 deletions(-) diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference index 3ec93143e0e..ee75d68bff4 100644 --- a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference +++ b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference @@ -1,4 +1,3 @@ -parseDateTimeBestEffort around_June_7 res res_sam res_auc res_null res_null_sam res_null_auc res_zero res_zero_sam res_zero_auc res_us res_us_sam res_us_auc res_us_null res_us_null_sam res_us_null_auc res_us_zero res_us_zero_sam res_us_zero_auc res64 res64_sam res64_auc res64_null res64_null_sam res64_null_auc res64_zero res64_zero_sam res64_zero_auc res64_us res64_us_sam res64_us_auc res64_us_null res64_us_null_sam res64_us_null_auc res64_us_zero res64_us_zero_sam res64_us_zero_auc Jun 6 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 2023-06-06 00:00:00.000 diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql index 52975cb5bbf..742ae03ddab 100644 --- a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql +++ b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql @@ -1,5 +1,3 @@ -SELECT 'parseDateTimeBestEffort'; - WITH 86400 AS secs_in_day, now() AS ts_now, From 879db5098a80d8c8c391296b672332a3367f6ac9 Mon Sep 17 00:00:00 2001 From: Val Doroshchuk Date: Mon, 12 Jun 2023 13:47:38 +0200 Subject: [PATCH 372/722] MaterializedMySQL: Add test_named_collections --- .../configs/users.xml | 1 + .../materialize_with_ddl.py | 31 +++++++++++++++++++ .../test_materialized_mysql_database/test.py | 6 ++++ 3 files changed, 38 insertions(+) diff --git a/tests/integration/test_materialized_mysql_database/configs/users.xml b/tests/integration/test_materialized_mysql_database/configs/users.xml index 3669fbb46ba..7a7529c94bb 100644 --- a/tests/integration/test_materialized_mysql_database/configs/users.xml +++ b/tests/integration/test_materialized_mysql_database/configs/users.xml @@ -14,6 +14,7 @@ ::/0 default + 1 diff --git a/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py b/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py index 2bbbe9a3f13..7fdb73ea1f3 100644 --- a/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py +++ 
b/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py @@ -2265,3 +2265,34 @@ def dropddl(clickhouse_node, mysql_node, mysql_host): ) mysql_node.query(f"DROP DATABASE {db}") clickhouse_node.query(f"DROP DATABASE {db}") + + +def named_collections(clickhouse_node, mysql_node, service_name): + db = "named_collections" + mysql_node.query(f"DROP DATABASE IF EXISTS {db}") + clickhouse_node.query(f"DROP DATABASE IF EXISTS {db}") + mysql_node.query(f"CREATE DATABASE {db}") + mysql_node.query( + f"CREATE TABLE {db}.t1 (id INT PRIMARY KEY, name VARCHAR(64), val INT)" + ) + mysql_node.query( + f"INSERT INTO {db}.t1 (id, name, val) VALUES (1, 'a', 1), (2, 'b', 2)" + ) + + clickhouse_node.query( + f"""CREATE NAMED COLLECTION {db} AS + user = 'root', + password = 'clickhouse', + host = '{service_name}', + port = 3306, + database = '{db}' + """ + ) + clickhouse_node.query(f"CREATE DATABASE {db} ENGINE = MaterializedMySQL({db})") + check_query( + clickhouse_node, + f"/* expect: (1, 'a', 1), (2, 'b', 2) */ SELECT * FROM {db}.t1", + "1\ta\t1\n2\tb\t2\n", + ) + clickhouse_node.query(f"DROP DATABASE IF EXISTS {db}") + mysql_node.query(f"DROP DATABASE IF EXISTS {db}") diff --git a/tests/integration/test_materialized_mysql_database/test.py b/tests/integration/test_materialized_mysql_database/test.py index a22d73061ae..5272fb2ff8c 100644 --- a/tests/integration/test_materialized_mysql_database/test.py +++ b/tests/integration/test_materialized_mysql_database/test.py @@ -523,3 +523,9 @@ def test_materialized_database_mysql_drop_ddl( ): materialize_with_ddl.dropddl(clickhouse_node, started_mysql_8_0, "mysql80") materialize_with_ddl.dropddl(clickhouse_node, started_mysql_5_7, "mysql57") + + +def test_named_collections(started_cluster, started_mysql_8_0, clickhouse_node): + materialize_with_ddl.named_collections( + clickhouse_node, started_mysql_8_0, "mysql80" + ) From 2c018f5261553dd6106639f22c148fbdd61d8fc4 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Sat, 3 Jun 2023 20:59:04 +0200 Subject: [PATCH 373/722] do not call finalize after exception --- src/Storages/StorageS3.cpp | 11 +- tests/integration/helpers/client.py | 2 + tests/integration/helpers/cluster.py | 2 + .../integration/helpers/s3_mocks/broken_s3.py | 241 +++++++++++++++--- .../test_checking_s3_blobs_paranoid/test.py | 98 ++++++- tests/integration/test_merge_tree_s3/test.py | 222 +++++++++++++++- 6 files changed, 530 insertions(+), 46 deletions(-) diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index f1a7bcb71a2..dfa5ea2667a 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -794,7 +794,7 @@ public: void onException() override { std::lock_guard lock(cancel_mutex); - finalize(); + release(); } void onFinish() override @@ -824,6 +824,15 @@ private: } } + void release() + { + if (!writer) + return; + + writer.reset(); + write_buf.reset(); + } + Block sample_block; std::optional format_settings; std::unique_ptr write_buf; diff --git a/tests/integration/helpers/client.py b/tests/integration/helpers/client.py index c2676ac08a6..fdeedb9a80d 100644 --- a/tests/integration/helpers/client.py +++ b/tests/integration/helpers/client.py @@ -121,6 +121,7 @@ class Client: user=None, password=None, database=None, + query_id=None, ): return self.get_query_request( sql, @@ -130,6 +131,7 @@ class Client: user=user, password=password, database=database, + query_id=query_id, ).get_error() @stacktraces_on_timeout_decorator diff --git a/tests/integration/helpers/cluster.py 
b/tests/integration/helpers/cluster.py index f57ebf40e54..c77e67062a1 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -3376,6 +3376,7 @@ class ClickHouseInstance: user=None, password=None, database=None, + query_id=None, ): logging.debug(f"Executing query {sql} on {self.name}") return self.client.query_and_get_error( @@ -3386,6 +3387,7 @@ class ClickHouseInstance: user=user, password=password, database=database, + query_id=query_id, ) def query_and_get_error_with_retry( diff --git a/tests/integration/helpers/s3_mocks/broken_s3.py b/tests/integration/helpers/s3_mocks/broken_s3.py index 026a3c6f515..8ff4f9e9203 100644 --- a/tests/integration/helpers/s3_mocks/broken_s3.py +++ b/tests/integration/helpers/s3_mocks/broken_s3.py @@ -6,10 +6,10 @@ import time import urllib.parse import http.server import socketserver +import string -UPSTREAM_HOST = "minio1" -UPSTREAM_PORT = 9001 +INF_COUNT = 100000000 class MockControl: @@ -28,31 +28,88 @@ class MockControl: ], nothrow=True, ) - assert response == "OK" + assert response == "OK", response + + def setup_error_at_object_upload(self, count=None, after=None): + url = f"http://localhost:{self._port}/mock_settings/error_at_object_upload?nothing=1" + + if count is not None: + url += f"&count={count}" + + if after is not None: + url += f"&after={after}" - def setup_fail_upload(self, part_length): response = self._cluster.exec_in_container( self._cluster.get_container_id(self._container), [ "curl", "-s", - f"http://localhost:{self._port}/mock_settings/error_at_put?when_length_bigger={part_length}", + url, ], nothrow=True, ) - assert response == "OK" + assert response == "OK", response + + def setup_error_at_part_upload(self, count=None, after=None): + url = f"http://localhost:{self._port}/mock_settings/error_at_part_upload?nothing=1" + + if count is not None: + url += f"&count={count}" + + if after is not None: + url += f"&after={after}" - def setup_fake_upload(self, part_length): response = self._cluster.exec_in_container( self._cluster.get_container_id(self._container), [ "curl", "-s", - f"http://localhost:{self._port}/mock_settings/fake_put?when_length_bigger={part_length}", + url, ], nothrow=True, ) - assert response == "OK" + assert response == "OK", response + + def setup_error_at_create_multi_part_upload(self, count=None): + url = f"http://localhost:{self._port}/mock_settings/error_at_create_multi_part_upload" + + if count is not None: + url += f"?count={count}" + + response = self._cluster.exec_in_container( + self._cluster.get_container_id(self._container), + [ + "curl", + "-s", + url, + ], + nothrow=True, + ) + assert response == "OK", response + + def setup_fake_puts(self, part_length): + response = self._cluster.exec_in_container( + self._cluster.get_container_id(self._container), + [ + "curl", + "-s", + f"http://localhost:{self._port}/mock_settings/fake_puts?when_length_bigger={part_length}", + ], + nothrow=True, + ) + assert response == "OK", response + + def setup_fake_multpartuploads(self): + response = self._cluster.exec_in_container( + self._cluster.get_container_id(self._container), + [ + "curl", + "-s", + f"http://localhost:{self._port}/mock_settings/setup_fake_multpartuploads?", + ], + nothrow=True, + ) + assert response == "OK", response def setup_slow_answers( self, minimal_length=0, timeout=None, probability=None, count=None @@ -77,7 +134,7 @@ class MockControl: ["curl", "-s", url], nothrow=True, ) - assert response == "OK" + assert response == "OK", response class _ServerRuntime: @@ 
-88,7 +145,7 @@ class _ServerRuntime: self.probability = probability_ if probability_ is not None else 1 self.timeout = timeout_ if timeout_ is not None else 0.1 self.minimal_length = minimal_length_ if minimal_length_ is not None else 0 - self.count = count_ if count_ is not None else 2**32 + self.count = count_ if count_ is not None else INF_COUNT def __str__(self): return ( @@ -109,12 +166,32 @@ class _ServerRuntime: return _runtime.slow_put.timeout return None + class CountAfter: + def __init__(self, count_=None, after_=None): + self.count = count_ if count_ is not None else INF_COUNT + self.after = after_ if after_ is not None else 0 + + def __str__(self): + return f"count:{self.count} after:{self.after}" + + def has_effect(self): + if self.after: + self.after -= 1 + if self.after == 0: + if self.count: + self.count -= 1 + return True + return False + def __init__(self): self.lock = threading.Lock() - self.error_at_put_when_length_bigger = None + self.error_at_part_upload = None + self.error_at_object_upload = None self.fake_put_when_length_bigger = None self.fake_uploads = dict() self.slow_put = None + self.fake_multipart_upload = None + self.error_at_create_multi_part_upload = None def register_fake_upload(self, upload_id, key): with self.lock: @@ -127,10 +204,14 @@ class _ServerRuntime: return False def reset(self): - self.error_at_put_when_length_bigger = None - self.fake_put_when_length_bigger = None - self.fake_uploads = dict() - self.slow_put = None + with self.lock: + self.error_at_part_upload = None + self.error_at_object_upload = None + self.fake_put_when_length_bigger = None + self.fake_uploads = dict() + self.slow_put = None + self.fake_multipart_upload = None + self.error_at_create_multi_part_upload = None _runtime = _ServerRuntime() @@ -141,6 +222,13 @@ def _and_then(value, func): return None if value is None else func(value) +def get_random_string(length): + # choose from all lowercase letter + letters = string.ascii_lowercase + result_str = "".join(random.choice(letters) for i in range(length)) + return result_str + + class RequestHandler(http.server.BaseHTTPRequestHandler): def _ok(self): self.send_response(200) @@ -166,19 +254,30 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): self._read_out() self.send_response(307) - url = f"http://{UPSTREAM_HOST}:{UPSTREAM_PORT}{self.path}" + url = ( + f"http://{self.server.upstream_host}:{self.server.upstream_port}{self.path}" + ) self.send_header("Location", url) self.end_headers() self.wfile.write(b"Redirected") def _error(self, data): self._read_out() - self.send_response(500) self.send_header("Content-Type", "text/xml") self.end_headers() self.wfile.write(bytes(data, "UTF-8")) + def _error_expected_500(self): + self._error( + '' + "" + "ExpectedError" + "mock s3 injected error" + "txfbd566d03042474888193-00608d7537" + "" + ) + def _fake_put_ok(self): self._read_out() @@ -188,6 +287,28 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): self.send_header("Content-Length", 0) self.end_headers() + def _fake_uploads(self, path, upload_id): + self._read_out() + + parts = [x for x in path.split("/") if x] + bucket = parts[0] + key = "/".join(parts[1:]) + data = ( + '\n' + "\n" + f"{bucket}" + f"{key}" + f"{upload_id}" + "" + ) + + self.send_response(200) + self.send_header("Content-Type", "text/xml") + self.send_header("Content-Length", len(data)) + self.end_headers() + + self.wfile.write(bytes(data, "UTF-8")) + def _fake_post_ok(self, path): self._read_out() @@ -219,18 +340,29 @@ class 
RequestHandler(http.server.BaseHTTPRequestHandler): if len(path) < 2: return self._error("_mock_settings: wrong command") - if path[1] == "error_at_put": + if path[1] == "error_at_part_upload": params = urllib.parse.parse_qs(parts.query, keep_blank_values=False) - _runtime.error_at_put_when_length_bigger = int( - params.get("when_length_bigger", [1024 * 1024])[0] + _runtime.error_at_part_upload = _ServerRuntime.CountAfter( + count_=_and_then(params.get("count", [None])[0], int), + after_=_and_then(params.get("after", [None])[0], int), ) return self._ok() - if path[1] == "fake_put": + + if path[1] == "error_at_object_upload": + params = urllib.parse.parse_qs(parts.query, keep_blank_values=False) + _runtime.error_at_object_upload = _ServerRuntime.CountAfter( + count_=_and_then(params.get("count", [None])[0], int), + after_=_and_then(params.get("after", [None])[0], int), + ) + return self._ok() + + if path[1] == "fake_puts": params = urllib.parse.parse_qs(parts.query, keep_blank_values=False) _runtime.fake_put_when_length_bigger = int( params.get("when_length_bigger", [1024 * 1024])[0] ) return self._ok() + if path[1] == "slow_put": params = urllib.parse.parse_qs(parts.query, keep_blank_values=False) _runtime.slow_put = _ServerRuntime.SlowPut( @@ -241,6 +373,18 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): ) self.log_message("set slow put %s", _runtime.slow_put) return self._ok() + + if path[1] == "setup_fake_multpartuploads": + _runtime.fake_multipart_upload = True + return self._ok() + + if path[1] == "error_at_create_multi_part_upload": + params = urllib.parse.parse_qs(parts.query, keep_blank_values=False) + _runtime.error_at_create_multi_part_upload = int( + params.get("count", [INF_COUNT])[0] + ) + return self._ok() + if path[1] == "reset": _runtime.reset() return self._ok() @@ -265,33 +409,42 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): self.log_message("slow put %s", timeout) time.sleep(timeout) - if _runtime.error_at_put_when_length_bigger is not None: - if content_length > _runtime.error_at_put_when_length_bigger: - return self._error( - '' - "" - "ExpectedError" - "mock s3 injected error" - "txfbd566d03042474888193-00608d7537" - "" - ) - parts = urllib.parse.urlsplit(self.path) params = urllib.parse.parse_qs(parts.query, keep_blank_values=False) upload_id = params.get("uploadId", [None])[0] - if _runtime.fake_put_when_length_bigger is not None: - if content_length > _runtime.fake_put_when_length_bigger: - if upload_id is not None: - _runtime.register_fake_upload(upload_id, parts.path) - return self._fake_put_ok() + + if upload_id is not None: + if _runtime.error_at_part_upload is not None: + if _runtime.error_at_part_upload.has_effect(): + return self._error_expected_500() + if _runtime.fake_multipart_upload: + if _runtime.is_fake_upload(upload_id, parts.path): + return self._fake_put_ok() + else: + if _runtime.error_at_object_upload is not None: + if _runtime.error_at_object_upload.has_effect(): + return self._error_expected_500() + if _runtime.fake_put_when_length_bigger is not None: + if content_length > _runtime.fake_put_when_length_bigger: + return self._fake_put_ok() return self._redirect() def do_POST(self): parts = urllib.parse.urlsplit(self.path) - params = urllib.parse.parse_qs(parts.query, keep_blank_values=False) - upload_id = params.get("uploadId", [None])[0] + params = urllib.parse.parse_qs(parts.query, keep_blank_values=True) + uploads = params.get("uploads", [None])[0] + if uploads is not None: + if 
_runtime.error_at_create_multi_part_upload: + _runtime.error_at_create_multi_part_upload -= 1 + return self._error_expected_500() + if _runtime.fake_multipart_upload: + upload_id = get_random_string(5) + _runtime.register_fake_upload(upload_id, parts.path) + return self._fake_uploads(parts.path, upload_id) + + upload_id = params.get("uploadId", [None])[0] if _runtime.is_fake_upload(upload_id, parts.path): return self._fake_post_ok(parts.path) @@ -307,7 +460,15 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): class _ThreadedHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer): """Handle requests in a separate thread.""" + def set_upstream(self, upstream_host, upstream_port): + self.upstream_host = upstream_host + self.upstream_port = upstream_port + if __name__ == "__main__": httpd = _ThreadedHTTPServer(("0.0.0.0", int(sys.argv[1])), RequestHandler) + if len(sys.argv) == 4: + httpd.set_upstream(sys.argv[2], sys.argv[3]) + else: + httpd.set_upstream("minio1", 9001) httpd.serve_forever() diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index 042d57a0c43..c0f184815c9 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -54,7 +54,7 @@ def test_upload_after_check_works(cluster, broken_s3): """ ) - broken_s3.setup_fake_upload(1) + broken_s3.setup_fake_puts(1) error = node.query_and_get_error( "INSERT INTO s3_upload_after_check_works VALUES (1, 'Hello')" @@ -63,3 +63,99 @@ def test_upload_after_check_works(cluster, broken_s3): assert "Code: 499" in error, error assert "Immediately after upload" in error, error assert "suddenly disappeared" in error, error + + +def get_counters(node, query_id, log_type="ExceptionWhileProcessing"): + node.query("SYSTEM FLUSH LOGS") + return [ + int(x) + for x in node.query( + f""" + SELECT + ProfileEvents['S3CreateMultipartUpload'], + ProfileEvents['S3UploadPart'], + ProfileEvents['S3WriteRequestsErrors'] + FROM system.query_log + WHERE query_id='{query_id}' + AND type='{log_type}' + """ + ).split() + if x + ] + + +def test_upload_s3_fail_create_multi_part_upload(cluster, broken_s3): + node = cluster.instances["node"] + + broken_s3.setup_error_at_create_multi_part_upload() + + insert_query_id = "INSERT_INTO_TABLE_FUNCTION_FAIL_CREATE_MPU" + error = node.query_and_get_error( + """ + INSERT INTO + TABLE FUNCTION s3( + 'http://resolver:8083/root/data/test_upload_s3_fail_create_multi_part_upload', + 'minio', 'minio123', + 'CSV', auto, 'none' + ) + SELECT + * + FROM system.numbers + LIMIT 100000000 + SETTINGS + s3_max_single_part_upload_size=100, + s3_min_upload_part_size=100 + """, + query_id=insert_query_id, + ) + + assert "Code: 499" in error, error + assert "mock s3 injected error" in error, error + assert "DB::WriteBufferFromS3::createMultipartUpload()" in error, error + + count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + node, insert_query_id + ) + assert count_create_multi_part_uploads == 1 + assert count_upload_parts == 0 + assert count_s3_errors == 1 + + +def test_upload_s3_fail_upload_part_when_multi_part_upload(cluster, broken_s3): + node = cluster.instances["node"] + + broken_s3.setup_fake_multpartuploads() + broken_s3.setup_error_at_part_upload(count=1, after=2) + + insert_query_id = "INSERT_INTO_TABLE_FUNCTION_FAIL_UPLOAD_PART" + error = node.query_and_get_error( + """ + INSERT INTO + TABLE FUNCTION s3( + 
'http://resolver:8083/root/data/test_upload_s3_fail_upload_part_when_multi_part_upload', + 'minio', 'minio123', + 'CSV', auto, 'none' + ) + SELECT + * + FROM system.numbers + LIMIT 100000000 + SETTINGS + s3_max_single_part_upload_size=100, + s3_min_upload_part_size=100 + """, + query_id=insert_query_id, + ) + + assert "Code: 499" in error, error + assert "mock s3 injected error" in error, error + assert "DB::WriteBufferFromS3::writePart" in error, error + + count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + node, insert_query_id + ) + assert count_create_multi_part_uploads == 1 + assert count_upload_parts >= 2 + assert ( + count_s3_errors == 2 + ) # the second is cancel multipart upload, s3_mock just redirects this request diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py index 2ccd517923a..f3f44f1452c 100644 --- a/tests/integration/test_merge_tree_s3/test.py +++ b/tests/integration/test_merge_tree_s3/test.py @@ -862,7 +862,9 @@ def test_merge_canceled_by_s3_errors(cluster, broken_s3, node_name, storage_poli min_key = node.query("SELECT min(key) FROM test_merge_canceled_by_s3_errors") assert int(min_key) == 0, min_key - broken_s3.setup_fail_upload(50000) + broken_s3.setup_error_at_object_upload() + broken_s3.setup_fake_multpartuploads() + broken_s3.setup_error_at_part_upload() node.query("SYSTEM START MERGES test_merge_canceled_by_s3_errors") @@ -905,7 +907,7 @@ def test_merge_canceled_by_s3_errors_when_move(cluster, broken_s3, node_name): settings={"materialize_ttl_after_modify": 0}, ) - broken_s3.setup_fail_upload(10000) + broken_s3.setup_error_at_object_upload(count=1, after=1) node.query("SYSTEM START MERGES merge_canceled_by_s3_errors_when_move") @@ -941,7 +943,7 @@ def test_s3_engine_heavy_write_check_mem( " ENGINE S3('http://resolver:8083/root/data/test-upload.csv', 'minio', 'minio123', 'CSV')", ) - broken_s3.setup_fake_upload(1000) + broken_s3.setup_fake_multpartuploads() broken_s3.setup_slow_answers(10 * 1024 * 1024, timeout=15, count=10) query_id = f"INSERT_INTO_S3_ENGINE_QUERY_ID_{in_flight}" @@ -987,7 +989,7 @@ def test_s3_disk_heavy_write_check_mem(cluster, broken_s3, node_name): ) node.query("SYSTEM STOP MERGES s3_test") - broken_s3.setup_fake_upload(1000) + broken_s3.setup_fake_multpartuploads() broken_s3.setup_slow_answers(10 * 1024 * 1024, timeout=10, count=50) query_id = f"INSERT_INTO_S3_DISK_QUERY_ID" @@ -1013,3 +1015,215 @@ def test_s3_disk_heavy_write_check_mem(cluster, broken_s3, node_name): assert int(result) > 0.8 * memory check_no_objects_after_drop(cluster, node_name=node_name) + + +def get_memory_usage(node, query_id): + node.query("SYSTEM FLUSH LOGS") + memory_usage = node.query( + "SELECT memory_usage" + " FROM system.query_log" + f" WHERE query_id='{query_id}'" + " AND type='QueryFinish'" + ) + return int(memory_usage) + + +def get_memory_usages(node, query_ids): + node.query("SYSTEM FLUSH LOGS") + result = [] + for query_id in query_ids: + memory_usage = node.query( + "SELECT memory_usage" + " FROM system.query_log" + f" WHERE query_id='{query_id}'" + " AND type='QueryFinish'" + ) + result.append(int(memory_usage)) + return result + + +@pytest.mark.parametrize("node_name", ["node"]) +def test_heavy_insert_select_check_memory(cluster, broken_s3, node_name): + node = cluster.instances[node_name] + + node.query( + """ + CREATE TABLE central_query_log + ( + control_plane_id UUID, + pod_id LowCardinality(String), + scrape_ts_microseconds DateTime64(6) CODEC(Delta(8), LZ4), + 
event_date Date, + event_time DateTime, + payload Array(String), + payload_01 String, + payload_02 String, + payload_03 String, + payload_04 String, + payload_05 String, + payload_06 String, + payload_07 String, + payload_08 String, + payload_09 String, + payload_10 String, + payload_11 String, + payload_12 String, + payload_13 String, + payload_14 String, + payload_15 String, + payload_16 String, + payload_17 String, + payload_18 String, + payload_19 String + ) + ENGINE=MergeTree() + PARTITION BY toYYYYMM(event_date) + ORDER BY (control_plane_id, event_date, pod_id) + SETTINGS + storage_policy='s3' + """ + ) + + node.query("SYSTEM STOP MERGES central_query_log") + + write_count = 2 + write_query_ids = [] + for x in range(write_count): + query_id = f"INSERT_INTO_TABLE_RANDOM_DATA_QUERY_ID_{x}" + write_query_ids.append(query_id) + node.query( + """ + INSERT INTO central_query_log + SELECT + control_plane_id, + pod_id, + toStartOfHour(event_time) + toIntervalSecond(randUniform(0,60)) as scrape_ts_microseconds, + toDate(event_time) as event_date, + event_time, + payload, + payload[1] as payload_01, + payload[2] as payload_02, + payload[3] as payload_03, + payload[4] as payload_04, + payload[5] as payload_05, + payload[6] as payload_06, + payload[7] as payload_07, + payload[8] as payload_08, + payload[9] as payload_09, + payload[10] as payload_10, + payload[11] as payload_11, + payload[12] as payload_12, + payload[13] as payload_13, + payload[14] as payload_14, + payload[15] as payload_15, + payload[16] as payload_16, + payload[17] as payload_17, + payload[18] as payload_18, + payload[19] as payload_19 + FROM + ( + SELECT + control_plane_id, + substring(payload[1], 1, 5) as pod_id, + toDateTime('2022-12-12 00:00:00') + + toIntervalDay(floor(randUniform(0,3))) + + toIntervalHour(floor(randUniform(0,24))) + + toIntervalSecond(floor(randUniform(0,60))) + as event_time, + payload + FROM + generateRandom( + 'control_plane_id UUID, payload Array(String)', + NULL, + 100, + 100 + ) + LIMIT 10000 + ) + SETTINGS + max_insert_block_size=256000000, + min_insert_block_size_rows=1000000, + min_insert_block_size_bytes=256000000 + """, + query_id=query_id, + ) + + memory = 845346116 + for memory_usage, query_id in zip( + get_memory_usages(node, write_query_ids), write_query_ids + ): + assert int(memory_usage) < 1.2 * memory, f"{memory_usage} : {query_id}" + assert int(memory_usage) > 0.8 * memory, f"{memory_usage} : {query_id}" + + broken_s3.setup_slow_answers(minimal_length=1000, timeout=5, count=20) + broken_s3.setup_fake_multpartuploads() + + insert_query_id = f"INSERT_INTO_S3_FUNCTION_QUERY_ID" + node.query( + """ + INSERT INTO + TABLE FUNCTION s3( + 'http://resolver:8083/root/data/test-upload_{_partition_id}.csv.gz', + 'minio', 'minio123', + 'CSV', auto, 'gzip' + ) + PARTITION BY formatDateTime(subtractHours(toDateTime('2022-12-13 00:00:00'), 1),'%Y-%m-%d_%H:00') + WITH toDateTime('2022-12-13 00:00:00') as time_point + SELECT + * + FROM central_query_log + WHERE + event_date >= subtractDays(toDate(time_point), 1) + AND scrape_ts_microseconds >= subtractHours(toStartOfHour(time_point), 12) + AND scrape_ts_microseconds < toStartOfDay(time_point) + SETTINGS + s3_max_inflight_parts_for_one_file=1 + """, + query_id=insert_query_id, + ) + + query_id = f"SELECT_QUERY_ID" + total = node.query( + """ + SELECT + count() + FROM central_query_log + """, + query_id=query_id, + ) + assert int(total) == 10000 * write_count + + query_id = f"SELECT_WHERE_QUERY_ID" + selected = node.query( + """ + WITH 
toDateTime('2022-12-13 00:00:00') as time_point + SELECT + count() + FROM central_query_log + WHERE + event_date >= subtractDays(toDate(time_point), 1) + AND scrape_ts_microseconds >= subtractHours(toStartOfHour(time_point), 12) + AND scrape_ts_microseconds < toStartOfDay(time_point) + """, + query_id=query_id, + ) + assert int(selected) < 4500, selected + assert int(selected) > 2500, selected + + node.query("SYSTEM FLUSH LOGS") + profile_events = node.query( + f""" + SELECT ProfileEvents + FROM system.query_log + WHERE query_id='{insert_query_id}' + AND type='QueryFinish' + """ + ) + + memory_usage = get_memory_usage(node, insert_query_id) + memory = 123507857 + assert int(memory_usage) < 1.2 * memory, f"{memory_usage} {profile_events}" + assert int(memory_usage) > 0.8 * memory, f"{memory_usage} {profile_events}" + + node.query(f"DROP TABLE IF EXISTS central_query_log SYNC") + remove_all_s3_objects(cluster) From 9c939b2f3db3b47116d739a3b81ab7c353e6e0bf Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Tue, 13 Jun 2023 10:54:54 +0200 Subject: [PATCH 374/722] Fix heading and sidebar for azureBlobStorage table function --- docs/en/sql-reference/table-functions/azureBlobStorage.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index 369bf7a964d..7a362710b9c 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -1,10 +1,10 @@ --- slug: /en/sql-reference/table-functions/azure_blob_storage -sidebar_label: azure_blob_storage +sidebar_label: azureBlobStorage keywords: [azure blob storage] --- -# azure\_blob\_storage Table Function +# azureBlobStorage Table Function Provides a table-like interface to select/insert files in [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs). This table function is similar to the [s3 function](../../sql-reference/table-functions/s3.md). 
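
For context on the function renamed above: a minimal usage sketch of the `azureBlobStorage` table function, assuming the s3-like argument order the documentation describes (storage account URL or connection string, container, blob path, credentials, format). This is illustrative only — the endpoint, container, blob path and credentials below are placeholder assumptions, not values from the patch.

```sql
-- Minimal sketch, assuming the s3-like signature; all values are placeholders.
SELECT count(*)
FROM azureBlobStorage(
    'http://azurite1:10000/devstoreaccount1',  -- storage account URL (a connection string can be used instead)
    'test-container',                          -- container name (placeholder)
    'data/test.csv',                           -- blob path (placeholder)
    'devstoreaccount1',                        -- account name (placeholder)
    '<account-key>',                           -- account key (placeholder)
    'CSV');                                    -- input format
```

Writing works the same way via `INSERT INTO TABLE FUNCTION azureBlobStorage(...)`, mirroring how the `s3` table function is used elsewhere in these tests.
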
From 79bc8847333f6e8e3653e63b1fed6a063bfb6302 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Jun 2023 08:54:25 +0000 Subject: [PATCH 375/722] Stabilize tests --- src/Functions/FunctionSnowflake.h | 58 ++++++++++--------- .../01942_snowflakeToDateTime.reference | 3 +- .../0_stateless/01942_snowflakeToDateTime.sql | 10 +++- 3 files changed, 41 insertions(+), 30 deletions(-) diff --git a/src/Functions/FunctionSnowflake.h b/src/Functions/FunctionSnowflake.h index 0a47534c47d..ace2fc54f09 100644 --- a/src/Functions/FunctionSnowflake.h +++ b/src/Functions/FunctionSnowflake.h @@ -54,22 +54,19 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const auto & src = arguments[0]; - const auto & col = *src.column; + const auto & src_column = *src.column; auto res_column = ColumnInt64::create(input_rows_count); - auto & result_data = res_column->getData(); + auto & res_data = res_column->getData(); - const auto & source_data = typeid_cast(col).getData(); + const auto & src_data = typeid_cast &>(src_column).getData(); for (size_t i = 0; i < input_rows_count; ++i) - { - result_data[i] = (Int64(source_data[i]) * 1000 - snowflake_epoch) << time_shift; - } + res_data[i] = (UInt32(src_data[i]) * 1000 - snowflake_epoch) << time_shift; return res_column; } }; - class FunctionSnowflakeToDateTime : public IFunction { private: @@ -106,23 +103,23 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const auto & src = arguments[0]; - const auto & col = *src.column; + const auto & src_column = *src.column; auto res_column = ColumnUInt32::create(input_rows_count); - auto & result_data = res_column->getData(); + auto & res_data = res_column->getData(); - if (const auto * src_non_const = typeid_cast(&col)) + if (const auto * src_column_non_const = typeid_cast(&src_column)) { - const auto & source_data = src_non_const->getData(); + const auto & src_data = src_column_non_const->getData(); for (size_t i = 0; i < input_rows_count; ++i) - result_data[i] = static_cast( - ((source_data[i] >> time_shift) + snowflake_epoch) / 1000); + res_data[i] = static_cast( + ((src_data[i] >> time_shift) + snowflake_epoch) / 1000); } - else if (const auto * src_const = typeid_cast(&col)) + else if (const auto * src_column_const = typeid_cast(&src_column)) { - Int64 src_val = src_const->getValue(); + Int64 src_val = src_column_const->getValue(); for (size_t i = 0; i < input_rows_count; ++i) - result_data[i] = static_cast( + res_data[i] = static_cast( ((src_val >> time_shift) + snowflake_epoch) / 1000); } return res_column; @@ -155,16 +152,14 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const auto & src = arguments[0]; - const auto & col = *src.column; + const auto & src_column = *src.column; auto res_column = ColumnInt64::create(input_rows_count); - auto & result_data = res_column->getData(); + auto & res_data = res_column->getData(); - const auto & source_data = typeid_cast &>(col).getData(); + const auto & src_data = typeid_cast &>(src_column).getData(); for (size_t i = 0; i < input_rows_count; ++i) - { - result_data[i] = (source_data[i] - snowflake_epoch) << time_shift; - } + res_data[i] = (src_data[i] - snowflake_epoch) << time_shift; return res_column; } @@ -207,17 +202,24 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr 
&, size_t input_rows_count) const override { const auto & src = arguments[0]; - const auto & col = *src.column; + const auto & src_column = *src.column; auto res_column = ColumnDecimal::create(input_rows_count, 3); - auto & result_data = res_column->getData(); + auto & res_data = res_column->getData(); - const auto & source_data = typeid_cast(col).getData(); - - for (size_t i = 0; i < input_rows_count; ++i) + if (const auto * src_column_non_const = typeid_cast(&src_column)) { - result_data[i] = (source_data[i] >> time_shift) + snowflake_epoch; + const auto & src_data = src_column_non_const->getData(); + for (size_t i = 0; i < input_rows_count; ++i) + res_data[i] = (src_data[i] >> time_shift) + snowflake_epoch; } + else if (const auto * src_column_const = typeid_cast(&src_column)) + { + Int64 src_val = src_column_const->getValue(); + for (size_t i = 0; i < input_rows_count; ++i) + res_data[i] = (src_val >> time_shift) + snowflake_epoch; + } + return res_column; } }; diff --git a/tests/queries/0_stateless/01942_snowflakeToDateTime.reference b/tests/queries/0_stateless/01942_snowflakeToDateTime.reference index fa00a22bc63..e1d141fe450 100644 --- a/tests/queries/0_stateless/01942_snowflakeToDateTime.reference +++ b/tests/queries/0_stateless/01942_snowflakeToDateTime.reference @@ -1,4 +1,5 @@ const column UTC 1426860704886947840 2021-08-15 10:57:56 DateTime(\'UTC\') 2021-08-15 10:57:56.492 DateTime64(3, \'UTC\') Asia/Shanghai 1426860704886947840 2021-08-15 18:57:56 DateTime(\'Asia/Shanghai\') 2021-08-15 18:57:56.492 DateTime64(3, \'Asia/Shanghai\') -Asia/Singapore 2010-11-04 01:42:54 +Asia/Singapore 42 +Asia/Singapore 42 diff --git a/tests/queries/0_stateless/01942_snowflakeToDateTime.sql b/tests/queries/0_stateless/01942_snowflakeToDateTime.sql index 3efccdddb2d..2ad03f2a4f5 100644 --- a/tests/queries/0_stateless/01942_snowflakeToDateTime.sql +++ b/tests/queries/0_stateless/01942_snowflakeToDateTime.sql @@ -31,4 +31,12 @@ SELECT snowflakeToDateTime64(i64, tz) as dt64, toTypeName(dt64); -SELECT materialize('Asia/Singapore') a, snowflakeToDateTime(649::Int64, a) settings allow_nonconst_timezone_arguments = 1 + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab(tz String, val Int64) engine=Log; +INSERT INTO tab VALUES ('Asia/Singapore', 42); + +SELECT * FROM tab WHERE snowflakeToDateTime(42::Int64, tz) != now() SETTINGS allow_nonconst_timezone_arguments = 1; +SELECT * FROM tab WHERE snowflakeToDateTime64(42::Int64, tz) != now() SETTINGS allow_nonconst_timezone_arguments = 1; + +DROP TABLE tab; From 2d0dc2c8f5c329a4da12ccb1db601d5edf2044cd Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Jun 2023 08:59:34 +0000 Subject: [PATCH 376/722] Minor: Switch column order --- .../0_stateless/01942_snowflakeToDateTime.reference | 4 ++-- tests/queries/0_stateless/01942_snowflakeToDateTime.sql | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/01942_snowflakeToDateTime.reference b/tests/queries/0_stateless/01942_snowflakeToDateTime.reference index e1d141fe450..83fae3ef809 100644 --- a/tests/queries/0_stateless/01942_snowflakeToDateTime.reference +++ b/tests/queries/0_stateless/01942_snowflakeToDateTime.reference @@ -1,5 +1,5 @@ const column UTC 1426860704886947840 2021-08-15 10:57:56 DateTime(\'UTC\') 2021-08-15 10:57:56.492 DateTime64(3, \'UTC\') Asia/Shanghai 1426860704886947840 2021-08-15 18:57:56 DateTime(\'Asia/Shanghai\') 2021-08-15 18:57:56.492 DateTime64(3, \'Asia/Shanghai\') -Asia/Singapore 42 -Asia/Singapore 42 +1 +1 diff --git 
a/tests/queries/0_stateless/01942_snowflakeToDateTime.sql b/tests/queries/0_stateless/01942_snowflakeToDateTime.sql index 2ad03f2a4f5..0092eca848c 100644 --- a/tests/queries/0_stateless/01942_snowflakeToDateTime.sql +++ b/tests/queries/0_stateless/01942_snowflakeToDateTime.sql @@ -33,10 +33,10 @@ SELECT DROP TABLE IF EXISTS tab; -CREATE TABLE tab(tz String, val Int64) engine=Log; -INSERT INTO tab VALUES ('Asia/Singapore', 42); +CREATE TABLE tab(val Int64, tz String) engine=Log; +INSERT INTO tab VALUES (42, 'Asia/Singapore'); -SELECT * FROM tab WHERE snowflakeToDateTime(42::Int64, tz) != now() SETTINGS allow_nonconst_timezone_arguments = 1; -SELECT * FROM tab WHERE snowflakeToDateTime64(42::Int64, tz) != now() SETTINGS allow_nonconst_timezone_arguments = 1; +SELECT 1 FROM tab WHERE snowflakeToDateTime(42::Int64, tz) != now() SETTINGS allow_nonconst_timezone_arguments = 1; +SELECT 1 FROM tab WHERE snowflakeToDateTime64(42::Int64, tz) != now() SETTINGS allow_nonconst_timezone_arguments = 1; DROP TABLE tab; From 72f28321295187ddf40d02a887be3106f6ec4ac3 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 13 Jun 2023 02:07:05 -0700 Subject: [PATCH 377/722] Slightly more information in error message about cached disk (#50897) --- src/Disks/ObjectStorages/Cached/registerDiskCache.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp b/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp index 779ec6120f8..2b40fa9c21b 100644 --- a/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp +++ b/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp @@ -48,7 +48,9 @@ void registerDiskCache(DiskFactory & factory, bool /* global_skip_access_check * auto cache = FileCacheFactory::instance().getOrCreate(name, file_cache_settings); auto disk = disk_it->second; if (!dynamic_cast(disk.get())) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cached disk is allowed only on top of object storage"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Cannot wrap disk `{}` with cache layer `{}`: cached disk is allowed only on top of object storage", + disk_name, name); auto disk_object_storage = disk->createDiskObjectStorage(); From 0ab3dc92618f8a7d1accd8f2e1cc21f851dead80 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Tue, 13 Jun 2023 11:25:13 +0200 Subject: [PATCH 378/722] A bit safer UserDefinedSQLFunctionVisitor (#50913) * Update UserDefinedSQLFunctionVisitor.cpp * Update UserDefinedSQLFunctionVisitor.cpp --------- Co-authored-by: Nikita Mikhaylov --- src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp b/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp index 57cc45cc75d..597e4efe35e 100644 --- a/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp @@ -25,6 +25,12 @@ namespace ErrorCodes void UserDefinedSQLFunctionVisitor::visit(ASTPtr & ast) { + if (!ast) + { + chassert(false); + return; + } + const auto visit_child_with_shared_ptr = [&](ASTPtr & child) { if (!child) From e5de6cde244f530c0f7d3ec1acad462025430e58 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 13 Jun 2023 11:37:24 +0000 Subject: [PATCH 379/722] Update after #50097 --- tests/queries/0_stateless/01655_plan_optimizations.reference | 4 ++-- tests/queries/0_stateless/01655_plan_optimizations.sh | 4 ++-- 2 files 
changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index 34ea2bc20a3..be42a656c66 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -172,7 +172,7 @@ Filter column: notEquals(number, 1) Join > (analyzer) one condition of filter is pushed down before LEFT JOIN Join -Filter column: notEquals(l.number_0, 1_UInt8) +Filter column: notEquals(number_0, 1_UInt8) 0 0 3 3 > one condition of filter is pushed down before INNER JOIN @@ -181,7 +181,7 @@ Filter column: notEquals(number, 1) Join > (analyzer) one condition of filter is pushed down before INNER JOIN Join -Filter column: notEquals(l.number_0, 1_UInt8) +Filter column: notEquals(number_0, 1_UInt8) 3 3 > filter is pushed down before UNION Union diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index d68c2c8b414..a765a6ea4fa 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -236,7 +236,7 @@ $CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " select number as a, r.b from numbers(4) as l any left join ( select number + 2 as b from numbers(3) ) as r on a = r.b where a != 1 and b != 2 settings enable_optimize_predicate_expression = 0" | - grep -o "Join\|Filter column: notEquals(l.number_0, 1_UInt8)" + grep -o "Join\|Filter column: notEquals(number_0, 1_UInt8)" $CLICKHOUSE_CLIENT -q " select number as a, r.b from numbers(4) as l any left join ( select number + 2 as b from numbers(3) @@ -255,7 +255,7 @@ $CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " select number as a, r.b from numbers(4) as l any inner join ( select number + 2 as b from numbers(3) ) as r on a = r.b where a != 1 and b != 2 settings enable_optimize_predicate_expression = 0" | - grep -o "Join\|Filter column: notEquals(l.number_0, 1_UInt8)" + grep -o "Join\|Filter column: notEquals(number_0, 1_UInt8)" $CLICKHOUSE_CLIENT -q " select number as a, r.b from numbers(4) as l any inner join ( select number + 2 as b from numbers(3) From a8b68a877aae945dee4e37b28e320a967a38f9f2 Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Tue, 13 Jun 2023 14:37:33 +0300 Subject: [PATCH 380/722] Rename the 'time shift' variable in the test to make it more clear --- .../02783_parseDateTimeBestEffort_syslog.sql | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql index 742ae03ddab..f3ca78e8310 100644 --- a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql +++ b/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql @@ -2,45 +2,45 @@ WITH 86400 AS secs_in_day, now() AS ts_now, '2023-06-07' AS ref_point, - dateDiff('second', toDateTime(ref_point), ts_now) AS impedimenta, + dateDiff('second', toDateTime(ref_point), ts_now) AS time_shift, formatDateTime(ts_around, '%b %e %T') AS dt_curr SELECT - formatDateTime(ts_around - impedimenta, '%b %e %H:%i:%s') AS around_June_7, - parseDateTimeBestEffort(dt_curr) - impedimenta AS res, - parseDateTimeBestEffort(dt_curr, 'US/Samoa') - impedimenta AS res_sam, - parseDateTimeBestEffort(dt_curr, 'Pacific/Auckland') - impedimenta AS res_auc, - parseDateTimeBestEffortOrNull(dt_curr) - impedimenta AS 
res_null, - parseDateTimeBestEffortOrNull(dt_curr, 'US/Samoa') - impedimenta AS res_null_sam, - parseDateTimeBestEffortOrNull(dt_curr, 'Pacific/Auckland') - impedimenta AS res_null_auc, - parseDateTimeBestEffortOrZero(dt_curr) - impedimenta AS res_zero, - parseDateTimeBestEffortOrZero(dt_curr, 'US/Samoa') - impedimenta AS res_zero_sam, - parseDateTimeBestEffortOrZero(dt_curr, 'Pacific/Auckland') - impedimenta AS res_zero_auc, - parseDateTimeBestEffortUS(dt_curr) - impedimenta AS res_us, - parseDateTimeBestEffortUS(dt_curr, 'US/Samoa') - impedimenta AS res_us_sam, - parseDateTimeBestEffortUS(dt_curr, 'Pacific/Auckland') - impedimenta AS res_us_auc, - parseDateTimeBestEffortUSOrNull(dt_curr) - impedimenta AS res_us_null, - parseDateTimeBestEffortUSOrNull(dt_curr, 'US/Samoa') - impedimenta AS res_us_null_sam, - parseDateTimeBestEffortUSOrNull(dt_curr, 'Pacific/Auckland') - impedimenta AS res_us_null_auc, - parseDateTimeBestEffortUSOrZero(dt_curr) - impedimenta AS res_us_zero, - parseDateTimeBestEffortUSOrZero(dt_curr, 'US/Samoa') - impedimenta AS res_us_zero_sam, - parseDateTimeBestEffortUSOrZero(dt_curr, 'Pacific/Auckland') - impedimenta AS res_us_zero_auc, - parseDateTime64BestEffort(dt_curr) - impedimenta AS res64, - parseDateTime64BestEffort(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_sam, - parseDateTime64BestEffort(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_auc, - parseDateTime64BestEffortOrNull(dt_curr) - impedimenta AS res64_null, - parseDateTime64BestEffortOrNull(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_null_sam, - parseDateTime64BestEffortOrNull(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_null_auc, - parseDateTime64BestEffortOrZero(dt_curr) - impedimenta AS res64_zero, - parseDateTime64BestEffortOrZero(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_zero_sam, - parseDateTime64BestEffortOrZero(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_zero_auc, - parseDateTime64BestEffortUS(dt_curr) - impedimenta AS res64_us, - parseDateTime64BestEffortUS(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_us_sam, - parseDateTime64BestEffortUS(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_us_auc, - parseDateTime64BestEffortUSOrNull(dt_curr) - impedimenta AS res64_us_null, - parseDateTime64BestEffortUSOrNull(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_us_null_sam, - parseDateTime64BestEffortUSOrNull(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_us_null_auc, - parseDateTime64BestEffortUSOrZero(dt_curr) - impedimenta AS res64_us_zero, - parseDateTime64BestEffortUSOrZero(dt_curr, 3, 'US/Samoa') - impedimenta AS res64_us_zero_sam, - parseDateTime64BestEffortUSOrZero(dt_curr, 3, 'Pacific/Auckland') - impedimenta AS res64_us_zero_auc + formatDateTime(ts_around - time_shift, '%b %e %H:%i:%s') AS around_June_7, + parseDateTimeBestEffort(dt_curr) - time_shift AS res, + parseDateTimeBestEffort(dt_curr, 'US/Samoa') - time_shift AS res_sam, + parseDateTimeBestEffort(dt_curr, 'Pacific/Auckland') - time_shift AS res_auc, + parseDateTimeBestEffortOrNull(dt_curr) - time_shift AS res_null, + parseDateTimeBestEffortOrNull(dt_curr, 'US/Samoa') - time_shift AS res_null_sam, + parseDateTimeBestEffortOrNull(dt_curr, 'Pacific/Auckland') - time_shift AS res_null_auc, + parseDateTimeBestEffortOrZero(dt_curr) - time_shift AS res_zero, + parseDateTimeBestEffortOrZero(dt_curr, 'US/Samoa') - time_shift AS res_zero_sam, + parseDateTimeBestEffortOrZero(dt_curr, 'Pacific/Auckland') - time_shift AS res_zero_auc, + parseDateTimeBestEffortUS(dt_curr) - time_shift AS res_us, + 
parseDateTimeBestEffortUS(dt_curr, 'US/Samoa') - time_shift AS res_us_sam, + parseDateTimeBestEffortUS(dt_curr, 'Pacific/Auckland') - time_shift AS res_us_auc, + parseDateTimeBestEffortUSOrNull(dt_curr) - time_shift AS res_us_null, + parseDateTimeBestEffortUSOrNull(dt_curr, 'US/Samoa') - time_shift AS res_us_null_sam, + parseDateTimeBestEffortUSOrNull(dt_curr, 'Pacific/Auckland') - time_shift AS res_us_null_auc, + parseDateTimeBestEffortUSOrZero(dt_curr) - time_shift AS res_us_zero, + parseDateTimeBestEffortUSOrZero(dt_curr, 'US/Samoa') - time_shift AS res_us_zero_sam, + parseDateTimeBestEffortUSOrZero(dt_curr, 'Pacific/Auckland') - time_shift AS res_us_zero_auc, + parseDateTime64BestEffort(dt_curr) - time_shift AS res64, + parseDateTime64BestEffort(dt_curr, 3, 'US/Samoa') - time_shift AS res64_sam, + parseDateTime64BestEffort(dt_curr, 3, 'Pacific/Auckland') - time_shift AS res64_auc, + parseDateTime64BestEffortOrNull(dt_curr) - time_shift AS res64_null, + parseDateTime64BestEffortOrNull(dt_curr, 3, 'US/Samoa') - time_shift AS res64_null_sam, + parseDateTime64BestEffortOrNull(dt_curr, 3, 'Pacific/Auckland') - time_shift AS res64_null_auc, + parseDateTime64BestEffortOrZero(dt_curr) - time_shift AS res64_zero, + parseDateTime64BestEffortOrZero(dt_curr, 3, 'US/Samoa') - time_shift AS res64_zero_sam, + parseDateTime64BestEffortOrZero(dt_curr, 3, 'Pacific/Auckland') - time_shift AS res64_zero_auc, + parseDateTime64BestEffortUS(dt_curr) - time_shift AS res64_us, + parseDateTime64BestEffortUS(dt_curr, 3, 'US/Samoa') - time_shift AS res64_us_sam, + parseDateTime64BestEffortUS(dt_curr, 3, 'Pacific/Auckland') - time_shift AS res64_us_auc, + parseDateTime64BestEffortUSOrNull(dt_curr) - time_shift AS res64_us_null, + parseDateTime64BestEffortUSOrNull(dt_curr, 3, 'US/Samoa') - time_shift AS res64_us_null_sam, + parseDateTime64BestEffortUSOrNull(dt_curr, 3, 'Pacific/Auckland') - time_shift AS res64_us_null_auc, + parseDateTime64BestEffortUSOrZero(dt_curr) - time_shift AS res64_us_zero, + parseDateTime64BestEffortUSOrZero(dt_curr, 3, 'US/Samoa') - time_shift AS res64_us_zero_sam, + parseDateTime64BestEffortUSOrZero(dt_curr, 3, 'Pacific/Auckland') - time_shift AS res64_us_zero_auc FROM (SELECT arrayJoin([ts_now - secs_in_day, ts_now + secs_in_day]) AS ts_around) FORMAT PrettySpaceNoEscapes; From 38151f9c767d12b18e26ccc236244bc929c326bb Mon Sep 17 00:00:00 2001 From: Victor Krasnov Date: Tue, 13 Jun 2023 14:59:38 +0300 Subject: [PATCH 381/722] Rename the test to the snake_case after the Team Lead's review --- ...g.reference => 02783_parsedatetimebesteffort_syslog.reference} | 0 ...Effort_syslog.sql => 02783_parsedatetimebesteffort_syslog.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/queries/0_stateless/{02783_parseDateTimeBestEffort_syslog.reference => 02783_parsedatetimebesteffort_syslog.reference} (100%) rename tests/queries/0_stateless/{02783_parseDateTimeBestEffort_syslog.sql => 02783_parsedatetimebesteffort_syslog.sql} (100%) diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference b/tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.reference similarity index 100% rename from tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.reference rename to tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.reference diff --git a/tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql b/tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.sql similarity index 100% rename from 
tests/queries/0_stateless/02783_parseDateTimeBestEffort_syslog.sql rename to tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.sql From 3e3b8ff5f6b78c6ddd202d154ea9101625c561f1 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Jun 2023 09:14:15 +0000 Subject: [PATCH 382/722] More robustness --- src/Functions/FunctionSnowflake.h | 40 ++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/src/Functions/FunctionSnowflake.h b/src/Functions/FunctionSnowflake.h index ace2fc54f09..c7ec6dca27f 100644 --- a/src/Functions/FunctionSnowflake.h +++ b/src/Functions/FunctionSnowflake.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -59,9 +60,20 @@ public: auto res_column = ColumnInt64::create(input_rows_count); auto & res_data = res_column->getData(); - const auto & src_data = typeid_cast &>(src_column).getData(); - for (size_t i = 0; i < input_rows_count; ++i) - res_data[i] = (UInt32(src_data[i]) * 1000 - snowflake_epoch) << time_shift; + if (const auto * src_column_non_const = typeid_cast(&src_column)) + { + const auto & src_data = src_column_non_const->getData(); + for (size_t i = 0; i < input_rows_count; ++i) + res_data[i] = (UInt32(src_data[i]) * 1000 - snowflake_epoch) << time_shift; + } + else if (const auto * src_column_const = typeid_cast(&src_column)) + { + UInt32 src_val = src_column_const->getValue(); + for (size_t i = 0; i < input_rows_count; ++i) + res_data[i] = (src_val * 1000 - snowflake_epoch) << time_shift; + } + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal argument for function {}", name); return res_column; } @@ -122,6 +134,9 @@ public: res_data[i] = static_cast( ((src_val >> time_shift) + snowflake_epoch) / 1000); } + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal argument for function {}", name); + return res_column; } }; @@ -157,9 +172,20 @@ public: auto res_column = ColumnInt64::create(input_rows_count); auto & res_data = res_column->getData(); - const auto & src_data = typeid_cast &>(src_column).getData(); - for (size_t i = 0; i < input_rows_count; ++i) - res_data[i] = (src_data[i] - snowflake_epoch) << time_shift; + if (const auto * src_column_non_const = typeid_cast(&src_column)) + { + const auto & src_data = src_column_non_const->getData(); + for (size_t i = 0; i < input_rows_count; ++i) + res_data[i] = (UInt32(src_data[i]) * 1000 - snowflake_epoch) << time_shift; + } + else if (const auto * src_column_const = typeid_cast(&src_column)) + { + UInt32 src_val = src_column_const->getValue(); + for (size_t i = 0; i < input_rows_count; ++i) + res_data[i] = (src_val * 1000 - snowflake_epoch) << time_shift; + } + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal argument for function {}", name); return res_column; } @@ -219,6 +245,8 @@ public: for (size_t i = 0; i < input_rows_count; ++i) res_data[i] = (src_val >> time_shift) + snowflake_epoch; } + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal argument for function {}", name); return res_column; } From eddd932636fdb16802ec0b541a7cb927abcc05ff Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 13 Jun 2023 12:34:26 +0000 Subject: [PATCH 383/722] Do not apply projection if read-in-order was enabled. 
--- .../Optimizations/projectionsCommon.cpp | 3 ++ .../QueryPlan/ReadFromMergeTree.cpp | 5 ++ src/Processors/QueryPlan/ReadFromMergeTree.h | 1 + ...84_projections_read_in_order_bug.reference | 0 .../02784_projections_read_in_order_bug.sql | 48 +++++++++++++++++++ 5 files changed, 57 insertions(+) create mode 100644 tests/queries/0_stateless/02784_projections_read_in_order_bug.reference create mode 100644 tests/queries/0_stateless/02784_projections_read_in_order_bug.sql diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp index 2f73e14b2a0..cb76ffa84ba 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp @@ -38,6 +38,9 @@ bool canUseProjectionForReadingStep(ReadFromMergeTree * reading) if (reading->isParallelReadingEnabled()) return false; + if (reading->readsInOrder()) + return false; + // Currently projection don't support deduplication when moving parts between shards. if (reading->getContext()->getSettingsRef().allow_experimental_query_deduplication) return false; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 2415507a6eb..3c38ecbbd3f 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1427,6 +1427,11 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, return true; } +bool ReadFromMergeTree::readsInOrder() const +{ + return reader_settings.read_in_order; +} + void ReadFromMergeTree::updatePrewhereInfo(const PrewhereInfoPtr & prewhere_info_value) { query_info.prewhere_info = prewhere_info_value; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 45beaaaf013..99cbe9d9e50 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -161,6 +161,7 @@ public: /// Returns `false` if requested reading cannot be performed. 
bool requestReadingInOrder(size_t prefix_size, int direction, size_t limit); + bool readsInOrder() const; void updatePrewhereInfo(const PrewhereInfoPtr & prewhere_info_value); diff --git a/tests/queries/0_stateless/02784_projections_read_in_order_bug.reference b/tests/queries/0_stateless/02784_projections_read_in_order_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql b/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql new file mode 100644 index 00000000000..52a3a6127ac --- /dev/null +++ b/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql @@ -0,0 +1,48 @@ +create table events ( + `organisation_id` UUID, + `session_id` UUID, + `id` UUID DEFAULT generateUUIDv4(), + `timestamp` UInt64, + `payload` String, + `customer_id` UUID, + `call_id` String, + PROJECTION events_by_session_and_org + ( + SELECT * + ORDER BY + organisation_id, + session_id, + timestamp + ), + PROJECTION events_by_session + ( + SELECT * + ORDER BY + session_id, + timestamp + ), + PROJECTION events_by_session_and_customer + ( + SELECT * + ORDER BY + customer_id, + session_id, + timestamp + ), + PROJECTION events_by_call_id + ( + SELECT * + ORDER BY + call_id, + timestamp + )) engine = MergeTree order by (organisation_id, session_id, timestamp) settings index_granularity = 3; + + +#insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02'), toString(0), reinterpretAsUUID(0), toString(0)); +#insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02'), toString(0), reinterpretAsUUID(0), toString(0)); + +insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), 
reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)); +insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)); + +set read_in_order_two_level_merge_threshold=1; +SELECT id, timestamp, payload FROM events WHERE (organisation_id = reinterpretAsUUID(1)) AND (session_id = reinterpretAsUUID(0)) ORDER BY timestamp, payload, id ASC; From 1a4b7e8ebec920943a39c484576f147c130b00ec Mon Sep 17 00:00:00 2001 From: Val Doroshchuk Date: Tue, 13 Jun 2023 14:36:31 +0200 Subject: [PATCH 384/722] MaterializedMySQL: Add missing DROP DATABASE for tests --- .../materialize_with_ddl.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py b/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py index 2bbbe9a3f13..7efb9ac54a9 100644 --- a/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py +++ b/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py @@ -1476,6 +1476,9 @@ def utf8mb4_test(clickhouse_node, mysql_node, service_name): "1\t\U0001F984\n2\t\u2601\n", ) + clickhouse_node.query("DROP DATABASE utf8mb4_test") + mysql_node.query("DROP DATABASE utf8mb4_test") + def system_parts_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS system_parts_test") @@ -1504,6 +1507,9 @@ def system_parts_test(clickhouse_node, mysql_node, service_name): clickhouse_node.query("OPTIMIZE TABLE system_parts_test.test") check_active_parts(1) + clickhouse_node.query("DROP DATABASE system_parts_test") + mysql_node.query("DROP DATABASE system_parts_test") + def multi_table_update_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS multi_table_update") @@ -1529,6 +1535,8 @@ def multi_table_update_test(clickhouse_node, mysql_node, service_name): check_query(clickhouse_node, "SELECT * FROM multi_table_update.a", "1\tbaz\n") check_query(clickhouse_node, "SELECT * FROM multi_table_update.b", "1\tquux\n") + clickhouse_node.query("DROP DATABASE multi_table_update") + mysql_node.query("DROP DATABASE multi_table_update") def system_tables_test(clickhouse_node, mysql_node, service_name): @@ -1549,6 +1557,9 @@ def system_tables_test(clickhouse_node, mysql_node, service_name): "intDiv(id, 4294967)\tid\tid\n", ) + 
clickhouse_node.query("DROP DATABASE system_tables_test") + mysql_node.query("DROP DATABASE system_tables_test") + def materialize_with_column_comments_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test") From f4ed10c0a28b52f140f542c7ab0b21e1edf9a0c0 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Tue, 13 Jun 2023 14:44:39 +0200 Subject: [PATCH 385/722] Update src/Storages/StorageReplicatedMergeTree.cpp --- src/Storages/StorageReplicatedMergeTree.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index fafb3b124f2..84eae32495d 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -4751,10 +4751,10 @@ void StorageReplicatedMergeTree::read( } else { - header - = InterpreterSelectQuery(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock(); modified_query_ast = ClusterProxy::rewriteSelectQuery(local_context, query_info.query, table_id.database_name, table_id.table_name, /*remote_table_function_ptr*/nullptr); + header + = InterpreterSelectQuery(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock(); } auto cluster = local_context->getCluster(local_context->getSettingsRef().cluster_for_parallel_replicas); From 46c23b3f8d185bf79cb819c2beff0216ca73c4bd Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Tue, 13 Jun 2023 15:46:54 +0200 Subject: [PATCH 386/722] Fixed docs check fails --- docs/en/sql-reference/table-functions/azureBlobStorage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index 7a362710b9c..8587d9839b8 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -1,5 +1,5 @@ --- -slug: /en/sql-reference/table-functions/azure_blob_storage +slug: /en/sql-reference/table-functions/azureBlobStorageg sidebar_label: azureBlobStorage keywords: [azure blob storage] --- From ab020f9311eea0f657f57493e14191e75e51f8af Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Tue, 13 Jun 2023 15:48:42 +0200 Subject: [PATCH 387/722] Fixed typo --- docs/en/sql-reference/table-functions/azureBlobStorage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index 8587d9839b8..5175aabd5d1 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -1,5 +1,5 @@ --- -slug: /en/sql-reference/table-functions/azureBlobStorageg +slug: /en/sql-reference/table-functions/azureBlobStorage sidebar_label: azureBlobStorage keywords: [azure blob storage] --- From 3a2fa65075ab32a04a377cef632dd1679dea02b0 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 13 Jun 2023 16:02:54 +0200 Subject: [PATCH 388/722] fix 'Illegal column timezone' in stress tests --- docker/test/upgrade/run.sh | 6 ++---- tests/config/install.sh | 1 + tests/config/users.d/nonconst_timezone.xml | 7 +++++++ 3 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 tests/config/users.d/nonconst_timezone.xml diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh index 
6f7d3999f1d..951c443c30d 100644 --- a/docker/test/upgrade/run.sh +++ b/docker/test/upgrade/run.sh @@ -59,8 +59,7 @@ install_packages previous_release_package_folder # available for dump via clickhouse-local configure -# it contains some new settings, but we can safely remove it -rm /etc/clickhouse-server/config.d/merge_tree.xml +rm /etc/clickhouse-server/users.d/nonconst_timezone.xml start stop @@ -86,8 +85,7 @@ export USE_S3_STORAGE_FOR_MERGE_TREE=1 export ZOOKEEPER_FAULT_INJECTION=0 configure -# it contains some new settings, but we can safely remove it -rm /etc/clickhouse-server/config.d/merge_tree.xml +rm /etc/clickhouse-server/users.d/nonconst_timezone.xml start diff --git a/tests/config/install.sh b/tests/config/install.sh index efa5a9c086e..b2153db1b2c 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -78,6 +78,7 @@ ln -sf $SRC_PATH/users.d/enable_blobs_check.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/marks.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/insert_keeper_retries.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/prefetch_settings.xml $DEST_SERVER_PATH/users.d/ +ln -sf $SRC_PATH/users.d/nonconst_timezone.xml $DEST_SERVER_PATH/users.d/ if [[ -n "$USE_NEW_ANALYZER" ]] && [[ "$USE_NEW_ANALYZER" -eq 1 ]]; then ln -sf $SRC_PATH/users.d/analyzer.xml $DEST_SERVER_PATH/users.d/ diff --git a/tests/config/users.d/nonconst_timezone.xml b/tests/config/users.d/nonconst_timezone.xml new file mode 100644 index 00000000000..c7e9de5ab69 --- /dev/null +++ b/tests/config/users.d/nonconst_timezone.xml @@ -0,0 +1,7 @@ + + + + 1 + + + From 8ea7560d898879e74887e042aca0a6c60031191b Mon Sep 17 00:00:00 2001 From: Val Doroshchuk Date: Tue, 13 Jun 2023 16:28:53 +0200 Subject: [PATCH 389/722] MaterializedMySQL: Add additional test case to insert_with_modify_binlog_checksum (#50884) --- .../materialize_with_ddl.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py b/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py index 2bbbe9a3f13..f5c28832f79 100644 --- a/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py +++ b/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py @@ -1050,6 +1050,8 @@ def select_without_columns(clickhouse_node, mysql_node, service_name): def insert_with_modify_binlog_checksum(clickhouse_node, mysql_node, service_name): + clickhouse_node.query("DROP DATABASE IF EXISTS test_checksum") + mysql_node.query("DROP DATABASE IF EXISTS test_checksum") mysql_node.query("CREATE DATABASE test_checksum") mysql_node.query("CREATE TABLE test_checksum.t (a INT PRIMARY KEY, b varchar(200))") clickhouse_node.query( @@ -1081,6 +1083,21 @@ def insert_with_modify_binlog_checksum(clickhouse_node, mysql_node, service_name "1\t1111\n2\t2222\n3\t3333\n", ) + clickhouse_node.query("DROP DATABASE test_checksum") + mysql_node.query("SET GLOBAL binlog_checksum=NONE") + clickhouse_node.query( + "CREATE DATABASE test_checksum ENGINE = MaterializeMySQL('{}:3306', 'test_checksum', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query(clickhouse_node, "SHOW TABLES FROM test_checksum FORMAT TSV", "t\n") + mysql_node.query("INSERT INTO test_checksum.t VALUES(4, '4444')") + check_query( + clickhouse_node, + "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", + "1\t1111\n2\t2222\n3\t3333\n4\t4444\n", + ) + clickhouse_node.query("DROP DATABASE test_checksum") mysql_node.query("DROP DATABASE 
test_checksum") From c253c70510008eda1fc3aadb72cf5c8a92e875bb Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Tue, 13 Jun 2023 16:33:36 +0200 Subject: [PATCH 390/722] Fix for MDXContent --- docs/en/engines/table-engines/mergetree-family/annindexes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index 16e244077a7..6b9c3d6157f 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -202,10 +202,10 @@ CHECK length(vectors) = 256`. Setting `annoy_index_search_k_nodes` (default: `NumTrees * LIMIT`) determines how many tree nodes are inspected during SELECTs. Larger values mean more accurate results at the cost of longer query runtime: -``` sql +```sql SELECT * FROM table_name ORDER BY L2Distance(vectors, Point) LIMIT N -SETTINGS annoy_index_search_k_nodes=100 +SETTINGS annoy_index_search_k_nodes=100; ``` From 263be33297a2ada5e5c5281924b56e5ffaa3f80f Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 13 Jun 2023 16:37:52 +0200 Subject: [PATCH 391/722] Fix tests for throttling by allowing more margin of error for trottling event Right now 02703_max_local_write_bandwidth is flaky, and the reason I believe is that the server spent spent sometime somewhere else, which means that the throttler will sleep less. But what is important here is that the overall query duration time matches the expectation, so it is OK to match the LocalWriteThrottlerSleepMicroseconds/LocalReadThrottlerSleepMicroseconds with some error rate. Signed-off-by: Azat Khuzhin --- tests/queries/0_stateless/02703_max_local_read_bandwidth.sh | 2 +- tests/queries/0_stateless/02703_max_local_write_bandwidth.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh b/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh index d47e2f363bd..c78cd202f1b 100755 --- a/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh +++ b/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh @@ -32,7 +32,7 @@ for read_method in "${read_methods[@]}"; do query_duration_ms >= 7e3, ProfileEvents['ReadBufferFromFileDescriptorReadBytes'] > 8e6, ProfileEvents['LocalReadThrottlerBytes'] > 8e6, - ProfileEvents['LocalReadThrottlerSleepMicroseconds'] > 7e6*0.9 + ProfileEvents['LocalReadThrottlerSleepMicroseconds'] > 7e6*0.5 FROM system.query_log WHERE current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' AND type != 'QueryStart' " diff --git a/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh b/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh index 41165d35d37..ccde0903278 100755 --- a/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh +++ b/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh @@ -19,7 +19,7 @@ $CLICKHOUSE_CLIENT -nm -q " query_duration_ms >= 7e3, ProfileEvents['WriteBufferFromFileDescriptorWriteBytes'] > 8e6, ProfileEvents['LocalWriteThrottlerBytes'] > 8e6, - ProfileEvents['LocalWriteThrottlerSleepMicroseconds'] > 7e6*0.9 + ProfileEvents['LocalWriteThrottlerSleepMicroseconds'] > 7e6*0.5 FROM system.query_log WHERE current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' AND type != 'QueryStart' " From 0f4e3a34e846c3a635456dbc8cafa3c12c91155b Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 13 Jun 2023 16:42:30 +0200 Subject: [PATCH 
392/722] Update 02784_projections_read_in_order_bug.sql --- .../0_stateless/02784_projections_read_in_order_bug.sql | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql b/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql index 52a3a6127ac..9595fc9ae08 100644 --- a/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql +++ b/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql @@ -37,10 +37,6 @@ create table events ( timestamp )) engine = MergeTree order by (organisation_id, session_id, timestamp) settings index_granularity = 3; - -#insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02'), toString(0), reinterpretAsUUID(0), toString(0)); -#insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), now(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02'), toString(0), reinterpretAsUUID(0), toString(0)); - insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)); insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), 
reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)); From 52a460df67f38f92e67316115fde6139cb1c7937 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 13 Jun 2023 16:43:35 +0200 Subject: [PATCH 393/722] Tests with parallel replicas are no more "always green" (#50896) --- src/Interpreters/InterpreterSelectQuery.cpp | 6 +-- tests/ci/functional_test_check.py | 42 +++++-------------- .../1_stateful/00013_sorting_of_nested.sql | 3 -- .../1_stateful/00022_merge_prewhere.sql | 2 - .../1_stateful/00042_any_left_join.sql | 2 - .../1_stateful/00043_any_left_join.sql | 2 - .../1_stateful/00044_any_left_join_string.sql | 2 - .../1_stateful/00063_loyalty_joins.sql | 2 - .../00065_loyalty_with_storage_join.sql | 2 - tests/queries/1_stateful/00074_full_join.sql | 2 - .../1_stateful/00075_left_array_join.sql | 2 - ...0079_array_join_not_used_joined_column.sql | 2 - .../1_stateful/00080_array_join_and_union.sql | 2 - .../1_stateful/00084_external_aggregation.sql | 2 - tests/queries/1_stateful/00092_obfuscator.sh | 3 +- .../1_stateful/00096_obfuscator_save_load.sh | 2 - .../00146_aggregate_function_uniq.sql | 2 - .../00149_quantiles_timing_distributed.sql | 2 +- .../00152_insert_different_granularity.sql | 2 +- ...00156_max_execution_speed_sample_merge.sql | 3 -- .../1_stateful/00166_explain_estimate.sql | 2 +- tests/queries/1_stateful/00170_s3_cache.sql | 2 +- ...0171_grouping_aggregated_transform_bug.sql | 2 +- 23 files changed, 19 insertions(+), 74 deletions(-) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index e84a400a220..1f95b1ebf9f 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -477,7 +477,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( /// Check support for JOIN for parallel replicas with custom key if (joined_tables.tablesCount() > 1 && !settings.parallel_replicas_custom_key.value.empty()) { - LOG_WARNING(log, "JOINs are not supported with parallel_replicas_custom_key. Query will be executed without using them."); + LOG_DEBUG(log, "JOINs are not supported with parallel_replicas_custom_key. Query will be executed without using them."); context->setSetting("parallel_replicas_custom_key", String{""}); } @@ -487,7 +487,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( { if (settings.allow_experimental_parallel_reading_from_replicas == 1) { - LOG_WARNING(log, "FINAL modifier is not supported with parallel replicas. Query will be executed without using them."); + LOG_DEBUG(log, "FINAL modifier is not supported with parallel replicas. 
Query will be executed without using them."); context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); context->setSetting("parallel_replicas_custom_key", String{""}); } @@ -503,7 +503,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( { if (settings.allow_experimental_parallel_reading_from_replicas == 1) { - LOG_WARNING(log, "To use parallel replicas with plain MergeTree tables please enable setting `parallel_replicas_for_non_replicated_merge_tree`. For now query will be executed without using them."); + LOG_DEBUG(log, "To use parallel replicas with plain MergeTree tables please enable setting `parallel_replicas_for_non_replicated_merge_tree`. For now query will be executed without using them."); context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); } else if (settings.allow_experimental_parallel_reading_from_replicas == 2) diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 037bb13f1f8..864c3a81acf 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -378,34 +378,16 @@ def main(): print(f"::notice:: {check_name} Report url: {report_url}") if args.post_commit_status == "commit_status": - if "parallelreplicas" in check_name.lower(): - post_commit_status( - commit, - "success", - report_url, - description, - check_name_with_group, - pr_info, - ) - else: - post_commit_status( - commit, state, report_url, description, check_name_with_group, pr_info - ) + post_commit_status( + commit, state, report_url, description, check_name_with_group, pr_info + ) elif args.post_commit_status == "file": - if "parallelreplicas" in check_name.lower(): - post_commit_status_to_file( - post_commit_path, - description, - "success", - report_url, - ) - else: - post_commit_status_to_file( - post_commit_path, - description, - state, - report_url, - ) + post_commit_status_to_file( + post_commit_path, + description, + state, + report_url, + ) else: raise Exception( f'Unknown post_commit_status option "{args.post_commit_status}"' @@ -423,11 +405,7 @@ def main(): ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) if state != "success": - # Parallel replicas are always green for now - if ( - FORCE_TESTS_LABEL in pr_info.labels - or "parallelreplicas" in check_name.lower() - ): + if FORCE_TESTS_LABEL in pr_info.labels: print(f"'{FORCE_TESTS_LABEL}' enabled, will report success") else: sys.exit(1) diff --git a/tests/queries/1_stateful/00013_sorting_of_nested.sql b/tests/queries/1_stateful/00013_sorting_of_nested.sql index f97120e2b98..7f4a5002a7b 100644 --- a/tests/queries/1_stateful/00013_sorting_of_nested.sql +++ b/tests/queries/1_stateful/00013_sorting_of_nested.sql @@ -1,4 +1 @@ --- Tags: no-parallel-replicas - SELECT ParsedParams.Key1 FROM test.visits FINAL WHERE VisitID != 0 AND notEmpty(ParsedParams.Key1) ORDER BY VisitID LIMIT 10 - diff --git a/tests/queries/1_stateful/00022_merge_prewhere.sql b/tests/queries/1_stateful/00022_merge_prewhere.sql index 400a896d5a8..74a3677b68e 100644 --- a/tests/queries/1_stateful/00022_merge_prewhere.sql +++ b/tests/queries/1_stateful/00022_merge_prewhere.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel-replicas - DROP TABLE IF EXISTS test.merge_hits; CREATE TABLE IF NOT EXISTS test.merge_hits AS test.hits ENGINE = Merge(test, '^hits$'); SELECT count() FROM test.merge_hits WHERE AdvEngineID = 2; diff --git a/tests/queries/1_stateful/00042_any_left_join.sql b/tests/queries/1_stateful/00042_any_left_join.sql index 
c7c0f0f987a..b87cf88f007 100644 --- a/tests/queries/1_stateful/00042_any_left_join.sql +++ b/tests/queries/1_stateful/00042_any_left_join.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel-replicas - SELECT EventDate, hits, diff --git a/tests/queries/1_stateful/00043_any_left_join.sql b/tests/queries/1_stateful/00043_any_left_join.sql index 6b8cce54051..704d38f727a 100644 --- a/tests/queries/1_stateful/00043_any_left_join.sql +++ b/tests/queries/1_stateful/00043_any_left_join.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel-replicas - SELECT EventDate, count() AS hits, diff --git a/tests/queries/1_stateful/00044_any_left_join_string.sql b/tests/queries/1_stateful/00044_any_left_join_string.sql index ceb7a1c1783..a4f2e9e1b96 100644 --- a/tests/queries/1_stateful/00044_any_left_join_string.sql +++ b/tests/queries/1_stateful/00044_any_left_join_string.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel-replicas - SELECT domain, hits, diff --git a/tests/queries/1_stateful/00063_loyalty_joins.sql b/tests/queries/1_stateful/00063_loyalty_joins.sql index 44f0767a87a..44b575cab85 100644 --- a/tests/queries/1_stateful/00063_loyalty_joins.sql +++ b/tests/queries/1_stateful/00063_loyalty_joins.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel-replicas - SET any_join_distinct_right_table_keys = 1; SET joined_subquery_requires_alias = 0; diff --git a/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql b/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql index 35f0c7b60b9..a0f41f8aa8d 100644 --- a/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql +++ b/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel-replicas - USE test; DROP TABLE IF EXISTS join; diff --git a/tests/queries/1_stateful/00074_full_join.sql b/tests/queries/1_stateful/00074_full_join.sql index c1d9e4be1a4..f049be2a74d 100644 --- a/tests/queries/1_stateful/00074_full_join.sql +++ b/tests/queries/1_stateful/00074_full_join.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel-replicas - set any_join_distinct_right_table_keys = 1; set joined_subquery_requires_alias = 0; diff --git a/tests/queries/1_stateful/00075_left_array_join.sql b/tests/queries/1_stateful/00075_left_array_join.sql index 3540d791157..1fd045a26bf 100644 --- a/tests/queries/1_stateful/00075_left_array_join.sql +++ b/tests/queries/1_stateful/00075_left_array_join.sql @@ -1,4 +1,2 @@ --- Tags: no-parallel-replicas - SELECT UserID, EventTime::DateTime('Asia/Dubai'), pp.Key1, pp.Key2, ParsedParams.Key1 FROM test.hits ARRAY JOIN ParsedParams AS pp WHERE CounterID = 1704509 ORDER BY UserID, EventTime, pp.Key1, pp.Key2 LIMIT 100; SELECT UserID, EventTime::DateTime('Asia/Dubai'), pp.Key1, pp.Key2, ParsedParams.Key1 FROM test.hits LEFT ARRAY JOIN ParsedParams AS pp WHERE CounterID = 1704509 ORDER BY UserID, EventTime, pp.Key1, pp.Key2 LIMIT 100; diff --git a/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql b/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql index 9431e1cf596..8e6742bb1e1 100644 --- a/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql +++ b/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel-replicas - SELECT PP.Key1 AS `ym:s:paramsLevel1`, sum(arrayAll(`x_1` -> `x_1`= '', ParsedParams.Key2)) AS `ym:s:visits` FROM test.hits ARRAY JOIN ParsedParams AS `PP` WHERE CounterID = 1704509 GROUP BY `ym:s:paramsLevel1` ORDER BY PP.Key1, `ym:s:visits` LIMIT 0, 100; SELECT PP.Key1 AS x1, ParsedParams.Key2 AS x2 FROM test.hits ARRAY JOIN 
ParsedParams AS PP WHERE CounterID = 1704509 ORDER BY x1, x2 LIMIT 10; SELECT ParsedParams.Key2 AS x FROM test.hits ARRAY JOIN ParsedParams AS PP ORDER BY x DESC LIMIT 10; diff --git a/tests/queries/1_stateful/00080_array_join_and_union.sql b/tests/queries/1_stateful/00080_array_join_and_union.sql index 2f2e5e9324f..d9aa1cc17cc 100644 --- a/tests/queries/1_stateful/00080_array_join_and_union.sql +++ b/tests/queries/1_stateful/00080_array_join_and_union.sql @@ -1,3 +1 @@ --- Tags: no-parallel-replicas - SELECT count() FROM (SELECT Goals.ID FROM test.visits ARRAY JOIN Goals WHERE CounterID = 842440 LIMIT 10 UNION ALL SELECT Goals.ID FROM test.visits ARRAY JOIN Goals WHERE CounterID = 842440 LIMIT 10); diff --git a/tests/queries/1_stateful/00084_external_aggregation.sql b/tests/queries/1_stateful/00084_external_aggregation.sql index 330aa158cf7..b3922eae049 100644 --- a/tests/queries/1_stateful/00084_external_aggregation.sql +++ b/tests/queries/1_stateful/00084_external_aggregation.sql @@ -1,5 +1,3 @@ --- Tags: no-random-settings, no-parallel-replicas - SET max_bytes_before_external_group_by = 200000000; SET max_memory_usage = 1500000000; diff --git a/tests/queries/1_stateful/00092_obfuscator.sh b/tests/queries/1_stateful/00092_obfuscator.sh index f19473f01ac..f9e0098a46c 100755 --- a/tests/queries/1_stateful/00092_obfuscator.sh +++ b/tests/queries/1_stateful/00092_obfuscator.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -# Tags: no-parallel-replicas -# clickhouse-local may not work with parallel replicas + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/1_stateful/00096_obfuscator_save_load.sh b/tests/queries/1_stateful/00096_obfuscator_save_load.sh index 1bb212e1bba..a88dfcdb9b9 100755 --- a/tests/queries/1_stateful/00096_obfuscator_save_load.sh +++ b/tests/queries/1_stateful/00096_obfuscator_save_load.sh @@ -1,6 +1,4 @@ #!/usr/bin/env bash -# Tags: no-parallel-replicas -# clickhouse-local may not work with parallel replicas CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/1_stateful/00146_aggregate_function_uniq.sql b/tests/queries/1_stateful/00146_aggregate_function_uniq.sql index 2cab6e70d22..fd3fde7636d 100644 --- a/tests/queries/1_stateful/00146_aggregate_function_uniq.sql +++ b/tests/queries/1_stateful/00146_aggregate_function_uniq.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel-replicas - SELECT RegionID, uniqHLL12(WatchID) AS X FROM remote('127.0.0.{1,2}', test, hits) GROUP BY RegionID HAVING X > 100000 ORDER BY RegionID ASC; SELECT RegionID, uniqCombined(WatchID) AS X FROM remote('127.0.0.{1,2}', test, hits) GROUP BY RegionID HAVING X > 100000 ORDER BY RegionID ASC; SELECT abs(uniq(WatchID) - uniqExact(WatchID)) FROM test.hits; diff --git a/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql b/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql index 5d2476226ba..6f910646fb7 100644 --- a/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql +++ b/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql @@ -1,4 +1,4 @@ --- Tags: distributed, no-parallel-replicas +-- Tags: distributed SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID); SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) 
WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1; diff --git a/tests/queries/1_stateful/00152_insert_different_granularity.sql b/tests/queries/1_stateful/00152_insert_different_granularity.sql index 35483149498..294d71b384b 100644 --- a/tests/queries/1_stateful/00152_insert_different_granularity.sql +++ b/tests/queries/1_stateful/00152_insert_different_granularity.sql @@ -1,4 +1,4 @@ --- Tags: no-tsan, no-replicated-database, no-parallel, no-parallel-replicas +-- Tags: no-tsan, no-replicated-database, no-parallel -- Tag no-replicated-database: Fails due to additional replicas or shards DROP TABLE IF EXISTS fixed_granularity_table; diff --git a/tests/queries/1_stateful/00156_max_execution_speed_sample_merge.sql b/tests/queries/1_stateful/00156_max_execution_speed_sample_merge.sql index 32079111f6c..e325c18200b 100644 --- a/tests/queries/1_stateful/00156_max_execution_speed_sample_merge.sql +++ b/tests/queries/1_stateful/00156_max_execution_speed_sample_merge.sql @@ -1,6 +1,3 @@ --- Tags: no-parallel-replicas --- Merge tables doesn't work with parallel replicas currently - SET max_execution_speed = 4000000, timeout_before_checking_execution_speed = 0; CREATE TEMPORARY TABLE times (t DateTime); diff --git a/tests/queries/1_stateful/00166_explain_estimate.sql b/tests/queries/1_stateful/00166_explain_estimate.sql index abac92ecb2e..c4071271736 100644 --- a/tests/queries/1_stateful/00166_explain_estimate.sql +++ b/tests/queries/1_stateful/00166_explain_estimate.sql @@ -1,4 +1,4 @@ --- Tags: no-replicated-database, no-parallel-replicas +-- Tags: no-replicated-database -- Tag no-replicated-database: Requires investigation EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID = 29103473; diff --git a/tests/queries/1_stateful/00170_s3_cache.sql b/tests/queries/1_stateful/00170_s3_cache.sql index 43e85af0bc3..23663a1844d 100644 --- a/tests/queries/1_stateful/00170_s3_cache.sql +++ b/tests/queries/1_stateful/00170_s3_cache.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel, no-random-settings, no-parallel-replicas +-- Tags: no-parallel, no-random-settings -- { echo } diff --git a/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql b/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql index 07788af927e..7068780a1b1 100644 --- a/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql +++ b/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql @@ -1,4 +1,4 @@ --- Tags: distributed, no-parallel-replicas +-- Tags: distributed SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS max_block_size = 63169; SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1, max_block_size = 63169; From 2e1f56ae336e198d8f388ce815292ec049a7fdc5 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Jun 2023 14:43:50 +0000 Subject: [PATCH 394/722] Address comments --- docs/en/engines/table-engines/special/file.md | 2 +- docs/en/operations/settings/settings.md | 6 +- docs/en/sql-reference/table-functions/file.md | 2 +- src/Storages/HDFS/StorageHDFS.cpp | 90 ++++++++-------- src/Storages/StorageFile.cpp | 29 ++--- src/Storages/StorageS3.cpp | 92 +++++++++------- src/Storages/StorageURL.cpp | 101 ++++++++++-------- 
src/Storages/StorageURL.h | 2 +- 8 files changed, 177 insertions(+), 147 deletions(-) diff --git a/docs/en/engines/table-engines/special/file.md b/docs/en/engines/table-engines/special/file.md index cf325961b6a..27945b30c03 100644 --- a/docs/en/engines/table-engines/special/file.md +++ b/docs/en/engines/table-engines/special/file.md @@ -99,4 +99,4 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da - [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. - [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. - [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default. -- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: read, pread, mmap (only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local. +- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index a138f8f5515..7a28e33bf90 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -3332,7 +3332,7 @@ Enables or disables creating a new file on each insert in file engine tables if Possible values: - 0 — `INSERT` query appends new data to the end of the file. -- 1 — `INSERT` query replaces existing content of the file with the new data. +- 1 — `INSERT` query creates a new file. Default value: `0`. @@ -3370,7 +3370,7 @@ initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc. Possible values: - 0 — `INSERT` query appends new data to the end of the file. -- 1 — `INSERT` query replaces existing content of the file with the new data. +- 1 — `INSERT` query creates a new file. Default value: `0`. @@ -3402,7 +3402,7 @@ initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc. Possible values: - 0 — `INSERT` query appends new data to the end of the file. -- 1 — `INSERT` query replaces existing content of the file with the new data. +- 1 — `INSERT` query creates a new file. Default value: `0`. diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index 749aafb6d00..f25da96fddb 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -18,7 +18,7 @@ file(path [,format] [,structure] [,compression]) **Parameters** -- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-user_files_path). Path to file support following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc', 'def'` — strings. 
+- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Path to file support following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc', 'def'` — strings. - `format` — The [format](/docs/en/interfaces/formats.md#formats) of the file. - `structure` — Structure of the table. Format: `'column1_name column1_type, column2_name column2_type, ...'`. - `compression` — The existing compression type when used in a `SELECT` query, or the desired compression type when used in an `INSERT` query. The supported compression types are `gz`, `br`, `xz`, `zst`, `lz4`, and `bz2`. diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index 9ea1d805db5..dd3e8fecfaa 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -140,14 +140,6 @@ namespace return LSWithRegexpMatching("/", fs, path_from_uri); } - - size_t getFileSize(const String & path_from_uri, const String & uri_without_path, ContextPtr context) - { - HDFSBuilderWrapper builder = createHDFSBuilder(uri_without_path + "/", context->getGlobalContext()->getConfigRef()); - HDFSFSPtr fs = createHDFSFS(builder.get()); - auto * info = hdfsGetPathInfo(fs.get(), path_from_uri.data()); - return info->mSize; - } } StorageHDFS::StorageHDFS( @@ -218,26 +210,36 @@ ColumnsDescription StorageHDFS::getTableStructureFromData( ReadBufferIterator read_buffer_iterator = [&, my_uri_without_path = uri_without_path, it = paths_with_info.begin(), first = true]( - ColumnsDescription & columns) mutable -> std::unique_ptr + ColumnsDescription &) mutable -> std::unique_ptr { - if (it == paths_with_info.end()) + PathWithInfo path_with_info; + std::unique_ptr buf; + while (true) { - if (first) - throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, - "Cannot extract table structure from {} format file, because all files are empty. " - "You must specify table structure manually", format); - return nullptr; + if (it == paths_with_info.end()) + { + if (first) + throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, + "Cannot extract table structure from {} format file, because all files are empty. 
" + "You must specify table structure manually", format); + return nullptr; + } + + path_with_info = *it++; + if (ctx->getSettingsRef().hdfs_skip_empty_files && path_with_info.info && path_with_info.info->size == 0) + continue; + + auto compression = chooseCompressionMethod(path_with_info.path, compression_method); + auto impl = std::make_unique(my_uri_without_path, path_with_info.path, ctx->getGlobalContext()->getConfigRef(), ctx->getReadSettings()); + const Int64 zstd_window_log_max = ctx->getSettingsRef().zstd_window_log_max; + buf = wrapReadBufferWithCompressionMethod(std::move(impl), compression, static_cast(zstd_window_log_max)); + + if (!ctx->getSettingsRef().hdfs_skip_empty_files || !buf->eof()) + { + first = false; + return buf; + } } - - auto path_with_info = *it++; - if (ctx->getSettingsRef().hdfs_skip_empty_files && path_with_info.info && path_with_info.info->size == 0) - return read_buffer_iterator(columns); - - first = false; - auto compression = chooseCompressionMethod(path_with_info.path, compression_method); - auto impl = std::make_unique(my_uri_without_path, path_with_info.path, ctx->getGlobalContext()->getConfigRef(), ctx->getReadSettings()); - const Int64 zstd_window_log_max = ctx->getSettingsRef().zstd_window_log_max; - return wrapReadBufferWithCompressionMethod(std::move(impl), compression, static_cast(zstd_window_log_max)); }; ColumnsDescription columns; @@ -362,26 +364,28 @@ HDFSSource::HDFSSource( bool HDFSSource::initialize() { - auto path_with_info = (*file_iterator)(); - if (path_with_info.path.empty()) - return false; - - current_path = path_with_info.path; - const auto [path_from_uri, uri_without_path] = getPathFromUriAndUriWithoutPath(current_path); - - if (getContext()->getSettingsRef().hdfs_skip_empty_files) + StorageHDFS::PathWithInfo path_with_info; + bool skip_empty_files = getContext()->getSettingsRef().hdfs_skip_empty_files; + while (true) { - auto file_size = path_with_info.info ? path_with_info.info->size : getFileSize(path_from_uri, uri_without_path, getContext()); - /// If file is empty and hdfs_skip_empty_files=1, skip it and go to the next file. 
- if (file_size == 0) - return initialize(); - } + path_with_info = (*file_iterator)(); + if (path_with_info.path.empty()) + return false; - auto compression = chooseCompressionMethod(path_from_uri, storage->compression_method); - auto impl = std::make_unique( - uri_without_path, path_from_uri, getContext()->getGlobalContext()->getConfigRef(), getContext()->getReadSettings()); - const Int64 zstd_window_log_max = getContext()->getSettingsRef().zstd_window_log_max; - read_buf = wrapReadBufferWithCompressionMethod(std::move(impl), compression, static_cast(zstd_window_log_max)); + if (path_with_info.info && skip_empty_files && path_with_info.info->size == 0) + continue; + + current_path = path_with_info.path; + const auto [path_from_uri, uri_without_path] = getPathFromUriAndUriWithoutPath(current_path); + + auto compression = chooseCompressionMethod(path_from_uri, storage->compression_method); + auto impl = std::make_unique( + uri_without_path, path_from_uri, getContext()->getGlobalContext()->getConfigRef(), getContext()->getReadSettings()); + const Int64 zstd_window_log_max = getContext()->getSettingsRef().zstd_window_log_max; + read_buf = wrapReadBufferWithCompressionMethod(std::move(impl), compression, static_cast(zstd_window_log_max)); + if (!skip_empty_files || !read_buf->eof()) + break; + } auto input_format = getContext()->getInputFormat(storage->format_name, *read_buf, block_for_format, max_block_size); diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 7fc143a6122..06f9d071706 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -404,21 +404,26 @@ ColumnsDescription StorageFile::getTableStructureFromFile( if (context->getSettingsRef().schema_inference_use_cache_for_file) columns_from_cache = tryGetColumnsFromCache(paths, format, format_settings, context); - ReadBufferIterator read_buffer_iterator = [&, it = paths.begin(), first = true](ColumnsDescription & columns) mutable -> std::unique_ptr + ReadBufferIterator read_buffer_iterator = [&, it = paths.begin(), first = true](ColumnsDescription &) mutable -> std::unique_ptr { - if (it == paths.end()) + String path; + struct stat file_stat; + do { - if (first) - throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, - "Cannot extract table structure from {} format file, because all files are empty. You must specify table structure manually", - format); - return nullptr; - } + if (it == paths.end()) + { + if (first) + throw Exception( + ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, + "Cannot extract table structure from {} format file, because all files are empty. 
You must specify table structure manually", + format); + return nullptr; + } - auto path = *it++; - auto file_stat = getFileStat(path, false, -1, "File"); - if (context->getSettingsRef().engine_file_skip_empty_files && file_stat.st_size == 0) - return read_buffer_iterator(columns); + path = *it++; + file_stat = getFileStat(path, false, -1, "File"); + } + while (context->getSettingsRef().engine_file_skip_empty_files && file_stat.st_size == 0); first = false; return createReadBuffer(path, file_stat, false, -1, compression_method, context); diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index c30973d99e1..5c5895744ac 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -575,19 +575,21 @@ StorageS3Source::StorageS3Source( StorageS3Source::ReaderHolder StorageS3Source::createReader() { - auto [current_key, info] = (*file_iterator)(); - if (current_key.empty()) - return {}; + KeyWithInfo key_with_info; + size_t object_size; + do + { + key_with_info = (*file_iterator)(); + if (key_with_info.key.empty()) + return {}; - size_t object_size = info ? info->size : S3::getObjectSize(*client, bucket, current_key, version_id, request_settings); + object_size = key_with_info.info ? key_with_info.info->size : S3::getObjectSize(*client, bucket, key_with_info.key, version_id, request_settings); + } + while (getContext()->getSettingsRef().s3_skip_empty_files && object_size == 0); - /// If object is empty and s3_skip_empty_files=1, skip it and go to the next key. - if (getContext()->getSettingsRef().s3_skip_empty_files && object_size == 0) - return createReader(); + auto compression_method = chooseCompressionMethod(key_with_info.key, compression_hint); - auto compression_method = chooseCompressionMethod(current_key, compression_hint); - - auto read_buf = createS3ReadBuffer(current_key, object_size); + auto read_buf = createS3ReadBuffer(key_with_info.key, object_size); auto input_format = FormatFactory::instance().getInput( format, *read_buf, sample_block, getContext(), max_block_size, format_settings, std::nullopt, std::nullopt, @@ -606,7 +608,7 @@ StorageS3Source::ReaderHolder StorageS3Source::createReader() auto pipeline = std::make_unique(QueryPipelineBuilder::getPipeline(std::move(builder))); auto current_reader = std::make_unique(*pipeline); - return ReaderHolder{fs::path(bucket) / current_key, std::move(read_buf), std::move(pipeline), std::move(current_reader)}; + return ReaderHolder{fs::path(bucket) / key_with_info.key, std::move(read_buf), std::move(pipeline), std::move(current_reader)}; } std::future StorageS3Source::createReaderAsync() @@ -1451,41 +1453,53 @@ ColumnsDescription StorageS3::getTableStructureFromDataImpl( ReadBufferIterator read_buffer_iterator = [&, first = true](ColumnsDescription & cached_columns) mutable -> std::unique_ptr { - auto [key, info] = (*file_iterator)(); + StorageS3Source::KeyWithInfo key_with_info; + std::unique_ptr buf; - if (key.empty()) + while (true) { - if (first) - throw Exception( - ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, - "Cannot extract table structure from {} format file, because there are no files with provided path " - "in S3 or all files are empty. You must specify table structure manually", configuration.format); + key_with_info = (*file_iterator)(); - return nullptr; - } - - if (ctx->getSettingsRef().s3_skip_empty_files && info->size == 0) - return read_buffer_iterator(cached_columns); - - /// S3 file iterator could get new keys after new iteration, check them in schema cache. 
- if (ctx->getSettingsRef().schema_inference_use_cache_for_s3 && read_keys.size() > prev_read_keys_size) - { - columns_from_cache = tryGetColumnsFromCache(read_keys.begin() + prev_read_keys_size, read_keys.end(), configuration, format_settings, ctx); - prev_read_keys_size = read_keys.size(); - if (columns_from_cache) + if (key_with_info.key.empty()) { - cached_columns = *columns_from_cache; + if (first) + throw Exception( + ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, + "Cannot extract table structure from {} format file, because there are no files with provided path " + "in S3 or all files are empty. You must specify table structure manually", + configuration.format); + return nullptr; } - } - first = false; - int zstd_window_log_max = static_cast(ctx->getSettingsRef().zstd_window_log_max); - return wrapReadBufferWithCompressionMethod( - std::make_unique( - configuration.client, configuration.url.bucket, key, configuration.url.version_id, configuration.request_settings, ctx->getReadSettings()), - chooseCompressionMethod(key, configuration.compression_method), - zstd_window_log_max); + /// S3 file iterator could get new keys after new iteration, check them in schema cache. + if (ctx->getSettingsRef().schema_inference_use_cache_for_s3 && read_keys.size() > prev_read_keys_size) + { + columns_from_cache = tryGetColumnsFromCache(read_keys.begin() + prev_read_keys_size, read_keys.end(), configuration, format_settings, ctx); + prev_read_keys_size = read_keys.size(); + if (columns_from_cache) + { + cached_columns = *columns_from_cache; + return nullptr; + } + } + + if (ctx->getSettingsRef().s3_skip_empty_files && key_with_info.info && key_with_info.info->size == 0) + continue; + + int zstd_window_log_max = static_cast(ctx->getSettingsRef().zstd_window_log_max); + buf = wrapReadBufferWithCompressionMethod( + std::make_unique( + configuration.client, configuration.url.bucket, key_with_info.key, configuration.url.version_id, configuration.request_settings, ctx->getReadSettings()), + chooseCompressionMethod(key_with_info.key, configuration.compression_method), + zstd_window_log_max); + + if (!ctx->getSettingsRef().s3_skip_empty_files || !buf->eof()) + { + first = false; + return buf; + } + } }; ColumnsDescription columns; diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index fc5525b42d2..4e75a4d54cb 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -241,30 +241,34 @@ StorageURLSource::StorageURLSource( /// Lazy initialization. We should not perform requests in constructor, because we need to do it in query pipeline. initialize = [=, this]() { - const auto current_uri_options = (*uri_iterator)(); - if (current_uri_options.empty()) - return false; + std::vector current_uri_options; + std::pair> uri_and_buf; + do + { + current_uri_options = (*uri_iterator)(); + if (current_uri_options.empty()) + return false; - auto first_option = uri_options.begin(); - auto [actual_uri, buf] = getFirstAvailableURIAndReadBuffer( - first_option, - current_uri_options.end(), - context, - params, - http_method, - callback, - timeouts, - credentials, - headers, - glob_url, - current_uri_options.size() == 1); + auto first_option = current_uri_options.cbegin(); + uri_and_buf = getFirstAvailableURIAndReadBuffer( + first_option, + current_uri_options.end(), + context, + params, + http_method, + callback, + timeouts, + credentials, + headers, + glob_url, + current_uri_options.size() == 1); - /// If file is empty and engine_url_skip_empty_files=1, skip it and go to the next file. 
- if (context->getSettingsRef().engine_url_skip_empty_files && getFileSizeFromReadBuffer(*buf) == 0) - return initialize(); + /// If file is empty and engine_url_skip_empty_files=1, skip it and go to the next file. + } + while (context->getSettingsRef().engine_url_skip_empty_files && uri_and_buf.second->eof()); - curr_uri = actual_uri; - read_buf = std::move(buf); + curr_uri = uri_and_buf.first; + read_buf = std::move(uri_and_buf.second); try { @@ -347,7 +351,7 @@ Chunk StorageURLSource::generate() return {}; } -std::tuple> StorageURLSource::getFirstAvailableURIAndReadBuffer( +std::pair> StorageURLSource::getFirstAvailableURIAndReadBuffer( std::vector::const_iterator & option, const std::vector::const_iterator & end, ContextPtr context, @@ -590,38 +594,41 @@ ColumnsDescription IStorageURLBase::getTableStructureFromData( if (context->getSettingsRef().schema_inference_use_cache_for_url) columns_from_cache = tryGetColumnsFromCache(urls_to_check, headers, credentials, format, format_settings, context); - ReadBufferIterator read_buffer_iterator = [&, it = urls_to_check.cbegin(), first = true](ColumnsDescription & columns) mutable -> std::unique_ptr + ReadBufferIterator read_buffer_iterator = [&, it = urls_to_check.cbegin(), first = true](ColumnsDescription &) mutable -> std::unique_ptr { - if (it == urls_to_check.cend()) + std::pair> uri_and_buf; + do { - if (first) - throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, - "Cannot extract table structure from {} format file, because all files are empty. " - "You must specify table structure manually", format); - return nullptr; - } + if (it == urls_to_check.cend()) + { + if (first) + throw Exception( + ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, + "Cannot extract table structure from {} format file, because all files are empty. 
" + "You must specify table structure manually", + format); + return nullptr; + } - auto [_, buf] = StorageURLSource::getFirstAvailableURIAndReadBuffer( - it, - urls_to_check.cend(), - context, - {}, - Poco::Net::HTTPRequest::HTTP_GET, - {}, - getHTTPTimeouts(context), - credentials, - headers, - false, - false); + uri_and_buf = StorageURLSource::getFirstAvailableURIAndReadBuffer( + it, + urls_to_check.cend(), + context, + {}, + Poco::Net::HTTPRequest::HTTP_GET, + {}, + getHTTPTimeouts(context), + credentials, + headers, + false, + false); - ++it; - - if (context->getSettingsRef().engine_url_skip_empty_files && buf_factory->getFileSize() == 0) - return read_buffer_iterator(columns); + ++it; + } while (context->getSettingsRef().engine_url_skip_empty_files && uri_and_buf.second->eof()); first = false; return wrapReadBufferWithCompressionMethod( - std::move(buf), + std::move(uri_and_buf.second), compression_method, static_cast(context->getSettingsRef().zstd_window_log_max)); }; diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index 50928ed6962..a5c1174377b 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -183,7 +183,7 @@ public: static Block getHeader(Block sample_block, const std::vector & requested_virtual_columns); - static std::tuple> getFirstAvailableURIAndReadBuffer( + static std::pair> getFirstAvailableURIAndReadBuffer( std::vector::const_iterator & option, const std::vector::const_iterator & end, ContextPtr context, From 46fbe7fb26d06abdb8f5e21f00d5dd215a084b9b Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Jun 2023 15:01:51 +0000 Subject: [PATCH 395/722] 01746_convert_type_with_default: Temporarily disable flaky test --- .../0_stateless/01746_convert_type_with_default.reference | 1 - tests/queries/0_stateless/01746_convert_type_with_default.sql | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.reference b/tests/queries/0_stateless/01746_convert_type_with_default.reference index e5aa42e6116..0edea4de31e 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.reference +++ b/tests/queries/0_stateless/01746_convert_type_with_default.reference @@ -40,7 +40,6 @@ 1970-01-20 1970-01-20 2149-06-06 -1970-01-02 2023-05-30 2023-05-30 2023-05-30 14:38:20 diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index e6e420ae4c0..c74b185f7fd 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -54,7 +54,7 @@ select toDateOrDefault(cast(19 as Int256)); select toDateOrDefault(cast(19 as UInt256)); select toDateOrDefault(65535); -select toDateOrDefault(122400); +-- select toDateOrDefault(122400); select toDateOrDefault(19507, '2000-01-01'::Date); select toDateOrDefault(-1, '2023-05-30'::Date); @@ -80,4 +80,4 @@ select toDateTimeOrDefault(cast(19 as Int128), 'UTC'); select toDateTimeOrDefault(cast(19 as UInt128), 'UTC'); select toDateTimeOrDefault(cast(19 as Int256), 'UTC'); -select toDateTimeOrDefault(cast(19 as UInt256), 'UTC'); \ No newline at end of file +select toDateTimeOrDefault(cast(19 as UInt256), 'UTC'); From 57cdd3a25d25ca8274e3b68cf75cbaa9bf94daa7 Mon Sep 17 00:00:00 2001 From: rfraposa Date: Tue, 13 Jun 2023 09:13:13 -0600 Subject: [PATCH 396/722] Update annindexes.md --- .../table-engines/mergetree-family/annindexes.md | 12 ++++++------ 1 file changed, 6 
insertions(+), 6 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index 16e244077a7..fe971571419 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -54,7 +54,7 @@ CREATE TABLE table ( `id` Int64, `vectors` Array(Float32), - INDEX vectors TYPE () [GRANULARITY ] + INDEX [ann_index_name vectors TYPE [ann_index_type]([ann_index_parameters]) [GRANULARITY [N]] ) ENGINE = MergeTree ORDER BY id; @@ -67,7 +67,7 @@ CREATE TABLE table ( `id` Int64, `vectors` Tuple(Float32[, Float32[, ...]]), - INDEX vectors TYPE () [GRANULARITY ] + INDEX [ann_index_name] vectors TYPE [ann_index_type]([ann_index_parameters]) [GRANULARITY [N]] ) ENGINE = MergeTree ORDER BY id; @@ -114,7 +114,7 @@ without `LIMIT` clause cannot utilize ANN indexes. Also ANN indexes are only use approximate neighbor search. **Differences to Skip Indexes** Similar to regular [skip indexes](https://clickhouse.com/docs/en/optimize/skipping-indexes), ANN indexes are -constructed over granules and each indexed block consists of `GRANULARITY = `-many granules (`` = 1 by default for normal skip +constructed over granules and each indexed block consists of `GRANULARITY = [N]`-many granules (`[N]` = 1 by default for normal skip indexes). For example, if the primary index granularity of the table is 8192 (setting `index_granularity = 8192`) and `GRANULARITY = 2`, then each indexed block will contain 16384 rows. However, data structures and algorithms for approximate neighborhood search (usually provided by external libraries) are inherently row-oriented. They store a compact representation of a set of rows and also return rows for @@ -130,7 +130,7 @@ skip data at the granularity of index blocks. The `GRANULARITY` parameter determines how many ANN sub-indexes are created. Bigger `GRANULARITY` values mean fewer but larger ANN sub-indexes, up to the point where a column (or a column's data part) has only a single sub-index. In that case, the sub-index has a "global" view of all column rows and can directly return all granules of the column (part) with relevant rows (there are at most `LIMIT -`-many such granules). In a second step, ClickHouse will load these granules and identify the actually best rows by performing a +[N]`-many such granules). In a second step, ClickHouse will load these granules and identify the actually best rows by performing a brute-force distance calculation over all rows of the granules. With a small `GRANULARITY` value, each of the sub-indexes returns up to `LIMIT N`-many granules. As a result, more granules need to be loaded and post-filtered. Note that the search accuracy is with both cases equally good, only the processing performance differs. 
It is generally recommended to use a large `GRANULARITY` for ANN indexes and fall @@ -169,7 +169,7 @@ CREATE TABLE table ( id Int64, vectors Array(Float32), - INDEX vectors TYPE annoy([Distance[, NumTrees]]) [GRANULARITY N] + INDEX [ann_index_name] vectors TYPE annoy([Distance[, NumTrees]]) [GRANULARITY N] ) ENGINE = MergeTree ORDER BY id; @@ -182,7 +182,7 @@ CREATE TABLE table ( id Int64, vectors Tuple(Float32[, Float32[, ...]]), - INDEX vectors TYPE annoy([Distance[, NumTrees]]) [GRANULARITY N] + INDEX [ann_index_name] vectors TYPE annoy([Distance[, NumTrees]]) [GRANULARITY N] ) ENGINE = MergeTree ORDER BY id; From 20ea87e527eb76bb296a46f0deab59abdd4a4325 Mon Sep 17 00:00:00 2001 From: Dan Roscigno Date: Tue, 13 Jun 2023 11:17:33 -0400 Subject: [PATCH 397/722] Update annindexes.md Don't break code snippets across lines. --- docs/en/engines/table-engines/mergetree-family/annindexes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index 16e244077a7..20e49f1c34c 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -129,8 +129,8 @@ skip data at the granularity of index blocks. The `GRANULARITY` parameter determines how many ANN sub-indexes are created. Bigger `GRANULARITY` values mean fewer but larger ANN sub-indexes, up to the point where a column (or a column's data part) has only a single sub-index. In that case, the sub-index has a -"global" view of all column rows and can directly return all granules of the column (part) with relevant rows (there are at most `LIMIT -`-many such granules). In a second step, ClickHouse will load these granules and identify the actually best rows by performing a +"global" view of all column rows and can directly return all granules of the column (part) with relevant rows (there are at most +`LIMIT `-many such granules). In a second step, ClickHouse will load these granules and identify the actually best rows by performing a brute-force distance calculation over all rows of the granules. With a small `GRANULARITY` value, each of the sub-indexes returns up to `LIMIT N`-many granules. As a result, more granules need to be loaded and post-filtered. Note that the search accuracy is with both cases equally good, only the processing performance differs. It is generally recommended to use a large `GRANULARITY` for ANN indexes and fall From b850f1b9995d981d7ec047df8261fec302d3020a Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Tue, 13 Jun 2023 11:26:12 -0400 Subject: [PATCH 398/722] fix broken line --- docs/en/engines/table-engines/mergetree-family/annindexes.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index fe971571419..71e7e008bf2 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -129,8 +129,7 @@ skip data at the granularity of index blocks. The `GRANULARITY` parameter determines how many ANN sub-indexes are created. Bigger `GRANULARITY` values mean fewer but larger ANN sub-indexes, up to the point where a column (or a column's data part) has only a single sub-index. 
In that case, the sub-index has a -"global" view of all column rows and can directly return all granules of the column (part) with relevant rows (there are at most `LIMIT -[N]`-many such granules). In a second step, ClickHouse will load these granules and identify the actually best rows by performing a +"global" view of all column rows and can directly return all granules of the column (part) with relevant rows (there are at most `LIMIT [N]`-many such granules). In a second step, ClickHouse will load these granules and identify the actually best rows by performing a brute-force distance calculation over all rows of the granules. With a small `GRANULARITY` value, each of the sub-indexes returns up to `LIMIT N`-many granules. As a result, more granules need to be loaded and post-filtered. Note that the search accuracy is with both cases equally good, only the processing performance differs. It is generally recommended to use a large `GRANULARITY` for ANN indexes and fall From 76f69f2b44ba23dbb0afd50f26dd3fd62352a381 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Jun 2023 15:52:06 +0000 Subject: [PATCH 399/722] Revert overengineering --- src/Functions/FunctionSnowflake.h | 34 ++++++------------------------- 1 file changed, 6 insertions(+), 28 deletions(-) diff --git a/src/Functions/FunctionSnowflake.h b/src/Functions/FunctionSnowflake.h index c7ec6dca27f..b57e21e9a50 100644 --- a/src/Functions/FunctionSnowflake.h +++ b/src/Functions/FunctionSnowflake.h @@ -60,20 +60,9 @@ public: auto res_column = ColumnInt64::create(input_rows_count); auto & res_data = res_column->getData(); - if (const auto * src_column_non_const = typeid_cast(&src_column)) - { - const auto & src_data = src_column_non_const->getData(); - for (size_t i = 0; i < input_rows_count; ++i) - res_data[i] = (UInt32(src_data[i]) * 1000 - snowflake_epoch) << time_shift; - } - else if (const auto * src_column_const = typeid_cast(&src_column)) - { - UInt32 src_val = src_column_const->getValue(); - for (size_t i = 0; i < input_rows_count; ++i) - res_data[i] = (src_val * 1000 - snowflake_epoch) << time_shift; - } - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal argument for function {}", name); + const auto & src_data = typeid_cast(src_column).getData(); + for (size_t i = 0; i < input_rows_count; ++i) + res_data[i] = (Int64(src_data[i]) * 1000 - snowflake_epoch) << time_shift; return res_column; } @@ -172,20 +161,9 @@ public: auto res_column = ColumnInt64::create(input_rows_count); auto & res_data = res_column->getData(); - if (const auto * src_column_non_const = typeid_cast(&src_column)) - { - const auto & src_data = src_column_non_const->getData(); - for (size_t i = 0; i < input_rows_count; ++i) - res_data[i] = (UInt32(src_data[i]) * 1000 - snowflake_epoch) << time_shift; - } - else if (const auto * src_column_const = typeid_cast(&src_column)) - { - UInt32 src_val = src_column_const->getValue(); - for (size_t i = 0; i < input_rows_count; ++i) - res_data[i] = (src_val * 1000 - snowflake_epoch) << time_shift; - } - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal argument for function {}", name); + const auto & src_data = typeid_cast &>(src_column).getData(); + for (size_t i = 0; i < input_rows_count; ++i) + res_data[i] = (src_data[i] - snowflake_epoch) << time_shift; return res_column; } From 0bc624dc0291896001d45408e5316d23e28b3cc1 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Tue, 13 Jun 2023 17:53:19 +0200 Subject: [PATCH 400/722] Fix the statless tests image for old commits --- docker/test/stateless/run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index c0acb0291a4..21cb3168083 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -16,7 +16,7 @@ dpkg -i package_folder/clickhouse-client_*.deb ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test # shellcheck disable=SC1091 -source /usr/share/clickhouse-test/ci/attach_gdb.lib +source /usr/share/clickhouse-test/ci/attach_gdb.lib || true # FIXME: to not break old builds, clean on 2023-09-01 # install test configs /usr/share/clickhouse-test/config/install.sh @@ -88,7 +88,7 @@ fi sleep 5 -attach_gdb_to_clickhouse +attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01 function run_tests() { From a01056f67c787e069ca173cb63fafbfc5c6e5c96 Mon Sep 17 00:00:00 2001 From: santrancisco Date: Wed, 14 Jun 2023 02:33:48 +1000 Subject: [PATCH 401/722] Update orc submodule --- contrib/orc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/orc b/contrib/orc index c5d7755ba0b..f2c191f9653 160000 --- a/contrib/orc +++ b/contrib/orc @@ -1 +1 @@ -Subproject commit c5d7755ba0b9a95631c8daea4d094101f26ec761 +Subproject commit f2c191f9653a5ddbca016e024ca0fb61508f5eeb From 945981e5f69145908aef64819da58725ca8e67e4 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 13 Jun 2023 16:12:33 +0200 Subject: [PATCH 402/722] Fix tests sanity checks In #43147 the "tests" had been added to EXCLUDE_DIRS, and the reason for this is that there was some C++ code to ignore [1], however it also ignores snaity check for query_log. [1]: https://s3.amazonaws.com/clickhouse-test-reports/43147/63de577172ee024a08e76db69f5000568673db48/style_check.html v2: check-style: ignore $EXCLUDE_DIRS for some other sanity checks of tests Signed-off-by: Azat Khuzhin --- utils/check-style/check-style | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/utils/check-style/check-style b/utils/check-style/check-style index afaf2ee6d48..bd3ee8e02d6 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -13,7 +13,7 @@ # and then to run formatter only for the specified files. ROOT_PATH=$(git rev-parse --show-toplevel) -EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|poco/|memcpy/|consistent-hashing|benchmark|tests/|utils/keeper-bench/example.yaml' +EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|poco/|memcpy/|consistent-hashing|benchmark|tests/.*.cpp|utils/keeper-bench/example.yaml' # From [1]: # But since array_to_string_internal() in array.c still loops over array @@ -163,14 +163,12 @@ find $ROOT_PATH -not -path $ROOT_PATH'/contrib*' \( -name '*.yaml' -or -name '*. # Tests should not be named with "fail" in their names. It makes looking at the results less convenient. find $ROOT_PATH/tests/queries -iname '*fail*' | - grep -vP $EXCLUDE_DIRS | grep . && echo 'Tests should not be named with "fail" in their names. It makes looking at the results less convenient when you search for "fail" substring in browser.' # Queries to system.query_log/system.query_thread_log should have current_database = currentDatabase() condition # NOTE: it is not that accurate, but at least something. 
tests_with_query_log=( $( find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' | - grep -vP $EXCLUDE_DIRS | xargs grep --with-filename -e system.query_log -e system.query_thread_log | cut -d: -f1 | sort -u ) ) for test_case in "${tests_with_query_log[@]}"; do @@ -205,7 +203,6 @@ tables_with_database_column=( # NOTE: it is not that accuate, but at least something. tests_with_database_column=( $( find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' | - grep -vP $EXCLUDE_DIRS | xargs grep --with-filename $(printf -- "-e %s " "${tables_with_database_column[@]}") | grep -v -e ':--' -e ':#' | cut -d: -f1 | sort -u @@ -225,7 +222,6 @@ done # NOTE: it is not that accuate, but at least something. tests_with_replicated_merge_tree=( $( find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' | - grep -vP $EXCLUDE_DIRS | xargs grep --with-filename -e "Replicated.*MergeTree[ ]*(.*" | cut -d: -f1 | sort -u ) ) for test_case in "${tests_with_replicated_merge_tree[@]}"; do From dc6810601a65cf15a87459cdc72e5258d69949d2 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 13 Jun 2023 16:15:30 +0200 Subject: [PATCH 403/722] Remove DROP TABLE system.query_log from tests This is a very ugly hack that breaks artifacts, since after this the query_log is incomplete in artifacts. Signed-off-by: Azat Khuzhin --- .../02494_query_cache_case_agnostic_matching.sql | 4 ++-- .../0_stateless/02494_query_cache_events.reference | 1 + tests/queries/0_stateless/02494_query_cache_events.sql | 9 +++++---- .../0_stateless/02494_query_cache_normalize_ast.sql | 4 ++-- .../02494_query_cache_passive_usage.reference | 1 + .../0_stateless/02494_query_cache_passive_usage.sql | 10 +++++----- 6 files changed, 16 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.sql b/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.sql index 9f26d7759de..7dbd79059af 100644 --- a/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.sql +++ b/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.sql @@ -1,9 +1,8 @@ -- Tags: no-parallel -- Tag no-parallel: Messes with internal cache --- Start with empty query cache (QC) and query log +-- Start with empty query cache (QC) SYSTEM DROP QUERY CACHE; -DROP TABLE system.query_log SYNC; -- Insert an entry into the query cache. 
SELECT 1 SETTINGS use_query_cache = true; @@ -22,6 +21,7 @@ SYSTEM FLUSH LOGS; SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] FROM system.query_log WHERE type = 'QueryFinish' + AND current_database = currentDatabase() AND query = 'select 1 SETTINGS use_query_cache = true;'; SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_cache_events.reference b/tests/queries/0_stateless/02494_query_cache_events.reference index db60d3699e0..9bcd2820f27 100644 --- a/tests/queries/0_stateless/02494_query_cache_events.reference +++ b/tests/queries/0_stateless/02494_query_cache_events.reference @@ -3,4 +3,5 @@ 0 1 --- 1 +0 1 1 0 diff --git a/tests/queries/0_stateless/02494_query_cache_events.sql b/tests/queries/0_stateless/02494_query_cache_events.sql index 900b68f5eb2..05c0acad4b8 100644 --- a/tests/queries/0_stateless/02494_query_cache_events.sql +++ b/tests/queries/0_stateless/02494_query_cache_events.sql @@ -1,9 +1,8 @@ -- Tags: no-parallel -- Tag no-parallel: Messes with internal cache --- Start with empty query cache QC and query log +-- Start with empty query cache QC SYSTEM DROP QUERY CACHE; -DROP TABLE system.query_log SYNC; -- Run a query with QC on. The first execution is a QC miss. SELECT '---'; @@ -13,6 +12,7 @@ SYSTEM FLUSH LOGS; SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] FROM system.query_log WHERE type = 'QueryFinish' + AND current_database = currentDatabase() AND query = 'SELECT 1 SETTINGS use_query_cache = true;'; @@ -20,11 +20,12 @@ WHERE type = 'QueryFinish' SELECT '---'; SELECT 1 SETTINGS use_query_cache = true; -DROP TABLE system.query_log SYNC; SYSTEM FLUSH LOGS; SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] FROM system.query_log WHERE type = 'QueryFinish' - AND query = 'SELECT 1 SETTINGS use_query_cache = true;'; + AND current_database = currentDatabase() + AND query = 'SELECT 1 SETTINGS use_query_cache = true;' +ORDER BY event_time_microseconds; SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql index 5fd09eb935b..1dbb3ef8158 100644 --- a/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql +++ b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql @@ -1,9 +1,8 @@ -- Tags: no-parallel -- Tag no-parallel: Messes with internal cache --- Start with empty query cache (QC) and query log. +-- Start with empty query cache (QC) SYSTEM DROP QUERY CACHE; -DROP TABLE system.query_log SYNC; -- Run query whose result gets cached in the query cache. -- Besides "use_query_cache", pass two more knobs (one QC-specific knob and one non-QC-specific knob). 
We just care @@ -24,6 +23,7 @@ SYSTEM FLUSH LOGS; SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] FROM system.query_log WHERE type = 'QueryFinish' + AND current_database = currentDatabase() AND query = 'SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false, max_threads = 16;'; SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_cache_passive_usage.reference b/tests/queries/0_stateless/02494_query_cache_passive_usage.reference index edff09773d1..8b73647196e 100644 --- a/tests/queries/0_stateless/02494_query_cache_passive_usage.reference +++ b/tests/queries/0_stateless/02494_query_cache_passive_usage.reference @@ -9,4 +9,5 @@ ----- 1 1 +0 1 1 0 diff --git a/tests/queries/0_stateless/02494_query_cache_passive_usage.sql b/tests/queries/0_stateless/02494_query_cache_passive_usage.sql index 6143b5f7083..f0d2f6398a8 100644 --- a/tests/queries/0_stateless/02494_query_cache_passive_usage.sql +++ b/tests/queries/0_stateless/02494_query_cache_passive_usage.sql @@ -22,10 +22,7 @@ SELECT COUNT(*) FROM system.query_cache; SELECT '-----'; --- Run same query with passive mode again. There must still be one entry in the QC and we must have a QC hit. - --- Get rid of log of previous SELECT -DROP TABLE system.query_log SYNC; +/* Run same query with passive mode again. There must still be one entry in the QC and we must have a QC hit. */ SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false; SELECT COUNT(*) FROM system.query_cache; @@ -34,6 +31,9 @@ SYSTEM FLUSH LOGS; SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] FROM system.query_log WHERE type = 'QueryFinish' - AND query = 'SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;'; + AND current_database = currentDatabase() + /* NOTE: client incorrectly join comments from the previous line into query, hence LIKE */ + AND query LIKE '%\nSELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;' +ORDER BY event_time_microseconds; SYSTEM DROP QUERY CACHE; From 35825bc7d672974fc9219ca7e3608a32d0cd73bc Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 13 Jun 2023 16:48:28 +0200 Subject: [PATCH 404/722] Increase line-length limit for yamlllint CI reports [1]: /ClickHouse/tests/queries/0_stateless/data_ua_parser/browser.yaml 713:301 warning line too long (328 > 300 characters) (line-length) /ClickHouse/tests/queries/0_stateless/data_ua_parser/device.yaml 2606:301 warning line too long (529 > 300 characters) (line-length) 2616:301 warning line too long (348 > 300 characters) (line-length) 2630:301 warning line too long (377 > 300 characters) (line-length) 2637:301 warning line too long (447 > 300 characters) (line-length) [1]: https://s3.amazonaws.com/clickhouse-test-reports/50934/be4555c3226298d956ff650fab477d67bf73ba83/style_check/style_output.txt Signed-off-by: Azat Khuzhin --- .yamllint | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.yamllint b/.yamllint index fe161e71849..9d6550ac960 100644 --- a/.yamllint +++ b/.yamllint @@ -6,8 +6,10 @@ rules: level: warning indent-sequences: consistent line-length: - # there are some bash -c "", so this is OK - max: 300 + # there are: + # - bash -c "", so this is OK + # - yaml in tests + max: 1000 level: warning comments: min-spaces-from-content: 1 From 77775c6074601e49d44349f3d397229354571592 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 13 Jun 2023 16:49:45 +0200 Subject: [PATCH 405/722] Rename 
02701_fail_on_invalid_having to 02701_invalid_having_NOT_AN_AGGREGATE To remove "fail" from the test name, which is prohibited Signed-off-by: Azat Khuzhin --- ....reference => 02701_invalid_having_NOT_AN_AGGREGATE.reference} | 0 ...valid_having.sql => 02701_invalid_having_NOT_AN_AGGREGATE.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/queries/0_stateless/{02701_fail_on_invalid_having.reference => 02701_invalid_having_NOT_AN_AGGREGATE.reference} (100%) rename tests/queries/0_stateless/{02701_fail_on_invalid_having.sql => 02701_invalid_having_NOT_AN_AGGREGATE.sql} (100%) diff --git a/tests/queries/0_stateless/02701_fail_on_invalid_having.reference b/tests/queries/0_stateless/02701_invalid_having_NOT_AN_AGGREGATE.reference similarity index 100% rename from tests/queries/0_stateless/02701_fail_on_invalid_having.reference rename to tests/queries/0_stateless/02701_invalid_having_NOT_AN_AGGREGATE.reference diff --git a/tests/queries/0_stateless/02701_fail_on_invalid_having.sql b/tests/queries/0_stateless/02701_invalid_having_NOT_AN_AGGREGATE.sql similarity index 100% rename from tests/queries/0_stateless/02701_fail_on_invalid_having.sql rename to tests/queries/0_stateless/02701_invalid_having_NOT_AN_AGGREGATE.sql From 0444aa2fda48f59be08bef6482b06754f1cb2c0b Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 13 Jun 2023 16:50:43 +0200 Subject: [PATCH 406/722] tests: add missing current_database condition for query_log Signed-off-by: Azat Khuzhin --- tests/queries/0_stateless/02483_elapsed_time.sh | 2 +- .../queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh | 2 +- .../0_stateless/02521_incorrect_dealy_for_insert_bug_44902.sh | 2 +- .../queries/0_stateless/02578_parameterized_rename_queries.sql | 1 + tests/queries/0_stateless/02585_query_status_deadlock.sh | 2 +- tests/queries/0_stateless/02681_undrop_query.sql | 2 +- tests/queries/0_stateless/02761_ddl_initial_query_id.sh | 2 ++ .../02783_parallel_replicas_trivial_count_optimization.sh | 2 +- tests/queries/1_stateful/00177_memory_bound_merging.sh | 2 ++ 9 files changed, 11 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/02483_elapsed_time.sh b/tests/queries/0_stateless/02483_elapsed_time.sh index 608299eb01b..e3b983129fb 100755 --- a/tests/queries/0_stateless/02483_elapsed_time.sh +++ b/tests/queries/0_stateless/02483_elapsed_time.sh @@ -25,7 +25,7 @@ QUERY_ID="${CLICKHOUSE_DATABASE}_$(date +%s)_02883_q1" ${CLICKHOUSE_CLIENT} -m --query "$EXCEPTION_BEFORE_START_QUERY" --query_id="$QUERY_ID" >/dev/null 2>&1 ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" -${CLICKHOUSE_CLIENT} --query "SELECT type == 'ExceptionBeforeStart' as expected_type, query_duration_ms <= 1000 as elapsed_more_than_one_second FROM system.query_log WHERE query_id='$QUERY_ID'" +${CLICKHOUSE_CLIENT} --query "SELECT type == 'ExceptionBeforeStart' as expected_type, query_duration_ms <= 1000 as elapsed_more_than_one_second FROM system.query_log WHERE current_database = '$CLICKHOUSE_DATABASE' AND query_id='$QUERY_ID'" # Now we test with a query that will take 1+ seconds. 
The CLI should show that as part of the output format OK_QUERY_JSON=" diff --git a/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh b/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh index 1223d7957b5..5d787aa0d8e 100755 --- a/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh +++ b/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh @@ -18,5 +18,5 @@ query_id="${CLICKHOUSE_DATABASE}_02499_$RANDOM$RANDOM" $CLICKHOUSE_CLIENT --query_id="$query_id" -q "select ts from t order by toUnixTimestamp64Nano(ts) limit 10 format Null settings max_block_size = $max_block_size, optimize_read_in_order = 1;" $CLICKHOUSE_CLIENT -q "system flush logs;" -$CLICKHOUSE_CLIENT --param_query_id="$query_id" -q "select read_rows <= $max_block_size from system.query_log where event_date >= yesterday() and query_id = {query_id:String} and type = 'QueryFinish';" +$CLICKHOUSE_CLIENT --param_query_id="$query_id" -q "select read_rows <= $max_block_size from system.query_log where event_date >= yesterday() and current_database = '$CLICKHOUSE_DATABASE' and query_id = {query_id:String} and type = 'QueryFinish';" diff --git a/tests/queries/0_stateless/02521_incorrect_dealy_for_insert_bug_44902.sh b/tests/queries/0_stateless/02521_incorrect_dealy_for_insert_bug_44902.sh index 5f91ef19a5a..0ae44ec0c01 100755 --- a/tests/queries/0_stateless/02521_incorrect_dealy_for_insert_bug_44902.sh +++ b/tests/queries/0_stateless/02521_incorrect_dealy_for_insert_bug_44902.sh @@ -16,7 +16,7 @@ do query_id="${CLICKHOUSE_DATABASE}_02521_${i}_$RANDOM$RANDOM" $CLICKHOUSE_CLIENT --query_id="$query_id" -q "INSERT INTO test_02521_insert_delay SELECT number, toString(number) FROM numbers(${i}, 1)" $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS" - $CLICKHOUSE_CLIENT --param_query_id="$query_id" -q "select ProfileEvents['DelayedInsertsMilliseconds'] as delay from system.query_log where event_date >= yesterday() and query_id = {query_id:String} order by delay desc limit 1" + $CLICKHOUSE_CLIENT --param_query_id="$query_id" -q "select ProfileEvents['DelayedInsertsMilliseconds'] as delay from system.query_log where event_date >= yesterday() and current_database = '$CLICKHOUSE_DATABASE' and query_id = {query_id:String} order by delay desc limit 1" done $CLICKHOUSE_CLIENT -q "INSERT INTO test_02521_insert_delay VALUES(0, 'This query throws error')" 2>&1 | grep -o 'TOO_MANY_PARTS' | head -n 1 diff --git a/tests/queries/0_stateless/02578_parameterized_rename_queries.sql b/tests/queries/0_stateless/02578_parameterized_rename_queries.sql index eecb282083f..de36f8ae3b5 100644 --- a/tests/queries/0_stateless/02578_parameterized_rename_queries.sql +++ b/tests/queries/0_stateless/02578_parameterized_rename_queries.sql @@ -24,6 +24,7 @@ SET param_new_tbl_name = 02661_t1; CREATE TABLE {new_db_name:Identifier}.{old_tbl_name:Identifier} (a UInt64) ENGINE = MergeTree ORDER BY tuple(); RENAME TABLE {new_db_name:Identifier}.{old_tbl_name:Identifier} TO {new_db_name:Identifier}.{new_tbl_name:Identifier}; +-- NOTE: no 'database = currentDatabase()' on purpose SELECT name FROM system.tables WHERE name = {new_tbl_name:String}; -- Case 3: RENAME DICTIONARY diff --git a/tests/queries/0_stateless/02585_query_status_deadlock.sh b/tests/queries/0_stateless/02585_query_status_deadlock.sh index 227ecb1c1b2..9eb6eff8cd0 100755 --- a/tests/queries/0_stateless/02585_query_status_deadlock.sh +++ b/tests/queries/0_stateless/02585_query_status_deadlock.sh @@ -14,7 +14,7 @@ $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS" while true do - 
res=$($CLICKHOUSE_CLIENT -q "select query, event_time from system.query_log where query_id = '$QUERY_ID' and query like 'select%' limit 1") + res=$($CLICKHOUSE_CLIENT -q "select query, event_time from system.query_log where query_id = '$QUERY_ID' and current_database = '$CLICKHOUSE_DATABASE' and query like 'select%' limit 1") if [ -n "$res" ]; then break fi diff --git a/tests/queries/0_stateless/02681_undrop_query.sql b/tests/queries/0_stateless/02681_undrop_query.sql index ead1a8bb305..39ca1548d53 100644 --- a/tests/queries/0_stateless/02681_undrop_query.sql +++ b/tests/queries/0_stateless/02681_undrop_query.sql @@ -21,7 +21,7 @@ detach table 02681_undrop_detach; undrop table 02681_undrop_detach; -- { serverError 57 } attach table 02681_undrop_detach; alter table 02681_undrop_detach update num = 2 where id = 1; -select command from system.mutations where table='02681_undrop_detach' limit 1; +select command from system.mutations where table='02681_undrop_detach' and database=currentDatabase() limit 1; drop table 02681_undrop_detach sync; select 'test MergeTree with cluster'; diff --git a/tests/queries/0_stateless/02761_ddl_initial_query_id.sh b/tests/queries/0_stateless/02761_ddl_initial_query_id.sh index e9a315b812b..b8b35ef01f7 100755 --- a/tests/queries/0_stateless/02761_ddl_initial_query_id.sh +++ b/tests/queries/0_stateless/02761_ddl_initial_query_id.sh @@ -21,4 +21,6 @@ $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS" # - replace() is required to avoid non deterministic behaviour of # normalizeQuery() that replaces the identifier with "?" only if it has more # then two numbers. +# +# NOTE: no current_database = '$CLICKHOUSE_DATABASE' filter on purpose (since ON CLUSTER queries does not have current_database passed) $CLICKHOUSE_CLIENT -q "SELECT normalizeQuery(replace(query, currentDatabase(), 'default')) FROM system.query_log WHERE initial_query_id = '$query_id' AND type != 'QueryStart' ORDER BY event_time_microseconds" diff --git a/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh b/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh index 4c29e513183..6210ef2e8b6 100755 --- a/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh +++ b/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh @@ -12,7 +12,7 @@ function has_used_parallel_replicas () { sumIf(read_rows, is_initial_query) as read_rows, sumIf(read_bytes, is_initial_query) as read_bytes FROM system.query_log - WHERE event_date >= yesterday() and initial_query_id LIKE '$1%' + WHERE event_date >= yesterday() and initial_query_id LIKE '$1%' AND current_database = '$CLICKHOUSE_DATABASE' GROUP BY initial_query_id ORDER BY min(event_time_microseconds) ASC FORMAT TSV" diff --git a/tests/queries/1_stateful/00177_memory_bound_merging.sh b/tests/queries/1_stateful/00177_memory_bound_merging.sh index 008422be108..774f005b8eb 100755 --- a/tests/queries/1_stateful/00177_memory_bound_merging.sh +++ b/tests/queries/1_stateful/00177_memory_bound_merging.sh @@ -12,6 +12,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) check_replicas_read_in_order() { # to check this we actually look for at least one log message from MergeTreeInOrderSelectProcessor. 
# hopefully logger's names are a bit more stable than log messages itself + # + # NOTE: lack of "current_database = '$CLICKHOUSE_DATABASE'" filter is made on purpose $CLICKHOUSE_CLIENT -nq " SYSTEM FLUSH LOGS; From bb971fd7b7fd5bc400bbe28d16867c9dc337fb17 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 13 Jun 2023 17:02:24 +0200 Subject: [PATCH 407/722] check-style: allow {database} for ReplicatedMergeTree as well CLICKHOUSE_TEST_ZOOKEEPER_PREFIX is a {test_name}_{database}, but actually {database} should be enough, since it is uniq for each test run. Signed-off-by: Azat Khuzhin --- utils/check-style/check-style | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/check-style/check-style b/utils/check-style/check-style index bd3ee8e02d6..e7c06fefee2 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -229,7 +229,7 @@ for test_case in "${tests_with_replicated_merge_tree[@]}"; do *.gen.*) ;; *.sh) - test_case_zk_prefix="\$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX" + test_case_zk_prefix="\(\$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX\|{database}\)" grep -q -e "Replicated.*MergeTree[ ]*(.*$test_case_zk_prefix" "$test_case" || echo "Replicated.*MergeTree should contain '$test_case_zk_prefix' in zookeeper path to avoid overlaps ($test_case)" ;; *.sql|*.sql.j2) From 1bc5598aa77bb5a7dcffc26b090bb0f45cb83abb Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 13 Jun 2023 18:02:25 +0200 Subject: [PATCH 408/722] impl --- src/Disks/IO/AsynchronousBoundedReadBuffer.cpp | 5 ++--- src/Disks/IO/ReadBufferFromRemoteFSGather.h | 2 ++ src/IO/SeekableReadBuffer.h | 4 ++++ 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp b/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp index d75ec9f09e0..f9bd68222ae 100644 --- a/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp +++ b/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp @@ -301,9 +301,8 @@ off_t AsynchronousBoundedReadBuffer::seek(off_t offset, int whence) * Lazy ignore. Save number of bytes to ignore and ignore it either for prefetch buffer or current buffer. * Note: we read in range [file_offset_of_buffer_end, read_until_position). */ - if (file_offset_of_buffer_end && read_until_position && new_pos < *read_until_position - && new_pos > file_offset_of_buffer_end - && new_pos < file_offset_of_buffer_end + read_settings.remote_read_min_bytes_for_seek) + if (!impl->seekIsCheap() && file_offset_of_buffer_end && read_until_position && new_pos < *read_until_position + && new_pos > file_offset_of_buffer_end && new_pos < file_offset_of_buffer_end + read_settings.remote_read_min_bytes_for_seek) { ProfileEvents::increment(ProfileEvents::RemoteFSLazySeeks); bytes_to_ignore = new_pos - file_offset_of_buffer_end; diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h index cb98ac6d9f3..272ed2b3ac1 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h @@ -50,6 +50,8 @@ public: off_t getPosition() override { return file_offset_of_buffer_end - available() + bytes_to_ignore; } + bool seekIsCheap() override { return !current_buf; } + private: SeekableReadBufferPtr createImplementationBuffer(const StoredObject & object); diff --git a/src/IO/SeekableReadBuffer.h b/src/IO/SeekableReadBuffer.h index 8ced9d752de..5770948be20 100644 --- a/src/IO/SeekableReadBuffer.h +++ b/src/IO/SeekableReadBuffer.h @@ -83,6 +83,10 @@ public: /// Checks if readBigAt() is allowed. May be slow, may throw (e.g. 
it may do an HTTP request or an fstat). virtual bool supportsReadAt() { return false; } + + /// We do some tricks to avoid seek cost. E.g we read more data and than ignore it (see remote_read_min_bytes_for_seek). + /// Sometimes however seek is basically free because underlying read buffer wasn't yet initialised (or re-initialised after reset). + virtual bool seekIsCheap() { return false; } }; From 994228ab209d3dae408c43e135e2391e4a6b5112 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Tue, 13 Jun 2023 20:54:02 +0000 Subject: [PATCH 409/722] Uncomment flaky test --- .../0_stateless/01746_convert_type_with_default.reference | 1 + tests/queries/0_stateless/01746_convert_type_with_default.sql | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.reference b/tests/queries/0_stateless/01746_convert_type_with_default.reference index 0edea4de31e..e00156cd3c5 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.reference +++ b/tests/queries/0_stateless/01746_convert_type_with_default.reference @@ -40,6 +40,7 @@ 1970-01-20 1970-01-20 2149-06-06 +1 2023-05-30 2023-05-30 2023-05-30 14:38:20 diff --git a/tests/queries/0_stateless/01746_convert_type_with_default.sql b/tests/queries/0_stateless/01746_convert_type_with_default.sql index c74b185f7fd..5ef7718784d 100644 --- a/tests/queries/0_stateless/01746_convert_type_with_default.sql +++ b/tests/queries/0_stateless/01746_convert_type_with_default.sql @@ -54,7 +54,7 @@ select toDateOrDefault(cast(19 as Int256)); select toDateOrDefault(cast(19 as UInt256)); select toDateOrDefault(65535); --- select toDateOrDefault(122400); +select toDateOrDefault(65536) in ('1970-01-01', '1970-01-02'); select toDateOrDefault(19507, '2000-01-01'::Date); select toDateOrDefault(-1, '2023-05-30'::Date); From a570b00bdf76118e53de34a3337a66683b55fcb7 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Tue, 13 Jun 2023 19:19:34 -0300 Subject: [PATCH 410/722] Update README.md --- docker/server/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/server/README.md b/docker/server/README.md index 67646a262f5..6aec001064e 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -20,6 +20,7 @@ For more information and documentation see https://clickhouse.com/. - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. - The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A). Most ARM CPUs after 2017 support ARMv8.2-A. A notable exception is Raspberry Pi 4 from 2019 whose CPU only supports ARMv8.0-A. +- Since Clickhouse 23.3 Ubuntu image started to use `ubuntu:22.04` as a base image it requiers docker version >= `20.10.10`, overwise use `docker run --privileged`. ## How to use this image From d8d570081031f4851c33737618d907d69a3b14e8 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Tue, 13 Jun 2023 19:24:36 -0300 Subject: [PATCH 411/722] Update README.md --- docker/server/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/server/README.md b/docker/server/README.md index 6aec001064e..0c89f834fcd 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -20,7 +20,7 @@ For more information and documentation see https://clickhouse.com/. - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. 
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A). Most ARM CPUs after 2017 support ARMv8.2-A. A notable exception is Raspberry Pi 4 from 2019 whose CPU only supports ARMv8.0-A.
-- Since Clickhouse 23.3 Ubuntu image started to use `ubuntu:22.04` as a base image it requiers docker version >= `20.10.10`, overwise use `docker run --privileged`.
+- Since Clickhouse 23.3 Ubuntu image started to use `ubuntu:22.04` as a base image it requiers docker version >= `20.10.10`, overwise use `docker run --privileged`. Alternativly try Clickhouse Alpine image.

 ## How to use this image

From 404a52432c59a4b833d093e2e344916f4cb62de5 Mon Sep 17 00:00:00 2001
From: Denny Crane
Date: Tue, 13 Jun 2023 19:39:23 -0300
Subject: [PATCH 412/722] Update README.md

---
 docker/server/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/server/README.md b/docker/server/README.md
index 0c89f834fcd..18dce492123 100644
--- a/docker/server/README.md
+++ b/docker/server/README.md
@@ -20,7 +20,7 @@ For more information and documentation see https://clickhouse.com/.

 - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
 - The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A). Most ARM CPUs after 2017 support ARMv8.2-A. A notable exception is Raspberry Pi 4 from 2019 whose CPU only supports ARMv8.0-A.
-- Since Clickhouse 23.3 Ubuntu image started to use `ubuntu:22.04` as a base image it requiers docker version >= `20.10.10`, overwise use `docker run --privileged`. Alternativly try Clickhouse Alpine image.
+- Since the Clickhouse 23.3 Ubuntu image started using `ubuntu:22.04` as its base image, it requires docker version >= `20.10.10`, or use `docker run --privileged` instead. Alternatively, try the Clickhouse Alpine image.
## How to use this image From 86075dbae49f8acda2819fc6b8640631ccf1f3fe Mon Sep 17 00:00:00 2001 From: Rich Raposa Date: Tue, 13 Jun 2023 16:40:14 -0600 Subject: [PATCH 413/722] Update azureBlobStorage.md --- docs/en/engines/table-engines/integrations/azureBlobStorage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/integrations/azureBlobStorage.md b/docs/en/engines/table-engines/integrations/azureBlobStorage.md index b8e621fd513..14fbf0c068e 100644 --- a/docs/en/engines/table-engines/integrations/azureBlobStorage.md +++ b/docs/en/engines/table-engines/integrations/azureBlobStorage.md @@ -48,4 +48,4 @@ SELECT * FROM test_table; ## See also -[Azure Blob Storage Table Function](/docs/en/sql-reference/table-functions/azureBlobStorage.md) +[Azure Blob Storage Table Function](/docs/en/sql-reference/table-functions/azureBlobStorage) From 49082bfe8919a4f7113e96eccde94c4fa2d74017 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 14 Jun 2023 09:00:50 +0800 Subject: [PATCH 414/722] fix typos in redis.md --- docs/en/engines/table-engines/integrations/redis.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/redis.md b/docs/en/engines/table-engines/integrations/redis.md index 68235a89d33..a78942ab7bb 100644 --- a/docs/en/engines/table-engines/integrations/redis.md +++ b/docs/en/engines/table-engines/integrations/redis.md @@ -114,6 +114,6 @@ TRUNCATE TABLE redis_table SYNC; ## Limitations {#limitations} -Redis engine also support scanning query, such as `where k > xx`, but it has some limitations: -1. Scanning query may produce some duplicated keys in a very rare case when it is rehashing, details see [Redis Scan](https://github.com/redis/redis/blob/e4d183afd33e0b2e6e8d1c79a832f678a04a7886/src/dict.c#L1186-L1269) -2. During the scanning keys could be created and deleted, so the resulting dataset can not represent a valid point in time. +Redis engine also supports scanning queries, such as `where k > xx`, but it has some limitations: +1. Scanning query may produce some duplicated keys in a very rare case when it is rehashing. See details in [Redis Scan](https://github.com/redis/redis/blob/e4d183afd33e0b2e6e8d1c79a832f678a04a7886/src/dict.c#L1186-L1269) +2. During the scanning, keys could be created and deleted, so the resulting dataset can not represent a valid point in time. From c05bcf56058597e180bfb65532e76dbe6c1639da Mon Sep 17 00:00:00 2001 From: pufit Date: Tue, 13 Jun 2023 21:09:30 -0400 Subject: [PATCH 415/722] Fix keeper-client help message --- programs/keeper-client/KeeperClient.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/programs/keeper-client/KeeperClient.cpp b/programs/keeper-client/KeeperClient.cpp index f38da1b72aa..f41dca1e27a 100644 --- a/programs/keeper-client/KeeperClient.cpp +++ b/programs/keeper-client/KeeperClient.cpp @@ -127,42 +127,42 @@ void KeeperClient::defineOptions(Poco::Util::OptionSet & options) options.addOption( Poco::Util::Option("host", "h", "server hostname. default `localhost`") - .argument("host") + .argument("") .binding("host")); options.addOption( Poco::Util::Option("port", "p", "server port. default `2181`") - .argument("port") + .argument("") .binding("port")); options.addOption( Poco::Util::Option("query", "q", "will execute given query, then exit.") - .argument("query") + .argument("") .binding("query")); options.addOption( Poco::Util::Option("connection-timeout", "", "set connection timeout in seconds. 
default 10s.") - .argument("connection-timeout") + .argument("") .binding("connection-timeout")); options.addOption( Poco::Util::Option("session-timeout", "", "set session timeout in seconds. default 10s.") - .argument("session-timeout") + .argument("") .binding("session-timeout")); options.addOption( Poco::Util::Option("operation-timeout", "", "set operation timeout in seconds. default 10s.") - .argument("operation-timeout") + .argument("") .binding("operation-timeout")); options.addOption( Poco::Util::Option("history-file", "", "set path of history file. default `~/.keeper-client-history`") - .argument("history-file") + .argument("") .binding("history-file")); options.addOption( Poco::Util::Option("log-level", "", "set log level") - .argument("log-level") + .argument("") .binding("log-level")); } From f1b5d47ce27c053e76722d7827c49fb4aa78ac0e Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Wed, 14 Jun 2023 01:15:45 +0000 Subject: [PATCH 416/722] corrections after second review iteration --- docs/en/interfaces/cli.md | 37 ++++++++----------- docs/ru/interfaces/cli.md | 32 +++++++--------- programs/client/Client.cpp | 2 +- src/Client/ConnectionString.cpp | 31 ++++++++-------- src/Client/ConnectionString.h | 12 +++--- .../0_stateless/02784_connection_string.sh | 6 +-- 6 files changed, 54 insertions(+), 66 deletions(-) diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index b5134ea30c0..e2c7dc1e608 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -194,7 +194,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va - `--print-profile-events` – Print `ProfileEvents` packets. - `--profile-events-delay-ms` – Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet). -Instead of --host, --port, --user and --password options, ClickHouse client also supports connection strings. +Instead of `--host`, `--port`, `--user` and `--password` options, ClickHouse client also supports connection strings (see next section). ## Connection string {#connection_string} @@ -213,28 +213,27 @@ Where - `database` - (optional) is the database name, - `query_parameters` - (optional) is a list of key-value pairs `param1=value1[,¶m2=value2], ...`. For some parameters, no value is required. Parameter names and values are case-sensitive. +If no user is specified, `default` user without password will be used. +If no host is specified, the `localhost` will be used (localhost). +If no port is specified is not specified, `9000` will be used as port. +If no database is specified, the `default` database will be used. +If the user name, password or database was specified in the connection string, it cannot be specified using `--user`, `--password` or `--database` (and vice versa). -The host component can either be an IP address or a host name. Put an IPv6 address in square brackets to specify it: +The host component can either be an a host name and IP address. Put an IPv6 address in square brackets to specify it: ```text clickhouse://[2001:db8::1234] ``` -If user is not specified, `default` user without password will be used. -If host is not specified, the `localhost` will be used (localhost). -If port is not specified, `9000` will be used as port. -If database is not specified, the `default` database will be used. - -If the user name, password or database was specified in the connection string, it cannot be specified using `--user`, `--password` or `--database` (and vice versa). 
- -The connection string must be specified in the first argument of clickhouse-client. The connection string can be combined with other [command-line-options](#command-line-options) except `--host(h)` and `--port`. - -### Multiple hosts {#connection_string_multiple_hosts} - URI allows multiple hosts to be connected to. Connection strings can contain multiple hosts. ClickHouse-client will try to connect to these hosts in order (i.e. from left to right). After the connection is established, no attempt to connect to the remaining hosts is made. -### Allowed query_parameters keys {#connection_string_query_parameters} + + + +The connection string must be specified as the first argument of clickhouse-client. The connection string can be combined with arbitrary other [command-line-options](#command-line-options) except `--host/-h` and `--port`. + +The following keys are allowed for component `query_parameter`: - `secure` or shorthanded `s` - no value. If specified, client will connect to the server over a secure connection (TLS). See `secure` in [command-line-options](#command-line-options) @@ -244,7 +243,7 @@ Non-US ASCII, spaces and special characters in the `user`, `password`, `hosts`, ### Examples {#connection_string_examples} -Connect to localhost using port 9000 and execute the query "SELECT 1". +Connect to localhost using port 9000 and execute the query `SELECT 1`. ``` bash clickhouse-client clickhouse://localhost:9000 --query "SELECT 1" @@ -262,12 +261,6 @@ Connect to localhost using default user, host with IPV6 address `[::1]` and port clickhouse-client clickhouse://[::1]:9000 ``` -Connect to localhost using default user, host with IPV6 address `[2001:db8:3333:4444:5555:6666:7777:8888]` and port `9000`. - -``` bash -clickhouse-client clickhouse://[2001:db8:3333:4444:5555:6666:7777:8888]:9000 -``` - Connect to localhost using port 9000 in multiline mode. ``` bash @@ -277,7 +270,7 @@ clickhouse-client clickhouse://localhost:9000 '-m' Connect to localhost using port 9000 with the user `default`. ``` bash -clickhouse-client clickhouse://default@localhost:9000 --user default +clickhouse-client clickhouse://default@localhost:9000 # equivalent to: clickhouse-client clickhouse://localhost:9000 --user default diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index 794ac60ec83..aa6ae3629e8 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -142,6 +142,8 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe - `--history_file` - путь к файлу с историей команд. - `--param_` — значение параметра для [запроса с параметрами](#cli-queries-with-parameters). +Вместо параметров `--host`, `--port`, `--user` и `--password` клиент ClickHouse также поддерживает строки подключения (смотри следующий раздел). + ## Строка подключения {#connection_string} clickhouse-client также поддерживает подключение к серверу clickhouse с помощью строки подключения, аналогичной [MongoDB](https://www.mongodb.com/docs/manual/reference/connection-string/), [PostgreSQL](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), [MySQL](https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html#connecting-using-uri). Она имеет следующий синтаксис: @@ -154,15 +156,9 @@ clickhouse:[//[user[:password]@][hosts_and_ports]][/database][?query_parameters] - `user` - (необязательно) - это имя пользователя, - `password` - (необязательно) - Пароль пользователя. 
Если символ `:` укаказан, и пароль пуст, то клиент запросит ввести пользователя пароль. -- `hostspec` - (необязательно) - список хостов и необязательных портов. `host[:port] [, host:[port]], ...`, +- `hosts_and_ports` - (необязательно) - список хостов и необязательных портов. `host[:port] [, host:[port]], ...`, - `database` - (необязательно) - это имя базы данных, -- `paramspec` - (опционально) список пар ключ-значение `param1=value1[,¶m2=value2], ...`. Для некоторых параметров значение не требуется. Имена и значения параметров чувствительны к регистру. - -Параметр host может быть либо IP-адресом, либо именем хоста. Для указания IPv6-адреса поместите его в квадратные скобки: - -```text -clickhouse://[2001:db8::1234] -``` +- `query_parameters` - (опционально) список пар ключ-значение `param1=value1[,¶m2=value2], ...`. Для некоторых параметров значение не требуется. Имена и значения параметров чувствительны к регистру. Если user не указан, будут использоваться имя пользователя `default`. Если host не указан, будет использован хост `localhost`. @@ -171,13 +167,19 @@ clickhouse://[2001:db8::1234] Если имя пользователя, пароль или база данных были указаны в строке подключения, их нельзя указать с помощью `--user`, `--password` или `--database` (и наоборот). -Строка подключения должна быть указана в первом аргументе clickhouse-client. Строка подключения может комбинироваться с другими [параметрами командной строки] (#command-line-options) кроме `--host (h)` и `--port`. +Параметр host может быть либо именем хоста, либо IP-адресом. Для указания IPv6-адреса поместите его в квадратные скобки: -### Несколько хостов {#connection_string_multiple_hosts} +```text +clickhouse://[2001:db8::1234] +``` URI позволяет подключаться к нескольким хостам. Строки подключения могут содержать несколько хостов. ClickHouse-client будет пытаться подключиться к этим хостам по порядку (т.е. слева направо). После установления соединения попытки подключения к оставшимся хостам не предпринимаются. -### Допустимые ключи query_parameters {#connection_string_query_parameters} + + +Строка подключения должна быть указана в первом аргументе clickhouse-client. Строка подключения может комбинироваться с другими [параметрами командной строки] (#command-line-options) кроме `--host/-h` и `--port`. + +Для компонента `query_parameter` разрешены следующие ключи: - `secure` или сокращенно `s` - без значение. Если параметр указан, то соединение с сервером будет осуществляться по защищенному каналу (TLS). См. `secure` в [command-line-options](#command-line-options). @@ -187,7 +189,7 @@ URI позволяет подключаться к нескольким хост ### Примеры {#connection_string_examples} -Подключиться к localhost через порт 9000 и выполнить запрос "SELECT 1" +Подключиться к localhost через порт 9000 и выполнить запрос `SELECT 1` ``` bash clickhouse-client clickhouse://localhost:9000 --query "SELECT 1" @@ -204,12 +206,6 @@ clickhouse-client clickhouse://john:secret@127.0.0.1:9000 clickhouse-client clickhouse://[::1]:9000 ``` -Подключиться к localhost, используя пользователя по умолчанию, хост с IPV6 адресом `[2001:db8:3333:4444:5555:6666:7777:8888]` и портом `9000`. - -`` bash -clickhouse-client clickhouse://[2001:db8:3333:4444:5555:6666:7777:8888]:9000 -``` - Подключиться к localhost через порт 9000 многострочном режиме. 
``` bash diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index a49447dff69..6c3df3520e9 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1268,7 +1268,7 @@ void Client::readArguments( std::string_view arg = argv[arg_num]; if (has_connection_string) - validateConnectionStringClientOption(arg); + checkIfCmdLineOptionCanBeUsedWithConnectionString(arg); if (arg == "--external") { diff --git a/src/Client/ConnectionString.cpp b/src/Client/ConnectionString.cpp index 8f0a0980f51..62090487490 100644 --- a/src/Client/ConnectionString.cpp +++ b/src/Client/ConnectionString.cpp @@ -45,26 +45,26 @@ std::string uriDecode(const std::string & uri_encoded_string, bool plus_as_space void getHostAndPort(const Poco::URI & uri, std::vector> & hosts_and_ports_arguments) { std::vector host_and_port; - auto host = uri.getHost(); + const auto& host = uri.getHost(); if (!host.empty()) { - host_and_port.push_back("--host="s + uriDecode(host, false)); + host_and_port.push_back("--host=" + uriDecode(host, false)); } // Port can be written without host (":9000"). Empty host name equals to default host. auto port = uri.getPort(); if (port != 0) - host_and_port.push_back("--port="s + std::to_string(port)); + host_and_port.push_back("--port=" + std::to_string(port)); if (!host_and_port.empty()) hosts_and_ports_arguments.push_back(std::move(host_and_port)); } void buildConnectionString( - Poco::URI & uri, - std::vector> & hosts_and_ports_arguments, std::string_view host_and_port, - std::string_view right_part) + std::string_view right_part, + Poco::URI & uri, + std::vector> & hosts_and_ports_arguments) { // User info does not matter in sub URI auto uri_string = std::string(CONNECTION_URI_SCHEME); @@ -154,7 +154,7 @@ bool tryParseConnectionString( { if (*it == ',') { - buildConnectionString(uri, hosts_and_ports_arguments, {last_host_begin, it}, {hosts_end, connection_string.end()}); + buildConnectionString({last_host_begin, it}, {hosts_end, connection_string.end()}, uri, hosts_and_ports_arguments); last_host_begin = it + 1; } } @@ -166,7 +166,7 @@ bool tryParseConnectionString( getHostAndPort(uri, hosts_and_ports_arguments); } else - buildConnectionString(uri, hosts_and_ports_arguments, {last_host_begin, hosts_end}, {hosts_end, connection_string.end()}); + buildConnectionString({last_host_begin, hosts_end}, {hosts_end, connection_string.end()}, uri, hosts_and_ports_arguments); Poco::URI::QueryParameters params = uri.getQueryParameters(); for (const auto & param : params) @@ -174,12 +174,12 @@ bool tryParseConnectionString( if (param.first == "secure" || param.first == "s") { if (!param.second.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "secure URI query parameter does not require value"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "secure URI query parameter does not allow value"); common_arguments.push_back(makeArgument(param.first)); } else - throw Exception(ErrorCodes::BAD_ARGUMENTS, "URI query parameter {} is unknown", param.first); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "URI query parameter {} is not supported", param.first); } auto user_info = uri.getUserInfo(); @@ -188,7 +188,7 @@ bool tryParseConnectionString( // Poco::URI doesn't decode user name/password by default. 
// But ClickHouse allows to have users with email user name like: 'john@some_mail.com' // john@some_mail.com should be percent-encoded: 'john%40some_mail.com' - std::string::size_type pos = user_info.find(':'); + size_t pos = user_info.find(':'); if (pos != std::string::npos) { common_arguments.push_back("--user"); @@ -229,12 +229,11 @@ bool tryParseConnectionString( return true; } -void validateConnectionStringClientOption(std::string_view command_line_option) +void checkIfCmdLineOptionCanBeUsedWithConnectionString(std::string_view command_line_option) { - const auto prohibited_option_iter = PROHIBITED_CLIENT_OPTIONS.find(command_line_option); - if (prohibited_option_iter != PROHIBITED_CLIENT_OPTIONS.end()) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, "Mixing a connection string and {} option is prohibited", prohibited_option_iter->second); + if (PROHIBITED_CLIENT_OPTIONS.contains(command_line_option)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Mixing a connection string and {} option is prohibited", PROHIBITED_CLIENT_OPTIONS.at(command_line_option)); } } diff --git a/src/Client/ConnectionString.h b/src/Client/ConnectionString.h index ce72de9edf6..ad63e9cda3d 100644 --- a/src/Client/ConnectionString.h +++ b/src/Client/ConnectionString.h @@ -9,19 +9,19 @@ namespace DB /** Tries to parse ClickHouse connection string. * if @connection_string starts with 'clickhouse:' then connection string will be parsed * and converted into a set of arguments for the client. - * Connection string format is similar to URI "clickhouse:[//[user_info@][hosts_and_ports]][/dbname][?query_parameters]" + * Connection string format is similar to URI "clickhouse:[//[user[:password]@][hosts_and_ports]][/dbname][?query_parameters]" * with the difference that hosts_and_ports can contain multiple hosts separated by ','. * example: clickhouse://user@host1:port1,host2:port2 - * @return returns true if there is a URI, false otherwise. - * @exception throws DB::Exception if URI has valid scheme (clickhouse:), but invalid internals. + * @return Returns false if no connection string was specified. If a connection string was specified, returns true if it is valid, and throws an exception if it is invalid. + * @exception Throws DB::Exception if URI has valid scheme (clickhouse:), but invalid internals. */ bool tryParseConnectionString( std::string_view connection_string, std::vector & common_arguments, std::vector> & hosts_and_ports_arguments); -// throws DB::Exception with BAD_ARGUMENTS if the given command line argument is allowed -// to be used with the connection string -void validateConnectionStringClientOption(std::string_view command_line_option); +// Throws DB::Exception with BAD_ARGUMENTS if the given command line argument +// is not allowed to be used with a connection string. 
+void checkIfCmdLineOptionCanBeUsedWithConnectionString(std::string_view command_line_option); } diff --git a/tests/queries/0_stateless/02784_connection_string.sh b/tests/queries/0_stateless/02784_connection_string.sh index 042f5b2108d..8353ac5b1e4 100755 --- a/tests/queries/0_stateless/02784_connection_string.sh +++ b/tests/queries/0_stateless/02784_connection_string.sh @@ -99,7 +99,6 @@ runClient "click_house:" 2>&1 | grep -o 'BAD_ARGUMENTS' TEST_INDEX=1000087 # Using connection string prohibits to use --host and --port options runClient "clickhouse://default:@$CLICKHOUSE_HOST/" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' -runClient "clickhouse://default:@$CLICKHOUSE_HOST/" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse://default:@$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP/" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse://default:@$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP/" --port "$CLICKHOUSE_PORT_TCP" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse://default:@$CLICKHOUSE_HOST:$CLICKHOUSE_PORT_TCP/" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' @@ -109,6 +108,7 @@ runClient "clickhouse://default:@$CLICKHOUSE_HOST/" --port "$CLICKHOUSE_PORT_TCP runClient "clickhouse://$CLICKHOUSE_HOST/" --port "$CLICKHOUSE_PORT_TCP" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse://:@$CLICKHOUSE_HOST/" --port "$CLICKHOUSE_PORT_TCP" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse://$CLICKHOUSE_HOST/" --port "$CLICKHOUSE_PORT_TCP" 2>&1 | grep -o 'BAD_ARGUMENTS' +runClient "clickhouse://" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse:" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse://" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse:///" --port "$CLICKHOUSE_PORT_TCP" --host "$CLICKHOUSE_HOST" 2>&1 | grep -o 'BAD_ARGUMENTS' @@ -130,9 +130,9 @@ runClient "clickhouse://host1/ database:" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse://user :password@host1" 2>&1 | grep -o 'BAD_ARGUMENTS' runClient "clickhouse://user: password@host1" 2>&1 | grep -o 'BAD_ARGUMENTS' -# Query is not first argument +# Connection string is not first argument runClient --multiline "clickhouse://default:@$CLICKHOUSE_HOST/" 2>&1 | grep -o 'BAD_ARGUMENTS' -# Query used as the first and the second argument of client +# Connection string used as the first and the second argument of client runClient "clickhouse://default:@$CLICKHOUSE_HOST/" "clickhouse://default:@$CLICKHOUSE_HOST/" 2>&1 | grep -o 'BAD_ARGUMENTS' # Invalid hosts From 29b9cba75c18e23f9ee2eb589e5a69e7f46a5054 Mon Sep 17 00:00:00 2001 From: santrancisco Date: Wed, 14 Jun 2023 11:31:09 +1000 Subject: [PATCH 417/722] Update CMakeLists.txt with help from Nikita --- contrib/arrow-cmake/CMakeLists.txt | 68 +++++++++++++++++++++++------- 1 file changed, 52 insertions(+), 16 deletions(-) diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt index 16198887075..5fe942d1cd0 100644 --- a/contrib/arrow-cmake/CMakeLists.txt +++ b/contrib/arrow-cmake/CMakeLists.txt @@ -116,43 +116,79 @@ configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/A # ARROW_ORC + adapters/orc/CMakefiles set(ORC_SRCS "${CMAKE_CURRENT_BINARY_DIR}/orc_proto.pb.h" - "${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.cc" - "${ORC_SOURCE_SRC_DIR}/sargs/Literal.cc" - "${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.cc" - 
"${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.cc" - "${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.cc" - "${ORC_SOURCE_SRC_DIR}/sargs/TruthValue.cc" - "${ORC_SOURCE_SRC_DIR}/Exceptions.cc" - "${ORC_SOURCE_SRC_DIR}/OrcFile.cc" - "${ORC_SOURCE_SRC_DIR}/Reader.cc" + "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc" + "${ORC_SOURCE_SRC_DIR}/Adaptor.cc" + "${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" + "${ORC_SOURCE_SRC_DIR}/BlockBuffer.cc" + "${ORC_SOURCE_SRC_DIR}/BlockBuffer.hh" + "${ORC_SOURCE_SRC_DIR}/BloomFilter.cc" + "${ORC_SOURCE_SRC_DIR}/BloomFilter.hh" + "${ORC_SOURCE_SRC_DIR}/Bpacking.hh" + "${ORC_SOURCE_SRC_DIR}/BpackingDefault.cc" + "${ORC_SOURCE_SRC_DIR}/BpackingDefault.hh" "${ORC_SOURCE_SRC_DIR}/ByteRLE.cc" + "${ORC_SOURCE_SRC_DIR}/ByteRLE.hh" + "${ORC_SOURCE_SRC_DIR}/CMakeLists.txt" "${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc" "${ORC_SOURCE_SRC_DIR}/ColumnReader.cc" + "${ORC_SOURCE_SRC_DIR}/ColumnReader.hh" "${ORC_SOURCE_SRC_DIR}/ColumnWriter.cc" + "${ORC_SOURCE_SRC_DIR}/ColumnWriter.hh" "${ORC_SOURCE_SRC_DIR}/Common.cc" "${ORC_SOURCE_SRC_DIR}/Compression.cc" + "${ORC_SOURCE_SRC_DIR}/Compression.hh" + "${ORC_SOURCE_SRC_DIR}/ConvertColumnReader.cc" + "${ORC_SOURCE_SRC_DIR}/ConvertColumnReader.hh" + "${ORC_SOURCE_SRC_DIR}/CpuInfoUtil.cc" + "${ORC_SOURCE_SRC_DIR}/CpuInfoUtil.hh" + "${ORC_SOURCE_SRC_DIR}/Dispatch.hh" + "${ORC_SOURCE_SRC_DIR}/Exceptions.cc" "${ORC_SOURCE_SRC_DIR}/Int128.cc" "${ORC_SOURCE_SRC_DIR}/LzoDecompressor.cc" + "${ORC_SOURCE_SRC_DIR}/LzoDecompressor.hh" "${ORC_SOURCE_SRC_DIR}/MemoryPool.cc" + "${ORC_SOURCE_SRC_DIR}/Murmur3.cc" + "${ORC_SOURCE_SRC_DIR}/Murmur3.hh" + "${ORC_SOURCE_SRC_DIR}/Options.hh" + "${ORC_SOURCE_SRC_DIR}/OrcFile.cc" "${ORC_SOURCE_SRC_DIR}/RLE.cc" + "${ORC_SOURCE_SRC_DIR}/RLE.hh" + "${ORC_SOURCE_SRC_DIR}/RLEV2Util.cc" + "${ORC_SOURCE_SRC_DIR}/RLEV2Util.hh" "${ORC_SOURCE_SRC_DIR}/RLEv1.cc" + "${ORC_SOURCE_SRC_DIR}/RLEv1.hh" + "${ORC_SOURCE_SRC_DIR}/RLEv2.hh" + "${ORC_SOURCE_SRC_DIR}/Reader.cc" + "${ORC_SOURCE_SRC_DIR}/Reader.hh" "${ORC_SOURCE_SRC_DIR}/RleDecoderV2.cc" "${ORC_SOURCE_SRC_DIR}/RleEncoderV2.cc" - "${ORC_SOURCE_SRC_DIR}/RLEV2Util.cc" + "${ORC_SOURCE_SRC_DIR}/SchemaEvolution.cc" + "${ORC_SOURCE_SRC_DIR}/SchemaEvolution.hh" "${ORC_SOURCE_SRC_DIR}/Statistics.cc" + "${ORC_SOURCE_SRC_DIR}/Statistics.hh" "${ORC_SOURCE_SRC_DIR}/StripeStream.cc" + "${ORC_SOURCE_SRC_DIR}/StripeStream.hh" "${ORC_SOURCE_SRC_DIR}/Timezone.cc" + "${ORC_SOURCE_SRC_DIR}/Timezone.hh" "${ORC_SOURCE_SRC_DIR}/TypeImpl.cc" + "${ORC_SOURCE_SRC_DIR}/TypeImpl.hh" + "${ORC_SOURCE_SRC_DIR}/Utils.hh" "${ORC_SOURCE_SRC_DIR}/Vector.cc" "${ORC_SOURCE_SRC_DIR}/Writer.cc" - "${ORC_SOURCE_SRC_DIR}/Adaptor.cc" - "${ORC_SOURCE_SRC_DIR}/BloomFilter.cc" - "${ORC_SOURCE_SRC_DIR}/Murmur3.cc" - "${ORC_SOURCE_SRC_DIR}/BlockBuffer.cc" - "${ORC_SOURCE_SRC_DIR}/wrap/orc-proto-wrapper.cc" "${ORC_SOURCE_SRC_DIR}/io/InputStream.cc" + "${ORC_SOURCE_SRC_DIR}/io/InputStream.hh" "${ORC_SOURCE_SRC_DIR}/io/OutputStream.cc" - "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc" + "${ORC_SOURCE_SRC_DIR}/io/OutputStream.hh" + "${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.cc" + "${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.hh" + "${ORC_SOURCE_SRC_DIR}/sargs/Literal.cc" + "${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.cc" + "${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.hh" + "${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.cc" + "${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.hh" + "${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.cc" + "${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.hh" + "${ORC_SOURCE_SRC_DIR}/sargs/TruthValue.cc" ) add_library(_orc ${ORC_SRCS}) From 
1f76d0874398f4985bb00dd16caf68a772c95502 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Wed, 14 Jun 2023 01:37:58 +0000 Subject: [PATCH 418/722] fixed --password "" --password issue --- programs/client/Client.cpp | 14 +++++++------- .../01317_no_password_in_command_line.reference | 1 + .../01317_no_password_in_command_line.sh | 2 ++ 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 32a07284d26..1e2696b4910 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -977,13 +977,7 @@ void Client::addOptions(OptionsDescription & options_description) ("connection", po::value(), "connection to use (from the client config), by default connection name is hostname") ("secure,s", "Use TLS connection") ("user,u", po::value()->default_value("default"), "user") - /** If "--password [value]" is used but the value is omitted, the bad argument exception will be thrown. - * implicit_value is used to avoid this exception (to allow user to type just "--password") - * Since currently boost provides no way to check if a value has been set implicitly for an option, - * the "\n" is used to distinguish this case because there is hardly a chance a user would use "\n" - * as the password. - */ - ("password", po::value()->implicit_value("\n", ""), "password") + ("password", po::value(), "password") ("ask-password", "ask-password") ("quota_key", po::value(), "A string to differentiate quotas when the user have keyed quotas configured on server") @@ -1391,6 +1385,12 @@ void Client::readArguments( arg = argv[arg_num]; addMultiquery(arg, common_arguments); } + else if (arg == "--password" && ((arg_num + 1) >= argc || std::string_view(argv[arg_num + 1]).starts_with('-'))) + { + common_arguments.emplace_back(arg); + // Add implicit value to the password. '\n' means client should ask user for password. 
+ common_arguments.emplace_back("\n"); + } else common_arguments.emplace_back(arg); } diff --git a/tests/queries/0_stateless/01317_no_password_in_command_line.reference b/tests/queries/0_stateless/01317_no_password_in_command_line.reference index e69de29bb2d..8f2f637d5e3 100644 --- a/tests/queries/0_stateless/01317_no_password_in_command_line.reference +++ b/tests/queries/0_stateless/01317_no_password_in_command_line.reference @@ -0,0 +1 @@ +Bad arguments diff --git a/tests/queries/0_stateless/01317_no_password_in_command_line.sh b/tests/queries/0_stateless/01317_no_password_in_command_line.sh index 7f2e91201a3..fc5b8997636 100755 --- a/tests/queries/0_stateless/01317_no_password_in_command_line.sh +++ b/tests/queries/0_stateless/01317_no_password_in_command_line.sh @@ -45,3 +45,5 @@ ps u --no-header $bg_query | grep -F -- '--password' | grep -F hello ||: grep -F -- '--password' < "/proc/$bg_query/comm" | grep -F hello ||: $CLICKHOUSE_CLIENT --format Null --param_query_id "$query_id" -q "KILL QUERY WHERE query_id = {query_id:String} SYNC" wait + +$CLICKHOUSE_CLIENT --user "$user" --password=hello --password -q 'select currentUser()' 2>&1 | grep -o 'Bad arguments' From f3b99156aca66e5df89181d224ae05a28f80f257 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Wed, 14 Jun 2023 10:48:21 +0800 Subject: [PATCH 419/722] review fix --- docs/en/interfaces/formats.md | 2 +- .../operations/settings/settings-formats.md | 8 ++-- src/Core/Settings.h | 2 +- src/Formats/FormatFactory.cpp | 2 +- src/Formats/FormatSettings.h | 2 +- .../Formats/Impl/CSVRowInputFormat.cpp | 38 ++++++++++--------- ...ext_with_whitespace_tab_field_delimiter.sh | 4 +- 7 files changed, 31 insertions(+), 27 deletions(-) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 57962c1d730..da1ba17cbb7 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -468,7 +468,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe - [input_format_csv_skip_first_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_first_lines) - skip the specified number of lines at the beginning of data. Default value - `0`. - [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`. - [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`. -- [input_format_csv_use_whitespace_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/# input_format_csv_use_whitespace_tab_as_delimiter) - use whitespace or tab as field delimiter in CSV strings. Default value - `false`. +- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/# input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`. 
## CSVWithNames {#csvwithnames} diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index 0e30c8f319e..daf27622d3a 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -914,9 +914,9 @@ Result " string " ``` -### input_format_csv_use_whitespace_tab_as_delimiter {#input_format_csv_use_whitespace_tab_as_delimiter} +### input_format_csv_allow_whitespace_or_tab_as_delimiter {#input_format_csv_allow_whitespace_or_tab_as_delimiter} -Use whitespace or tab as field delimiter in CSV strings. +Allow to use whitespace or tab as field delimiter in CSV strings. Default value: `false`. @@ -925,7 +925,7 @@ Default value: `false`. Query ```bash -echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_use_whitespace_tab_as_delimiter=true --format_csv_delimiter=' ' +echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_allow_whitespace_or_tab_as_delimiter=true --format_csv_delimiter=' ' ``` Result @@ -937,7 +937,7 @@ a b Query ```bash -echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_use_whitespace_tab_as_delimiter=true --format_csv_delimiter='\t' +echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_allow_whitespace_or_tab_as_delimiter=true --format_csv_delimiter='\t' ``` Result diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 4306ac855a3..1d889d8b0c3 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -850,7 +850,7 @@ class IColumn; M(Bool, input_format_csv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in CSV format", 0) \ M(Bool, input_format_tsv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in TSV format", 0) \ M(Bool, input_format_csv_detect_header, true, "Automatically detect header with names and types in CSV format", 0) \ - M(Bool, input_format_csv_use_whitespace_tab_as_delimiter, false, "Use spaces and tabs(\\t) as field delimiter in the CSV strings", 0) \ + M(Bool, input_format_csv_allow_whitespace_or_tab_as_delimiter, false, "Allow to use spaces and tabs(\\t) as field delimiter in the CSV strings", 0) \ M(Bool, input_format_csv_trim_whitespaces, true, "Trims spaces and tabs (\\t) characters at the beginning and end in CSV strings", 0) \ M(Bool, input_format_tsv_detect_header, true, "Automatically detect header with names and types in TSV format", 0) \ M(Bool, input_format_custom_detect_header, true, "Automatically detect header with names and types in CustomSeparated format", 0) \ diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 33ecddfc223..81528937b13 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -70,7 +70,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.skip_first_lines = settings.input_format_csv_skip_first_lines; format_settings.csv.try_detect_header = settings.input_format_csv_detect_header; format_settings.csv.trim_whitespaces = settings.input_format_csv_trim_whitespaces; - format_settings.csv.use_whitespace_tab_as_delimiter = settings.input_format_csv_use_whitespace_tab_as_delimiter; + format_settings.csv.allow_whitespace_or_tab_as_delimiter = settings.input_format_csv_allow_whitespace_or_tab_as_delimiter; 
format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter; format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter; format_settings.hive_text.map_keys_delimiter = settings.input_format_hive_text_map_keys_delimiter; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 72d60e8423e..5dc1a14a12c 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -137,7 +137,7 @@ struct FormatSettings String custom_delimiter; bool try_detect_header = true; bool trim_whitespaces = true; - bool use_whitespace_tab_as_delimiter = false; + bool allow_whitespace_or_tab_as_delimiter = false; } csv; struct HiveText diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index b8d3413f863..181949b3bb7 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -25,10 +25,14 @@ namespace ErrorCodes namespace { - void checkBadDelimiter(char delimiter, bool use_whitespace_tab_as_delimiter) + void checkBadDelimiter(char delimiter, bool allow_whitespace_or_tab_as_delimiter) { + if ((delimiter == ' ' || delimiter == '\t') && allow_whitespace_or_tab_as_delimiter) + { + return; + } constexpr std::string_view bad_delimiters = " \t\"'.UL"; - if (bad_delimiters.find(delimiter) != std::string_view::npos && !use_whitespace_tab_as_delimiter) + if (bad_delimiters.find(delimiter) != std::string_view::npos) throw Exception( ErrorCodes::BAD_ARGUMENTS, "CSV format may not work correctly with delimiter '{}'. Try use CustomSeparated format instead", @@ -68,7 +72,7 @@ CSVRowInputFormat::CSVRowInputFormat( format_settings_.csv.try_detect_header), buf(std::move(in_)) { - checkBadDelimiter(format_settings_.csv.delimiter, format_settings_.csv.use_whitespace_tab_as_delimiter); + checkBadDelimiter(format_settings_.csv.delimiter, format_settings_.csv.allow_whitespace_or_tab_as_delimiter); } CSVRowInputFormat::CSVRowInputFormat( @@ -90,7 +94,7 @@ CSVRowInputFormat::CSVRowInputFormat( format_settings_.csv.try_detect_header), buf(std::move(in_)) { - checkBadDelimiter(format_settings_.csv.delimiter, format_settings_.csv.use_whitespace_tab_as_delimiter); + checkBadDelimiter(format_settings_.csv.delimiter, format_settings_.csv.allow_whitespace_or_tab_as_delimiter); } void CSVRowInputFormat::syncAfterError() @@ -134,9 +138,9 @@ static void skipEndOfLine(ReadBuffer & in) } /// Skip `whitespace` symbols allowed in CSV. 
-static inline void skipWhitespacesAndTabs(ReadBuffer & in, const bool & use_whitespace_tab_as_delimiter) +static inline void skipWhitespacesAndTabs(ReadBuffer & in, const bool & allow_whitespace_or_tab_as_delimiter) { - if (use_whitespace_tab_as_delimiter) + if (allow_whitespace_or_tab_as_delimiter) { return; } @@ -150,7 +154,7 @@ CSVFormatReader::CSVFormatReader(PeekableReadBuffer & buf_, const FormatSettings void CSVFormatReader::skipFieldDelimiter() { - skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); assertChar(format_settings.csv.delimiter, *buf); } @@ -158,7 +162,7 @@ template String CSVFormatReader::readCSVFieldIntoString() { if (format_settings.csv.trim_whitespaces) [[likely]] - skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); String field; if constexpr (read_string) @@ -170,14 +174,14 @@ String CSVFormatReader::readCSVFieldIntoString() void CSVFormatReader::skipField() { - skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); NullOutput out; readCSVStringInto(out, *buf, format_settings.csv); } void CSVFormatReader::skipRowEndDelimiter() { - skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); if (buf->eof()) return; @@ -186,7 +190,7 @@ void CSVFormatReader::skipRowEndDelimiter() if (*buf->position() == format_settings.csv.delimiter) ++buf->position(); - skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); if (buf->eof()) return; @@ -198,7 +202,7 @@ void CSVFormatReader::skipHeaderRow() do { skipField(); - skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); } while (checkChar(format_settings.csv.delimiter, *buf)); skipRowEndDelimiter(); @@ -211,7 +215,7 @@ std::vector CSVFormatReader::readRowImpl() do { fields.push_back(readCSVFieldIntoString()); - skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); } while (checkChar(format_settings.csv.delimiter, *buf)); skipRowEndDelimiter(); @@ -224,7 +228,7 @@ bool CSVFormatReader::parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out) try { - skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); assertChar(delimiter, *buf); } catch (const DB::Exception &) @@ -250,7 +254,7 @@ bool CSVFormatReader::parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out) bool CSVFormatReader::parseRowEndWithDiagnosticInfo(WriteBuffer & out) { - skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); if (buf->eof()) return true; @@ -259,7 +263,7 @@ bool CSVFormatReader::parseRowEndWithDiagnosticInfo(WriteBuffer & out) if (*buf->position() == format_settings.csv.delimiter) { ++buf->position(); - 
skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); if (buf->eof()) return true; } @@ -287,7 +291,7 @@ bool CSVFormatReader::readField( const String & /*column_name*/) { if (format_settings.csv.trim_whitespaces || !isStringOrFixedString(removeNullable(type))) [[likely]] - skipWhitespacesAndTabs(*buf, format_settings.csv.use_whitespace_tab_as_delimiter); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); const bool at_delimiter = !buf->eof() && *buf->position() == format_settings.csv.delimiter; const bool at_last_column_line_end = is_last_file_column && (buf->eof() || *buf->position() == '\n' || *buf->position() == '\r'); diff --git a/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh index deb6e317aac..6fca95cb839 100755 --- a/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh +++ b/tests/queries/0_stateless/02785_text_with_whitespace_tab_field_delimiter.sh @@ -10,8 +10,8 @@ $CLICKHOUSE_CLIENT -q "drop table if exists test_whitespace" $CLICKHOUSE_CLIENT -q "drop table if exists test_tab" $CLICKHOUSE_CLIENT -q "create table test_whitespace (x UInt32, y String, z String) engine=MergeTree order by x" $CLICKHOUSE_CLIENT -q "create table test_tab (x UInt32, y String, z String) engine=MergeTree order by x" -cat $CURDIR/data_csv/csv_with_space_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_whitespace SETTINGS format_csv_delimiter=' ', input_format_csv_use_whitespace_tab_as_delimiter=true FORMAT CSV" -cat $CURDIR/data_csv/csv_with_tab_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_tab SETTINGS format_csv_delimiter='\t', input_format_csv_use_whitespace_tab_as_delimiter=true FORMAT CSV" +cat $CURDIR/data_csv/csv_with_space_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_whitespace SETTINGS format_csv_delimiter=' ', input_format_csv_allow_whitespace_or_tab_as_delimiter=true FORMAT CSV" +cat $CURDIR/data_csv/csv_with_tab_delimiter.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_tab SETTINGS format_csv_delimiter='\t', input_format_csv_allow_whitespace_or_tab_as_delimiter=true FORMAT CSV" $CLICKHOUSE_CLIENT -q "select * from test_whitespace" $CLICKHOUSE_CLIENT -q "select * from test_tab" $CLICKHOUSE_CLIENT -q "drop table test_whitespace" From 6ffdfb8b6b8656dfb2ef004349a3cad82dd03e1f Mon Sep 17 00:00:00 2001 From: santrancisco Date: Wed, 14 Jun 2023 13:29:05 +1000 Subject: [PATCH 420/722] test removing CpuInfoUtil.cc and see if build breaks :p --- contrib/arrow-cmake/CMakeLists.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt index 5fe942d1cd0..01e9fc5fca9 100644 --- a/contrib/arrow-cmake/CMakeLists.txt +++ b/contrib/arrow-cmake/CMakeLists.txt @@ -139,8 +139,6 @@ set(ORC_SRCS "${ORC_SOURCE_SRC_DIR}/Compression.hh" "${ORC_SOURCE_SRC_DIR}/ConvertColumnReader.cc" "${ORC_SOURCE_SRC_DIR}/ConvertColumnReader.hh" - "${ORC_SOURCE_SRC_DIR}/CpuInfoUtil.cc" - "${ORC_SOURCE_SRC_DIR}/CpuInfoUtil.hh" "${ORC_SOURCE_SRC_DIR}/Dispatch.hh" "${ORC_SOURCE_SRC_DIR}/Exceptions.cc" "${ORC_SOURCE_SRC_DIR}/Int128.cc" From 868c3bd45d8585b03a9afce76e0e7466e675c420 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Wed, 14 Jun 2023 04:29:08 +0000 Subject: [PATCH 421/722] minor change --- src/Client/ConnectionString.cpp | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/ConnectionString.cpp b/src/Client/ConnectionString.cpp index 62090487490..f4a4e73c198 100644 --- a/src/Client/ConnectionString.cpp +++ b/src/Client/ConnectionString.cpp @@ -45,7 +45,7 @@ std::string uriDecode(const std::string & uri_encoded_string, bool plus_as_space void getHostAndPort(const Poco::URI & uri, std::vector> & hosts_and_ports_arguments) { std::vector host_and_port; - const auto& host = uri.getHost(); + const std::string & host = uri.getHost(); if (!host.empty()) { host_and_port.push_back("--host=" + uriDecode(host, false)); From e281026e0085048e8d35d8cca78cc346501ee974 Mon Sep 17 00:00:00 2001 From: Chang Chen Date: Wed, 14 Jun 2023 12:29:55 +0800 Subject: [PATCH 422/722] fix build issue on clang 15 --- src/Formats/CapnProtoSerializer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index b306cca4f94..8373c95599f 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -751,7 +751,7 @@ namespace private: using Reader = typename CapnpType::Reader; - CapnpType::Reader getData(const ColumnPtr & column, size_t row_num) + typename CapnpType::Reader getData(const ColumnPtr & column, size_t row_num) { auto data = column->getDataAt(row_num); if constexpr (std::is_same_v) @@ -801,7 +801,7 @@ namespace private: using Reader = typename CapnpType::Reader; - CapnpType::Reader getData(const ColumnPtr & column, size_t row_num) + typename CapnpType::Reader getData(const ColumnPtr & column, size_t row_num) { auto data = column->getDataAt(row_num); if constexpr (std::is_same_v) From 4db8fa39c7904661a7aac6aa62ee4f0e44092369 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Wed, 14 Jun 2023 04:38:46 +0000 Subject: [PATCH 423/722] Removed extra lines --- docs/en/interfaces/cli.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index e2c7dc1e608..8779dd1a544 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -228,9 +228,6 @@ clickhouse://[2001:db8::1234] URI allows multiple hosts to be connected to. Connection strings can contain multiple hosts. ClickHouse-client will try to connect to these hosts in order (i.e. from left to right). After the connection is established, no attempt to connect to the remaining hosts is made. - - - The connection string must be specified as the first argument of clickhouse-client. The connection string can be combined with arbitrary other [command-line-options](#command-line-options) except `--host/-h` and `--port`. The following keys are allowed for component `query_parameter`: From 08cd94e826ce6af55135517d8abcbc72f4b270fb Mon Sep 17 00:00:00 2001 From: Derek Chia Date: Wed, 14 Jun 2023 12:57:50 +0800 Subject: [PATCH 424/722] Update delete.md LWD is not supported in table with projection --- docs/en/sql-reference/statements/delete.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/en/sql-reference/statements/delete.md b/docs/en/sql-reference/statements/delete.md index fa9f08e225f..5522c50d624 100644 --- a/docs/en/sql-reference/statements/delete.md +++ b/docs/en/sql-reference/statements/delete.md @@ -55,6 +55,9 @@ With the described implementation now we can see what can negatively affect 'DEL - Table having a very large number of data parts - Having a lot of data in Compact parts—in a Compact part, all columns are stored in one file. 
+:::note +Lightweight delete does not work for tables with projection as rows in projection may be affected and require the projection to be rebuilt. Rebuilding projection makes the deletion not lightweight, so this is not supported. +::: ## Related content From f74c585426f0f6dd7f2ce440193cda2c43a9f9d9 Mon Sep 17 00:00:00 2001 From: Alexander Gololobov <440544+davenger@users.noreply.github.com> Date: Wed, 14 Jun 2023 07:46:11 +0200 Subject: [PATCH 425/722] Typos --- .../0_stateless/02784_projections_read_in_order_bug.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql b/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql index 9595fc9ae08..6bf287a3d77 100644 --- a/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql +++ b/tests/queries/0_stateless/02784_projections_read_in_order_bug.sql @@ -37,8 +37,8 @@ create table events ( timestamp )) engine = MergeTree order by (organisation_id, session_id, timestamp) settings index_granularity = 3; -insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)); -insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC')(), toString(0), reinterpretAsUUID(0), toString(0)); +insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), 
toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)); +insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)); set read_in_order_two_level_merge_threshold=1; SELECT id, timestamp, payload FROM events WHERE (organisation_id = reinterpretAsUUID(1)) AND (session_id = reinterpretAsUUID(0)) ORDER BY timestamp, payload, id ASC; From 86694847c66a170781e5a561940c05e0221966d0 Mon Sep 17 00:00:00 2001 From: Chang Chen Date: Wed, 14 Jun 2023 15:22:32 +0800 Subject: [PATCH 426/722] using Reader instead of typename CapnpType::Reader --- src/Formats/CapnProtoSerializer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Formats/CapnProtoSerializer.cpp b/src/Formats/CapnProtoSerializer.cpp index 8373c95599f..6f7254ab2aa 100644 --- a/src/Formats/CapnProtoSerializer.cpp +++ b/src/Formats/CapnProtoSerializer.cpp @@ -751,7 +751,7 @@ namespace private: using Reader = typename CapnpType::Reader; - typename CapnpType::Reader getData(const ColumnPtr & column, size_t row_num) + Reader getData(const ColumnPtr & column, size_t row_num) { auto data = column->getDataAt(row_num); if constexpr (std::is_same_v) @@ -801,7 +801,7 @@ namespace private: using Reader = typename CapnpType::Reader; - typename CapnpType::Reader getData(const ColumnPtr & column, size_t row_num) + Reader getData(const ColumnPtr & column, size_t row_num) { auto data = column->getDataAt(row_num); if constexpr (std::is_same_v) From b1f0a91b788e2ef89cd5848168e8357d6da13f0a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 14 Jun 2023 07:48:08 +0000 Subject: [PATCH 427/722] Docs: Fix embedded video link --- docs/en/engines/table-engines/mergetree-family/annindexes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index d8a0193ff66..80e47e76ce0 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -152,7 +152,7 @@ This type of ANN index implements [the Annoy algorithm](https://github.com/spoti space in random linear surfaces (lines in 2D, planes in 3D etc.).
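For context on the Annoy index documentation being edited here, a minimal usage sketch follows. It is not part of the patch above: the setting name, the `annoy('L2Distance')` parameters, and the example vectors are assumptions based on the ClickHouse release this patch targets, and the exact syntax may differ between versions.

```sql
-- Sketch: declaring and querying an experimental Annoy (ANN) index.
-- The feature was gated behind an experimental setting at the time of this patch.
SET allow_experimental_annoy_index = 1;

CREATE TABLE table_with_annoy_index
(
    id UInt64,
    vectors Array(Float32),
    -- Skip index of type 'annoy'; the distance function parameter is illustrative.
    INDEX ann_idx vectors TYPE annoy('L2Distance')
)
ENGINE = MergeTree
ORDER BY id;

-- ANN search: order rows by distance to a reference vector and keep the closest N.
SELECT id
FROM table_with_annoy_index
ORDER BY L2Distance(vectors, [0.0, 0.0, 0.0])
LIMIT 10;
```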
-