diff --git a/base/common/LineReader.cpp b/base/common/LineReader.cpp index 0d06e5ef225..dd2e09b0393 100644 --- a/base/common/LineReader.cpp +++ b/base/common/LineReader.cpp @@ -67,8 +67,8 @@ LineReader::Suggest::WordsRange LineReader::Suggest::getCompletions(const String }); } -LineReader::LineReader(const String & history_file_path_, char extender_, char delimiter_) - : history_file_path(history_file_path_), extender(extender_), delimiter(delimiter_) +LineReader::LineReader(const String & history_file_path_, bool multiline_, Patterns extenders_, Patterns delimiters_) + : history_file_path(history_file_path_), multiline(multiline_), extenders(std::move(extenders_)), delimiters(std::move(delimiters_)) { /// FIXME: check extender != delimiter } @@ -76,38 +76,60 @@ LineReader::LineReader(const String & history_file_path_, char extender_, char d String LineReader::readLine(const String & first_prompt, const String & second_prompt) { String line; - bool is_multiline = false; + bool need_next_line = false; - while (auto status = readOneLine(is_multiline ? second_prompt : first_prompt)) + while (auto status = readOneLine(need_next_line ? second_prompt : first_prompt)) { if (status == RESET_LINE) { line.clear(); - is_multiline = false; + need_next_line = false; continue; } if (input.empty()) { - if (!line.empty() && !delimiter && !hasInputData()) + if (!line.empty() && !multiline && !hasInputData()) break; else continue; } - is_multiline = (input.back() == extender) || (delimiter && input.back() != delimiter) || hasInputData(); - - if (input.back() == extender) +#if !defined(ARCADIA_BUILD) /// C++20 + const char * has_extender = nullptr; + for (const auto * extender : extenders) { - input = input.substr(0, input.size() - 1); + if (input.ends_with(extender)) + { + has_extender = extender; + break; + } + } + + const char * has_delimiter = nullptr; + for (const auto * delimiter : delimiters) + { + if (input.ends_with(delimiter)) + { + has_delimiter = delimiter; + break; + } + } + + need_next_line = has_extender || (multiline && !has_delimiter) || hasInputData(); + + if (has_extender) + { + input.resize(input.size() - strlen(has_extender)); trim(input); if (input.empty()) continue; } +#endif line += (line.empty() ? "" : " ") + input; - if (!is_multiline) + if (!need_next_line) break; } diff --git a/base/common/LineReader.h b/base/common/LineReader.h index 3e64bc858ad..f31459078ab 100644 --- a/base/common/LineReader.h +++ b/base/common/LineReader.h @@ -21,7 +21,9 @@ public: WordsRange getCompletions(const String & prefix, size_t prefix_length) const; }; - LineReader(const String & history_file_path, char extender, char delimiter = 0); /// if delimiter != 0, then it's multiline mode + using Patterns = std::vector<const char *>; + + LineReader(const String & history_file_path, bool multiline, Patterns extenders, Patterns delimiters); virtual ~LineReader() {} /// Reads the whole line until delimiter (in multiline mode) or until the last line without extender.
@@ -51,8 +53,10 @@ protected: String input; private: - const char extender; - const char delimiter; + bool multiline; + + Patterns extenders; + Patterns delimiters; String prev_line; diff --git a/base/common/ReadlineLineReader.cpp b/base/common/ReadlineLineReader.cpp index ee9a37d2168..d52ac0e9769 100644 --- a/base/common/ReadlineLineReader.cpp +++ b/base/common/ReadlineLineReader.cpp @@ -56,8 +56,9 @@ static char * generate(const char * text, int state) return nextMatch(); }; -ReadlineLineReader::ReadlineLineReader(const Suggest & suggest_, const String & history_file_path_, char extender_, char delimiter_) - : LineReader(history_file_path_, extender_, delimiter_) +ReadlineLineReader::ReadlineLineReader( + const Suggest & suggest_, const String & history_file_path_, bool multiline_, Patterns extenders_, Patterns delimiters_) + : LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_)) { suggest = &suggest_; diff --git a/base/common/ReadlineLineReader.h b/base/common/ReadlineLineReader.h index 395ae56c724..95bd23b4634 100644 --- a/base/common/ReadlineLineReader.h +++ b/base/common/ReadlineLineReader.h @@ -8,7 +8,7 @@ class ReadlineLineReader : public LineReader { public: - ReadlineLineReader(const Suggest & suggest, const String & history_file_path, char extender, char delimiter = 0); + ReadlineLineReader(const Suggest & suggest, const String & history_file_path, bool multiline, Patterns extenders_, Patterns delimiters_); ~ReadlineLineReader() override; void enableBracketedPaste() override; diff --git a/base/common/ReplxxLineReader.cpp b/base/common/ReplxxLineReader.cpp index 52c42235f1b..141237d5d94 100644 --- a/base/common/ReplxxLineReader.cpp +++ b/base/common/ReplxxLineReader.cpp @@ -16,8 +16,9 @@ void trim(String & s) } -ReplxxLineReader::ReplxxLineReader(const Suggest & suggest, const String & history_file_path_, char extender_, char delimiter_) - : LineReader(history_file_path_, extender_, delimiter_) +ReplxxLineReader::ReplxxLineReader( + const Suggest & suggest, const String & history_file_path_, bool multiline_, Patterns extenders_, Patterns delimiters_) + : LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_)) { using namespace std::placeholders; using Replxx = replxx::Replxx; diff --git a/base/common/ReplxxLineReader.h b/base/common/ReplxxLineReader.h index e7821f54ad3..472198bcfaf 100644 --- a/base/common/ReplxxLineReader.h +++ b/base/common/ReplxxLineReader.h @@ -7,7 +7,7 @@ class ReplxxLineReader : public LineReader { public: - ReplxxLineReader(const Suggest & suggest, const String & history_file_path, char extender, char delimiter = 0); + ReplxxLineReader(const Suggest & suggest, const String & history_file_path, bool multiline, Patterns extenders_, Patterns delimiters_); ~ReplxxLineReader() override; void enableBracketedPaste() override; diff --git a/base/common/phdr_cache.cpp b/base/common/phdr_cache.cpp index a5cb466f425..f362fb64285 100644 --- a/base/common/phdr_cache.cpp +++ b/base/common/phdr_cache.cpp @@ -1,20 +1,6 @@ /// This code was based on the code by Fedor Korotkiy (prime@yandex-team.ru) for YT product in Yandex. 
-#if defined(__has_feature) - #if __has_feature(address_sanitizer) - #define ADDRESS_SANITIZER 1 - #endif - #if __has_feature(thread_sanitizer) - #define THREAD_SANITIZER 1 - #endif -#else - #if defined(__SANITIZE_ADDRESS__) - #define ADDRESS_SANITIZER 1 - #endif - #if defined(__SANITIZE_THREAD__) - #define THREAD_SANITIZER 1 - #endif -#endif +#include <common/defines.h> #if defined(__linux__) && !defined(THREAD_SANITIZER) #define USE_PHDR_CACHE 1 diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index d7824443c1d..97d5333b0ad 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -174,5 +174,6 @@ For a list of possible configuration options, see the [librdkafka configuration **See Also** - [Virtual columns](../index.md#table_engines-virtual_columns) +- [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) [Original article](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 20710941a67..5826adb7e9c 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -623,6 +623,8 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd' The `default` storage policy implies using only one volume, which consists of only one disk given in `<path>`. Once a table is created, its storage policy cannot be changed. +The number of threads performing background moves of data parts can be changed with the [background_move_pool_size](../../../operations/settings/settings.md#background_move_pool_size) setting. + ### Details {#details} In the case of `MergeTree` tables, data is getting to disk in different ways: @@ -650,3 +652,4 @@ After the completion of background merges and mutations, old parts are removed o During this time, they are not moved to other volumes or disks. Therefore, until the parts are finally removed, they are still taken into account for evaluation of the occupied disk space. [Original article](https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/) + diff --git a/docs/en/engines/table-engines/mergetree-family/replication.md b/docs/en/engines/table-engines/mergetree-family/replication.md index d813f76d127..aa4e2e87d95 100644 --- a/docs/en/engines/table-engines/mergetree-family/replication.md +++ b/docs/en/engines/table-engines/mergetree-family/replication.md @@ -63,7 +63,7 @@ For each `INSERT` query, approximately ten entries are added to ZooKeeper throug For very large clusters, you can use different ZooKeeper clusters for different shards. However, this hasn’t proven necessary on the Yandex.Metrica cluster (approximately 300 servers). -Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. +Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server.
Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. The number of threads performing background tasks for replicated tables can be set with the [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) setting. By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option. @@ -215,4 +215,8 @@ After this, you can launch the server, create a `MergeTree` table, move the data If the data in ZooKeeper was lost or damaged, you can save data by moving it to an unreplicated table as described above. +**See also** + +- [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) + [Original article](https://clickhouse.tech/docs/en/operations/table_engines/replication/) diff --git a/docs/en/engines/table-engines/special/distributed.md b/docs/en/engines/table-engines/special/distributed.md index 98ca70134b5..bc2a4dc483d 100644 --- a/docs/en/engines/table-engines/special/distributed.md +++ b/docs/en/engines/table-engines/special/distributed.md @@ -130,7 +130,7 @@ You should be concerned about the sharding scheme in the following cases: - Queries are used that require joining data (IN or JOIN) by a specific key. If data is sharded by this key, you can use local IN or JOIN instead of GLOBAL IN or GLOBAL JOIN, which is much more efficient. - A large number of servers is used (hundreds or more) with a large number of small queries (queries of individual clients - websites, advertisers, or partners). In order for the small queries to not affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, as we’ve done in Yandex.Metrica, you can set up bi-level sharding: divide the entire cluster into “layers”, where a layer may consist of multiple shards. Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. Distributed tables are created for each layer, and a single shared distributed table is created for global queries. -Data is written asynchronously. When inserted in the table, the data block is just written to the local file system. The data is sent to the remote servers in the background as soon as possible. The period for sending data is managed by the [distributed\_directory\_monitor\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) and [distributed\_directory\_monitor\_max\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) settings. The `Distributed` engine sends each file with inserted data separately, but you can enable batch sending of files with the [distributed\_directory\_monitor\_batch\_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts) setting.
This setting improves cluster performance by better utilizing local server and network resources. You should check whether data is sent successfully by checking the list of files (data waiting to be sent) in the table directory: `/var/lib/clickhouse/data/database/table/`. +Data is written asynchronously. When inserted in the table, the data block is just written to the local file system. The data is sent to the remote servers in the background as soon as possible. The period for sending data is managed by the [distributed\_directory\_monitor\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) and [distributed\_directory\_monitor\_max\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) settings. The `Distributed` engine sends each file with inserted data separately, but you can enable batch sending of files with the [distributed\_directory\_monitor\_batch\_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts) setting. This setting improves cluster performance by better utilizing local server and network resources. You should check whether data is sent successfully by checking the list of files (data waiting to be sent) in the table directory: `/var/lib/clickhouse/data/database/table/`. The number of threads performing background tasks can be set with the [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size) setting. If the server ceased to exist or had a rough restart (for example, after a device failure) after an INSERT to a Distributed table, the inserted data might be lost. If a damaged data part is detected in the table directory, it is transferred to the ‘broken’ subdirectory and no longer used.
@@ -146,5 +146,6 @@ When the max\_parallel\_replicas option is enabled, query processing is parallel **See Also** - [Virtual columns](index.md#table_engines-virtual_columns) +- [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size) [Original article](https://clickhouse.tech/docs/en/operations/table_engines/distributed/) diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index 4daadf32be6..081f963f74f 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -57,7 +57,7 @@ toc_title: Adopters | [S7 Airlines](https://www.s7.ru){.favicon} | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | | [SEMrush](https://www.semrush.com/){.favicon} | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | | [scireum GmbH](https://www.scireum.de/){.favicon} | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | -| [Sentry](https://sentry.io/){.favicon} | Software developer | Backend for product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | +| [Sentry](https://sentry.io/){.favicon} | Software Development | Main product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | | [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr){.favicon} | Goverment Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | | [seo.do](https://seo.do/){.favicon} | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | | [Sina](http://english.sina.com/index.html){.favicon} | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 5961c701283..b90b432da6c 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -892,6 +892,9 @@ The update is performed asynchronously, in a separate system thread. **Default value**: 15. +**See also** + +- [background_schedule_pool_size](../settings/settings.md#background_schedule_pool_size) ## access_control_path {#access_control_path} diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 880f0ffedb1..f29866d4980 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -433,6 +433,18 @@ Possible values: Default value: 65536. +## join_on_disk_max_files_to_merge {#join_on_disk_max_files_to_merge} + +Limits the number of files allowed for parallel sorting in MergeJoin operations when they are executed on disk. + +The bigger the value of the setting, the more RAM is used and the less disk I/O is needed. + +Possible values: + +- Any positive integer, starting from 2. + +Default value: 64.
+ ## any_join_distinct_right_table_keys {#any_join_distinct_right_table_keys} Enables legacy ClickHouse server behavior in `ANY INNER|LEFT JOIN` operations. @@ -463,6 +475,18 @@ See also: - [JOIN strictness](../../sql-reference/statements/select/join.md#select-join-strictness) +## temporary_files_codec {#temporary_files_codec} + +Sets the compression codec for temporary files used in sorting and joining operations on disk. + +Possible values: + +- LZ4 — [LZ4](https://en.wikipedia.org/wiki/LZ4_(compression_algorithm)) compression is applied. +- NONE — No compression is applied. + +Default value: LZ4. + + ## max\_block\_size {#setting-max_block_size} In ClickHouse, data is processed by blocks (sets of column parts). The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block. The `max_block_size` setting is a recommendation for what size of the block (in a count of rows) to load from tables. The block size shouldn’t be too small, so that the expenditures on each block are still noticeable, but not too large so that the query with LIMIT that is completed after the first block is processed quickly. The goal is to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality. @@ -1312,7 +1336,7 @@ Type: URL Default value: Empty -## background\_pool\_size {#background_pool_size} +## background_pool_size {#background_pool_size} Sets the number of threads performing background operations in table engines (for example, merges in [MergeTree engine](../../engines/table-engines/mergetree-family/index.md) tables). This setting is applied from `default` profile at ClickHouse server start and can’t be changed in a user session. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance. @@ -1324,6 +1348,46 @@ Possible values: Default value: 16. +## background_buffer_flush_schedule_pool_size {#background_buffer_flush_schedule_pool_size} + +Sets the number of threads performing background flushes of data in [Buffer](../../engines/table-engines/special/buffer.md)-engine tables. This setting is applied at ClickHouse server start and can't be changed in a user session. + +Possible values: + +- Any positive integer. + +Default value: 16. + +## background_move_pool_size {#background_move_pool_size} + +Sets the number of threads performing background moves of data parts for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes)-engine tables. This setting is applied at ClickHouse server start and can’t be changed in a user session. + +Possible values: + +- Any positive integer. + +Default value: 8. + +## background_schedule_pool_size {#background_schedule_pool_size} + +Sets the number of threads performing background tasks for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables, [Kafka](../../engines/table-engines/integrations/kafka.md) streaming, and [DNS cache updates](../server-configuration-parameters/settings.md#server-settings-dns-cache-update-period). This setting is applied at ClickHouse server start and can’t be changed in a user session. + +Possible values: + +- Any positive integer. + +Default value: 16.
+ +## background_distributed_schedule_pool_size {#background_distributed_schedule_pool_size} + +Sets the number of threads performing background tasks for [distributed](../../engines/table-engines/special/distributed.md) sends. This setting is applied at ClickHouse server start and can’t be changed in a user session. + +Possible values: + +- Any positive integer. + +Default value: 16. + ## low_cardinality_max_dictionary_size {#low_cardinality_max_dictionary_size} Sets a maximum size in rows of a shared global dictionary for the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) data type that can be written to a storage file system. This setting prevents issues with RAM in case of unlimited dictionary growth. All the data that can't be encoded due to maximum dictionary size limitation ClickHouse writes in an ordinary method. @@ -1382,5 +1446,4 @@ Possible values: Default value: 0. - [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) diff --git a/docs/en/operations/system-tables.md b/docs/en/operations/system-tables.md index f364d4e3068..d3d58834e60 100644 --- a/docs/en/operations/system-tables.md +++ b/docs/en/operations/system-tables.md @@ -176,6 +176,41 @@ This table contains a single String column called ‘name’ – the name of a d Each database that the server knows about has a corresponding entry in the table. This system table is used for implementing the `SHOW DATABASES` query. +## system.data_type_families {#system_tables-data_type_families} + +Contains information about supported [data types](../sql-reference/data-types/). + +Columns: + +- `name` ([String](../sql-reference/data-types/string.md)) — Data type name. +- `case_insensitive` ([UInt8](../sql-reference/data-types/int-uint.md)) — Property that shows whether a data type name can be used in a query in a case-insensitive manner. For example, `Date` and `date` are both valid. +- `alias_to` ([String](../sql-reference/data-types/string.md)) — Data type name for which `name` is an alias. + +**Example** + +``` sql +SELECT * FROM system.data_type_families WHERE alias_to = 'String' +``` + +``` text +┌─name───────┬─case_insensitive─┬─alias_to─┐ +│ LONGBLOB   │                1 │ String   │ +│ LONGTEXT   │                1 │ String   │ +│ TINYTEXT   │                1 │ String   │ +│ TEXT       │                1 │ String   │ +│ VARCHAR    │                1 │ String   │ +│ MEDIUMBLOB │                1 │ String   │ +│ BLOB       │                1 │ String   │ +│ TINYBLOB   │                1 │ String   │ +│ CHAR       │                1 │ String   │ +│ MEDIUMTEXT │                1 │ String   │ +└────────────┴──────────────────┴──────────┘ +``` + +**See Also** + +- [Syntax](../sql-reference/syntax.md) — Information about supported syntax. + ## system.detached\_parts {#system_tables-detached_parts} Contains information about detached parts of [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) tables. The `reason` column specifies why the part was detached. For user-detached parts, the reason is empty. Such parts can be attached with [ALTER TABLE ATTACH PARTITION\|PART](../sql-reference/statements/alter.md#alter_attach-partition) command. For the description of other columns, see [system.parts](#system_tables-parts). If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../sql-reference/statements/alter.md#alter_drop-detached).
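The pool sizes documented above are all applied at server start, so a quick way to confirm what a running server picked up is to read them back from `system.settings`. The following query is a sketch, not part of the patch; it assumes only the standard `name` and `value` columns of `system.settings`:

``` sql
SELECT name, value
FROM system.settings
WHERE name LIKE 'background%pool_size'
```

On a default configuration this should list `background_pool_size`, `background_buffer_flush_schedule_pool_size`, `background_move_pool_size`, `background_schedule_pool_size`, and `background_distributed_schedule_pool_size` with the default values given above.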
diff --git a/docs/en/sql-reference/data-types/index.md b/docs/en/sql-reference/data-types/index.md index da507b81acd..63ec877b703 100644 --- a/docs/en/sql-reference/data-types/index.md +++ b/docs/en/sql-reference/data-types/index.md @@ -10,4 +10,6 @@ ClickHouse can store various kinds of data in table cells. This section describes the supported data types and special considerations for using and/or implementing them if any. +You can check whether a data type name is case-sensitive in the [system.data_type_families](../../operations/system-tables.md#system_tables-data_type_families) table. + [Original article](https://clickhouse.tech/docs/en/data_types/) diff --git a/docs/en/sql-reference/functions/random-functions.md b/docs/en/sql-reference/functions/random-functions.md index 788ece6371b..05fb982138c 100644 --- a/docs/en/sql-reference/functions/random-functions.md +++ b/docs/en/sql-reference/functions/random-functions.md @@ -60,4 +60,44 @@ Result: └────────────┴────────────┴──────────────┴────────────────┴─────────────────┴──────────────────────┘ ``` +# Random functions for working with strings {#random-functions-for-working-with-strings} + +## randomString {#random-string} + +## randomFixedString {#random-fixed-string} + +## randomPrintableASCII {#random-printable-ascii} + +## randomStringUTF8 {#random-string-utf8} + +## fuzzBits {#fuzzbits} + +**Syntax** + +``` sql +fuzzBits(s, prob) +``` +Inverts bits of `s`, each with probability `prob`. + +**Parameters** +- `s` - `String` or `FixedString` +- `prob` - constant `Float32/64` + +**Returned value** +Fuzzed string of the same type as `s`. + +**Example** + +``` sql +SELECT fuzzBits(materialize('abacaba'), 0.1) +FROM numbers(3) +``` +``` text +┌─fuzzBits(materialize('abacaba'), 0.1)─┐ +│ abaaaja │ +│ a*cjab+ │ +│ aeca2A │ +└───────────────────────────────────────┘ +``` + [Original article](https://clickhouse.tech/docs/en/query_language/functions/random_functions/) diff --git a/docs/en/sql-reference/syntax.md b/docs/en/sql-reference/syntax.md index 70db90b38be..cca01114681 100644 --- a/docs/en/sql-reference/syntax.md +++ b/docs/en/sql-reference/syntax.md @@ -40,7 +40,7 @@ Keywords are case-insensitive when they correspond to: - SQL standard. For example, `SELECT`, `select` and `SeLeCt` are all valid. - Implementation in some popular DBMS (MySQL or Postgres). For example, `DateTime` is the same as `datetime`. -Whether data type name is case-sensitive can be checked in the `system.data_type_families` table. +You can check whether a data type name is case-sensitive in the [system.data_type_families](../operations/system-tables.md#system_tables-data_type_families) table. In contrast to standard SQL, all other keywords (including functions names) are **case-sensitive**. diff --git a/docs/ru/development/style.md b/docs/ru/development/style.md index b7fff2753dc..b948988d699 100644 --- a/docs/ru/development/style.md +++ b/docs/ru/development/style.md @@ -688,10 +688,77 @@ auto s = std::string{"Hello"}; ## Неиспользуемые возможности языка C++ {#neispolzuemye-vozmozhnosti-iazyka-c} -**1.** Виртуальное наследование не используется. - **2.** Спецификаторы исключений из C++03 не используются. +## Сообщения об ошибках {#error-messages} + +Сообщения об ошибках -- это часть пользовательского интерфейса программы, предназначенная для того, чтобы позволить пользователю: +* замечать ошибочные ситуации, +* понимать их смысл и причины, +* устранять эти ситуации. + +Форма и содержание сообщений об ошибках должны способствовать достижению этих целей.
+ +Есть два основных вида ошибок: +* пользовательская или системная ошибка, +* внутренняя программная ошибка. + +### Пользовательская ошибка {#error-messages-user-error} + +Такая ошибка вызвана действиями пользователя (неверный синтаксис запроса) или конфигурацией внешних систем (кончилось место на диске). Предполагается, что пользователь может устранить её самостоятельно. Для этого в сообщении об ошибке должна содержаться следующая информация: +* что произошло. Это должно объясняться в пользовательских терминах (`Function pow() is not supported for data type UInt128`), а не загадочными конструкциями из кода (`runtime overload resolution failed in DB::BinaryOperationBuilder::Impl, UInt128, Int8>::kaboongleFastPath()`). +* почему/где/когда -- любой контекст, который помогает отладить проблему. Представьте, как бы её отлаживали вы (программировать и пользоваться отладчиком нельзя). +* что можно предпринять для устранения ошибки. Здесь можно перечислить типичные причины проблемы, настройки, влияющие на это поведение, и так далее. + +Пример нормального сообщения: +``` +No alias for subquery or table function in JOIN (set joined_subquery_requires_alias=0 to disable restriction). +While processing '(SELECT 2 AS a)'. +``` +Сказано что не хватает алиаса, показано, для какой части запроса, и предложена настройка, позволяющая ослабить это требование. + +Пример катастрофически плохого сообщения: +``` +The dictionary is configured incorrectly. +``` +Из него не понятно: +- какой словарь? +- в чём ошибка конфигурации? + +Что может сделать пользователь в такой ситуации: применять внешние отладочные инструменты, спрашивать совета на форумах, гадать на кофейной гуще, и, конечно же, ненавидеть софт, который над ним так издевается. Не нужно издеваться над пользователями, это плохой UX. + + +### Внутренняя программная ошибка {#error-messages-internal-error} + +Такая ошибка вызвана нарушением внутренних инвариантов программы: например, внутренняя функция вызвана с неверными параметрами, не совпадают размеры колонок в блоке, произошло разыменование нулевого указателя, и так далее. Сигналы типа `SIGSEGV` относятся к этой же категории. + +Появление такой ошибки всегда свидетельствует о наличии бага в программе. Пользователь не может исправить такую ошибку самостоятельно, и должен сообщить о ней разработчикам. + +Есть два основных варианта проверки на такие ошибки: +* Исключение с кодом `LOGICAL_ERROR`. Его можно использовать для важных проверок, которые делаются в том числе в релизной сборке. +* `assert`. Такие условия не проверяются в релизной сборке, можно использовать для тяжёлых и опциональных проверок. + +Пример сообщения, у которого должен быть код `LOGICAL_ERROR`: +`Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). It is a bug!` +По каким признакам можно заметить, что здесь говорится о внутренней программной ошибке? +* в сообщении упоминаются внутренние сущности из кода, +* в сообщении написано it's a bug, +* непосредственные действия пользователя не могут исправить эту ошибку. Мы ожидаем, что пользователь зарепортит её как баг, и будем исправлять в коде. + +### Как выбрать код ошибки? {#error-messages-choose} + +Код ошибки предназначен для автоматической обработки некоторых видов ошибок, подобно кодам HTTP. SQL стандартизирует некоторые коды, но на деле ClickHouse не всегда соответствует этим стандартам. Лучше всего выбрать существующий код из `ErrorCodes.cpp`, который больше всего подходит по смыслу. Можно использовать общие коды типа `BAD_ARGUMENTS` или `TYPE_MISMATCH`. 
Заводить новый код нужно, только если вы чётко понимаете, что вам нужна специальная автоматическая обработка конкретно этой ошибки на клиенте. Для внутренних программных ошибок используется код `LOGICAL_ERROR`. + +### Как добавить новое сообщение об ошибке? {#error-messages-add} + +Когда добавляете сообщение об ошибке: +1. Опишите, что произошло, в пользовательских терминах, а не кусками кода. +2. Добавьте максимум контекста (с чем произошло, когда, почему, и т.д.). +3. Добавьте типичные причины. +4. Добавьте варианты исправления (настройки, ссылки на документацию). +5. Вообразите дальнейшие действия пользователя. Ваше сообщение должно помочь ему решить проблему без использования отладочных инструментов и без чужой помощи. +6. Если сообщение об ошибке не формулируется в пользовательских терминах, и действия пользователя не могут исправить проблему -- это внутренняя программная ошибка, используйте код LOGICAL_ERROR или assert. + ## Платформа {#platforma} **1.** Мы пишем код под конкретные платформы. diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index e2940ccfb56..8a79c8556b8 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -165,5 +165,6 @@ Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format **Смотрите также** - [Виртуальные столбцы](index.md#table_engines-virtual_columns) +- [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/kafka/) diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index fe7b7627c01..f9192015188 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -589,6 +589,8 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd' По умолчанию используется политика хранения `default` в которой есть один том и один диск, указанный в ``. В данный момент менять политику хранения после создания таблицы нельзя. +Количество потоков для фоновых перемещений кусков между дисками можно изменить с помощью настройки [background_move_pool_size](../../../operations/settings/settings.md#background_move_pool_size) + ### Особенности работы {#osobennosti-raboty} В таблицах `MergeTree` данные попадают на диск несколькими способами: diff --git a/docs/ru/engines/table-engines/mergetree-family/replication.md b/docs/ru/engines/table-engines/mergetree-family/replication.md index 51379c5a8cf..54f390c293e 100644 --- a/docs/ru/engines/table-engines/mergetree-family/replication.md +++ b/docs/ru/engines/table-engines/mergetree-family/replication.md @@ -58,7 +58,7 @@ ClickHouse хранит метаинформацию о репликах в [Apa Для очень больших кластеров, можно использовать разные кластеры ZooKeeper для разных шардов. Впрочем, на кластере Яндекс.Метрики (примерно 300 серверов) такой необходимости не возникает. -Репликация асинхронная, мульти-мастер. Запросы `INSERT` и `ALTER` можно направлять на любой доступный сервер. Данные вставятся на сервер, где выполнен запрос, а затем скопируются на остальные серверы. В связи с асинхронностью, только что вставленные данные появляются на остальных репликах с небольшой задержкой. Если часть реплик недоступна, данные на них запишутся тогда, когда они станут доступны. 
Если реплика доступна, то задержка составляет столько времени, сколько требуется для передачи блока сжатых данных по сети. +Репликация асинхронная, мульти-мастер. Запросы `INSERT` и `ALTER` можно направлять на любой доступный сервер. Данные вставятся на сервер, где выполнен запрос, а затем скопируются на остальные серверы. В связи с асинхронностью, только что вставленные данные появляются на остальных репликах с небольшой задержкой. Если часть реплик недоступна, данные на них запишутся тогда, когда они станут доступны. Если реплика доступна, то задержка составляет столько времени, сколько требуется для передачи блока сжатых данных по сети. Количество потоков для выполнения фоновых задач можно задать с помощью настройки [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size). По умолчанию, запрос INSERT ждёт подтверждения записи только от одной реплики. Если данные были успешно записаны только на одну реплику, и сервер с этой репликой перестал существовать, то записанные данные будут потеряны. Вы можете включить подтверждение записи от нескольких реплик, используя настройку `insert_quorum`. diff --git a/docs/ru/engines/table-engines/special/distributed.md b/docs/ru/engines/table-engines/special/distributed.md index cd92332b07e..68753a7c217 100644 --- a/docs/ru/engines/table-engines/special/distributed.md +++ b/docs/ru/engines/table-engines/special/distributed.md @@ -107,7 +107,7 @@ logs - имя кластера в конфигурационном файле с - используются запросы, требующие соединение данных (IN, JOIN) по определённому ключу - тогда если данные шардированы по этому ключу, то можно использовать локальные IN, JOIN вместо GLOBAL IN, GLOBAL JOIN, что кардинально более эффективно. - используется большое количество серверов (сотни и больше) и большое количество маленьких запросов (запросы отдельных клиентов - сайтов, рекламодателей, партнёров) - тогда, для того, чтобы маленькие запросы не затрагивали весь кластер, имеет смысл располагать данные одного клиента на одном шарде, или (вариант, который используется в Яндекс.Метрике) сделать двухуровневое шардирование: разбить весь кластер на «слои», где слой может состоять из нескольких шардов; данные для одного клиента располагаются на одном слое, но в один слой можно по мере необходимости добавлять шарды, в рамках которых данные распределены произвольным образом; создаются распределённые таблицы на каждый слой и одна общая распределённая таблица для глобальных запросов. -Запись данных осуществляется полностью асинхронно. При вставке в таблицу, блок данных сначала записывается в файловую систему. Затем, в фоновом режиме отправляются на удалённые серверы при первой возможности. Период отправки регулируется настройками [distributed\_directory\_monitor\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) и [distributed\_directory\_monitor\_max\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms). Движок таблиц `Distributed` отправляет каждый файл со вставленными данными отдельно, но можно включить пакетную отправку данных настройкой [distributed\_directory\_monitor\_batch\_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts). Эта настройка улучшает производительность кластера за счет более оптимального использования ресурсов сервера-отправителя и сети. 
Необходимо проверять, что данные отправлены успешно, для этого проверьте список файлов (данных, ожидающих отправки) в каталоге таблицы `/var/lib/clickhouse/data/database/table/`. +Запись данных осуществляется полностью асинхронно. При вставке в таблицу, блок данных сначала записывается в файловую систему. Затем, в фоновом режиме отправляются на удалённые серверы при первой возможности. Период отправки регулируется настройками [distributed\_directory\_monitor\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) и [distributed\_directory\_monitor\_max\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms). Движок таблиц `Distributed` отправляет каждый файл со вставленными данными отдельно, но можно включить пакетную отправку данных настройкой [distributed\_directory\_monitor\_batch\_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts). Эта настройка улучшает производительность кластера за счет более оптимального использования ресурсов сервера-отправителя и сети. Необходимо проверять, что данные отправлены успешно, для этого проверьте список файлов (данных, ожидающих отправки) в каталоге таблицы `/var/lib/clickhouse/data/database/table/`. Количество потоков для выполнения фоновых задач можно задать с помощью настройки [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size). Если после INSERT-а в Distributed таблицу, сервер перестал существовать или был грубо перезапущен (например, в следствие аппаратного сбоя), то записанные данные могут быть потеряны. Если в директории таблицы обнаружен повреждённый кусок данных, то он переносится в поддиректорию broken и больше не используется. diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 75e87a34f88..822f5543f9b 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -525,7 +525,7 @@ CREATE TABLE IF NOT EXISTS example_table - Если `input_format_defaults_for_omitted_fields = 1`, то значение по умолчанию для `x` равно `0`, а значение по умолчанию `a` равно `x * 2`. !!! note "Предупреждение" - Если `insert_sample_with_metadata = 1`, то при обработке запросов ClickHouse потребляет больше вычислительных ресурсов, чем если `insert_sample_with_metadata = 0`. + Если `input_format_defaults_for_omitted_fields = 1`, то при обработке запросов ClickHouse потребляет больше вычислительных ресурсов, чем если `input_format_defaults_for_omitted_fields = 0`. ### Выборка данных {#vyborka-dannykh} diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index ccde7a945ac..e3c1629a46a 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -843,6 +843,10 @@ ClickHouse использует ZooKeeper для хранения метадан **Значение по умолчанию**: 15. +**Смотрите также** + +- [background_schedule_pool_size](../settings/settings.md#background_schedule_pool_size) + ## access_control_path {#access_control_path} Путь к каталогу, где сервер ClickHouse хранит конфигурации пользователей и ролей, созданные командами SQL. 
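Both the English and Russian `Distributed` pages above defer to the `distributed_directory_monitor_*` settings for the background send period. A sketch for reviewing their current values (it assumes only the `name`, `value`, and `changed` columns of `system.settings`, which this patch does not touch):

``` sql
SELECT name, value, changed
FROM system.settings
WHERE name LIKE 'distributed_directory_monitor%'
```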
diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 4dd43e9607b..5e34affcaac 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1181,4 +1181,44 @@ Default value: 0. Значение по умолчанию: 16. +## background_buffer_flush_schedule_pool_size {#background_buffer_flush_schedule_pool_size} + +Задает количество потоков для выполнения фонового сброса данных в таблицах с движком [Buffer](../../engines/table-engines/special/buffer.md). Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе. + +Допустимые значения: + +- Положительное целое число. + +Значение по умолчанию: 16. + +## background_move_pool_size {#background_move_pool_size} + +Задает количество потоков для фоновых перемещений кусков между дисками. Работает для таблиц с движком [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes). Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе. + +Допустимые значения: + +- Положительное целое число. + +Значение по умолчанию: 8. + +## background_schedule_pool_size {#background_schedule_pool_size} + +Задает количество потоков для выполнения фоновых задач. Работает для [реплицируемых](../../engines/table-engines/mergetree-family/replication.md) таблиц, стримов в [Kafka](../../engines/table-engines/integrations/kafka.md) и обновления IP адресов у записей во внутреннем [DNS кеше](../server-configuration-parameters/settings.md#server-settings-dns-cache-update-period). Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе. + +Допустимые значения: + +- Положительное целое число. + +Значение по умолчанию: 16. + +## background_distributed_schedule_pool_size {#background_distributed_schedule_pool_size} + +Задает количество потоков для выполнения фоновых задач. Работает для таблиц с движком [Distributed](../../engines/table-engines/special/distributed.md). Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе. + +Допустимые значения: + +- Положительное целое число. + +Значение по умолчанию: 16. + [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/) diff --git a/docs/ru/operations/system-tables.md b/docs/ru/operations/system-tables.md index 8954a484560..38971d6ee99 100644 --- a/docs/ru/operations/system-tables.md +++ b/docs/ru/operations/system-tables.md @@ -132,6 +132,41 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova' Для каждой базы данных, о которой знает сервер, будет присутствовать соответствующая запись в таблице. Эта системная таблица используется для реализации запроса `SHOW DATABASES`. +## system.data_type_families {#system_tables-data_type_families} + +Содержит информацию о поддерживаемых [типах данных](../sql-reference/data-types/). + +Столбцы: + +- `name` ([String](../sql-reference/data-types/string.md)) — имя типа данных. +- `case_insensitive` ([UInt8](../sql-reference/data-types/int-uint.md)) — свойство, которое показывает, зависит ли имя типа данных в запросе от регистра. Например, допустимы и `Date`, и `date`. +- `alias_to` ([String](../sql-reference/data-types/string.md)) — тип данных, для которого `name` является алиасом. 
+ +**Пример** + +``` sql +SELECT * FROM system.data_type_families WHERE alias_to = 'String' +``` + +``` text +┌─name───────┬─case_insensitive─┬─alias_to─┐ +│ LONGBLOB   │                1 │ String   │ +│ LONGTEXT   │                1 │ String   │ +│ TINYTEXT   │                1 │ String   │ +│ TEXT       │                1 │ String   │ +│ VARCHAR    │                1 │ String   │ +│ MEDIUMBLOB │                1 │ String   │ +│ BLOB       │                1 │ String   │ +│ TINYBLOB   │                1 │ String   │ +│ CHAR       │                1 │ String   │ +│ MEDIUMTEXT │                1 │ String   │ +└────────────┴──────────────────┴──────────┘ +``` + +**Смотрите также** + +- [Синтаксис](../sql-reference/syntax.md) — поддерживаемый SQL синтаксис. + ## system.detached\_parts {#system_tables-detached_parts} Содержит информацию об отсоединённых кусках таблиц семейства [MergeTree](../engines/table-engines/mergetree-family/mergetree.md). Столбец `reason` содержит причину, по которой кусок был отсоединён. Для кусов, отсоединённых пользователем, `reason` содержит пустую строку. diff --git a/docs/ru/sql-reference/data-types/index.md b/docs/ru/sql-reference/data-types/index.md index 8b4f28aad3e..65bce13a1a4 100644 --- a/docs/ru/sql-reference/data-types/index.md +++ b/docs/ru/sql-reference/data-types/index.md @@ -8,5 +8,7 @@ toc_title: "\u0412\u0432\u0435\u0434\u0435\u043D\u0438\u0435" ClickHouse может сохранять в ячейках таблиц данные различных типов. +Зависимость имен типов данных от регистра можно проверить в системной таблице [system.data_type_families](../../operations/system-tables.md#system_tables-data_type_families). + Раздел содержит описания поддерживаемых типов данных и специфику их использования и/или реализации, если таковые имеются. [Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/) diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 57cc76b797e..41ded78055c 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -358,7 +358,7 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null ## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) {#function-tointerval} -Приводит аргумент из числового типа данных к типу данных [IntervalType](../../sql-reference/functions/type-conversion-functions.md). +Приводит аргумент из числового типа данных к типу данных [IntervalType](../../sql-reference/data-types/special-data-types/interval.md). **Синтаксис** diff --git a/docs/ru/sql-reference/syntax.md b/docs/ru/sql-reference/syntax.md index 24ab2be8a16..41a9c012b71 100644 --- a/docs/ru/sql-reference/syntax.md +++ b/docs/ru/sql-reference/syntax.md @@ -33,7 +33,7 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') - Стандарту SQL. Например, применение любого из вариантов `SELECT`, `select` или `SeLeCt` не вызовет ошибки. - Реализации в некоторых популярных DBMS (MySQL или Postgres). Например, `DateTime` и `datetime`. -Зависимость от регистра для имён типов данных можно проверить в таблице `system.data_type_families`. +Зависимость от регистра для имён типов данных можно проверить в таблице [system.data_type_families](../operations/system-tables.md#system_tables-data_type_families). В отличие от стандарта SQL, все остальные ключевые слова, включая названия функций зависят от регистра.
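The syntax pages above now point readers at `system.data_type_families` for case-sensitivity questions. Using only the columns this patch documents (`name`, `case_insensitive`, `alias_to`), the case-insensitive type names and their aliases can be listed directly; a sketch:

``` sql
SELECT name, alias_to
FROM system.data_type_families
WHERE case_insensitive = 1
ORDER BY name
```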
diff --git a/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md index 52f3c708126..19caae5e1a1 100644 --- a/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md @@ -5,7 +5,7 @@ toc_priority: 37 toc_title: "\u7248\u672C\u96C6\u5408\u5728\u65B0\u6811" --- -# 版本集合在新树 {#versionedcollapsingmergetree} +# 版本折叠合并树 {#versionedcollapsingmergetree} 这个引擎: diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 930e55a06f8..1c2e0925c2a 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -498,12 +498,15 @@ private: if (!history_file.empty() && !Poco::File(history_file).exists()) Poco::File(history_file).createFile(); + LineReader::Patterns query_extenders = {"\\"}; + LineReader::Patterns query_delimiters = {";", "\\G"}; + #if USE_REPLXX - ReplxxLineReader lr(Suggest::instance(), history_file, '\\', config().has("multiline") ? ';' : 0); + ReplxxLineReader lr(Suggest::instance(), history_file, config().has("multiline"), query_extenders, query_delimiters); #elif defined(USE_READLINE) && USE_READLINE - ReadlineLineReader lr(Suggest::instance(), history_file, '\\', config().has("multiline") ? ';' : 0); + ReadlineLineReader lr(Suggest::instance(), history_file, config().has("multiline"), query_extenders, query_delimiters); #else - LineReader lr(history_file, '\\', config().has("multiline") ? ';' : 0); + LineReader lr(history_file, config().has("multiline"), query_extenders, query_delimiters); #endif /// Enable bracketed-paste-mode only when multiquery is enabled and multiline is @@ -1178,7 +1181,7 @@ private: break; } - if (!receiveAndProcessPacket()) + if (!receiveAndProcessPacket(cancelled)) break; } @@ -1189,14 +1192,16 @@ private: /// Receive a part of the result, or progress info or an exception and process it. /// Returns true if one should continue receiving packets. - bool receiveAndProcessPacket() + /// Output of result is suppressed if query was cancelled. 
+ bool receiveAndProcessPacket(bool cancelled) { Packet packet = connection->receivePacket(); switch (packet.type) { case Protocol::Server::Data: - onData(packet.block); + if (!cancelled) + onData(packet.block); return true; case Protocol::Server::Progress: @@ -1208,11 +1213,13 @@ private: return true; case Protocol::Server::Totals: - onTotals(packet.block); + if (!cancelled) + onTotals(packet.block); return true; case Protocol::Server::Extremes: - onExtremes(packet.block); + if (!cancelled) + onExtremes(packet.block); return true; case Protocol::Server::Exception: @@ -1304,7 +1311,7 @@ private: while (packet_type && *packet_type == Protocol::Server::Log) { - receiveAndProcessPacket(); + receiveAndProcessPacket(false); packet_type = connection->checkPacket(); } } diff --git a/programs/server/config.xml b/programs/server/config.xml index e16af9d75d7..21605edeb36 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -191,7 +191,7 @@ /var/lib/clickhouse/access/ - + users.xml @@ -405,9 +405,6 @@ --> - - - + insert into main_table select number from numbers(1000000) + + drop table if exists main_table; + drop table if exists mv_1; + drop table if exists mv_2; + drop table if exists mv_3; + drop table if exists mv_4; + diff --git a/tests/queries/0_stateless/00098_k_union_all.reference b/tests/queries/0_stateless/00098_k_union_all.reference index 1b21a751afc..a6921e228af 100644 --- a/tests/queries/0_stateless/00098_k_union_all.reference +++ b/tests/queries/0_stateless/00098_k_union_all.reference @@ -1,18 +1,18 @@ -1 + 1 -1 -1 + 1 + 1 -1 -1 + 1 + 1 -1 -1 + 1 + 1 -1 -1 + 1 + 1 -1 -1 + 1 + 1 -1 + 1 diff --git a/tests/queries/0_stateless/00305_http_and_readonly.reference b/tests/queries/0_stateless/00305_http_and_readonly.reference index f148cbbb93b..4d0cb6bd6bc 100644 --- a/tests/queries/0_stateless/00305_http_and_readonly.reference +++ b/tests/queries/0_stateless/00305_http_and_readonly.reference @@ -1,19 +1,19 @@ -name value changed + name value changed -max_rows_to_read 10000 1 -readonly 0 0 -name value changed + max_rows_to_read 10000 1 + readonly 0 0 + name value changed -max_rows_to_read 10000 1 -readonly 2 1 -name value changed + max_rows_to_read 10000 1 + readonly 2 1 + name value changed -max_rows_to_read 10000 1 -readonly 1 1 -name value changed + max_rows_to_read 10000 1 + readonly 1 1 + name value changed -max_rows_to_read 10000 1 -readonly 2 1 + max_rows_to_read 10000 1 + readonly 2 1 Ok Ok 0 diff --git a/tests/queries/0_stateless/00405_pretty_formats.reference b/tests/queries/0_stateless/00405_pretty_formats.reference index 07dfd76339b..ef3184f2837 100644 --- a/tests/queries/0_stateless/00405_pretty_formats.reference +++ b/tests/queries/0_stateless/00405_pretty_formats.reference @@ -38,20 +38,20 @@ │ 8 │ 8 │ (8,'8') │ 2 │ │ 9 │ 9 │ (9,'9') │ ᴺᵁᴸᴸ │ └───────┴───────┴─────────┴─────────────────┘ -hello world tuple sometimes_nulls + hello world tuple sometimes_nulls - 0 0 (0,'0') ᴺᵁᴸᴸ - 1 1 (1,'1') 1 - 2 2 (2,'2') 2 - 3 3 (3,'3') ᴺᵁᴸᴸ - 4 4 (4,'4') 1 -hello world tuple sometimes_nulls + 0 0 (0,'0') ᴺᵁᴸᴸ + 1 1 (1,'1') 1 + 2 2 (2,'2') 2 + 3 3 (3,'3') ᴺᵁᴸᴸ + 4 4 (4,'4') 1 + hello world tuple sometimes_nulls - 5 5 (5,'5') 2 - 6 6 (6,'6') ᴺᵁᴸᴸ - 7 7 (7,'7') 1 - 8 8 (8,'8') 2 - 9 9 (9,'9') ᴺᵁᴸᴸ + 5 5 (5,'5') 2 + 6 6 (6,'6') ᴺᵁᴸᴸ + 7 7 (7,'7') 1 + 8 8 (8,'8') 2 + 9 9 (9,'9') ᴺᵁᴸᴸ ┌─hello─┬─world─┬─tuple───┬─sometimes_nulls─┐ │ 0 │ 0 │ (0,'0') │ ᴺᵁᴸᴸ │ │ 1 │ 1 │ (1,'1') │ 1 │ @@ -104,20 +104,20 @@ │ 8 │ 8 │ (8,'8') │ 2 │ │ 9 │ 9 │ (9,'9') │ ᴺᵁᴸᴸ │ 
 └───────┴───────┴─────────┴─────────────────┘
-hello world tuple sometimes_nulls
+ hello world tuple sometimes_nulls

- 0 0 (0,'0') ᴺᵁᴸᴸ
- 1 1 (1,'1') 1
- 2 2 (2,'2') 2
- 3 3 (3,'3') ᴺᵁᴸᴸ
- 4 4 (4,'4') 1
-hello world tuple sometimes_nulls
+ 0 0 (0,'0') ᴺᵁᴸᴸ
+ 1 1 (1,'1') 1
+ 2 2 (2,'2') 2
+ 3 3 (3,'3') ᴺᵁᴸᴸ
+ 4 4 (4,'4') 1
+ hello world tuple sometimes_nulls

- 5 5 (5,'5') 2
- 6 6 (6,'6') ᴺᵁᴸᴸ
- 7 7 (7,'7') 1
- 8 8 (8,'8') 2
- 9 9 (9,'9') ᴺᵁᴸᴸ
+ 5 5 (5,'5') 2
+ 6 6 (6,'6') ᴺᵁᴸᴸ
+ 7 7 (7,'7') 1
+ 8 8 (8,'8') 2
+ 9 9 (9,'9') ᴺᵁᴸᴸ
 ┏━━━━━━━┳━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
 ┃ hello ┃ world ┃ tuple   ┃ sometimes_nulls ┃
 ┡━━━━━━━╇━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
@@ -148,16 +148,16 @@ hello world tuple sometimes_nulls
 │     5 │     5 │ (5,'5') │               2 │
 └───────┴───────┴─────────┴─────────────────┘
 Showed first 6.
-hello world tuple sometimes_nulls
+ hello world tuple sometimes_nulls

- 0 0 (0,'0') ᴺᵁᴸᴸ
- 1 1 (1,'1') 1
- 2 2 (2,'2') 2
- 3 3 (3,'3') ᴺᵁᴸᴸ
- 4 4 (4,'4') 1
-hello world tuple sometimes_nulls
+ 0 0 (0,'0') ᴺᵁᴸᴸ
+ 1 1 (1,'1') 1
+ 2 2 (2,'2') 2
+ 3 3 (3,'3') ᴺᵁᴸᴸ
+ 4 4 (4,'4') 1
+ hello world tuple sometimes_nulls

- 5 5 (5,'5') 2
+ 5 5 (5,'5') 2

 Showed first 6.
 ┌─hello─┬─world─┬─tuple───┬─sometimes_nulls─┐
@@ -199,15 +199,15 @@ Showed first 6.
 │     5 │     5 │ (5,'5') │               2 │
 └───────┴───────┴─────────┴─────────────────┘
 Showed first 6.
-hello world tuple sometimes_nulls
+ hello world tuple sometimes_nulls

- 0 0 (0,'0') ᴺᵁᴸᴸ
- 1 1 (1,'1') 1
- 2 2 (2,'2') 2
- 3 3 (3,'3') ᴺᵁᴸᴸ
- 4 4 (4,'4') 1
-hello world tuple sometimes_nulls
+ 0 0 (0,'0') ᴺᵁᴸᴸ
+ 1 1 (1,'1') 1
+ 2 2 (2,'2') 2
+ 3 3 (3,'3') ᴺᵁᴸᴸ
+ 4 4 (4,'4') 1
+ hello world tuple sometimes_nulls

- 5 5 (5,'5') 2
+ 5 5 (5,'5') 2

 Showed first 6.
diff --git a/tests/queries/0_stateless/00476_pretty_formats_and_widths.reference b/tests/queries/0_stateless/00476_pretty_formats_and_widths.reference
index c97991ae0e9..eedaa4b5d8b 100644
--- a/tests/queries/0_stateless/00476_pretty_formats_and_widths.reference
+++ b/tests/queries/0_stateless/00476_pretty_formats_and_widths.reference
@@ -33,18 +33,18 @@
 │  100000000 │ 100000000  │
 │ 1000000000 │ 1000000000 │
 └────────────┴────────────┘
- x s
+ x s

- 1 1
- 10 10
- 100 100
- 1000 1000
- 10000 10000
- 100000 100000
- 1000000 1000000
- 10000000 10000000
- 100000000 100000000
-1000000000 1000000000
+ 1 1
+ 10 10
+ 100 100
+ 1000 1000
+ 10000 10000
+ 100000 100000
+ 1000000 1000000
+ 10000000 10000000
+ 100000000 100000000
+ 1000000000 1000000000
 ┌──────────x─┬─s──────────┐
 │          1 │ 1          │
 │         10 │ 10         │
diff --git a/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference b/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference
index 8638c0b707f..b873e8b848d 100644
--- a/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference
+++ b/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference
@@ -1,106 +1,106 @@
-s a b
+ s a b

-0 2000-01-01 00:00:00 2000-01-01 00:00:00
-0000 2000-01-01 00:00:00 2000-01-01 00:00:00
-00:00:00 2000-01-01 00:00:00 2000-01-01 00:00:00
-01:00:00 2000-01-01 01:00:00 2000-01-01 01:00:00
-02/01/17 010203 MSK 2017-01-01 22:02:03 2017-01-01 22:02:03
-02/01/17 010203 MSK+0100 2017-01-01 21:02:03 2017-01-01 21:02:03
-02/01/17 010203 UTC+0300 2017-01-01 22:02:03 2017-01-01 22:02:03
-02/01/17 010203Z 2017-01-02 01:02:03 2017-01-02 01:02:03
-02/01/1970 010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03
-02/01/70 010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03
-11 Feb 2018 06:40:50 +0300 2018-02-11 03:40:50 2018-02-11 03:40:50
-17 Apr 2 1:2:3 2000-04-17 01:02:03 2000-04-17 01:02:03
-19700102 01:00:00 1970-01-02 01:00:00 1970-01-02 01:00:00
-1970010201:00:00 2032-06-06 02:03:21 2032-06-06 02:03:21
-19700102010203 1970-01-02 01:02:03 1970-01-02 01:02:03
-19700102010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03
-1970/01/02 010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03
-20 2000-01-20 00:00:00 2000-01-20 00:00:00
-201 ᴺᵁᴸᴸ 0000-00-00 00:00:00
-20160101 2016-01-01 00:00:00 2016-01-01 00:00:00
-2016-01-01 2016-01-01 00:00:00 2016-01-01 00:00:00
-201601-01 2016-01-01 01:00:00 2016-01-01 01:00:00
-2016-01-01MSD 2015-12-31 20:00:00 2015-12-31 20:00:00
-2016-01-01 MSD 2015-12-31 20:00:00 2015-12-31 20:00:00
-201601-01 MSD 2016-01-01 04:00:00 2016-01-01 04:00:00
-2016-01-01UTC 2016-01-01 00:00:00 2016-01-01 00:00:00
-2016-01-01Z 2016-01-01 00:00:00 2016-01-01 00:00:00
-2017 2017-01-01 00:00:00 2017-01-01 00:00:00
-2017/01/00 2017-01-01 00:00:00 2017-01-01 00:00:00
-2017/01/00 MSD 2016-12-31 20:00:00 2016-12-31 20:00:00
-2017/01/00 MSD Jun 2017-05-31 20:00:00 2017-05-31 20:00:00
-2017/01/01 2017-01-01 00:00:00 2017-01-01 00:00:00
-201701 02 010203 UTC+0300 2017-01-01 22:02:03 2017-01-01 22:02:03
-2017-01-02 03:04:05 2017-01-02 03:04:05 2017-01-02 03:04:05
-2017-01-0203:04:05 ᴺᵁᴸᴸ 0000-00-00 00:00:00
-2017-01-02 03:04:05+0 2017-01-02 03:04:05 2017-01-02 03:04:05
-2017-01-02 03:04:05+00 2017-01-02 03:04:05 2017-01-02 03:04:05
-2017-01-02 03:04:05+0000 2017-01-02 03:04:05 2017-01-02 03:04:05
-2017-01-02 03:04:05 -0100 2017-01-02 04:04:05 2017-01-02 04:04:05
-2017-01-02 03:04:05+030 2017-01-02 02:34:05 2017-01-02 02:34:05
-2017-01-02 03:04:05+0300 2017-01-02 00:04:05 2017-01-02 00:04:05
-2017-01-02 03:04:05+1 2017-01-02 02:04:05 2017-01-02 02:04:05
-2017-01-02 03:04:05+300 2017-01-02 00:04:05 2017-01-02 00:04:05
-2017-01-02 03:04:05+900 2017-01-01 18:04:05 2017-01-01 18:04:05
-2017-01-02 03:04:05GMT 2017-01-02 03:04:05 2017-01-02 03:04:05
-2017-01-02 03:04:05 MSD 2017-01-01 23:04:05 2017-01-01 23:04:05
-2017-01-02 03:04:05 MSD Feb 2017-02-01 23:04:05 2017-02-01 23:04:05
-2017-01-02 03:04:05 MSD Jun 2017-06-01 23:04:05 2017-06-01 23:04:05
-2017-01-02 03:04:05 MSK 2017-01-02 00:04:05 2017-01-02 00:04:05
-2017-01-02T03:04:05 2017-01-02 03:04:05 2017-01-02 03:04:05
-2017-01-02T03:04:05+00 2017-01-02 03:04:05 2017-01-02 03:04:05
-2017-01-02T03:04:05 -0100 2017-01-02 04:04:05 2017-01-02 04:04:05
-2017-01-02T03:04:05-0100 2017-01-02 04:04:05 2017-01-02 04:04:05
-2017-01-02T03:04:05+0100 2017-01-02 02:04:05 2017-01-02 02:04:05
-2017-01-02T03:04:05Z 2017-01-02 03:04:05 2017-01-02 03:04:05
-2017-01 03:04:05 MSD Jun 2017-05-31 23:04:05 2017-05-31 23:04:05
-2017-01 03:04 MSD Jun 2017-05-31 23:04:00 2017-05-31 23:04:00
-2017/01/31 2017-01-31 00:00:00 2017-01-31 00:00:00
-2017/01/32 0000-00-00 00:00:00 0000-00-00 00:00:00
-2017-01 MSD Jun 2017-05-31 20:00:00 2017-05-31 20:00:00
-201701 MSD Jun 2017-05-31 20:00:00 2017-05-31 20:00:00
-2017 25 1:2:3 0000-00-00 00:00:00 0000-00-00 00:00:00
-2017 25 Apr 1:2:3 2017-04-01 01:02:03 2017-04-01 01:02:03
-2017 Apr 01 11:22:33 2017-04-01 11:22:33 2017-04-01 11:22:33
-2017 Apr 02 01/02/03 UTC+0300 ᴺᵁᴸᴸ 0000-00-00 00:00:00
-2017 Apr 02 010203 UTC+0300 2017-04-01 22:02:03 2017-04-01 22:02:03
-2017 Apr 02 01:2:3 UTC+0300 2017-04-01 22:02:03 2017-04-01 22:02:03
-2017 Apr 02 1:02:3 2017-04-02 01:02:03 2017-04-02 01:02:03
-2017 Apr 02 11:22:33 2017-04-02 11:22:33 2017-04-02 11:22:33
-2017 Apr 02 1:2:03 2017-04-02 01:02:03 2017-04-02 01:02:03
-2017 Apr 02 1:22:33 2017-04-02 01:22:33 2017-04-02 01:22:33
-2017 Apr 02 1:2:3 2017-04-02 01:02:03 2017-04-02 01:02:03
-2017 Apr 02 1:2:33 2017-04-02 01:02:33 2017-04-02 01:02:33
-2017 Apr 02 1:2:3 MSK 2017-04-01 22:02:03 2017-04-01 22:02:03
-2017 Apr 02 1:2:3 MSK 2017 2017-04-01 22:02:03 2017-04-01 22:02:03
-2017 Apr 02 1:2:3 MSK 2018 2017-04-01 22:02:03 2017-04-01 22:02:03
-2017 Apr 02 1:2:3 UTC+0000 2017-04-02 01:02:03 2017-04-02 01:02:03
-2017 Apr 02 1:2:3 UTC+0300 2017-04-01 22:02:03 2017-04-01 22:02:03
-2017 Apr 02 1:2:3 UTC+0400 2017-04-01 21:02:03 2017-04-01 21:02:03
-2017 Apr 2 1:2:3 2017-04-02 01:02:03 2017-04-02 01:02:03
-2017 Jan 02 010203 UTC+0300 2017-01-01 22:02:03 2017-01-01 22:02:03
-25 Apr 2017 01:02:03 2017-04-25 01:02:03 2017-04-25 01:02:03
-25 Apr 2017 1:2:3 2017-04-25 01:02:03 2017-04-25 01:02:03
-25 Jan 2017 1:2:3 2017-01-25 01:02:03 2017-01-25 01:02:03
-25 Jan 2017 1:2:3 MSK 2017-01-24 22:02:03 2017-01-24 22:02:03
-25 Jan 2017 1:2:3 PM 2017-01-25 13:02:03 2017-01-25 13:02:03
-25 Jan 2017 1:2:3Z 2017-01-25 01:02:03 2017-01-25 01:02:03
-25 Jan 2017 1:2:3 Z 2017-01-25 01:02:03 2017-01-25 01:02:03
-25 Jan 2017 1:2:3 Z +0300 2017-01-24 22:02:03 2017-01-24 22:02:03
-25 Jan 2017 1:2:3 Z+03:00 2017-01-24 22:02:03 2017-01-24 22:02:03
-25 Jan 2017 1:2:3 Z +0300 OM ᴺᵁᴸᴸ 0000-00-00 00:00:00
-25 Jan 2017 1:2:3 Z +03:00 PM 2017-01-25 10:02:03 2017-01-25 10:02:03
-25 Jan 2017 1:2:3 Z +0300 PM 2017-01-25 10:02:03 2017-01-25 10:02:03
-25 Jan 2017 1:2:3 Z+03:00 PM 2017-01-25 10:02:03 2017-01-25 10:02:03
-25 Jan 2017 1:2:3 Z +03:30 PM 2017-01-25 09:32:03 2017-01-25 09:32:03
-25 Jan 2017 1:2:3Z Mo ᴺᵁᴸᴸ 0000-00-00 00:00:00
-25 Jan 2017 1:2:3Z Mon 2017-01-25 01:02:03 2017-01-25 01:02:03
-25 Jan 2017 1:2:3Z Moo ᴺᵁᴸᴸ 0000-00-00 00:00:00
-25 Jan 2017 1:2:3 Z PM 2017-01-25 13:02:03 2017-01-25 13:02:03
-25 Jan 2017 1:2:3Z PM 2017-01-25 13:02:03 2017-01-25 13:02:03
-25 Jan 2017 1:2:3 Z PM +03:00 2017-01-25 10:02:03 2017-01-25 10:02:03
-Jun, 11 Feb 2018 06:40:50 +0300 2000-06-01 00:00:00 2000-06-01 00:00:00
-Sun 11 Feb 2018 06:40:50 +0300 2018-02-11 03:40:50 2018-02-11 03:40:50
-Sun, 11 Feb 2018 06:40:50 +0300 2018-02-11 03:40:50 2018-02-11 03:40:50
+ 0 2000-01-01 00:00:00 2000-01-01 00:00:00
+ 0000 2000-01-01 00:00:00 2000-01-01 00:00:00
+ 00:00:00 2000-01-01 00:00:00 2000-01-01 00:00:00
+ 01:00:00 2000-01-01 01:00:00 2000-01-01 01:00:00
+ 02/01/17 010203 MSK 2017-01-01 22:02:03 2017-01-01 22:02:03
+ 02/01/17 010203 MSK+0100 2017-01-01 21:02:03 2017-01-01 21:02:03
+ 02/01/17 010203 UTC+0300 2017-01-01 22:02:03 2017-01-01 22:02:03
+ 02/01/17 010203Z 2017-01-02 01:02:03 2017-01-02 01:02:03
+ 02/01/1970 010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03
+ 02/01/70 010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03
+ 11 Feb 2018 06:40:50 +0300 2018-02-11 03:40:50 2018-02-11 03:40:50
+ 17 Apr 2 1:2:3 2000-04-17 01:02:03 2000-04-17 01:02:03
+ 19700102 01:00:00 1970-01-02 01:00:00 1970-01-02 01:00:00
+ 1970010201:00:00 2032-06-06 02:03:21 2032-06-06 02:03:21
+ 19700102010203 1970-01-02 01:02:03 1970-01-02 01:02:03
+ 19700102010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03
+ 1970/01/02 010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03
+ 20 2000-01-20 00:00:00 2000-01-20 00:00:00
+ 201 ᴺᵁᴸᴸ 0000-00-00 00:00:00
+ 20160101 2016-01-01 00:00:00 2016-01-01 00:00:00
+ 2016-01-01 2016-01-01 00:00:00 2016-01-01 00:00:00
+ 201601-01 2016-01-01 01:00:00 2016-01-01 01:00:00
+ 2016-01-01MSD 2015-12-31 20:00:00 2015-12-31 20:00:00
+ 2016-01-01 MSD 2015-12-31 20:00:00 2015-12-31 20:00:00
+ 201601-01 MSD 2016-01-01 04:00:00 2016-01-01 04:00:00
+ 2016-01-01UTC 2016-01-01 00:00:00 2016-01-01 00:00:00
+ 2016-01-01Z 2016-01-01 00:00:00 2016-01-01 00:00:00
+ 2017 2017-01-01 00:00:00 2017-01-01 00:00:00
+ 2017/01/00 2017-01-01 00:00:00 2017-01-01 00:00:00
+ 2017/01/00 MSD 2016-12-31 20:00:00 2016-12-31 20:00:00
+ 2017/01/00 MSD Jun 2017-05-31 20:00:00 2017-05-31 20:00:00
+ 2017/01/01 2017-01-01 00:00:00 2017-01-01 00:00:00
+ 201701 02 010203 UTC+0300 2017-01-01 22:02:03 2017-01-01 22:02:03
+ 2017-01-02 03:04:05 2017-01-02 03:04:05 2017-01-02 03:04:05
+ 2017-01-0203:04:05 ᴺᵁᴸᴸ 0000-00-00 00:00:00
+ 2017-01-02 03:04:05+0 2017-01-02 03:04:05 2017-01-02 03:04:05
+ 2017-01-02 03:04:05+00 2017-01-02 03:04:05 2017-01-02 03:04:05
+ 2017-01-02 03:04:05+0000 2017-01-02 03:04:05 2017-01-02 03:04:05
+ 2017-01-02 03:04:05 -0100 2017-01-02 04:04:05 2017-01-02 04:04:05
+ 2017-01-02 03:04:05+030 2017-01-02 02:34:05 2017-01-02 02:34:05
+ 2017-01-02 03:04:05+0300 2017-01-02 00:04:05 2017-01-02 00:04:05
+ 2017-01-02 03:04:05+1 2017-01-02 02:04:05 2017-01-02 02:04:05
+ 2017-01-02 03:04:05+300 2017-01-02 00:04:05 2017-01-02 00:04:05
+ 2017-01-02 03:04:05+900 2017-01-01 18:04:05 2017-01-01 18:04:05
+ 2017-01-02 03:04:05GMT 2017-01-02 03:04:05 2017-01-02 03:04:05
+ 2017-01-02 03:04:05 MSD 2017-01-01 23:04:05 2017-01-01 23:04:05
+ 2017-01-02 03:04:05 MSD Feb 2017-02-01 23:04:05 2017-02-01 23:04:05
+ 2017-01-02 03:04:05 MSD Jun 2017-06-01 23:04:05 2017-06-01 23:04:05
+ 2017-01-02 03:04:05 MSK 2017-01-02 00:04:05 2017-01-02 00:04:05
+ 2017-01-02T03:04:05 2017-01-02 03:04:05 2017-01-02 03:04:05
+ 2017-01-02T03:04:05+00 2017-01-02 03:04:05 2017-01-02 03:04:05
+ 2017-01-02T03:04:05 -0100 2017-01-02 04:04:05 2017-01-02 04:04:05
+ 2017-01-02T03:04:05-0100 2017-01-02 04:04:05 2017-01-02 04:04:05
+ 2017-01-02T03:04:05+0100 2017-01-02 02:04:05 2017-01-02 02:04:05
+ 2017-01-02T03:04:05Z 2017-01-02 03:04:05 2017-01-02 03:04:05
+ 2017-01 03:04:05 MSD Jun 2017-05-31 23:04:05 2017-05-31 23:04:05
+ 2017-01 03:04 MSD Jun 2017-05-31 23:04:00 2017-05-31 23:04:00
+ 2017/01/31 2017-01-31 00:00:00 2017-01-31 00:00:00
+ 2017/01/32 0000-00-00 00:00:00 0000-00-00 00:00:00
+ 2017-01 MSD Jun 2017-05-31 20:00:00 2017-05-31 20:00:00
+ 201701 MSD Jun 2017-05-31 20:00:00 2017-05-31 20:00:00
+ 2017 25 1:2:3 0000-00-00 00:00:00 0000-00-00 00:00:00
+ 2017 25 Apr 1:2:3 2017-04-01 01:02:03 2017-04-01 01:02:03
+ 2017 Apr 01 11:22:33 2017-04-01 11:22:33 2017-04-01 11:22:33
+ 2017 Apr 02 01/02/03 UTC+0300 ᴺᵁᴸᴸ 0000-00-00 00:00:00
+ 2017 Apr 02 010203 UTC+0300 2017-04-01 22:02:03 2017-04-01 22:02:03
+ 2017 Apr 02 01:2:3 UTC+0300 2017-04-01 22:02:03 2017-04-01 22:02:03
+ 2017 Apr 02 1:02:3 2017-04-02 01:02:03 2017-04-02 01:02:03
+ 2017 Apr 02 11:22:33 2017-04-02 11:22:33 2017-04-02 11:22:33
+ 2017 Apr 02 1:2:03 2017-04-02 01:02:03 2017-04-02 01:02:03
+ 2017 Apr 02 1:22:33 2017-04-02 01:22:33 2017-04-02 01:22:33
+ 2017 Apr 02 1:2:3 2017-04-02 01:02:03 2017-04-02 01:02:03
+ 2017 Apr 02 1:2:33 2017-04-02 01:02:33 2017-04-02 01:02:33
+ 2017 Apr 02 1:2:3 MSK 2017-04-01 22:02:03 2017-04-01 22:02:03
+ 2017 Apr 02 1:2:3 MSK 2017 2017-04-01 22:02:03 2017-04-01 22:02:03
+ 2017 Apr 02 1:2:3 MSK 2018 2017-04-01 22:02:03 2017-04-01 22:02:03
+ 2017 Apr 02 1:2:3 UTC+0000 2017-04-02 01:02:03 2017-04-02 01:02:03
+ 2017 Apr 02 1:2:3 UTC+0300 2017-04-01 22:02:03 2017-04-01 22:02:03
+ 2017 Apr 02 1:2:3 UTC+0400 2017-04-01 21:02:03 2017-04-01 21:02:03
+ 2017 Apr 2 1:2:3 2017-04-02 01:02:03 2017-04-02 01:02:03
+ 2017 Jan 02 010203 UTC+0300 2017-01-01 22:02:03 2017-01-01 22:02:03
+ 25 Apr 2017 01:02:03 2017-04-25 01:02:03 2017-04-25 01:02:03
+ 25 Apr 2017 1:2:3 2017-04-25 01:02:03 2017-04-25 01:02:03
+ 25 Jan 2017 1:2:3 2017-01-25 01:02:03 2017-01-25 01:02:03
+ 25 Jan 2017 1:2:3 MSK 2017-01-24 22:02:03 2017-01-24 22:02:03
+ 25 Jan 2017 1:2:3 PM 2017-01-25 13:02:03 2017-01-25 13:02:03
+ 25 Jan 2017 1:2:3Z 2017-01-25 01:02:03 2017-01-25 01:02:03
+ 25 Jan 2017 1:2:3 Z 2017-01-25 01:02:03 2017-01-25 01:02:03
+ 25 Jan 2017 1:2:3 Z +0300 2017-01-24 22:02:03 2017-01-24 22:02:03
+ 25 Jan 2017 1:2:3 Z+03:00 2017-01-24 22:02:03 2017-01-24 22:02:03
+ 25 Jan 2017 1:2:3 Z +0300 OM ᴺᵁᴸᴸ 0000-00-00 00:00:00
+ 25 Jan 2017 1:2:3 Z +03:00 PM 2017-01-25 10:02:03 2017-01-25 10:02:03
+ 25 Jan 2017 1:2:3 Z +0300 PM 2017-01-25 10:02:03 2017-01-25 10:02:03
+ 25 Jan 2017 1:2:3 Z+03:00 PM 2017-01-25 10:02:03 2017-01-25 10:02:03
+ 25 Jan 2017 1:2:3 Z +03:30 PM 2017-01-25 09:32:03 2017-01-25 09:32:03
+ 25 Jan 2017 1:2:3Z Mo ᴺᵁᴸᴸ 0000-00-00 00:00:00
+ 25 Jan 2017 1:2:3Z Mon 2017-01-25 01:02:03 2017-01-25 01:02:03
+ 25 Jan 2017 1:2:3Z Moo ᴺᵁᴸᴸ 0000-00-00 00:00:00
+ 25 Jan 2017 1:2:3 Z PM 2017-01-25 13:02:03 2017-01-25 13:02:03
+ 25 Jan 2017 1:2:3Z PM 2017-01-25 13:02:03 2017-01-25 13:02:03
+ 25 Jan 2017 1:2:3 Z PM +03:00 2017-01-25 10:02:03 2017-01-25 10:02:03
+ Jun, 11 Feb 2018 06:40:50 +0300 2000-06-01 00:00:00 2000-06-01 00:00:00
+ Sun 11 Feb 2018 06:40:50 +0300 2018-02-11 03:40:50 2018-02-11 03:40:50
+ Sun, 11 Feb 2018 06:40:50 +0300 2018-02-11 03:40:50 2018-02-11 03:40:50
diff --git a/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference b/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference
index 921400838d0..3acae55e846 100644
--- a/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference
+++ b/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference
@@ -1,22 +1,22 @@
-s a b
+ s a b

-24.12.2018 2018-12-24 00:00:00 2018-12-24 00:00:00
-24-12-2018 2018-12-24 00:00:00 2018-12-24 00:00:00
-24.12.18 2018-12-24 00:00:00 2018-12-24 00:00:00
-24-12-18 2018-12-24 00:00:00 2018-12-24 00:00:00
-24-Dec-18 2018-12-24 00:00:00 2018-12-24 00:00:00
-24/DEC/18 2018-12-24 00:00:00 2018-12-24 00:00:00
-24/DEC/2018 2018-12-24 00:00:00 2018-12-24 00:00:00
-01-OCT-2015 2015-10-01 00:00:00 2015-10-01 00:00:00
-24.12.2018 2018-12-24 00:00:00 2018-12-24 00:00:00
-24-12-2018 2018-12-24 00:00:00 2018-12-24 00:00:00
-24.12.18 2018-12-24 00:00:00 2018-12-24 00:00:00
-24-12-18 2018-12-24 00:00:00 2018-12-24 00:00:00
-24-Dec-18 2018-12-24 00:00:00 2018-12-24 00:00:00
-24/DEC/18 2018-12-24 00:00:00 2018-12-24 00:00:00
-24/DEC/2018 2018-12-24 00:00:00 2018-12-24 00:00:00
-01-OCT-2015 2015-10-01 00:00:00 2015-10-01 00:00:00
-24.12.18 010203 2018-12-24 01:02:03 2018-12-24 01:02:03
-24.12.18 01:02:03 2018-12-24 01:02:03 2018-12-24 01:02:03
-24.DEC.18T01:02:03.000+0300 2018-12-23 22:02:03 2018-12-23 22:02:03
-01-September-2018 11:22 2018-09-01 11:22:00 2018-09-01 11:22:00
+ 24.12.2018 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24-12-2018 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24.12.18 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24-12-18 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24-Dec-18 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24/DEC/18 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24/DEC/2018 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 01-OCT-2015 2015-10-01 00:00:00 2015-10-01 00:00:00
+ 24.12.2018 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24-12-2018 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24.12.18 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24-12-18 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24-Dec-18 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24/DEC/18 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 24/DEC/2018 2018-12-24 00:00:00 2018-12-24 00:00:00
+ 01-OCT-2015 2015-10-01 00:00:00 2015-10-01 00:00:00
+ 24.12.18 010203 2018-12-24 01:02:03 2018-12-24 01:02:03
+ 24.12.18 01:02:03 2018-12-24 01:02:03 2018-12-24 01:02:03
+ 24.DEC.18T01:02:03.000+0300 2018-12-23 22:02:03 2018-12-23 22:02:03
+ 01-September-2018 11:22 2018-09-01 11:22:00 2018-09-01 11:22:00
diff --git a/tests/queries/0_stateless/01140_select_from_storage_join_fix.reference b/tests/queries/0_stateless/01140_select_from_storage_join_fix.reference
new file mode 100644
index 00000000000..101a270ad39
--- /dev/null
+++ b/tests/queries/0_stateless/01140_select_from_storage_join_fix.reference
@@ -0,0 +1,8 @@
+1 s 1 String String
+2 s 2 String String
+3 s 3 Nullable(String) String
+4 s 4 String Nullable(String)
+1 s 1 String String
+2 s 2 String String
+3 s 3 Nullable(String) String
+4 s 4 String Nullable(String)
diff --git a/tests/queries/0_stateless/01140_select_from_storage_join_fix.sql b/tests/queries/0_stateless/01140_select_from_storage_join_fix.sql
new file mode 100644
index 00000000000..4e64c90f56d
--- /dev/null
+++ b/tests/queries/0_stateless/01140_select_from_storage_join_fix.sql
@@ -0,0 +1,42 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+
+CREATE TABLE t1 (id String, name String, value UInt32)
+ENGINE = Join(ANY, LEFT, id)
+SETTINGS join_use_nulls = 1;
+
+CREATE TABLE t2 (id String, name String, value UInt32)
+ENGINE = Join(ANY, LEFT, id)
+SETTINGS join_use_nulls = 0;
+
+CREATE TABLE t3 (id Nullable(String), name String, value UInt32)
+ENGINE = Join(ANY, LEFT, id)
+SETTINGS join_use_nulls = 1;
+
+CREATE TABLE t4 (id String, name Nullable(String), value UInt32)
+ENGINE = Join(ANY, LEFT, id)
+SETTINGS join_use_nulls = 0;
+
+insert into t1 values('1', 's', 1);
+insert into t2 values('2', 's', 2);
+insert into t3 values('3', 's', 3);
+insert into t4 values('4', 's', 4);
+
+select *, toTypeName(id), toTypeName(name) from t1;
+select *, toTypeName(id), toTypeName(name) from t2;
+select *, toTypeName(id), toTypeName(name) from t3;
+select *, toTypeName(id), toTypeName(name) from t4;
+
+SET join_use_nulls = 1;
+
+select *, toTypeName(id), toTypeName(name) from t1;
+select *, toTypeName(id), toTypeName(name) from t2;
+select *, toTypeName(id), toTypeName(name) from t3;
+select *, toTypeName(id), toTypeName(name) from t4;
+
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE t4;
diff --git a/tests/queries/0_stateless/01141_join_get_negative.reference b/tests/queries/0_stateless/01141_join_get_negative.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01141_join_get_negative.sql b/tests/queries/0_stateless/01141_join_get_negative.sql
new file mode 100644
index 00000000000..e165d34e460
--- /dev/null
+++ b/tests/queries/0_stateless/01141_join_get_negative.sql
@@ -0,0 +1,11 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+
+CREATE TABLE t1 (`s` String, `x` Array(UInt8), `k` UInt64) ENGINE = Join(ANY, LEFT, k);
+CREATE TABLE t2 (`s` String, `x` Array(UInt8), `k` UInt64) ENGINE = Join(ANY, INNER, k);
+
+SELECT joinGet('t1', '', number) FROM numbers(2); -- { serverError 16 }
+SELECT joinGet('t2', 's', number) FROM numbers(2); -- { serverError 264 }
+
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference b/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference
index 04ae001675f..70d176d9b7a 100644
--- a/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference
+++ b/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference
@@ -1,3 +1,3 @@
-CREATE TABLE default.bloom_filter_idx_good\n(\n    `u64` UInt64, \n    `i32` Int32, \n    `f64` Float64, \n    `d` Decimal(10, 2), \n    `s` String, \n    `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n    `dt` Date, \n    INDEX bloom_filter_a i32 TYPE bloom_filter(0.) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
-CREATE TABLE default.bloom_filter_idx_good\n(\n    `u64` UInt64, \n    `i32` Int32, \n    `f64` Float64, \n    `d` Decimal(10, 2), \n    `s` String, \n    `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n    `dt` Date, \n    INDEX bloom_filter_a i32 TYPE bloom_filter(0.) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
-CREATE TABLE default.bloom_filter_idx_good\n(\n    `u64` UInt64, \n    `i32` Int32, \n    `f64` Float64, \n    `d` Decimal(10, 2), \n    `s` String, \n    `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n    `dt` Date, \n    INDEX bloom_filter_a i32 TYPE bloom_filter(1.) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE default.bloom_filter_idx_good\n(\n    `u64` UInt64, \n    `i32` Int32, \n    `f64` Float64, \n    `d` Decimal(10, 2), \n    `s` String, \n    `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n    `dt` Date, \n    INDEX bloom_filter_a i32 TYPE bloom_filter(0., 1.) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE default.bloom_filter_idx_good\n(\n    `u64` UInt64, \n    `i32` Int32, \n    `f64` Float64, \n    `d` Decimal(10, 2), \n    `s` String, \n    `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n    `dt` Date, \n    INDEX bloom_filter_a i32 TYPE bloom_filter(-0.1) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE default.bloom_filter_idx_good\n(\n    `u64` UInt64, \n    `i32` Int32, \n    `f64` Float64, \n    `d` Decimal(10, 2), \n    `s` String, \n    `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n    `dt` Date, \n    INDEX bloom_filter_a i32 TYPE bloom_filter(1.01) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
diff --git a/tests/queries/0_stateless/01271_optimize_arithmetic_operations_in_aggr_func.reference b/tests/queries/0_stateless/01271_optimize_arithmetic_operations_in_aggr_func.reference
new file mode 100644
index 00000000000..2fe897e3819
--- /dev/null
+++ b/tests/queries/0_stateless/01271_optimize_arithmetic_operations_in_aggr_func.reference
@@ -0,0 +1,6 @@
+-150000044999994
+6931471.112452272
+24580677
+-150000044999994
+6931471.112452272
+24580677
diff --git a/tests/queries/0_stateless/01271_optimize_arithmetic_operations_in_aggr_func.sql b/tests/queries/0_stateless/01271_optimize_arithmetic_operations_in_aggr_func.sql
new file mode 100644
index 00000000000..3550ed64e8c
--- /dev/null
+++ b/tests/queries/0_stateless/01271_optimize_arithmetic_operations_in_aggr_func.sql
@@ -0,0 +1,9 @@
+set optimize_arithmetic_operations_in_agr_func = 1;
+SELECT sum(number * -3) + min(2 * number * -3) - max(-1 * -2 * number * -3) FROM numbers(10000000);
+SELECT max(log(2) * number) FROM numbers(10000000);
+SELECT round(max(log(2) * 3 * sin(0.3) * number * 4)) FROM numbers(10000000);
+
+set optimize_arithmetic_operations_in_agr_func = 0;
+SELECT sum(number * -3) + min(2 * number * -3) - max(-1 * -2 * number * -3) FROM numbers(10000000);
+SELECT max(log(2) * number) FROM numbers(10000000);
+SELECT round(max(log(2) * 3 * sin(0.3) * number * 4)) FROM numbers(10000000);
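The test above relies on optimize_arithmetic_operations_in_agr_func pulling a constant factor out of an aggregate, e.g. rewriting max(log(2) * number) into log(2) * max(number), so the multiplication happens once on the final value instead of once per row. A toy C++ sketch of that equivalence (illustrative only — this is not ClickHouse's actual AST pass, and hoisting a factor out of max() is only valid when the factor is non-negative):

    #include <algorithm>
    #include <cassert>
    #include <cmath>
    #include <limits>
    #include <vector>

    /// max(c * x) == c * max(x) for c >= 0, so the multiplication can be
    /// hoisted out of the aggregate and performed once on the final value.
    static double max_of_scaled(const std::vector<double> & xs, double c)
    {
        assert(c >= 0 && !xs.empty());
        return c * *std::max_element(xs.begin(), xs.end());
    }

    int main()
    {
        const std::vector<double> xs{1, 2, 3, 4, 5};
        const double c = std::log(2.0);

        double per_row = std::numeric_limits<double>::lowest();
        for (double x : xs)
            per_row = std::max(per_row, c * x);  /// one multiply per row

        assert(std::abs(per_row - max_of_scaled(xs, c)) < 1e-12);
        return 0;
    }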
diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by.sql b/tests/queries/0_stateless/01280_ttl_where_group_by.sh
old mode 100644
new mode 100755
similarity index 51%
rename from tests/queries/0_stateless/01280_ttl_where_group_by.sql
rename to tests/queries/0_stateless/01280_ttl_where_group_by.sh
index e61716cfe81..d0e4c5b3099
--- a/tests/queries/0_stateless/01280_ttl_where_group_by.sql
+++ b/tests/queries/0_stateless/01280_ttl_where_group_by.sh
@@ -1,5 +1,19 @@
-drop table if exists ttl_01280_1;
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
+
+$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_1"

+function optimize()
+{
+    for i in {0..20}; do
+        $CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE $1 FINAL SETTINGS optimize_throw_if_noop=1" 2>/dev/null && break
+        sleep 0.3
+    done
+}
+
+$CLICKHOUSE_CLIENT -n --query "
 create table ttl_01280_1 (a Int, b Int, x Int, y Int, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second delete where x % 10 == 0 and y > 5;
 insert into ttl_01280_1 values (1, 1, 0, 4, now() + 10);
 insert into ttl_01280_1 values (1, 1, 10, 6, now());
@@ -7,13 +21,15 @@ insert into ttl_01280_1 values (1, 2, 3, 7, now());
 insert into ttl_01280_1 values (1, 3, 0, 5, now());
 insert into ttl_01280_1 values (2, 1, 20, 1, now());
 insert into ttl_01280_1 values (2, 1, 0, 1, now());
-insert into ttl_01280_1 values (3, 1, 0, 8, now());
-select sleep(1.1) format Null;
-optimize table ttl_01280_1 final;
-select a, b, x, y from ttl_01280_1;
+insert into ttl_01280_1 values (3, 1, 0, 8, now());"

-drop table if exists ttl_01280_2;
+sleep 2
+optimize "ttl_01280_1"
+$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_1"
+$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_2"

+$CLICKHOUSE_CLIENT -n --query "
 create table ttl_01280_2 (a Int, b Int, x Array(Int32), y Double, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b set x = minForEach(x), y = sum(y), d = max(d);
 insert into ttl_01280_2 values (1, 1, array(0, 2, 3), 4, now() + 10);
 insert into ttl_01280_2 values (1, 1, array(5, 4, 3), 6, now());
@@ -23,13 +39,15 @@ insert into ttl_01280_2 values (1, 3, array(1, 1, 2, 1), 9, now());
 insert into ttl_01280_2 values (1, 3, array(3, 2, 1, 0), 3, now());
 insert into ttl_01280_2 values (2, 1, array(3, 3, 3), 7, now());
 insert into ttl_01280_2 values (2, 1, array(11, 1, 0, 3), 1, now());
-insert into ttl_01280_2 values (3, 1, array(2, 4, 5), 8, now());
-select sleep(1.1) format Null;
-optimize table ttl_01280_2 final;
-select a, b, x, y from ttl_01280_2;
+insert into ttl_01280_2 values (3, 1, array(2, 4, 5), 8, now());"

-drop table if exists ttl_01280_3;
+sleep 2
+optimize "ttl_01280_2"
+$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_2"
+$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_3"

+$CLICKHOUSE_CLIENT -n --query "
 create table ttl_01280_3 (a Int, b Int, x Int64, y Int, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a set x = argMax(x, d), y = argMax(y, d), d = max(d);
 insert into ttl_01280_3 values (1, 1, 0, 4, now() + 10);
 insert into ttl_01280_3 values (1, 1, 10, 6, now() + 1);
@@ -39,49 +57,47 @@ insert into ttl_01280_3 values (2, 1, 20, 1, now());
 insert into ttl_01280_3 values (2, 1, 0, 3, now() + 1);
 insert into ttl_01280_3 values (3, 1, 0, 3, now());
 insert into ttl_01280_3 values (3, 2, 8, 2, now() + 1);
-insert into ttl_01280_3 values (3, 5, 5, 8, now());
-select sleep(2.1) format Null;
-optimize table ttl_01280_3 final;
-select a, b, x, y from ttl_01280_3;
+insert into ttl_01280_3 values (3, 5, 5, 8, now());"

-drop table if exists ttl_01280_4;
+sleep 2
+optimize "ttl_01280_3"
+$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_3"
+$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_4"

+$CLICKHOUSE_CLIENT -n --query "
 create table ttl_01280_4 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), -(a + b)) ttl d + interval 1 second group by toDate(d) set x = sum(x), y = max(y);
 insert into ttl_01280_4 values (1, 1, 0, 4, now() + 10);
 insert into ttl_01280_4 values (10, 2, 3, 3, now());
 insert into ttl_01280_4 values (2, 10, 1, 7, now());
 insert into ttl_01280_4 values (3, 3, 5, 2, now());
-insert into ttl_01280_4 values (1, 5, 4, 9, now());
-select sleep(1.1) format Null;
-optimize table ttl_01280_4 final;
-select a, b, x, y from ttl_01280_4;
+insert into ttl_01280_4 values (1, 5, 4, 9, now())"

-drop table if exists ttl_01280_5;
+sleep 2
+optimize "ttl_01280_4"
+$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_4"

-create table ttl_01280_5 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a set x = sum(x);
+$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_5"

+$CLICKHOUSE_CLIENT -n --query "create table ttl_01280_5 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a set x = sum(x);
 insert into ttl_01280_5 values (1, 2, 3, 5, now());
 insert into ttl_01280_5 values (2, 10, 1, 5, now());
 insert into ttl_01280_5 values (2, 3, 5, 5, now());
-insert into ttl_01280_5 values (1, 5, 4, 5, now());
-select sleep(1.1) format Null;
-optimize table ttl_01280_5 final;
-select a, b, x, y from ttl_01280_5;
+insert into ttl_01280_5 values (1, 5, 4, 5, now());"

-drop table if exists ttl_01280_6;
+sleep 2
+optimize "ttl_01280_5"
+$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_5"
+$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_6"

+$CLICKHOUSE_CLIENT -n --query "
 create table ttl_01280_6 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a;
 insert into ttl_01280_6 values (1, 2, 3, 5, now());
 insert into ttl_01280_6 values (2, 10, 3, 5, now());
 insert into ttl_01280_6 values (2, 3, 3, 5, now());
-insert into ttl_01280_6 values (1, 5, 3, 5, now());
-select sleep(1.1) format Null;
-optimize table ttl_01280_6 final;
-select a, b, x, y from ttl_01280_6;
+insert into ttl_01280_6 values (1, 5, 3, 5, now())"

-create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by x set y = max(y); -- { serverError 450}
-create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by b set y = max(y); -- { serverError 450}
-create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b, x set y = max(y); -- { serverError 450}
-create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a set b = min(b), y = max(y); -- { serverError 450}
-create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b set y = max(y), y = max(y); -- { serverError 450}
-create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a) ttl d + interval 1 second group by toDate(d), a set d = min(d), b = max(b); -- { serverError 450}
-create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (d, -(a + b)) ttl d + interval 1 second group by d, -(a + b) set a = sum(a), b = min(b); -- { serverError 450}
+sleep 2
+optimize "ttl_01280_6"
+$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_6"
diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by_negative.reference b/tests/queries/0_stateless/01280_ttl_where_group_by_negative.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by_negative.sql b/tests/queries/0_stateless/01280_ttl_where_group_by_negative.sql
new file mode 100644
index 00000000000..f2c26a3d495
--- /dev/null
+++ b/tests/queries/0_stateless/01280_ttl_where_group_by_negative.sql
@@ -0,0 +1,7 @@
+create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by x set y = max(y); -- { serverError 450}
+create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by b set y = max(y); -- { serverError 450}
+create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b, x set y = max(y); -- { serverError 450}
+create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a set b = min(b), y = max(y); -- { serverError 450}
+create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b set y = max(y), y = max(y); -- { serverError 450}
+create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a) ttl d + interval 1 second group by toDate(d), a set d = min(d), b = max(b); -- { serverError 450}
+create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (d, -(a + b)) ttl d + interval 1 second group by d, -(a + b) set a = sum(a), b = min(b); -- { serverError 450}
diff --git a/tests/queries/0_stateless/01284_fuzz_bits.reference b/tests/queries/0_stateless/01284_fuzz_bits.reference
new file mode 100644
index 00000000000..d35f590ed3d
--- /dev/null
+++ b/tests/queries/0_stateless/01284_fuzz_bits.reference
@@ -0,0 +1,5 @@
+
+100
+String
+FixedString(10)
+1
diff --git a/tests/queries/0_stateless/01284_fuzz_bits.sql b/tests/queries/0_stateless/01284_fuzz_bits.sql
new file mode 100644
index 00000000000..24da23787cb
--- /dev/null
+++ b/tests/queries/0_stateless/01284_fuzz_bits.sql
@@ -0,0 +1,31 @@
+SELECT fuzzBits(toString('string'), 1); -- { serverError 43 }
+SELECT fuzzBits('string', -1.0); -- { serverError 69 }
+SELECT fuzzBits('', 0.3);
+SELECT length(fuzzBits(randomString(100), 0.5));
+SELECT toTypeName(fuzzBits(randomString(100), 0.5));
+SELECT toTypeName(fuzzBits(toFixedString('abacaba', 10), 0.9));
+
+SELECT
+    (
+        0.29 * 8 * 10000 < sum
+        AND sum < 0.31 * 8 * 10000
+    ) AS res
+FROM
+    (
+        SELECT
+            arraySum(
+                id -> bitCount(
+                    reinterpretAsUInt8(
+                        substring(
+                            fuzzBits(
+                                arrayStringConcat(arrayMap(x -> toString('\0'), range(10000))),
+                                0.3
+                            ),
+                            id + 1,
+                            1
+                        )
+                    )
+                ),
+                range(10000)
+            ) as sum
+    )
diff --git a/tests/queries/0_stateless/01292_optimize_data_skip_idx_order_by_expr.reference b/tests/queries/0_stateless/01292_optimize_data_skip_idx_order_by_expr.reference
new file mode 100644
index 00000000000..d00491fd7e5
--- /dev/null
+++ b/tests/queries/0_stateless/01292_optimize_data_skip_idx_order_by_expr.reference
@@ -0,0 +1 @@
+1
diff --git a/tests/queries/0_stateless/01292_optimize_data_skip_idx_order_by_expr.sql b/tests/queries/0_stateless/01292_optimize_data_skip_idx_order_by_expr.sql
new file mode 100644
index 00000000000..cf823be1f79
--- /dev/null
+++ b/tests/queries/0_stateless/01292_optimize_data_skip_idx_order_by_expr.sql
@@ -0,0 +1,14 @@
+drop table if exists data_01292;
+
+create table data_01292 (
+    key Int,
+    index key_idx (key) type minmax granularity 1
+) Engine=MergeTree() ORDER BY (key+0);
+
+insert into data_01292 values (1);
+
+optimize table data_01292 final;
+
+select * from data_01292 where key > 0;
+
+drop table if exists data_01292;
diff --git a/tests/queries/0_stateless/01292_quantile_array_bug.reference b/tests/queries/0_stateless/01292_quantile_array_bug.reference
new file mode 100644
index 00000000000..36abe23e89c
--- /dev/null
+++ b/tests/queries/0_stateless/01292_quantile_array_bug.reference
@@ -0,0 +1 @@
+[7]
diff --git a/tests/queries/0_stateless/01292_quantile_array_bug.sql b/tests/queries/0_stateless/01292_quantile_array_bug.sql
new file mode 100644
index 00000000000..ecb1028d569
--- /dev/null
+++ b/tests/queries/0_stateless/01292_quantile_array_bug.sql
@@ -0,0 +1 @@
+select quantilesExactWeightedArray(0.5)(range(10), range(10))
diff --git a/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.reference b/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.sh b/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.sh
new file mode 100755
index 00000000000..4d4292472da
--- /dev/null
+++ b/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/expect -f
+
+log_user 0
+set timeout 60
+spawn clickhouse-client
+match_max 100000
+
+expect ":) "
+
+send -- "SELECT 1\r"
+expect "│ 1 │"
+expect ":) "
+
+send -- "SELECT 1\\G\r"
+expect "Row 1:"
+expect "1: 1"
+expect ":) "
+
+send -- "SELECT 1\\\r"
+expect ":-] "
+send -- ", 2\r"
+expect "│ 1 │ 2 │"
+expect ":) "
+
+send -- "SELECT 1\\\r"
+expect ":-] "
+send -- ", 2\\G\r"
+expect "Row 1:"
+expect "1: 1"
+expect "2: 2"
+expect ":) "
+
+send -- ""
+expect eof
+
+set timeout 60
+spawn clickhouse-client --multiline
+match_max 100000
+
+expect ":) "
+
+send -- "SELECT 1;\r"
+expect "│ 1 │"
+expect ":) "
+
+send -- "SELECT 1\\G\r"
+expect "Row 1:"
+expect "1: 1"
+expect ":) "
+
+send -- "SELECT 1; \r"
+expect "│ 1 │"
+expect ":) "
+
+send -- "SELECT 1\\G \r"
+expect "Row 1:"
+expect "1: 1"
+expect ":) "
+
+send -- "SELECT 1\r"
+expect ":-] "
+send -- ";\r"
+expect "│ 1 │"
+expect ":) "
+
+send -- "SELECT 1\r"
+expect ":-] "
+send -- "\\G\r"
+expect "Row 1:"
+expect "1: 1"
+expect ":) "
+
+send -- "SELECT 1\r"
+expect ":-] "
+send -- ", 2;\r"
+expect "│ 1 │ 2 │"
+expect ":) "
+
+send -- "SELECT 1\r"
+expect ":-] "
+send -- ", 2\\G\r"
+expect "Row 1:"
+expect "1: 1"
+expect "2: 2"
+expect ":) "
+
+send -- ""
+expect eof
diff --git a/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.reference b/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.sh b/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.sh
new file mode 100755
index 00000000000..3b98caeff2e
--- /dev/null
+++ b/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/expect -f
+
+log_user 0
+set timeout 60
+spawn clickhouse-client
+match_max 100000
+
+expect ":) "
+
+send -- "SELECT 1\r"
+expect "│ 1 │"
+expect ":) "
+
+send -- "SELECT 1\\G\r"
+expect "Row 1:"
+expect "1: 1"
+expect ":) "
+
+send -- "SELECT 1\\\r"
+expect ":-] "
+send -- ", 2\r"
+expect "│ 1 │ 2 │"
+expect ":) "
+
+send -- "SELECT 1\\\r"
+expect ":-] "
+send -- ", 2\\G\r"
+expect "Row 1:"
+expect "1: 1"
+expect "2: 2"
+expect ":) "
+
+send -- ""
+expect eof
diff --git a/tests/queries/0_stateless/01293_external_sorting_limit_bug.reference b/tests/queries/0_stateless/01293_external_sorting_limit_bug.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01293_external_sorting_limit_bug.sql b/tests/queries/0_stateless/01293_external_sorting_limit_bug.sql
new file mode 100644
index 00000000000..570ec4b8c3c
--- /dev/null
+++ b/tests/queries/0_stateless/01293_external_sorting_limit_bug.sql
@@ -0,0 +1 @@
+SELECT number FROM (SELECT number FROM system.numbers LIMIT 999990) ORDER BY number ASC LIMIT 100, 65535 SETTINGS max_bytes_before_external_sort = 1000000 format Null
diff --git a/tests/queries/0_stateless/01293_optimize_final_force.reference b/tests/queries/0_stateless/01293_optimize_final_force.reference
new file mode 100644
index 00000000000..b0b9422adf0
--- /dev/null
+++ b/tests/queries/0_stateless/01293_optimize_final_force.reference
@@ -0,0 +1,100 @@
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
+55 0
diff --git a/tests/queries/0_stateless/01293_optimize_final_force.sh b/tests/queries/0_stateless/01293_optimize_final_force.sh
new file mode 100755
index 00000000000..50cba1e7534
--- /dev/null
+++ b/tests/queries/0_stateless/01293_optimize_final_force.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
+
+for i in {1..100}; do $CLICKHOUSE_CLIENT --multiquery --query "
+DROP TABLE IF EXISTS mt;
+CREATE TABLE mt (x UInt8, k UInt8 DEFAULT 0) ENGINE = SummingMergeTree ORDER BY k;
+
+INSERT INTO mt (x) VALUES (1);
+INSERT INTO mt (x) VALUES (2);
+INSERT INTO mt (x) VALUES (3);
+INSERT INTO mt (x) VALUES (4);
+INSERT INTO mt (x) VALUES (5);
+INSERT INTO mt (x) VALUES (6);
+INSERT INTO mt (x) VALUES (7);
+INSERT INTO mt (x) VALUES (8);
+INSERT INTO mt (x) VALUES (9);
+INSERT INTO mt (x) VALUES (10);
+
+OPTIMIZE TABLE mt FINAL;
+SELECT * FROM mt;
+
+DROP TABLE mt;
+"; done
diff --git a/tests/queries/0_stateless/01293_pretty_max_value_width.reference b/tests/queries/0_stateless/01293_pretty_max_value_width.reference
new file mode 100644
index 00000000000..9887169f7af
--- /dev/null
+++ b/tests/queries/0_stateless/01293_pretty_max_value_width.reference
@@ -0,0 +1,114 @@
+┏━━━━━━━━┳━━━━━┓
+┃ x      ┃ y   ┃
+┡━━━━━━━━╇━━━━━┩
+│ привет │ мир │
+└────────┴─────┘
+┏━━━━━━━┳━━━━━┓
+┃ x     ┃ y   ┃
+┡━━━━━━━╇━━━━━┩
+│ приве⋯│ мир │
+└───────┴─────┘
+┌─x─────┬─y───┐
+│ приве⋯│ мир │
+└───────┴─────┘
+ x y
+
+ приве⋯ мир
+┏━━━━━━━┳━━━━━━━┓
+┃ x     ┃ y     ┃
+┡━━━━━━━╇━━━━━━━┩
+│ приве⋯│ мир   │
+├───────┼───────┤
+│ мир   │ приве⋯│
+└───────┴───────┘
+┌─x─────┬─y─────┐
+│ приве⋯│ мир   │
+│ мир   │ приве⋯│
+└───────┴───────┘
+ x y
+
+ приве⋯ мир
+ мир приве⋯
+┏━━━━━━━━┳━━━━━┓
+┃ x      ┃ y   ┃
+┡━━━━━━━━╇━━━━━┩
+│ привет │ мир │
+└────────┴─────┘
+┌─x──────┬─y───┐
+│ привет │ мир │
+└────────┴─────┘
+ x y
+
+ привет мир
+┏━━━━━━━━┳━━━━━━━━┓
+┃ x      ┃ y      ┃
+┡━━━━━━━━╇━━━━━━━━┩
+│ привет │ мир    │
+├────────┼────────┤
+│ мир    │ привет │
+└────────┴────────┘
+┌─x──────┬─y──────┐
+│ привет │ мир    │
+│ мир    │ привет │
+└────────┴────────┘
+ x y
+
+ привет мир
+ мир привет
+┏━━━┳━━━┓
+┃ x ┃ y ┃
+┡━━━╇━━━┩
+│ п⋯│ м⋯│
+└───┴───┘
+┌─x─┬─y─┐
+│ п⋯│ м⋯│
+└───┴───┘
+ x y
+
+ п⋯ м⋯
+┏━━━┳━━━┓
+┃ x ┃ y ┃
+┡━━━╇━━━┩
+│ п⋯│ м⋯│
+├───┼───┤
+│ м⋯│ п⋯│
+└───┴───┘
+┌─x─┬─y─┐
+│ п⋯│ м⋯│
+│ м⋯│ п⋯│
+└───┴───┘
+ x y
+
+ п⋯ м⋯
+ м⋯ п⋯
+┏━━━┳━━━┓
+┃ x ┃ y ┃
+┡━━━╇━━━┩
+│ ⋯ │ ⋯ │
+└───┴───┘
+┌─x─┬─y─┐
+│ ⋯ │ ⋯ │
+└───┴───┘
+ x y
+
+ ⋯ ⋯
+┏━━━┳━━━┓
+┃ x ┃ y ┃
+┡━━━╇━━━┩
+│ ⋯ │ ⋯ │
+├───┼───┤
+│ ⋯ │ ⋯ │
+└───┴───┘
+┌─x─┬─y─┐
+│ ⋯ │ ⋯ │
+│ ⋯ │ ⋯ │
+└───┴───┘
+ x y
+
+ ⋯ ⋯
+ ⋯ ⋯
+┏━━━┳━━━┓
+┃ x ┃ y ┃
+┡━━━╇━━━┩
+│ ⋯ │ ⋯ │
+└───┴───┘
diff --git a/tests/queries/0_stateless/01293_pretty_max_value_width.sql b/tests/queries/0_stateless/01293_pretty_max_value_width.sql
new file mode 100644
index 00000000000..992aec06f0a
--- /dev/null
+++ b/tests/queries/0_stateless/01293_pretty_max_value_width.sql
@@ -0,0 +1,43 @@
+SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty;
+
+SET output_format_pretty_max_value_width = 5;
+SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty;
+SELECT 'привет' AS x, 'мир' AS y FORMAT PrettyCompact;
+SELECT 'привет' AS x, 'мир' AS y FORMAT PrettySpace;
+
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT Pretty;
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettyCompact;
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettySpace;
+
+SET output_format_pretty_max_value_width = 6;
+
+SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty;
+SELECT 'привет' AS x, 'мир' AS y FORMAT PrettyCompact;
+SELECT 'привет' AS x, 'мир' AS y FORMAT PrettySpace;
+
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT Pretty;
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettyCompact;
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettySpace;
+
+SET output_format_pretty_max_value_width = 1;
+
+SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty;
+SELECT 'привет' AS x, 'мир' AS y FORMAT PrettyCompact;
+SELECT 'привет' AS x, 'мир' AS y FORMAT PrettySpace;
+
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT Pretty;
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettyCompact;
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettySpace;
+
+SET output_format_pretty_max_value_width = 0;
+
+SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty;
+SELECT 'привет' AS x, 'мир' AS y FORMAT PrettyCompact;
+SELECT 'привет' AS x, 'мир' AS y FORMAT PrettySpace;
+
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT Pretty;
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettyCompact;
+SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettySpace;
+
+SET output_format_pretty_color = 0;
+SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty;
diff --git a/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables.reference b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables.reference
new file mode 100644
index 00000000000..678f9a34e6f
--- /dev/null
+++ b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables.reference
@@ -0,0 +1 @@
+Test OK
diff --git a/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables.sh b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables.sh
new file mode 100755
index 00000000000..cc0c7a52456
--- /dev/null
+++ b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables.sh
@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
+
+export CURR_DATABASE="test_lazy_01294_concurrent_${CLICKHOUSE_DATABASE}"
+
+
+function recreate_lazy_func1()
+{
+    $CLICKHOUSE_CLIENT -q "
+        CREATE TABLE $CURR_DATABASE.log (a UInt64, b UInt64) ENGINE = Log;
+    ";
+
+    while true; do
+        $CLICKHOUSE_CLIENT -q "
+            DETACH TABLE $CURR_DATABASE.log;
+        ";
+
+        $CLICKHOUSE_CLIENT -q "
+            ATTACH TABLE $CURR_DATABASE.log;
+        ";
+    done
+}
+
+function recreate_lazy_func2()
+{
+    while true; do
+        $CLICKHOUSE_CLIENT -q "
+            CREATE TABLE $CURR_DATABASE.tlog (a UInt64, b UInt64) ENGINE = TinyLog;
+        ";
+
+        $CLICKHOUSE_CLIENT -q "
+            DROP TABLE $CURR_DATABASE.tlog;
+        ";
+    done
+}
+
+function recreate_lazy_func3()
+{
+    $CLICKHOUSE_CLIENT -q "
+        CREATE TABLE $CURR_DATABASE.slog (a UInt64, b UInt64) ENGINE = StripeLog;
+    ";
+
+    while true; do
+        $CLICKHOUSE_CLIENT -q "
+            ATTACH TABLE $CURR_DATABASE.slog;
+        ";
+
+        $CLICKHOUSE_CLIENT -q "
+            DETACH TABLE $CURR_DATABASE.slog;
+        ";
+    done
+}
+
+function recreate_lazy_func4()
+{
+    while true; do
+        $CLICKHOUSE_CLIENT -q "
+            CREATE TABLE $CURR_DATABASE.tlog2 (a UInt64, b UInt64) ENGINE = TinyLog;
+        ";
+
+        $CLICKHOUSE_CLIENT -q "
+            DROP TABLE $CURR_DATABASE.tlog2;
+        ";
+    done
+}
+
+function test_func()
+{
+    while true; do
+        $CLICKHOUSE_CLIENT -q "SYSTEM STOP TTL MERGES";
+    done
+}
+
+
+export -f recreate_lazy_func1;
+export -f recreate_lazy_func2;
+export -f recreate_lazy_func3;
+export -f recreate_lazy_func4;
+export -f test_func;
+
+
+${CLICKHOUSE_CLIENT} -n -q "
+    DROP DATABASE IF EXISTS $CURR_DATABASE;
+    CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1);
+"
+
+
+TIMEOUT=30
+
+timeout $TIMEOUT bash -c recreate_lazy_func1 2> /dev/null &
+timeout $TIMEOUT bash -c recreate_lazy_func2 2> /dev/null &
+timeout $TIMEOUT bash -c recreate_lazy_func3 2> /dev/null &
+timeout $TIMEOUT bash -c recreate_lazy_func4 2> /dev/null &
+timeout $TIMEOUT bash -c test_func 2> /dev/null &
+
+wait
+sleep 1
+
+${CLICKHOUSE_CLIENT} -n -q "
+    DROP TABLE IF EXISTS $CURR_DATABASE.log;
+    DROP TABLE IF EXISTS $CURR_DATABASE.slog;
+    DROP TABLE IF EXISTS $CURR_DATABASE.tlog;
+    DROP TABLE IF EXISTS $CURR_DATABASE.tlog2;
+"
+
+$CLICKHOUSE_CLIENT -q "SYSTEM START TTL MERGES";
+echo "Test OK"
diff --git a/utils/zookeeper-cli/zookeeper-cli.cpp b/utils/zookeeper-cli/zookeeper-cli.cpp
index 6fd7b39ab68..17a8c9f0da8 100644
--- a/utils/zookeeper-cli/zookeeper-cli.cpp
+++ b/utils/zookeeper-cli/zookeeper-cli.cpp
@@ -70,7 +70,7 @@ int main(int argc, char ** argv)
     Poco::Logger::root().setLevel("trace");

     zkutil::ZooKeeper zk(argv[1]);
-    LineReader lr({}, '\\');
+    LineReader lr({}, false, {"\\"}, {});

     do
     {
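The new constructor can be exercised as below — a minimal sketch assuming Patterns is std::vector<const char *> (implied by the ends_with/strlen usage in LineReader.cpp) and assuming the usual base/common include path; the history path and prompts are illustrative, not taken from this diff:

    #include <common/LineReader.h>

    int main()
    {
        /// Single-line mode, as in zookeeper-cli above: a trailing "\" extends
        /// the query onto the next line and no delimiter is needed to submit.
        LineReader single_line({}, false, {"\\"}, {});

        /// Multiline mode, matching what the interactive client tests expect:
        /// input is submitted only once it ends with ";" or "\G",
        /// while a trailing "\" still asks for a continuation line.
        LineReader multi_line("/tmp/history", true, {"\\"}, {";", "\\G"});

        auto line = multi_line.readLine(":) ", ":-] ");
        return line.empty() ? 1 : 0;
    }

In both modes the reader checks the tail of the accumulated input against the extender patterns first, then the delimiter patterns, which is why a query ending in "\G" is dispatched immediately even though multiline mode would otherwise wait for ";".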