Commit 20f59ffbb9 (ClickHouse/ClickHouse): Merge branch 'master' into issue-16775
@@ -29,7 +29,6 @@ message(STATUS "LLVM C++ compiler flags: ${LLVM_CXXFLAGS}")
 # This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
 set (REQUIRED_LLVM_LIBRARIES
-    LLVMOrcJIT
     LLVMExecutionEngine
     LLVMRuntimeDyld
     LLVMX86CodeGen
contrib/libunwind (vendored)
@@ -1 +1 @@
-Subproject commit 8fe25d7dc70f2a4ea38c3e5a33fa9d4199b67a5a
+Subproject commit a491c27b33109a842d577c0f7ac5f5f218859181
contrib/llvm (vendored)
@@ -1 +1 @@
-Subproject commit a7198805de67374eb3fb4c6b89797fa2d1cd7e50
+Subproject commit e5751459412bce1391fb7a2e9bbc01e131bf72f1
@@ -374,6 +374,7 @@ function run_tests
         01801_s3_cluster

         # Depends on LLVM JIT
+        01072_nullable_jit
         01852_jit_if
         01865_jit_comparison_constant_result
         01871_merge_tree_compile_expressions
@@ -44,7 +44,7 @@ parser.add_argument('--port', nargs='*', default=[9000], help="Space-separated l
 parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.')
 parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.')
 parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.')
-parser.add_argument('--max-query-seconds', type=int, default=10, help='For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.')
+parser.add_argument('--max-query-seconds', type=int, default=15, help='For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.')
 parser.add_argument('--profile-seconds', type=int, default=0, help='For how many seconds to profile a query for which the performance has changed.')
 parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
 parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
@@ -273,8 +273,14 @@ for query_index in queries_to_run:
     prewarm_id = f'{query_prefix}.prewarm0'

     try:
-        # Will also detect too long queries during warmup stage
-        res = c.execute(q, query_id = prewarm_id, settings = {'max_execution_time': args.max_query_seconds})
+        # During the warmup runs, we will also:
+        # * detect queries that are exceedingly long, to fail fast,
+        # * collect profiler traces, which might be helpful for analyzing
+        #   test coverage. We disable profiler for normal runs because
+        #   it makes the results unstable.
+        res = c.execute(q, query_id = prewarm_id,
+                        settings = {'max_execution_time': args.max_query_seconds,
+                                    'query_profiler_real_time_period_ns': 10000000})
     except clickhouse_driver.errors.Error as e:
         # Add query id to the exception to make debugging easier.
         e.args = (prewarm_id, *e.args)
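For context, the `query_profiler_real_time_period_ns` setting used in the rewritten prewarm call can also be enabled per query. A minimal hedged sketch (the table name is hypothetical; `system.trace_log` must be enabled in the server config):

``` sql
-- Sample the query's call stacks every 10 ms of real time, as in the prewarm run above.
SELECT count() FROM hits SETTINGS query_profiler_real_time_period_ns = 10000000;

-- Collected samples land in system.trace_log for later analysis.
SELECT count() FROM system.trace_log WHERE trace_type = 'Real';
```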
@@ -359,10 +365,11 @@ for query_index in queries_to_run:
         # For very short queries we have a special mode where we run them for at
         # least some time. The recommended lower bound of run time for "normal"
         # queries is about 0.1 s, and we run them about 10 times, giving the
-        # time per query per server of about one second. Use this value as a
-        # reference for "short" queries.
+        # time per query per server of about one second. Run "short" queries
+        # for longer time, because they have a high percentage of overhead and
+        # might give less stable results.
         if is_short[query_index]:
-            if server_seconds >= 2 * len(this_query_connections):
+            if server_seconds >= 8 * len(this_query_connections):
                 break
         # Also limit the number of runs, so that we don't go crazy processing
         # the results -- 'eqmed.sql' is really suboptimal.
@@ -446,6 +446,9 @@ if args.report == 'main':
             attrs[3] = f'style="background: {color_bad}"'
         else:
             attrs[3] = ''
+            # Just don't add the slightly unstable queries we don't consider
+            # errors. It's not clear what the user should do with them.
+            continue

         text += tableRow(r, attrs, anchor)
@@ -553,12 +556,11 @@ if args.report == 'main':
    error_tests += unstable_partial_queries
    status = 'failure'

-   if unstable_queries:
-       message_array.append(str(unstable_queries) + ' unstable')
-
-   # Disabled before fix.
-   # if very_unstable_queries:
-   #     status = 'failure'
+   # Don't show mildly unstable queries, only the very unstable ones we
+   # treat as errors.
+   if very_unstable_queries:
+       status = 'failure'
+       message_array.append(str(very_unstable_queries) + ' unstable')

    error_tests += slow_average_tests
    if error_tests:
@@ -101,6 +101,8 @@ For very large clusters, you can use different ZooKeeper clusters for different

 Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. The number of threads performing background tasks for replicated tables can be set by [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) setting.

+The `ReplicatedMergeTree` engine uses a separate thread pool for replicated fetches. The size of the pool is limited by the [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size) setting, which can be tuned with a server restart.
+
 By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option.

 Each block of data is written atomically. The INSERT query is divided into blocks up to `max_insert_block_size = 1048576` rows. In other words, if the `INSERT` query has less than 1048576 rows, it is made atomically.
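As a hedged illustration of the `insert_quorum` option mentioned above (the table name is hypothetical):

``` sql
-- Require acknowledgement from two replicas before the INSERT succeeds.
SET insert_quorum = 2;
INSERT INTO hits_replicated VALUES (1, 'example');
```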
@@ -284,6 +286,7 @@ If the data in ZooKeeper was lost or damaged, you can save data by moving it to
 **See Also**

 - [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size)
+- [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size)
 - [execute_merges_on_single_replica_time_threshold](../../../operations/settings/settings.md#execute-merges-on-single-replica-time-threshold)

 [Original article](https://clickhouse.tech/docs/en/operations/table_engines/replication/) <!--hide-->
@@ -57,7 +57,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml

 ## YAML examples {#example}

-Here you can see default config written in YAML: [config-example.yaml](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config-example.yaml).
+Here you can see the default config written in YAML: [config.yaml.example](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.yaml.example).

 There are some differences between YAML and XML formats in terms of ClickHouse configurations. Here are some tips for writing a configuration in YAML format.
@@ -2034,6 +2034,16 @@ Possible values:

 Default value: 16.

+## background_fetches_pool_size {#background_fetches_pool_size}
+
+Sets the number of threads performing background fetches for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. This setting is applied at ClickHouse server startup and cannot be changed in a user session. For production usage with frequent small insertions or a slow ZooKeeper cluster, it is recommended to use the default value.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 8.
+
 ## always_fetch_merged_part {#always_fetch_merged_part}

 Prohibits data parts merging in [Replicated\*MergeTree](../../engines/table-engines/mergetree-family/replication.md)-engine tables.
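One way to inspect these pool-size settings on a running server is to query `system.settings`; a hedged sketch (whether a given name appears there depends on the server version):

``` sql
SELECT name, value, description
FROM system.settings
WHERE name IN ('background_schedule_pool_size', 'background_fetches_pool_size');
```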
@@ -13,7 +13,7 @@ Returns an array of selected substrings. Empty substrings may be selected if the
 **Syntax**

 ``` sql
-splitByChar(<separator>, <s>)
+splitByChar(separator, s)
 ```

 **Arguments**
@@ -29,12 +29,12 @@ Returns an array of selected substrings. Empty substrings may be selected when:
 - There are multiple consecutive separators;
 - The original string `s` is empty.

-Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md).
+Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

 **Example**

 ``` sql
-SELECT splitByChar(',', '1,2,3,abcde')
+SELECT splitByChar(',', '1,2,3,abcde');
 ```

 ``` text
@@ -50,7 +50,7 @@ Splits a string into substrings separated by a string. It uses a constant string
 **Syntax**

 ``` sql
-splitByString(<separator>, <s>)
+splitByString(separator, s)
 ```

 **Arguments**
@@ -62,7 +62,7 @@ splitByString(<separator>, <s>)

 Returns an array of selected substrings. Empty substrings may be selected when:

-Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md).
+Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

 - A non-empty separator occurs at the beginning or end of the string;
 - There are multiple consecutive non-empty separators;
@@ -71,7 +71,7 @@ Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-ref
 **Example**

 ``` sql
-SELECT splitByString(', ', '1, 2 3, 4,5, abcde')
+SELECT splitByString(', ', '1, 2 3, 4,5, abcde');
 ```

 ``` text
@@ -81,7 +81,7 @@ SELECT splitByString(', ', '1, 2 3, 4,5, abcde')
 ```

 ``` sql
-SELECT splitByString('', 'abcde')
+SELECT splitByString('', 'abcde');
 ```

 ``` text
@@ -92,12 +92,12 @@ SELECT splitByString('', 'abcde')

 ## splitByRegexp(regexp, s) {#splitbyregexpseparator-s}

-Splits a string into substrings separated by a regular expression. It uses a regular expression string `regexp` as the separator. If the `regexp` is empty, it will split the string s into an array of single characters. If no match is found for this regex expression, the string `s` won't be split.
+Splits a string into substrings separated by a regular expression. It uses a regular expression string `regexp` as the separator. If the `regexp` is empty, it will split the string `s` into an array of single characters. If no match is found for this regular expression, the string `s` won't be split.

 **Syntax**

 ``` sql
-splitByRegexp(<regexp>, <s>)
+splitByRegexp(regexp, s)
 ```

 **Arguments**
@@ -109,28 +109,36 @@ splitByRegexp(<regexp>, <s>)

 Returns an array of selected substrings. Empty substrings may be selected when:

 - A non-empty regular expression match occurs at the beginning or end of the string;
 - There are multiple consecutive non-empty regular expression matches;
 - The original string `s` is empty while the regular expression is not empty.

-Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md).
+Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

 **Example**

+Query:
+
 ``` sql
-SELECT splitByRegexp('\\d+', 'a12bc23de345f')
+SELECT splitByRegexp('\\d+', 'a12bc23de345f');
 ```

+Result:
+
 ``` text
 ┌─splitByRegexp('\\d+', 'a12bc23de345f')─┐
 │ ['a','bc','de','f']                    │
 └────────────────────────────────────────┘
 ```

+Query:
+
 ``` sql
-SELECT splitByRegexp('', 'abcde')
+SELECT splitByRegexp('', 'abcde');
 ```

+Result:
+
 ``` text
 ┌─splitByRegexp('', 'abcde')─┐
 │ ['a','b','c','d','e']      │
@@ -149,7 +157,7 @@ Selects substrings of consecutive bytes from the ranges a-z and A-Z.Returns an a
 **Example**

 ``` sql
-SELECT alphaTokens('abca1abc')
+SELECT alphaTokens('abca1abc');
 ```

 ``` text
|
@ -65,6 +65,8 @@ ClickHouse хранит метаинформацию о репликах в [Apa
|
|||||||
|
|
||||||
Репликация асинхронная, мульти-мастер. Запросы `INSERT` и `ALTER` можно направлять на любой доступный сервер. Данные вставятся на сервер, где выполнен запрос, а затем скопируются на остальные серверы. В связи с асинхронностью, только что вставленные данные появляются на остальных репликах с небольшой задержкой. Если часть реплик недоступна, данные на них запишутся тогда, когда они станут доступны. Если реплика доступна, то задержка составляет столько времени, сколько требуется для передачи блока сжатых данных по сети. Количество потоков для выполнения фоновых задач можно задать с помощью настройки [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size).
|
Репликация асинхронная, мульти-мастер. Запросы `INSERT` и `ALTER` можно направлять на любой доступный сервер. Данные вставятся на сервер, где выполнен запрос, а затем скопируются на остальные серверы. В связи с асинхронностью, только что вставленные данные появляются на остальных репликах с небольшой задержкой. Если часть реплик недоступна, данные на них запишутся тогда, когда они станут доступны. Если реплика доступна, то задержка составляет столько времени, сколько требуется для передачи блока сжатых данных по сети. Количество потоков для выполнения фоновых задач можно задать с помощью настройки [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size).
|
||||||
|
|
||||||
|
Движок `ReplicatedMergeTree` использует отдельный пул потоков для скачивания кусков данных. Размер пула ограничен настройкой [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size), которую можно указать при перезапуске сервера.
|
||||||
|
|
||||||
По умолчанию, запрос INSERT ждёт подтверждения записи только от одной реплики. Если данные были успешно записаны только на одну реплику, и сервер с этой репликой перестал существовать, то записанные данные будут потеряны. Вы можете включить подтверждение записи от нескольких реплик, используя настройку `insert_quorum`.
|
По умолчанию, запрос INSERT ждёт подтверждения записи только от одной реплики. Если данные были успешно записаны только на одну реплику, и сервер с этой репликой перестал существовать, то записанные данные будут потеряны. Вы можете включить подтверждение записи от нескольких реплик, используя настройку `insert_quorum`.
|
||||||
|
|
||||||
Каждый блок данных записывается атомарно. Запрос INSERT разбивается на блоки данных размером до `max_insert_block_size = 1048576` строк. То есть, если в запросе `INSERT` менее 1048576 строк, то он делается атомарно.
|
Каждый блок данных записывается атомарно. Запрос INSERT разбивается на блоки данных размером до `max_insert_block_size = 1048576` строк. То есть, если в запросе `INSERT` менее 1048576 строк, то он делается атомарно.
|
||||||
@@ -249,5 +251,6 @@ $ sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data
 **See Also**

 - [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size)
+- [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size)
 - [execute_merges_on_single_replica_time_threshold](../../../operations/settings/settings.md#execute-merges-on-single-replica-time-threshold)
@@ -6,9 +6,9 @@ toc_title: "Конфигурационные файлы"

 # Configuration Files {#configuration_files}

-The main server configuration file is `config.xml`. It is located in the `/etc/clickhouse-server/` directory.
+The main server configuration file is `config.xml` or `config.yaml`. It is located in the `/etc/clickhouse-server/` directory.

-Individual settings can be overridden in `*.xml` and `*.conf` files in the `config.d` directory next to the config.
+Individual settings can be overridden in `*.xml` and `*.conf` files, as well as `.yaml` files (for configs in YAML format), in the `config.d` directory next to the config.

 Elements of these configuration files can have `replace` or `remove` attributes.
@@ -25,7 +25,7 @@ toc_title: "Конфигурационные файлы"
 In the `users_config` element of `config.xml`, you can specify a relative path to a configuration file with user, profile, and quota settings. The default value of `users_config` is `users.xml`. If `users_config` is omitted, user, profile, and quota settings can be specified directly in `config.xml`.

 User settings can be split into several separate files, similar to `config.xml` and `config.d\`.
-The directory name is set the same way as the file name in `users_config`, with `.d` substituted for `.xml`.
+The directory name is set the same way as the file name in `users_config`, with `.d` substituted for `.xml`/`.yaml`.
 The `users.d` directory is used by default, just as `users.xml` is used for `users_config`.
 For example, you can have a separate configuration file for each user:
@@ -52,3 +52,66 @@ $ cat /etc/clickhouse-server/users.d/alice.xml

 The server tracks changes in configuration files, as well as files and ZooKeeper nodes that were used for substitutions and overrides, and reloads user and cluster settings on the fly. This means that clusters, users, and their settings can be changed without restarting the server.
+
+## YAML configuration examples {#example}
+
+Here you can see an example of a real configuration written in YAML: [config.yaml.example](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.yaml.example).
+
+There are some differences between the XML and YAML standards, so this section lists some tips for writing a configuration in YAML format.
+
+To write an ordinary key-value pair, use a Scalar:
+``` yaml
+key: value
+```
+
+To create a tag that contains subtags, use a Map:
+``` yaml
+map_key:
+  key1: val1
+  key2: val2
+  key3: val3
+```
+
+To create a list of values or subtags under a certain key, use a Sequence:
+``` yaml
+seq_key:
+  - val1
+  - val2
+  - key1: val3
+  - map:
+      key2: val4
+      key3: val5
+```
+
+If you need to declare a tag analogous to an XML attribute, specify a scalar whose key starts with the @ prefix and is enclosed in quotes:
+
+``` yaml
+map:
+  "@attr1": value1
+  "@attr2": value2
+  key: 123
+```
+
+After conversion, such a Map yields:
+
+``` xml
+<map attr1="value1" attr2="value2">
+    <key>123</key>
+</map>
+```
+
+Besides Maps, attributes can also be set for a Sequence:
+
+``` yaml
+seq:
+  - "@attr1": value1
+  - "@attr2": value2
+  - 123
+  - abc
+```
+
+This is the equivalent of the following XML:
+
+``` xml
+<seq attr1="value1" attr2="value2">123</seq>
+<seq attr1="value1" attr2="value2">abc</seq>
+```
@@ -2043,6 +2043,16 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1;

 Default value: 16.

+## background_fetches_pool_size {#background_fetches_pool_size}
+
+Sets the number of threads for fetching data parts for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. This setting is applied at ClickHouse server startup and cannot be changed in a user session. For production use with frequent small insertions or a slow ZooKeeper cluster, it is recommended to use the default value.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 8.
+
 ## background_distributed_schedule_pool_size {#background_distributed_schedule_pool_size}

 Sets the number of threads for performing background tasks. Works for tables with the [Distributed](../../engines/table-engines/special/distributed.md) engine. This setting is applied at ClickHouse server startup and cannot be changed in a user session.
@@ -18,37 +18,37 @@ toc_title: JSON

 Checks for the existence of a field named `name`.

-Alias: `simpleJSONHas`.
+Synonym: `simpleJSONHas`.

 ## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name}

 Tries to extract a number of type UInt64 from the value of the field named `name`. If the field is a string, it tries to parse a number from the beginning of the string. If the field does not exist, or it exists but does not contain a number, it returns 0.

-Alias: `simpleJSONExtractUInt`.
+Synonym: `simpleJSONExtractUInt`.

 ## visitParamExtractInt(params, name) {#visitparamextractintparams-name}

 The same for Int64.

-Alias: `simpleJSONExtractInt`.
+Synonym: `simpleJSONExtractInt`.

 ## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name}

 The same for Float64.

-Alias: `simpleJSONExtractFloat`.
+Synonym: `simpleJSONExtractFloat`.

 ## visitParamExtractBool(params, name) {#visitparamextractboolparams-name}

 Tries to extract a true/false value. The result is UInt8.

-Alias: `simpleJSONExtractBool`.
+Synonym: `simpleJSONExtractBool`.

 ## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name}

 Returns the value of the field, including delimiters.

-Alias: `simpleJSONExtractRaw`.
+Synonym: `simpleJSONExtractRaw`.

 Examples:
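A short hedged sketch of the function family documented above (return values shown as comments):

``` sql
SELECT visitParamHas('{"abc": "def"}', 'abc');                   -- 1
SELECT simpleJSONExtractUInt('{"count": "42 items"}', 'count');  -- 42, parsed from the start of the string
```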
@@ -61,7 +61,7 @@ visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}';

 Parses the string in double quotes. The value is unescaped. If unescaping fails, an empty string is returned.

-Alias: `simpleJSONExtractString`.
+Synonym: `simpleJSONExtractString`.

 Examples:
@@ -14,7 +14,7 @@ separator должен быть константной строкой из ро
 **Syntax**

 ``` sql
-splitByChar(<separator>, <s>)
+splitByChar(separator, s)
 ```

 **Arguments**
@@ -30,12 +30,12 @@ splitByChar(<separator>, <s>)
 - There are multiple consecutive separators;
 - The original string `s` is empty.

-Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md).
+Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

 **Example**

 ``` sql
-SELECT splitByChar(',', '1,2,3,abcde')
+SELECT splitByChar(',', '1,2,3,abcde');
 ```

 ``` text
@@ -67,12 +67,12 @@ splitByString(separator, s)
 - There are multiple consecutive separators;
 - The original string `s` is empty.

-Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md).
+Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

 **Examples**

 ``` sql
-SELECT splitByString(', ', '1, 2 3, 4,5, abcde')
+SELECT splitByString(', ', '1, 2 3, 4,5, abcde');
 ```

 ``` text
@@ -82,7 +82,7 @@ SELECT splitByString(', ', '1, 2 3, 4,5, abcde')
 ```

 ``` sql
-SELECT splitByString('', 'abcde')
+SELECT splitByString('', 'abcde');
 ```

 ``` text
|
|||||||
└────────────────────────────┘
|
└────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## splitByRegexp(regexp, s) {#splitbyregexpseparator-s}
|
||||||
|
|
||||||
|
Разбивает строку на подстроки, разделенные регулярным выражением. В качестве разделителя используется строка регулярного выражения `regexp`. Если `regexp` пустая, функция разделит строку `s` на массив одиночных символов. Если для регулярного выражения совпадения не найдено, строка `s` не будет разбита.
|
||||||
|
|
||||||
|
**Синтаксис**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
splitByRegexp(regexp, s)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Аргументы**
|
||||||
|
|
||||||
|
- `regexp` — регулярное выражение. Константа. [String](../data-types/string.md) или [FixedString](../data-types/fixedstring.md).
|
||||||
|
- `s` — разбиваемая строка. [String](../../sql-reference/data-types/string.md).
|
||||||
|
|
||||||
|
**Возвращаемые значения**
|
||||||
|
|
||||||
|
Возвращает массив выбранных подстрок. Пустая подстрока может быть возвращена, если:
|
||||||
|
|
||||||
|
- Непустое совпадение с регулярным выражением происходит в начале или конце строки;
|
||||||
|
- Имеется несколько последовательных совпадений c непустым регулярным выражением;
|
||||||
|
- Исходная строка `s` пуста, а регулярное выражение не пустое.
|
||||||
|
|
||||||
|
Тип: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
|
||||||
|
|
||||||
|
**Примеры**
|
||||||
|
|
||||||
|
Запрос:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT splitByRegexp('\\d+', 'a12bc23de345f');
|
||||||
|
```
|
||||||
|
|
||||||
|
Результат:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─splitByRegexp('\\d+', 'a12bc23de345f')─┐
|
||||||
|
│ ['a','bc','de','f'] │
|
||||||
|
└────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
Запрос:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT splitByRegexp('', 'abcde');
|
||||||
|
```
|
||||||
|
|
||||||
|
Результат:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─splitByRegexp('', 'abcde')─┐
|
||||||
|
│ ['a','b','c','d','e'] │
|
||||||
|
└────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator}
|
## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator}
|
||||||
|
|
||||||
@@ -106,7 +160,7 @@ separator - необязательный параметр, константна
 **Example:**

 ``` sql
-SELECT alphaTokens('abca1abc')
+SELECT alphaTokens('abca1abc');
 ```

 ``` text
@@ -114,4 +168,3 @@ SELECT alphaTokens('abca1abc')
 │ ['abca','abc'] │
 └─────────────────────────┘
 ```
-
@@ -983,7 +983,7 @@ int mainEntryClickHouseStop(int argc, char ** argv)
     desc.add_options()
         ("help,h", "produce help message")
        ("pid-path", po::value<std::string>()->default_value("/var/run/clickhouse-server"), "directory for pid file")
-        ("force", po::value<bool>()->default_value(false), "Stop with KILL signal instead of TERM")
+        ("force", po::bool_switch(), "Stop with KILL signal instead of TERM")
    ;

    po::variables_map options;
@@ -1,4 +1,8 @@
-# NOTE: User and query level settings are set up in "users.xml" file.
+# This is an example of a configuration file "config.xml" rewritten in YAML
+# You can read this documentation for detailed information about YAML configuration:
+# https://clickhouse.tech/docs/en/operations/configuration-files/
+
+# NOTE: User and query level settings are set up in "users.yaml" file.
 # If you have accidentally specified user-level settings here, server won't start.
 # You can either move the settings to the right place inside "users.xml" file
 # or add skip_check_for_incorrect_settings: 1 here.
programs/server/users.yaml.example (new file, 107 lines)
@@ -0,0 +1,107 @@
+# Profiles of settings.
+profiles:
+    # Default settings.
+    default:
+        # Maximum memory usage for processing single query, in bytes.
+        max_memory_usage: 10000000000

+        # How to choose between replicas during distributed query processing.
+        # random - choose random replica from set of replicas with minimum number of errors
+        # nearest_hostname - from set of replicas with minimum number of errors, choose replica
+        # with minimum number of different symbols between replica's hostname and local hostname (Hamming distance).
+        # in_order - first live replica is chosen in specified order.
+        # first_or_random - if the first replica has a higher number of errors, pick a random one from replicas with minimum number of errors.
+        load_balancing: random

+    # Profile that allows only read queries.
+    readonly:
+        readonly: 1

+# Users and ACL.
+users:
+    # If user name was not specified, 'default' user is used.
+    default:
+        # Password could be specified in plaintext or in SHA256 (in hex format).
+        #
+        # If you want to specify password in plaintext (not recommended), place it in 'password' element.
+        # Example: password: qwerty
+        # Password could be empty.
+        #
+        # If you want to specify SHA256, place it in 'password_sha256_hex' element.
+        # Example: password_sha256_hex: 65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5
+        # Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
+        #
+        # If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
+        # Example: password_double_sha1_hex: e395796d6546b1b65db9d665cd43f0e858dd4303
+        #
+        # If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
+        # place its name in 'server' element inside 'ldap' element.
+        # Example: ldap:
+        #              server: my_ldap_server
+        #
+        # If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
+        # place 'kerberos' element instead of 'password' (and similar) elements.
+        # The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
+        # You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
+        # whose initiator's realm matches it.
+        # Example: kerberos: ''
+        # Example: kerberos:
+        #              realm: EXAMPLE.COM
+        #
+        # How to generate decent password:
+        # Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+        # The first line will be the password and the second the corresponding SHA256.
+        #
+        # How to generate double SHA1:
+        # Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
+        # The first line will be the password and the second the corresponding double SHA1.

+        password: ''

+        # List of networks with open access.
+        #
+        # To open access from everywhere, specify:
+        #     - ip: '::/0'
+        #
+        # To open access only from localhost, specify:
+        #     - ip: '::1'
+        #     - ip: 127.0.0.1
+        #
+        # Each element of list has one of the following forms:
+        # ip: IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+        #     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+        # host: Hostname. Example: server01.yandex.ru.
+        #     To check access, DNS query is performed, and all received addresses compared to peer address.
+        # host_regexp: Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
+        #     To check access, DNS PTR query is performed for peer address and then regexp is applied.
+        #     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
+        # It is strongly recommended that the regexp ends with $ and the whole expression is taken in ''.
+        # All results of DNS requests are cached till server restart.

+        networks:
+            ip: '::/0'

+        # Settings profile for user.
+        profile: default

+        # Quota for user.
+        quota: default

+        # User can create other users and grant rights to them.
+        # access_management: 1

+# Quotas.
+quotas:
+    # Name of quota.
+    default:
+        # Limits for time interval. You could specify many intervals with different limits.
+        interval:
+            # Length of interval.
+            duration: 3600

+            # No limits. Just calculate resource usage for time interval.
+            queries: 0
+            errors: 0
+            result_rows: 0
+            read_rows: 0
+            execution_time: 0
@@ -33,6 +33,7 @@ SRCS(
    Config/AbstractConfigurationComparison.cpp
    Config/ConfigProcessor.cpp
    Config/ConfigReloader.cpp
+    Config/YAMLParser.cpp
    Config/configReadClient.cpp
    CurrentMemoryTracker.cpp
    CurrentMetrics.cpp
@@ -54,6 +54,7 @@ class DiskS3::AwsS3KeyKeeper : public std::list<Aws::Vector<Aws::S3::Model::Obje
 {
 public:
     void addKey(const String & key);
+    static String getChunkKeys(const Aws::Vector<Aws::S3::Model::ObjectIdentifier> & chunk);

 private:
     /// limit for one DeleteObject request
|
|||||||
back().push_back(obj);
|
back().push_back(obj);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
String DiskS3::AwsS3KeyKeeper::getChunkKeys(const Aws::Vector<Aws::S3::Model::ObjectIdentifier> & chunk)
|
||||||
|
{
|
||||||
|
String res;
|
||||||
|
for (const auto & obj : chunk)
|
||||||
|
{
|
||||||
|
const auto & key = obj.GetKey();
|
||||||
|
if (!res.empty())
|
||||||
|
res.append(", ");
|
||||||
|
res.append(key.c_str(), key.size());
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
String getRandomName()
|
String getRandomName()
|
||||||
{
|
{
|
||||||
std::uniform_int_distribution<int> distribution('a', 'z');
|
std::uniform_int_distribution<int> distribution('a', 'z');
|
||||||
@ -794,6 +808,8 @@ void DiskS3::removeAws(const AwsS3KeyKeeper & keys)
|
|||||||
|
|
||||||
for (const auto & chunk : keys)
|
for (const auto & chunk : keys)
|
||||||
{
|
{
|
||||||
|
LOG_DEBUG(log, "Remove AWS keys {}", AwsS3KeyKeeper::getChunkKeys(chunk));
|
||||||
|
|
||||||
Aws::S3::Model::Delete delkeys;
|
Aws::S3::Model::Delete delkeys;
|
||||||
delkeys.SetObjects(chunk);
|
delkeys.SetObjects(chunk);
|
||||||
|
|
||||||
|
@ -521,6 +521,70 @@ ColumnPtr FunctionAnyArityLogical<Impl, Name>::executeImpl(
|
|||||||
return basicExecuteImpl<Impl>(std::move(args_in), input_rows_count);
|
return basicExecuteImpl<Impl>(std::move(args_in), input_rows_count);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <typename Impl, typename Name>
|
||||||
|
ColumnPtr FunctionAnyArityLogical<Impl, Name>::getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) const
|
||||||
|
{
|
||||||
|
/** Try to perform optimization for saturable functions (AndFunction, OrFunction) in case some arguments are
|
||||||
|
* constants.
|
||||||
|
* If function is not saturable (XorFunction) we cannot perform such optimization.
|
||||||
|
* If function is AndFunction and in arguments there is constant false, result is false.
|
||||||
|
* If function is OrFunction and in arguments there is constant true, result is true.
|
||||||
|
*/
|
||||||
|
if constexpr (!Impl::isSaturable())
|
||||||
|
return nullptr;
|
||||||
|
|
||||||
|
bool has_true_constant = false;
|
||||||
|
bool has_false_constant = false;
|
||||||
|
|
||||||
|
for (const auto & argument : arguments)
|
||||||
|
{
|
||||||
|
ColumnPtr column = argument.column;
|
||||||
|
|
||||||
|
if (!column || !isColumnConst(*column))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
DataTypePtr non_nullable_type = removeNullable(argument.type);
|
||||||
|
TypeIndex data_type_index = non_nullable_type->getTypeId();
|
||||||
|
|
||||||
|
if (!isNativeNumber(data_type_index))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
const ColumnConst * const_column = static_cast<const ColumnConst *>(column.get());
|
||||||
|
|
||||||
|
Field constant_field_value = const_column->getField();
|
||||||
|
if (constant_field_value.isNull())
|
||||||
|
continue;
|
||||||
|
|
||||||
|
auto field_type = constant_field_value.getType();
|
||||||
|
|
||||||
|
bool constant_value_bool = false;
|
||||||
|
|
||||||
|
if (field_type == Field::Types::Float64)
|
||||||
|
constant_value_bool = static_cast<bool>(constant_field_value.get<Float64>());
|
||||||
|
else if (field_type == Field::Types::Int64)
|
||||||
|
constant_value_bool = static_cast<bool>(constant_field_value.get<Int64>());
|
||||||
|
else if (field_type == Field::Types::UInt64)
|
||||||
|
constant_value_bool = static_cast<bool>(constant_field_value.get<UInt64>());
|
||||||
|
|
||||||
|
has_true_constant = has_true_constant || constant_value_bool;
|
||||||
|
has_false_constant = has_false_constant || !constant_value_bool;
|
||||||
|
}
|
||||||
|
|
||||||
|
ColumnPtr result_column;
|
||||||
|
|
||||||
|
if constexpr (std::is_same_v<Impl, AndImpl>)
|
||||||
|
{
|
||||||
|
if (has_false_constant)
|
||||||
|
result_type->createColumnConst(0, static_cast<UInt8>(false));
|
||||||
|
}
|
||||||
|
else if constexpr (std::is_same_v<Impl, OrImpl>)
|
||||||
|
{
|
||||||
|
if (has_true_constant)
|
||||||
|
result_type->createColumnConst(0, static_cast<UInt8>(true));
|
||||||
|
}
|
||||||
|
|
||||||
|
return result_column;
|
||||||
|
}
|
||||||
|
|
||||||
template <typename A, typename Op>
|
template <typename A, typename Op>
|
||||||
struct UnaryOperationImpl
|
struct UnaryOperationImpl
|
||||||
|
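To make the intent of `getConstantResultForNonConstArguments` concrete, a hedged SQL illustration (`x` and `t` are hypothetical): for a saturable function, one determining constant argument fixes the result regardless of the non-constant ones.

``` sql
-- With the optimization above, the analyzer can fold these to constants
-- without evaluating the column `x` at all:
SELECT x AND 0 FROM t;     -- always 0
SELECT x OR 1 FROM t;      -- always 1

-- xor is not saturable, so no such folding applies:
SELECT xor(x, 1) FROM t;   -- depends on every row of x
```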
@@ -155,7 +155,9 @@ public:
     /// Get result types by argument types. If the function does not apply to these arguments, throw an exception.
     DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override;

-    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override;
+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override;
+
+    ColumnPtr getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) const override;

 #if USE_EMBEDDED_COMPILER
     bool isCompilableImpl(const DataTypes &) const override { return useDefaultImplementationForNulls(); }
@@ -155,12 +155,13 @@ public:
      */
     virtual bool isSuitableForConstantFolding() const { return true; }

-    /** Some functions like ignore(...) or toTypeName(...) always return constant result which doesn't depend on arguments.
-      * In this case we can calculate result and assume that it's constant in stream header.
-      * There is no need to implement function if it has zero arguments.
-      * Must return ColumnConst with single row or nullptr.
+    /** If function isSuitableForConstantFolding, then this method will be called during query analysis
+      * if some arguments are constants. For example, logical functions (AndFunction, OrFunction) can
+      * return their result based on some constant arguments.
+      * Arguments are passed without modifications, useDefaultImplementationForNulls, useDefaultImplementationForConstants,
+      * useDefaultImplementationForLowCardinality are not applied.
       */
-    virtual ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName & /*columns*/) const { return nullptr; }
+    virtual ColumnPtr getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & /* arguments */, const DataTypePtr & /* result_type */) const { return nullptr; }

     /** Function is called "injective" if it returns different result for different values of arguments.
       * Example: hex, negate, tuple...
@@ -300,7 +301,7 @@ protected:
         return getReturnTypeImpl(data_types);
     }

-    /** If useDefaultImplementationForNulls() is true, than change arguments for getReturnType() and build():
+    /** If useDefaultImplementationForNulls() is true, then change arguments for getReturnType() and build():
       * if some of arguments are Nullable(Nothing) then don't call getReturnType(), call build() with return_type = Nullable(Nothing),
       * if some of arguments are Nullable, then:
       *  - Nullable types are substituted with nested types for getReturnType() function
@@ -310,7 +311,7 @@ protected:
      */
     virtual bool useDefaultImplementationForNulls() const { return true; }

-    /** If useDefaultImplementationForNulls() is true, than change arguments for getReturnType() and build().
+    /** If useDefaultImplementationForNulls() is true, then change arguments for getReturnType() and build().
      * If function arguments has low cardinality types, convert them to ordinary types.
      * getReturnType returns ColumnLowCardinality if at least one argument type is ColumnLowCardinality.
      */
@@ -377,7 +378,7 @@ public:

     /// Properties from IFunctionBase (see IFunction.h)
     virtual bool isSuitableForConstantFolding() const { return true; }
-    virtual ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName & /*arguments*/) const { return nullptr; }
+    virtual ColumnPtr getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & /*arguments*/, const DataTypePtr & /*result_type*/) const { return nullptr; }
     virtual bool isInjective(const ColumnsWithTypeAndName & /*sample_columns*/) const { return false; }
     virtual bool isDeterministic() const { return true; }
     virtual bool isDeterministicInScopeOfQuery() const { return true; }
@ -66,9 +66,10 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool isSuitableForConstantFolding() const override { return function->isSuitableForConstantFolding(); }
|
bool isSuitableForConstantFolding() const override { return function->isSuitableForConstantFolding(); }
|
||||||
ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName & arguments_) const override
|
|
||||||
|
ColumnPtr getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & arguments_, const DataTypePtr & result_type_) const override
|
||||||
{
|
{
|
||||||
return function->getResultIfAlwaysReturnsConstantAndHasArguments(arguments_);
|
return function->getConstantResultForNonConstArguments(arguments_, result_type_);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isStateful() const override { return function->isStateful(); }
|
bool isStateful() const override { return function->isStateful(); }
|
||||||
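Note: the two hunks above replace getResultIfAlwaysReturnsConstantAndHasArguments() with getConstantResultForNonConstArguments(), which now also receives the expected result type. A minimal, self-contained sketch of that contract (simplified stand-in types, not the real ClickHouse interfaces):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Column { std::string value; };          // stand-in for the pointee of ColumnPtr
using ColumnPtr = std::shared_ptr<Column>;

struct IFunctionBaseLike
{
    virtual ~IFunctionBaseLike() = default;
    virtual bool isSuitableForConstantFolding() const { return true; }

    /// May return a constant result even when the argument values are unknown,
    /// as long as the argument types already determine the answer.
    virtual ColumnPtr getConstantResultForNonConstArguments(const std::vector<std::string> & /*argument_types*/) const
    {
        return nullptr;
    }
};

/// Analogue of toTypeName(x): the result depends only on the argument type.
struct ToTypeNameLike : IFunctionBaseLike
{
    ColumnPtr getConstantResultForNonConstArguments(const std::vector<std::string> & argument_types) const override
    {
        return std::make_shared<Column>(Column{argument_types.at(0)});
    }
};

int main()
{
    ToTypeNameLike func;
    if (auto column = func.getConstantResultForNonConstArguments({"UInt64"}))
        std::cout << "folded to constant: " << column->value << '\n';
}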
@ -42,7 +42,7 @@ public:
        return type.createColumnConst(input_rows_count, type.getDefault());
    }

-    ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName & arguments) const override
+    ColumnPtr getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const override
    {
        const IDataType & type = *arguments[0].type;
        return type.createColumnConst(1, type.getDefault());
@ -51,17 +51,24 @@ public:

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
-        return getResultIfAlwaysReturnsConstantAndHasArguments(arguments)->cloneResized(input_rows_count);
+        return getSizeOfEnumType(arguments[0].type, input_rows_count);
    }

-    ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName & arguments) const override
+    ColumnPtr getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const override
    {
-        if (const auto * type8 = checkAndGetDataType<DataTypeEnum8>(arguments[0].type.get()))
-            return DataTypeUInt8().createColumnConst(1, type8->getValues().size());
-        else if (const auto * type16 = checkAndGetDataType<DataTypeEnum16>(arguments[0].type.get()))
-            return DataTypeUInt16().createColumnConst(1, type16->getValues().size());
+        return getSizeOfEnumType(arguments[0].type, 1);
+    }
+
+private:
+    ColumnPtr getSizeOfEnumType(const DataTypePtr & data_type, size_t input_rows_count) const
+    {
+        if (const auto * type8 = checkAndGetDataType<DataTypeEnum8>(data_type.get()))
+            return DataTypeUInt8().createColumnConst(input_rows_count, type8->getValues().size());
+        else if (const auto * type16 = checkAndGetDataType<DataTypeEnum16>(data_type.get()))
+            return DataTypeUInt16().createColumnConst(input_rows_count, type16->getValues().size());
        else
-            throw Exception("The argument for function " + getName() + " must be Enum", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The argument for function {} must be Enum", getName());
    }
};

@ -49,11 +49,6 @@ public:
    {
        return DataTypeUInt8().createColumnConst(input_rows_count, 0u);
    }
-
-    ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName &) const override
-    {
-        return DataTypeUInt8().createColumnConst(1, 0u);
-    }
};

}
@ -55,10 +55,6 @@ public:
        return DataTypeUInt8().createColumnConst(input_rows_count, 1u);
    }

-    ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName &) const override
-    {
-        return DataTypeUInt8().createColumnConst(1, 1u);
-    }
};


@ -23,66 +23,12 @@ namespace
/** timezoneOf(x) - get the name of the timezone of DateTime data type.
  * Example: Europe/Moscow.
  */
-class ExecutableFunctionTimezoneOf : public IExecutableFunction
+class FunctionTimezoneOf : public IFunction
{
public:
    static constexpr auto name = "timezoneOf";
    String getName() const override { return name; }
+    static FunctionPtr create(ContextPtr) { return std::make_unique<FunctionTimezoneOf>(); }
-    bool useDefaultImplementationForNulls() const override { return false; }
-    bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
-
-    /// Execute the function on the columns.
-    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
-    {
-        DataTypePtr type_no_nullable = removeNullable(arguments[0].type);
-
-        return DataTypeString().createColumnConst(input_rows_count,
-            dynamic_cast<const TimezoneMixin &>(*type_no_nullable).getTimeZone().getTimeZone());
-    }
-};
-
-
-class BaseFunctionTimezoneOf : public IFunctionBase
-{
-public:
-    BaseFunctionTimezoneOf(DataTypes argument_types_, DataTypePtr return_type_)
-        : argument_types(std::move(argument_types_)), return_type(std::move(return_type_)) {}
-
-    static constexpr auto name = "timezoneOf";
-    String getName() const override { return name; }
-
-    bool isDeterministic() const override { return true; }
-    bool isDeterministicInScopeOfQuery() const override { return true; }
-
-    const DataTypes & getArgumentTypes() const override { return argument_types; }
-    const DataTypePtr & getResultType() const override { return return_type; }
-
-    ExecutableFunctionPtr prepare(const ColumnsWithTypeAndName &) const override
-    {
-        return std::make_unique<ExecutableFunctionTimezoneOf>();
-    }
-
-    ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName & arguments) const override
-    {
-        DataTypePtr type_no_nullable = removeNullable(arguments[0].type);
-
-        return DataTypeString().createColumnConst(1,
-            dynamic_cast<const TimezoneMixin &>(*type_no_nullable).getTimeZone().getTimeZone());
-    }
-
-private:
-    DataTypes argument_types;
-    DataTypePtr return_type;
-};
-
-
-class FunctionTimezoneOfBuilder : public IFunctionOverloadResolver
-{
-public:
-    static constexpr auto name = "timezoneOf";
-    String getName() const override { return name; }
-    static FunctionOverloadResolverPtr create(ContextPtr) { return std::make_unique<FunctionTimezoneOfBuilder>(); }

    size_t getNumberOfArguments() const override { return 1; }

@ -96,21 +42,32 @@ public:
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad argument for function {}, should be DateTime or DateTime64", name);
    }

-    FunctionBasePtr buildImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type) const override
-    {
-        return std::make_unique<BaseFunctionTimezoneOf>(DataTypes{arguments[0].type}, return_type);
-    }
-
    bool useDefaultImplementationForNulls() const override { return false; }
    bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
    ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t /*number_of_arguments*/) const override { return {0}; }

+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
+    {
+        DataTypePtr type_no_nullable = removeNullable(arguments[0].type);
+
+        return DataTypeString().createColumnConst(input_rows_count,
+            dynamic_cast<const TimezoneMixin &>(*type_no_nullable).getTimeZone().getTimeZone());
+    }
+
+    ColumnPtr getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const override
+    {
+        DataTypePtr type_no_nullable = removeNullable(arguments[0].type);
+
+        return DataTypeString().createColumnConst(1,
+            dynamic_cast<const TimezoneMixin &>(*type_no_nullable).getTimeZone().getTimeZone());
+    }
};

}

void registerFunctionTimezoneOf(FunctionFactory & factory)
{
-    factory.registerFunction<FunctionTimezoneOfBuilder>();
+    factory.registerFunction<FunctionTimezoneOf>();
    factory.registerAlias("timeZoneOf", "timezoneOf");
}

@ -41,7 +41,7 @@ public:
        return DataTypeString().createColumnConst(input_rows_count, arguments[0].column->getName());
    }

-    ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName & arguments) const override
+    ColumnPtr getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const override
    {
        return DataTypeString().createColumnConst(1, arguments[0].type->createColumn()->getName());
    }
@ -12,85 +12,55 @@ namespace
/** toTypeName(x) - get the type name
  * Returns name of IDataType instance (name of data type).
  */
-class ExecutableFunctionToTypeName : public IExecutableFunction
+class FunctionToTypeName : public IFunction
{
public:

    static constexpr auto name = "toTypeName";
-    String getName() const override { return name; }
+
+    static FunctionPtr create(ContextPtr)
+    {
+        return std::make_shared<FunctionToTypeName>();
+    }
+
+    String getName() const override
+    {
+        return name;
+    }

    bool useDefaultImplementationForNulls() const override { return false; }

    bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }

-    /// Execute the function on the columns.
+    size_t getNumberOfArguments() const override
+    {
+        return 1;
+    }
+
+    DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override
+    {
+        return std::make_shared<DataTypeString>();
+    }
+
    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        return DataTypeString().createColumnConst(input_rows_count, arguments[0].type->getName());
    }
-};

+    ColumnPtr getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const override
-class BaseFunctionToTypeName : public IFunctionBase
-{
-public:
-    BaseFunctionToTypeName(DataTypes argument_types_, DataTypePtr return_type_)
-        : argument_types(std::move(argument_types_)), return_type(std::move(return_type_)) {}
-
-    static constexpr auto name = "toTypeName";
-    String getName() const override { return name; }
-
-    bool isDeterministic() const override { return true; }
-    bool isDeterministicInScopeOfQuery() const override { return true; }
-
-    const DataTypes & getArgumentTypes() const override { return argument_types; }
-    const DataTypePtr & getResultType() const override { return return_type; }
-
-    ExecutableFunctionPtr prepare(const ColumnsWithTypeAndName &) const override
    {
-        return std::make_unique<ExecutableFunctionToTypeName>();
+        return DataTypeString().createColumnConst(1, arguments[0].type->getName());
    }
-
-    ColumnPtr getResultIfAlwaysReturnsConstantAndHasArguments(const ColumnsWithTypeAndName &) const override
-    {
-        return DataTypeString().createColumnConst(1, argument_types.at(0)->getName());
-    }
-
-private:
-    DataTypes argument_types;
-    DataTypePtr return_type;
-};
-
-
-class FunctionToTypeNameBuilder : public IFunctionOverloadResolver
-{
-public:
-    static constexpr auto name = "toTypeName";
-    String getName() const override { return name; }
-    static FunctionOverloadResolverPtr create(ContextPtr) { return std::make_unique<FunctionToTypeNameBuilder>(); }
-
-    size_t getNumberOfArguments() const override { return 1; }
-
-    DataTypePtr getReturnTypeImpl(const DataTypes &) const override { return std::make_shared<DataTypeString>(); }
-
-    FunctionBasePtr buildImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type) const override
-    {
-        DataTypes types;
-        types.reserve(arguments.size());
-        for (const auto & elem : arguments)
-            types.emplace_back(elem.type);
-
-        return std::make_unique<BaseFunctionToTypeName>(types, return_type);
-    }
-
-    bool useDefaultImplementationForNulls() const override { return false; }
-    bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
    ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t /*number_of_arguments*/) const override { return {0}; }

};

}

void registerFunctionToTypeName(FunctionFactory & factory)
{
-    factory.registerFunction<FunctionToTypeNameBuilder>();
+    factory.registerFunction<FunctionToTypeName>();
}

}
@ -205,34 +205,31 @@ const ActionsDAG::Node & ActionsDAG::addFunction(
    node.function = node.function_base->prepare(arguments);

    /// If all arguments are constants, and function is suitable to be executed in 'prepare' stage - execute function.
-    if (all_const && node.function_base->isSuitableForConstantFolding())
+    if (node.function_base->isSuitableForConstantFolding())
    {
-        size_t num_rows = arguments.empty() ? 0 : arguments.front().column->size();
-        auto col = node.function->execute(arguments, node.result_type, num_rows, true);
+        ColumnPtr column;
+
+        if (all_const)
+        {
+            size_t num_rows = arguments.empty() ? 0 : arguments.front().column->size();
+            column = node.function->execute(arguments, node.result_type, num_rows, true);
+        }
+        else
+        {
+            column = node.function_base->getConstantResultForNonConstArguments(arguments, node.result_type);
+        }

        /// If the result is not a constant, just in case, we will consider the result as unknown.
-        if (isColumnConst(*col))
+        if (column && isColumnConst(*column))
        {
            /// All constant (literal) columns in block are added with size 1.
            /// But if there was no columns in block before executing a function, the result has size 0.
            /// Change the size to 1.

-            if (col->empty())
-                col = col->cloneResized(1);
+            if (column->empty())
+                column = column->cloneResized(1);

-            node.column = std::move(col);
+            node.column = std::move(column);
-        }
-    }
-
-    /// Some functions like ignore(), indexHint() or getTypeName() always return constant result even if arguments are not constant.
-    /// We can't do constant folding, but can specify in sample block that function result is constant to avoid
-    /// unnecessary materialization.
-    if (!node.column && node.function_base->isSuitableForConstantFolding())
-    {
-        if (auto col = node.function_base->getResultIfAlwaysReturnsConstantAndHasArguments(arguments))
-        {
-            node.column = std::move(col);
-            node.allow_constant_folding = false;
        }
    }

|
|||||||
|
|
||||||
for (auto & node : nodes)
|
for (auto & node : nodes)
|
||||||
{
|
{
|
||||||
/// We cannot remove function with side effects even if it returns constant (e.g. ignore(...)).
|
|
||||||
bool prevent_constant_folding = node.column && isColumnConst(*node.column) && !node.allow_constant_folding;
|
|
||||||
/// We cannot remove arrayJoin because it changes the number of rows.
|
/// We cannot remove arrayJoin because it changes the number of rows.
|
||||||
bool is_array_join = node.type == ActionType::ARRAY_JOIN;
|
bool is_array_join = node.type == ActionType::ARRAY_JOIN;
|
||||||
|
|
||||||
bool must_keep_node = is_array_join || prevent_constant_folding;
|
if (is_array_join && visited_nodes.count(&node) == 0)
|
||||||
if (must_keep_node && visited_nodes.count(&node) == 0)
|
|
||||||
{
|
{
|
||||||
visited_nodes.insert(&node);
|
visited_nodes.insert(&node);
|
||||||
stack.push(&node);
|
stack.push(&node);
|
||||||
@ -429,7 +423,7 @@ void ActionsDAG::removeUnusedActions(bool allow_remove_inputs)
|
|||||||
auto * node = stack.top();
|
auto * node = stack.top();
|
||||||
stack.pop();
|
stack.pop();
|
||||||
|
|
||||||
if (!node->children.empty() && node->column && isColumnConst(*node->column) && node->allow_constant_folding)
|
if (!node->children.empty() && node->column && isColumnConst(*node->column))
|
||||||
{
|
{
|
||||||
/// Constant folding.
|
/// Constant folding.
|
||||||
node->type = ActionsDAG::ActionType::COLUMN;
|
node->type = ActionsDAG::ActionType::COLUMN;
|
||||||
@ -1520,7 +1514,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet &
|
|||||||
|
|
||||||
auto res = split(split_nodes);
|
auto res = split(split_nodes);
|
||||||
/// Do not remove array joined columns if they are not used.
|
/// Do not remove array joined columns if they are not used.
|
||||||
res.first->project_input = false;
|
/// res.first->project_input = false;
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1533,7 +1527,9 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsForFilter(const std::string & co
|
|||||||
column_name, dumpDAG());
|
column_name, dumpDAG());
|
||||||
|
|
||||||
std::unordered_set<const Node *> split_nodes = {node};
|
std::unordered_set<const Node *> split_nodes = {node};
|
||||||
return split(split_nodes);
|
auto res = split(split_nodes);
|
||||||
|
res.second->project_input = project_input;
|
||||||
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace
|
namespace
|
||||||
|
@ -88,9 +88,6 @@ public:

    /// For COLUMN node and propagated constants.
    ColumnPtr column;
-    /// Some functions like `ignore()` always return constant but can't be replaced by constant it.
-    /// We calculate such constants in order to avoid unnecessary materialization, but prohibit it's folding.
-    bool allow_constant_folding = true;

    void toTree(JSONBuilder::JSONMap & map) const;
};
@ -312,7 +312,7 @@ static FunctionBasePtr compile(

static bool isCompilableConstant(const ActionsDAG::Node & node)
{
-    return node.column && isColumnConst(*node.column) && canBeNativeType(*node.result_type) && node.allow_constant_folding;
+    return node.column && isColumnConst(*node.column) && canBeNativeType(*node.result_type);
}

static bool isCompilableFunction(const ActionsDAG::Node & node)
@ -334,16 +334,11 @@ static bool isCompilableFunction(const ActionsDAG::Node & node)
    return function.isCompilable();
}

-static bool isCompilableInput(const ActionsDAG::Node & node)
-{
-    return node.type == ActionsDAG::ActionType::INPUT || node.type == ActionsDAG::ActionType::ALIAS;
-}
-
static CompileDAG getCompilableDAG(
    const ActionsDAG::Node * root,
    ActionsDAG::NodeRawConstPtrs & children)
{
-    /// Extract CompileDAG from root actions dag node, it is important that each root child is compilable.
+    /// Extract CompileDAG from root actions dag node.

    CompileDAG dag;

@ -363,6 +358,32 @@ static CompileDAG getCompilableDAG(
        auto & frame = stack.top();
        const auto * node = frame.node;

+        bool is_compilable_constant = isCompilableConstant(*node);
+        bool is_compilable_function = isCompilableFunction(*node);
+
+        if (!is_compilable_function || is_compilable_constant)
+        {
+            CompileDAG::Node compile_node;
+            compile_node.function = node->function_base;
+            compile_node.result_type = node->result_type;
+
+            if (is_compilable_constant)
+            {
+                compile_node.type = CompileDAG::CompileType::CONSTANT;
+                compile_node.column = node->column;
+            }
+            else
+            {
+                compile_node.type = CompileDAG::CompileType::INPUT;
+                children.emplace_back(node);
+            }
+
+            visited_node_to_compile_dag_position[node] = dag.getNodesCount();
+            dag.addNode(std::move(compile_node));
+            stack.pop();
+            continue;
+        }
+
        while (frame.next_child_to_visit < node->children.size())
        {
            const auto & child = node->children[frame.next_child_to_visit];
@ -382,26 +403,15 @@ static CompileDAG getCompilableDAG(
        if (!all_children_visited)
            continue;

+        /// Here we process only functions that are not compiled constants
+
        CompileDAG::Node compile_node;
        compile_node.function = node->function_base;
        compile_node.result_type = node->result_type;
+        compile_node.type = CompileDAG::CompileType::FUNCTION;

-        if (isCompilableConstant(*node))
-        {
-            compile_node.type = CompileDAG::CompileType::CONSTANT;
-            compile_node.column = node->column;
-        }
-        else if (node->type == ActionsDAG::ActionType::FUNCTION)
-        {
-            compile_node.type = CompileDAG::CompileType::FUNCTION;
-            for (const auto * child : node->children)
-                compile_node.arguments.push_back(visited_node_to_compile_dag_position[child]);
-        }
-        else
-        {
-            compile_node.type = CompileDAG::CompileType::INPUT;
-            children.emplace_back(node);
-        }
+        for (const auto * child : node->children)
+            compile_node.arguments.push_back(visited_node_to_compile_dag_position[child]);

        visited_node_to_compile_dag_position[node] = dag.getNodesCount();

|
|||||||
struct Data
|
struct Data
|
||||||
{
|
{
|
||||||
bool is_compilable_in_isolation = false;
|
bool is_compilable_in_isolation = false;
|
||||||
bool all_children_compilable = false;
|
|
||||||
bool all_parents_compilable = true;
|
bool all_parents_compilable = true;
|
||||||
|
size_t compilable_children_size = 0;
|
||||||
size_t children_size = 0;
|
size_t children_size = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -428,7 +438,7 @@ void ActionsDAG::compileFunctions(size_t min_count_to_compile_expression)
|
|||||||
|
|
||||||
for (const auto & node : nodes)
|
for (const auto & node : nodes)
|
||||||
{
|
{
|
||||||
bool node_is_compilable_in_isolation = isCompilableConstant(node) || isCompilableFunction(node) || isCompilableInput(node);
|
bool node_is_compilable_in_isolation = isCompilableFunction(node) && !isCompilableConstant(node);
|
||||||
node_to_data[&node].is_compilable_in_isolation = node_is_compilable_in_isolation;
|
node_to_data[&node].is_compilable_in_isolation = node_is_compilable_in_isolation;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -441,8 +451,7 @@ void ActionsDAG::compileFunctions(size_t min_count_to_compile_expression)
|
|||||||
std::stack<Frame> stack;
|
std::stack<Frame> stack;
|
||||||
std::unordered_set<const Node *> visited_nodes;
|
std::unordered_set<const Node *> visited_nodes;
|
||||||
|
|
||||||
/** Algorithm is to iterate over each node in ActionsDAG, and update node compilable status.
|
/** Algorithm is to iterate over each node in ActionsDAG, and update node compilable_children_size.
|
||||||
* Node is compilable if all its children are compilable and node is also compilable.
|
|
||||||
* After this procedure data for each node is initialized.
|
* After this procedure data for each node is initialized.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -479,14 +488,18 @@ void ActionsDAG::compileFunctions(size_t min_count_to_compile_expression)
|
|||||||
|
|
||||||
auto & current_node_data = node_to_data[current_node];
|
auto & current_node_data = node_to_data[current_node];
|
||||||
|
|
||||||
current_node_data.all_children_compilable = true;
|
|
||||||
|
|
||||||
if (current_node_data.is_compilable_in_isolation)
|
if (current_node_data.is_compilable_in_isolation)
|
||||||
{
|
{
|
||||||
for (const auto * child : current_node->children)
|
for (const auto * child : current_node->children)
|
||||||
{
|
{
|
||||||
current_node_data.all_children_compilable &= node_to_data[child].is_compilable_in_isolation;
|
auto & child_data = node_to_data[child];
|
||||||
current_node_data.all_children_compilable &= node_to_data[child].all_children_compilable;
|
|
||||||
|
if (child_data.is_compilable_in_isolation)
|
||||||
|
{
|
||||||
|
current_node_data.compilable_children_size += child_data.compilable_children_size;
|
||||||
|
current_node_data.compilable_children_size += 1;
|
||||||
|
}
|
||||||
|
|
||||||
current_node_data.children_size += node_to_data[child].children_size;
|
current_node_data.children_size += node_to_data[child].children_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -501,10 +514,10 @@ void ActionsDAG::compileFunctions(size_t min_count_to_compile_expression)
|
|||||||
for (const auto & node : nodes)
|
for (const auto & node : nodes)
|
||||||
{
|
{
|
||||||
auto & node_data = node_to_data[&node];
|
auto & node_data = node_to_data[&node];
|
||||||
bool is_compilable = node_data.is_compilable_in_isolation && node_data.all_children_compilable;
|
bool node_is_valid_for_compilation = node_data.is_compilable_in_isolation && node_data.compilable_children_size > 0;
|
||||||
|
|
||||||
for (const auto & child : node.children)
|
for (const auto & child : node.children)
|
||||||
node_to_data[child].all_parents_compilable &= is_compilable;
|
node_to_data[child].all_parents_compilable &= node_is_valid_for_compilation;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (const auto & node : index)
|
for (const auto & node : index)
|
||||||
@ -519,11 +532,10 @@ void ActionsDAG::compileFunctions(size_t min_count_to_compile_expression)
|
|||||||
{
|
{
|
||||||
auto & node_data = node_to_data[&node];
|
auto & node_data = node_to_data[&node];
|
||||||
|
|
||||||
bool node_is_valid_for_compilation = !isCompilableConstant(node) && node.children.size() > 1;
|
bool node_is_valid_for_compilation = node_data.is_compilable_in_isolation && node_data.compilable_children_size > 0;
|
||||||
bool can_be_compiled = node_data.is_compilable_in_isolation && node_data.all_children_compilable && node_is_valid_for_compilation;
|
|
||||||
|
|
||||||
/// If all parents are compilable then this node should not be standalone compiled
|
/// If all parents are compilable then this node should not be standalone compiled
|
||||||
bool should_compile = can_be_compiled && !node_data.all_parents_compilable;
|
bool should_compile = node_is_valid_for_compilation && !node_data.all_parents_compilable;
|
||||||
|
|
||||||
if (!should_compile)
|
if (!should_compile)
|
||||||
continue;
|
continue;
|
||||||
|
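Note: the compileFunctions hunks replace the boolean "all children compilable" flag with a compilable_children_size counter, so that a function is only worth standalone JIT compilation if fusing it with at least one compilable child actually saves work. A toy, self-contained model of that counting (hypothetical structure, not the real ActionsDAG):

#include <cstddef>
#include <iostream>
#include <vector>

// Toy DAG node: 'compilable' mirrors is_compilable_in_isolation.
struct Node
{
    bool compilable = false;
    std::vector<const Node *> children;
    size_t compilable_children_size = 0;   // filled by a post-order pass
};

// Post-order pass, as in the diff: for each compilable child, accumulate
// that child's own count plus one for the child itself.
void countCompilableChildren(Node & node)
{
    for (const Node * child : node.children)
        if (child->compilable)
            node.compilable_children_size += child->compilable_children_size + 1;
}

// A node is worth compiling on its own only if it is compilable and fusing
// it with at least one compilable child saves work.
bool validForCompilation(const Node & node)
{
    return node.compilable && node.compilable_children_size > 0;
}

int main()
{
    Node input;                        // plain input: not compilable in isolation
    Node plus{true, {&input}};         // plus(input): compilable, but no compilable children
    countCompilableChildren(plus);
    Node mul{true, {&plus}};           // mul(plus(...)): one compilable child -> worth compiling
    countCompilableChildren(mul);

    std::cout << validForCompilation(plus) << ' ' << validForCompilation(mul) << '\n';   // prints: 0 1
}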
@ -302,7 +302,7 @@ Block InterpreterKillQueryQuery::getSelectResult(const String & columns, const S
    if (where_expression)
        select_query += " WHERE " + queryToString(where_expression);

-    auto stream = executeQuery(select_query, getContext()->getGlobalContext(), true).getInputStream();
+    auto stream = executeQuery(select_query, getContext(), true).getInputStream();
    Block res = stream->read();

    if (res && stream->read())
@ -1,7 +1,9 @@
#include <Processors/QueryPlan/UnionStep.h>
#include <Processors/QueryPipeline.h>
#include <Processors/Sources/NullSource.h>
+#include <Processors/Transforms/ExpressionTransform.h>
#include <Interpreters/ExpressionActions.h>
+#include <common/defines.h>

namespace DB
{
@ -47,6 +49,28 @@ QueryPipelinePtr UnionStep::updatePipeline(QueryPipelines pipelines, const Build
        return pipeline;
    }

+    for (auto & cur_pipeline : pipelines)
+    {
+#if !defined(NDEBUG)
+        assertCompatibleHeader(cur_pipeline->getHeader(), getOutputStream().header, "UnionStep");
+#endif
+        /// Headers for union must be equal.
+        /// But, just in case, convert it to the same header if not.
+        if (!isCompatibleHeader(cur_pipeline->getHeader(), getOutputStream().header))
+        {
+            auto converting_dag = ActionsDAG::makeConvertingActions(
+                cur_pipeline->getHeader().getColumnsWithTypeAndName(),
+                getOutputStream().header.getColumnsWithTypeAndName(),
+                ActionsDAG::MatchColumnsMode::Name);
+
+            auto converting_actions = std::make_shared<ExpressionActions>(std::move(converting_dag));
+            cur_pipeline->addSimpleTransform([&](const Block & cur_header)
+            {
+                return std::make_shared<ExpressionTransform>(cur_header, converting_actions);
+            });
+        }
+    }
+
    *pipeline = QueryPipeline::unitePipelines(std::move(pipelines), max_threads);

    processors = collector.detachProcessors();
@ -268,8 +268,7 @@ void ReplicatedMergeTreeQueue::removeCoveredPartsFromMutations(const String & pa

    bool some_mutations_are_probably_done = false;

-    auto from_it = in_partition->second.lower_bound(part_info.getDataVersion());
-    for (auto it = from_it; it != in_partition->second.end(); ++it)
+    for (auto it = in_partition->second.begin(); it != in_partition->second.end(); ++it)
    {
        MutationStatus & status = *it->second;

@ -4999,7 +4999,10 @@ bool StorageReplicatedMergeTree::getFakePartCoveringAllPartsInPartition(const St
        auto zookeeper = getZooKeeper();
        delimiting_block_lock = allocateBlockNumber(partition_id, zookeeper);
        right = delimiting_block_lock->getNumber();
-        mutation_version = queue.getCurrentMutationVersion(partition_id, right);
+        /// Make sure we cover all parts in drop range.
+        /// There might be parts with mutation version greater than current block number
+        /// if some part mutation has been assigned after block number allocation, but before creation of DROP_RANGE entry.
+        mutation_version = MergeTreePartInfo::MAX_BLOCK_NUMBER;
    }

    if (for_replace_range)
@ -38,6 +38,7 @@ MESSAGES_TO_RETRY = [
    "Coordination::Exception: Operation timeout",
    "Operation timed out",
    "ConnectionPoolWithFailover: Connection failed at try",
+    "DB::Exception: New table appeared in database being dropped or detached. Try again"
]

class Terminated(KeyboardInterrupt):
@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.3">
    <preconditions>
        <table_exists>hits_100m_single</table_exists>
    </preconditions>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test max_ignored_relative_change="0.7">
    <settings>
        <max_memory_usage>30000000000</max_memory_usage>
    </settings>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>



@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.4">
+<test>
    <settings>
        <allow_experimental_bigint_types>1</allow_experimental_bigint_types>
        <max_threads>1</max_threads>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <query>SELECT boundingRatio(number, number) FROM numbers(100000000)</query>
    <query>SELECT (argMax(number, number) - argMin(number, number)) / (max(number) - min(number)) FROM numbers(100000000)</query>
</test>
@ -1,5 +1,4 @@
-<!-- FIXME this instability is abysmal, investigate the unstable queries -->
-<test max_ignored_relative_change="0.2">
+<test>
    <settings>
        <allow_suspicious_codecs>1</allow_suspicious_codecs>
    </settings>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <settings>
        <allow_suspicious_codecs>1</allow_suspicious_codecs>
    </settings>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>



@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <preconditions>
        <table_exists>hits_100m_single</table_exists>
    </preconditions>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test>
    <substitutions>
        <substitution>
            <name>datetime_transform</name>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.5">
+<test max_ignored_relative_change="0.2">
    <settings>
        <max_memory_usage>35G</max_memory_usage>
    </settings>
@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.2">
    <settings>
        <max_memory_usage>15G</max_memory_usage>
    </settings>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.4">
    <create_query>
        CREATE TABLE simple_key_direct_dictionary_source_table
        (
@ -1,7 +1,7 @@
<test>
    <preconditions>
-        <table_exists>test.hits</table_exists>
+        <table_exists>hits_100m_single</table_exists>
    </preconditions>

-    <query>SELECT count() FROM test.hits WHERE NOT ignore(encodeXMLComponent(URL))</query>
+    <query>SELECT count() FROM hits_100m_single WHERE NOT ignore(encodeXMLComponent(URL))</query>
</test>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test>
    <create_query>
        CREATE TABLE simple_key_flat_dictionary_source_table
        (
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <substitutions>
        <substitution>
            <name>expr</name>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test>



@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.2">
    <substitutions>
        <substitution>
            <name>gp_hash_func</name>
@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.6">
    <substitutions>
        <substitution>
            <name>hash_func</name>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <query>SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('ui64 UInt64, i64 Int64, ui32 UInt32, i32 Int32, ui16 UInt16, i16 Int16, ui8 UInt8, i8 Int8') LIMIT 1000000000);</query>
    <query>SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('ui64 UInt64, i64 Int64, ui32 UInt32, i32 Int32, ui16 UInt16, i16 Int16, ui8 UInt8, i8 Int8', 0, 10, 10) LIMIT 1000000000);</query>
    <query>SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('i Enum8(\'hello\' = 1, \'world\' = 5)', 0, 10, 10) LIMIT 1000000000);</query>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.2">
    <create_query>
        CREATE TABLE simple_key_hashed_dictionary_source_table
        (
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <preconditions>
        <table_exists>hits_100m_single</table_exists>
        <table_exists>hits_10m_single</table_exists>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <query>
        WITH
            bitXor(number, 0x4CF2D2BAAE6DA887) AS x0,
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>



@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.2">
    <substitutions>
        <substitution>
            <name>json</name>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.6">
    <substitutions>
        <substitution>
            <name>func_slow</name>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>

    <create_query>CREATE TABLE bad_partitions (a UInt64, b UInt64, c UInt64, d UInt64, e UInt64, f UInt64, g UInt64, h UInt64, i UInt64, j UInt64, k UInt64, l UInt64, m UInt64, n UInt64, o UInt64, p UInt64, q UInt64, r UInt64, s UInt64, t UInt64, u UInt64, v UInt64, w UInt64, x UInt64, y UInt64, z UInt64) ENGINE = MergeTree PARTITION BY x ORDER BY x</create_query>
    <fill_query>INSERT INTO bad_partitions (x) SELECT * FROM numbers_mt(3000)</fill_query>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test max_ignored_relative_change="0.3">
    <substitutions>
        <substitution>
            <name>format</name>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test>
    <settings>
        <do_not_merge_across_partitions_select_final>1</do_not_merge_across_partitions_select_final>
    </settings>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <settings>
        <parallel_view_processing>1</parallel_view_processing>
    </settings>
@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.2">
    <settings>
        <!--
           Not sure why it's needed. Maybe it has something to do with the
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <create_query>
        CREATE TABLE hits_wide AS hits_10m_single ENGINE = MergeTree()
        PARTITION BY toYYYYMM(EventDate)
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <create_query>
        CREATE TABLE hits_wide AS hits_10m_single ENGINE = MergeTree()
        PARTITION BY toYYYYMM(EventDate)
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <create_query>
        CREATE TABLE hits_wide AS hits_10m_single ENGINE = MergeTree()
        PARTITION BY toYYYYMM(EventDate)
@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.2">
    <settings>
        <max_threads>4</max_threads>
        <max_memory_usage>20G</max_memory_usage>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <query>SELECT count() FROM zeros(100000000) WHERE NOT ignore(randomString(10))</query>
    <query>SELECT count() FROM zeros(100000000) WHERE NOT ignore(randomString(100))</query>
    <query>SELECT count() FROM zeros(1000000) WHERE NOT ignore(randomString(1000))</query>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test>
    <preconditions>
        <table_exists>hits_100m_single</table_exists>
    </preconditions>
@ -1,4 +1,5 @@
<test>
-    <query>SELECT sumIf(1, 0) FROM numbers(100000000)</query>
-    <query>SELECT sumIf(1, 1) FROM numbers(100000000)</query>
+    <!-- Shouldn't have been a perf test, but an EXPLAIN one. -->
+    <query>SELECT sumIf(1, 0) FROM numbers(1000000000)</query>
+    <query>SELECT sumIf(1, 1) FROM numbers(1000000000)</query>
</test>
File diff suppressed because one or more lines are too long
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.2">
    <settings>
        <max_threads>1</max_threads>
    </settings>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.2">
    <settings>
        <max_memory_usage>30000000000</max_memory_usage>
    </settings>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
    <preconditions>
        <table_exists>hits_100m_single</table_exists>
        <table_exists>hits_10m_single</table_exists>
@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.2">
    <substitutions>
        <substitution>
            <name>param</name>
@ -1,3 +1,5 @@
+SET compile_expressions = 1;
+
DROP TABLE IF EXISTS foo;

CREATE TABLE foo (
@ -12,7 +14,7 @@ CREATE TABLE foo (

INSERT INTO foo VALUES (1, 0.5, 0.2, 0.3, 0.8);

-SELECT divide(sum(a) + sum(b), nullIf(sum(c) + sum(d), 0)) FROM foo SETTINGS compile_expressions = 1;
-SELECT divide(sum(a) + sum(b), nullIf(sum(c) + sum(d), 0)) FROM foo SETTINGS compile_expressions = 1;
+SELECT divide(sum(a) + sum(b), nullIf(sum(c) + sum(d), 0)) FROM foo;
+SELECT divide(sum(a) + sum(b), nullIf(sum(c) + sum(d), 0)) FROM foo;

DROP TABLE foo;
@ -1,9 +0,0 @@
-0
-0
-0
-0
-0
-0
-0
-0
-0
@ -4,11 +4,18 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

-opts=(
-    --max_distributed_connections 9
-    --max_threads 1
-    --query "SELECT sleepEachRow(1) FROM remote('127.{2..10}', system.one)"
-)
-# 5 less then 9 seconds (9 streams), but long enough to cover possible load peaks
-# "$@" left to pass manual options (like --experimental_use_processors 0) during manual testing
-timeout 5s ${CLICKHOUSE_CLIENT} "${opts[@]}" "$@"
+# Sometimes five seconds are not enough due to system overload.
+# But if it can run in less than five seconds at least sometimes - it is enough for the test.
+while true
+do
+    opts=(
+        --max_distributed_connections 9
+        --max_threads 1
+        --query "SELECT sleepEachRow(1) FROM remote('127.{2..10}', system.one)"
+        --format Null
+    )
+    # 5 less then 9 seconds (9 streams), but long enough to cover possible load peaks
+    # "$@" left to pass manual options (like --experimental_use_processors 0) during manual testing
+
+    timeout 5s ${CLICKHOUSE_CLIENT} "${opts[@]}" "$@" && break
+done
@ -1,2 +0,0 @@
-0
-0
@ -4,6 +4,12 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

-query="SELECT sleepEachRow(1) FROM remote('127.{2,3}', system.one)"
-# 1.8 less then 2 seconds, but long enough to cover possible load peaks
-timeout 1.8s ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&max_distributed_connections=2&max_threads=1" -d "$query"
+# Sometimes 1.8 seconds are not enough due to system overload.
+# But if it can run in less than five seconds at least sometimes - it is enough for the test.
+
+while true
+do
+    query="SELECT sleepEachRow(1) FROM remote('127.{2,3}', system.one) FORMAT Null"
+    # 1.8 less then 2 seconds, but long enough to cover possible load peaks
+    timeout 1.8s ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&max_distributed_connections=2&max_threads=1" -d "$query" && break
+done
@ -0,0 +1,7 @@
+waiting default rmt 0000000002 UPDATE m = m * toInt8(s) WHERE n = 3
+1 4 2
+2 15 5
+3 7 fail
+4 11 13
+0000000000 UPDATE m = m * toInt8(s) WHERE 1 [] 0 1
+0000000001 UPDATE m = m * toInt8(s) WHERE 1 [] 0 1
34
tests/queries/0_stateless/01155_old_mutation_parts_to_do.sh
Executable file
@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+# shellcheck source=./mergetree_mutations.lib
+. "$CURDIR"/mergetree_mutations.lib
+
+
+${CLICKHOUSE_CLIENT} -q "drop table if exists rmt;"
+
+${CLICKHOUSE_CLIENT} -q "create table rmt (n int, m int, s String) engine=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', '1')
+    order by n settings max_replicated_mutations_in_queue=0;"
+
+${CLICKHOUSE_CLIENT} -q "insert into rmt values (1, 1, '2');" # 0_0_0_0
+${CLICKHOUSE_CLIENT} --mutations_sync=0 -q "alter table rmt update m = m*toInt8(s) where 1;" # 0000000000
+${CLICKHOUSE_CLIENT} -q "insert into rmt values (2, 3, '5');" # 0_2_2_0
+${CLICKHOUSE_CLIENT} --mutations_sync=0 -q "alter table rmt update m = m*toInt8(s) where 1;" # 0000000001
+${CLICKHOUSE_CLIENT} -q "insert into rmt values (3, 7, 'fail');" # 0_4_4_0
+${CLICKHOUSE_CLIENT} --mutations_sync=0 -q "alter table rmt update m = m*toInt8(s) where n=3;" # 0000000002, will fail to mutate 0_4_4_0 to 0_4_4_0_5
+${CLICKHOUSE_CLIENT} -q "insert into rmt values (4, 11, '13');" # 0_6_6_0
+
+${CLICKHOUSE_CLIENT} -q "alter table rmt modify setting max_replicated_mutations_in_queue=1;"
+sleep 5 # test does not rely on this, but it may help to reproduce a bug
+
+${CLICKHOUSE_CLIENT} -q "kill mutation where database=currentDatabase() and table='rmt' and mutation_id='0000000002'";
+${CLICKHOUSE_CLIENT} -q "system sync replica rmt;"
+
+# now check that mutations 0 and 1 are finished
+wait_for_mutation "rmt" "0000000001"
+${CLICKHOUSE_CLIENT} -q "select * from rmt order by n;"
+${CLICKHOUSE_CLIENT} -q "select mutation_id, command, parts_to_do_names, parts_to_do, is_done from system.mutations where database=currentDatabase() and table='rmt';"
+
+${CLICKHOUSE_CLIENT} -q "drop table rmt;"
@ -1,4 +1,5 @@
42
Hello
+waiting default mutation_table mutation_3.txt MODIFY COLUMN `value` UInt64
42
Hello
@ -6,9 +6,9 @@ SET compile_expressions=true;

-- CREATE TABLE will use global profile with default min_count_to_compile_expression=3
-- so retry 3 times
-CREATE TABLE data_01875_1 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number,8) AS SELECT * FROM numbers(16384);
-CREATE TABLE data_01875_2 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number,8) AS SELECT * FROM numbers(16384);
-CREATE TABLE data_01875_3 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number,8) AS SELECT * FROM numbers(16384);
+CREATE TABLE data_01875_1 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number, 8) + 1 AS SELECT * FROM numbers(16384);
+CREATE TABLE data_01875_2 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number, 8) + 1 AS SELECT * FROM numbers(16384);
+CREATE TABLE data_01875_3 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number, 8) + 1 AS SELECT * FROM numbers(16384);

SELECT number FROM data_01875_3 WHERE number = 999;

@ -0,0 +1,11 @@
+table 10 101
+a 0
+a 1
+a 2
+a 3
+a 4
+b 0
+b 1
+b 2
+b 3
+b 4
@ -0,0 +1,30 @@
+select * from ( select 'table' as table, toInt64(10) as rows, toInt64(101) as elements union all select 'another table' as table, toInt64(0) as rows, toInt64(0) as elements ) where rows - elements <> 0;
+
+SELECT
+    label,
+    number
+FROM
+(
+    SELECT
+        'a' AS label,
+        number
+    FROM
+    (
+        SELECT number
+        FROM numbers(10)
+    )
+    UNION ALL
+    SELECT
+        'b' AS label,
+        number
+    FROM
+    (
+        SELECT number
+        FROM numbers(10)
+    )
+)
+WHERE number IN
+(
+    SELECT number
+    FROM numbers(5)
+) order by label, number;