Merge branch 'master' into rs/msan-randomStringUTF8

Robert Schulze, 2023-05-10 20:16:49 +02:00, committed by GitHub
commit 9795d5403f
112 changed files with 1209 additions and 310 deletions

.gitmodules

@@ -338,6 +338,9 @@
 [submodule "contrib/liburing"]
     path = contrib/liburing
     url = https://github.com/axboe/liburing
+[submodule "contrib/libfiu"]
+    path = contrib/libfiu
+    url = https://github.com/ClickHouse/libfiu.git
 [submodule "contrib/isa-l"]
     path = contrib/isa-l
     url = https://github.com/ClickHouse/isa-l.git

@@ -395,6 +395,8 @@ if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
     set(ENABLE_GWP_ASAN OFF)
 endif ()
+option (ENABLE_FIU "Enable Fiu" ON)
 option(WERROR "Enable -Werror compiler option" ON)
 if (WERROR)

@@ -53,7 +53,7 @@ float logf(float x)
     tmp = ix - OFF;
     i = (tmp >> (23 - LOGF_TABLE_BITS)) % N;
     k = (int32_t)tmp >> 23; /* arithmetic shift */
-    iz = ix - (tmp & 0x1ff << 23);
+    iz = ix - (tmp & 0xff800000);
     invc = T[i].invc;
     logc = T[i].logc;
     z = (double_t)asfloat(iz);

@@ -21,7 +21,7 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
 set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
-set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=bfd")
+set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=bfd")
 # Currently, lld does not work with the error:
 # ld.lld: error: section size decrease is too large

@@ -105,6 +105,7 @@ add_contrib (libfarmhash)
 add_contrib (icu-cmake icu)
 add_contrib (h3-cmake h3)
 add_contrib (mariadb-connector-c-cmake mariadb-connector-c)
+add_contrib (libfiu-cmake libfiu)
 if (ENABLE_TESTS)
     add_contrib (googletest-cmake googletest)

@@ -111,6 +111,8 @@ elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "mips")
     set(ARCH "generic")
 elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "ppc64le")
     set(ARCH "ppc64le")
+elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "riscv64")
+    set(ARCH "riscv64")
 else()
     message(FATAL_ERROR "Unknown processor:" ${CMAKE_SYSTEM_PROCESSOR})
 endif()

contrib/libfiu (vendored submodule, new)

@@ -0,0 +1 @@
Subproject commit 7ae4328050ccad8867a05a37af941886f717b6fc

@@ -0,0 +1,20 @@
if (NOT ENABLE_FIU)
    message (STATUS "Not using fiu")
    return ()
endif ()

set(FIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/")

set(FIU_SOURCES
    ${FIU_DIR}/libfiu/fiu.c
    ${FIU_DIR}/libfiu/fiu-rc.c
    ${FIU_DIR}/libfiu/backtrace.c
    ${FIU_DIR}/libfiu/wtable.c
)

set(FIU_HEADERS "${FIU_DIR}/libfiu")

add_library(_fiu ${FIU_SOURCES})
target_compile_definitions(_fiu PUBLIC DUMMY_BACKTRACE)
target_include_directories(_fiu PUBLIC ${FIU_HEADERS})
add_library(ch_contrib::fiu ALIAS _fiu)

@@ -147,6 +147,7 @@ function clone_submodules
         contrib/xxHash
         contrib/simdjson
         contrib/liburing
+        contrib/libfiu
     )
     git submodule sync

@@ -90,15 +90,17 @@ SELECT * FROM mySecondReplacingMT FINAL;
 ### is_deleted

-`is_deleted` — Name of the column with the type of row: `1` is a “deleted“ row, `0` is a “state“ row.
+`is_deleted` — Name of a column used during a merge to determine whether the data in this row represents the state or is to be deleted; `1` is a “deleted“ row, `0` is a “state“ row.

-Column data type — `Int8`.
+Column data type — `UInt8`.

-Can only be enabled when `ver` is used.
-The row is deleted when use the `OPTIMIZE ... FINAL CLEANUP`, or `OPTIMIZE ... FINAL` if the engine settings `clean_deleted_rows` has been set to `Always`.
-No matter the operation on the data, the version must be increased. If two inserted rows have the same version number, the last inserted one is the one kept.
+:::note
+`is_deleted` can only be enabled when `ver` is used.
+
+The row is deleted when `OPTIMIZE ... FINAL CLEANUP` or `OPTIMIZE ... FINAL` is used, or if the engine setting `clean_deleted_rows` has been set to `Always`.
+
+No matter the operation on the data, the version must be increased. If two inserted rows have the same version number, the last inserted row is the one kept.
+:::

 ## Query clauses
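To make the new wording concrete, a minimal sketch of `is_deleted` used together with `ver`; the table and column names here are illustrative, not taken from this commit:

```sql
CREATE TABLE demo
(
    key Int64,
    value String,
    version UInt64,
    is_deleted UInt8
)
ENGINE = ReplacingMergeTree(version, is_deleted)
ORDER BY key;

INSERT INTO demo VALUES (1, 'first', 1, 0);
INSERT INTO demo VALUES (1, 'first', 2, 1); -- same key, higher version, marked deleted

OPTIMIZE TABLE demo FINAL CLEANUP; -- merges and removes rows with is_deleted = 1
SELECT * FROM demo;                -- key 1 is gone
```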

@@ -1410,8 +1410,8 @@ and [enable_writes_to_query_cache](#enable-writes-to-query-cache) control in mor
 Possible values:
-- 0 - Yes
-- 1 - No
+- 0 - Disabled
+- 1 - Enabled
 Default value: `0`.

@@ -3562,7 +3562,7 @@ Default value: `1`.
 If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays.

-## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
+## optimize_use_projections {#optimize_use_projections}
 Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md/#projections) optimization when processing `SELECT` queries.

@@ -3575,7 +3575,7 @@ Default value: `1`.
 ## force_optimize_projection {#force-optimize-projection}
-Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md/#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
+Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md/#projections) in `SELECT` queries, when projection optimization is enabled (see [optimize_use_projections](#optimize_use_projections) setting).
 Possible values:

@@ -0,0 +1,55 @@
---
slug: /en/sql-reference/aggregate-functions/reference/first_value
sidebar_position: 7
---

# first_value

Selects the first encountered value; similar to `any`, but it can accept NULL.

## Examples
```sql
insert into test_data (a,b) values (1,null), (2,3), (4, 5), (6,null)
```
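The insert above assumes a `test_data` table that the doc never defines; a minimal hypothetical schema it could run against (with `b` nullable):

```sql
create table test_data (a Int64, b Nullable(Int64)) engine = Memory
```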
### Example 1

By default, the NULL value is ignored.
```sql
select first_value(b) from test_data
```
```text
┌─first_value(b)─┐
│              3 │
└────────────────┘
```
### Example 2

With `IGNORE NULLS`, the NULL value is ignored explicitly.
```sql
select first_value(b) ignore nulls from test_data
```
```text
┌─first_value_ignore_nulls(b)─┐
│ 3 │
└─────────────────────────────┘
```
### Example 3

With `RESPECT NULLS`, the NULL value is accepted.
```sql
select first_value(b) respect nulls from test_data
```
```text
┌─first_value_respect_nulls(b)─┐
│ ᴺᵁᴸᴸ │
└──────────────────────────────┘
```

@@ -26,6 +26,8 @@ ClickHouse-specific aggregate functions:
 - [anyHeavy](../../../sql-reference/aggregate-functions/reference/anyheavy.md)
 - [anyLast](../../../sql-reference/aggregate-functions/reference/anylast.md)
+- [first_value](../../../sql-reference/aggregate-functions/reference/first_value.md)
+- [last_value](../../../sql-reference/aggregate-functions/reference/last_value.md)
 - [argMin](../../../sql-reference/aggregate-functions/reference/argmin.md)
 - [argMax](../../../sql-reference/aggregate-functions/reference/argmax.md)
 - [avgWeighted](../../../sql-reference/aggregate-functions/reference/avgweighted.md)

@@ -0,0 +1,53 @@
---
slug: /en/sql-reference/aggregate-functions/reference/last_value
sidebar_position: 8
---

# last_value

Selects the last encountered value; similar to `anyLast`, but it can accept NULL.

## Examples
```sql
insert into test_data (a,b) values (1,null), (2,3), (4, 5), (6,null)
```
### Example 1

By default, the NULL value is ignored.
```sql
select last_value(b) from test_data
```
```text
┌─last_value(b)─┐
│             5 │
└───────────────┘
```
### Example 2

With `IGNORE NULLS`, the NULL value is ignored explicitly.
```sql
select last_value(b) ignore nulls from test_data
```
```text
┌─last_value_ignore_nulls(b)─┐
│ 5 │
└────────────────────────────┘
```
### Example 3

With `RESPECT NULLS`, the NULL value is accepted.
```sql
select last_value(b) respect nulls from test_data
```
```text
┌─last_value_respect_nulls(b)─┐
│ ᴺᵁᴸᴸ │
└─────────────────────────────┘
```

@@ -103,7 +103,11 @@ ALTER TABLE table2 [ON CLUSTER cluster] ATTACH PARTITION partition_expr FROM tab
 ```
 This query copies the data partition from `table1` to `table2`.
-Note that data will be deleted neither from `table1` nor from `table2`.
+Note that:
+- Data will be deleted neither from `table1` nor from `table2`.
+- `table1` may be a temporary table.
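A sketch of the newly allowed case, with illustrative names; it assumes a temporary table with a MergeTree engine and that `table2` shares the same structure and partition key:

```sql
CREATE TEMPORARY TABLE staging
(
    id UInt64,
    value String,
    event_date Date
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(event_date)
ORDER BY id;

INSERT INTO staging VALUES (1, 'a', '2023-05-01');

-- table2 must have the same structure and partition key as staging.
ALTER TABLE table2 ATTACH PARTITION 202305 FROM staging;
```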
 For the query to run successfully, the following conditions must be met:

@@ -117,7 +121,12 @@ For the query to run successfully, the following conditions must be met:
 ALTER TABLE table2 [ON CLUSTER cluster] REPLACE PARTITION partition_expr FROM table1
 ```
-This query copies the data partition from the `table1` to `table2` and replaces existing partition in the `table2`. Note that data wont be deleted from `table1`.
+This query copies the data partition from `table1` to `table2` and replaces the existing partition in `table2`.
+Note that:
+- Data won't be deleted from `table1`.
+- `table1` may be a temporary table.

 For the query to run successfully, the following conditions must be met:

@@ -12,7 +12,7 @@ Compressed files are supported. Compression type is detected by the extension of
 **Syntax**

 ```sql
-SELECT <expr_list> INTO OUTFILE file_name [AND STDOUT] [COMPRESSION type [LEVEL level]]
+SELECT <expr_list> INTO OUTFILE file_name [AND STDOUT] [APPEND] [COMPRESSION type [LEVEL level]]
 ```

 `file_name` and `type` are string literals. Supported compression types are: `'none'`, `'gzip'`, `'deflate'`, `'br'`, `'xz'`, `'zstd'`, `'lz4'`, `'bz2'`.

@@ -25,6 +25,7 @@ SELECT <expr_list> INTO OUTFILE file_name [AND STDOUT] [COMPRESSION type [LEVEL
 - The query will fail if a file with the same file name already exists.
 - The default [output format](../../../interfaces/formats.md) is `TabSeparated` (like in the command-line client batch mode). Use [FORMAT](format.md) clause to change it.
 - If `AND STDOUT` is mentioned in the query then the output that is written to the file is also displayed on standard output. If used with compression, the plaintext is displayed on standard output.
+- If `APPEND` is mentioned in the query then the output is appended to an existing file. If compression is used, `APPEND` cannot be used.

 **Example**
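A minimal sketch of the new `APPEND` keyword (the file name is illustrative); as noted above, it cannot be combined with compression:

```sql
SELECT 1 INTO OUTFILE 'numbers.tsv';
SELECT 2 INTO OUTFILE 'numbers.tsv' APPEND; -- without APPEND this would fail: the file already exists
```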

@@ -1,7 +1,7 @@
 ---
 slug: /en/sql-reference/table-functions/dictionary
 sidebar_position: 54
-sidebar_label: dictionary function
+sidebar_label: dictionary
 title: dictionary
 ---

@@ -391,7 +391,7 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT
 ## Projections {#projections}
 Projections are similar to [materialized views](../../../sql-reference/statements/create/view.md#materialized), but are defined at the level of data parts. This provides consistency guarantees along with automatic use in queries.

-Projections are an experimental feature. To enable projection support, set the [allow_experimental_projection_optimization](../../../operations/settings/settings.md#allow-experimental-projection-optimization) setting to `1`. See also the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting.
+Projections are an experimental feature. To enable projection support, set the [optimize_use_projections](../../../operations/settings/settings.md#optimize_use_projections) setting to `1`. See also the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting.

 Projections are not supported in `SELECT` queries with the [FINAL](../../../sql-reference/statements/select/from.md#select-from-final) modifier.

@@ -3588,7 +3588,7 @@ SETTINGS index_granularity = 8192 │
 A string with the identifier of the snapshot from which the [initial dump of the PostgreSQL tables](../../engines/database-engines/materialized-postgresql.md) will be performed. This setting must be used together with [materialized_postgresql_replication_slot](#materialized-postgresql-replication-slot).

-## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
+## optimize_use_projections {#optimize_use_projections}
 Enables or disables support for [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) when processing `SELECT` queries.

@@ -3601,7 +3601,7 @@ SETTINGS index_granularity = 8192 │
 ## force_optimize_projection {#force-optimize-projection}
-Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries when projection support is enabled (see the [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
+Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries when projection support is enabled (see the [optimize_use_projections](#optimize_use_projections) setting).
 Possible values:

@@ -102,7 +102,11 @@ ALTER TABLE table2 [ON CLUSTER cluster] ATTACH PARTITION partition_expr FROM tab
 ```
 Copies the data partition from `table1` to `table2`.
-Note that the data is deleted neither from `table1` nor from `table2`.
+Note that:
+- The data is deleted neither from `table1` nor from `table2`.
+- `table1` may be a temporary table.

 Keep in mind:

@@ -118,7 +122,12 @@ ALTER TABLE table2 [ON CLUSTER cluster] ATTACH PARTITION partition_expr FROM tab
 ALTER TABLE table2 [ON CLUSTER cluster] REPLACE PARTITION partition_expr FROM table1
 ```
-Copies the data partition from `table1` to `table2`, replacing the existing data in `table2`. The data from `table1` is not deleted.
+Copies the data partition from `table1` to `table2`, replacing the existing data in `table2`.
+Note that:
+- The data from `table1` is not deleted.
+- `table1` may be a temporary table.

 Keep in mind:

@@ -1074,7 +1074,7 @@ The corresponding trace log in the ClickHouse server log file confirms that Clic
 <a href="https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#projections" target="_blank">Projections</a> are currently an experimental feature, so we need to tell ClickHouse:

 ```sql
-SET allow_experimental_projection_optimization = 1;
+SET optimize_use_projections = 1;
 ```

@@ -862,7 +862,8 @@ bool Client::processWithFuzzing(const String & full_query)
     const auto * tmp_pos = text_2.c_str();
     const auto ast_3 = parseQuery(tmp_pos, tmp_pos + text_2.size(),
         false /* allow_multi_statements */);
-    const auto text_3 = ast_3->formatForErrorMessage();
+    const auto text_3 = ast_3 ? ast_3->formatForErrorMessage() : "";
     if (text_3 != text_2)
     {
         fmt::print(stderr, "Found error: The query formatting is broken.\n");

@@ -877,7 +878,7 @@ bool Client::processWithFuzzing(const String & full_query)
     fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", query_to_execute);
     fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", ast_2->dumpTree());
     fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", text_2);
-    fmt::print(stderr, "AST-3 (Text-2 parsed):\n'{}'\n", ast_3->dumpTree());
+    fmt::print(stderr, "AST-3 (Text-2 parsed):\n'{}'\n", ast_3 ? ast_3->dumpTree() : "");
     fmt::print(stderr, "Text-3 (AST-3 formatted):\n'{}'\n", text_3);
     fmt::print(stderr, "Text-3 must be equal to Text-2, but it is not.\n");

@@ -185,6 +185,7 @@ enum class AccessType
     M(SYSTEM_FLUSH, "", GROUP, SYSTEM) \
     M(SYSTEM_THREAD_FUZZER, "SYSTEM START THREAD FUZZER, SYSTEM STOP THREAD FUZZER, START THREAD FUZZER, STOP THREAD FUZZER", GLOBAL, SYSTEM) \
     M(SYSTEM_UNFREEZE, "SYSTEM UNFREEZE", GLOBAL, SYSTEM) \
+    M(SYSTEM_FAILPOINT, "SYSTEM ENABLE FAILPOINT, SYSTEM DISABLE FAILPOINT", GLOBAL, SYSTEM) \
     M(SYSTEM, "", GROUP, ALL) /* allows to execute SYSTEM {SHUTDOWN|RELOAD CONFIG|...} */ \
     \
     M(dictGet, "dictHas, dictGetHierarchy, dictIsIn", DICTIONARY, ALL) /* allows to execute functions dictGet(), dictHas(), dictGetHierarchy(), dictIsIn() */\
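A sketch of how the new access type might be exercised. The `SYSTEM ENABLE/DISABLE FAILPOINT` statements come from the alias list above, and `dummy_failpoint` is one of the failpoints registered in `src/Common/FailPoint.cpp` below; the `GRANT` spelling is an assumption based on how other `SYSTEM` access types are granted:

```sql
GRANT SYSTEM FAILPOINT ON *.* TO test_user;

SYSTEM ENABLE FAILPOINT dummy_failpoint;
-- run the code path under test, then clean up:
SYSTEM DISABLE FAILPOINT dummy_failpoint;
```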

@@ -14,11 +14,29 @@ AggregateFunctionPtr createAggregateFunctionAny(const std::string & name, const
     return AggregateFunctionPtr(createAggregateFunctionSingleValue<AggregateFunctionsSingleValue, AggregateFunctionAnyData>(name, argument_types, parameters, settings));
 }

+template <bool RespectNulls = false>
+AggregateFunctionPtr createAggregateFunctionNullableAny(
+    const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings)
+{
+    return AggregateFunctionPtr(
+        createAggregateFunctionSingleNullableValue<AggregateFunctionsSingleValue, AggregateFunctionAnyData, RespectNulls>(
+            name, argument_types, parameters, settings));
+}
+
 AggregateFunctionPtr createAggregateFunctionAnyLast(const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings)
 {
     return AggregateFunctionPtr(createAggregateFunctionSingleValue<AggregateFunctionsSingleValue, AggregateFunctionAnyLastData>(name, argument_types, parameters, settings));
 }

+template <bool RespectNulls = false>
+AggregateFunctionPtr createAggregateFunctionNullableAnyLast(const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings)
+{
+    return AggregateFunctionPtr(createAggregateFunctionSingleNullableValue<
+        AggregateFunctionsSingleValue,
+        AggregateFunctionAnyLastData,
+        RespectNulls>(name, argument_types, parameters, settings));
+}
+
 AggregateFunctionPtr createAggregateFunctionAnyHeavy(const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings)
 {
     return AggregateFunctionPtr(createAggregateFunctionSingleValue<AggregateFunctionsSingleValue, AggregateFunctionAnyHeavyData>(name, argument_types, parameters, settings));

@@ -38,9 +56,15 @@ void registerAggregateFunctionsAny(AggregateFunctionFactory & factory)
     factory.registerFunction("first_value",
         { createAggregateFunctionAny, properties },
         AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("first_value_respect_nulls",
+        { createAggregateFunctionNullableAny<true>, properties },
+        AggregateFunctionFactory::CaseInsensitive);
     factory.registerFunction("last_value",
         { createAggregateFunctionAnyLast, properties },
         AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("last_value_respect_nulls",
+        { createAggregateFunctionNullableAnyLast<true>, properties },
+        AggregateFunctionFactory::CaseInsensitive);
 }

 }

@@ -768,19 +768,23 @@ static_assert(
 /// For any other value types.
+template <bool IS_NULLABLE = false>
 struct SingleValueDataGeneric
 {
 private:
     using Self = SingleValueDataGeneric;
     Field value;
+    bool has_value = false;

 public:
-    static constexpr bool is_nullable = false;
+    static constexpr bool is_nullable = IS_NULLABLE;
     static constexpr bool is_any = false;

     bool has() const
     {
+        if constexpr (is_nullable)
+            return has_value;
         return !value.isNull();
     }
@@ -815,11 +819,15 @@ public:
     void change(const IColumn & column, size_t row_num, Arena *)
     {
         column.get(row_num, value);
+        if constexpr (is_nullable)
+            has_value = true;
     }

     void change(const Self & to, Arena *)
     {
         value = to.value;
+        if constexpr (is_nullable)
+            has_value = true;
     }

     bool changeFirstTime(const IColumn & column, size_t row_num, Arena * arena)
@@ -835,7 +843,7 @@ public:
     bool changeFirstTime(const Self & to, Arena * arena)
     {
-        if (!has() && to.has())
+        if (!has() && (is_nullable || to.has()))
         {
             change(to, arena);
             return true;
@@ -870,27 +878,61 @@ public:
         }
         else
         {
-            Field new_value;
-            column.get(row_num, new_value);
-            if (new_value < value)
-            {
-                value = new_value;
-                return true;
-            }
-            else
-                return false;
+            if constexpr (is_nullable)
+            {
+                Field new_value;
+                column.get(row_num, new_value);
+                if (!value.isNull() && (new_value.isNull() || new_value < value))
+                {
+                    value = new_value;
+                    return true;
+                }
+                else
+                    return false;
+            }
+            else
+            {
+                Field new_value;
+                column.get(row_num, new_value);
+                if (new_value < value)
+                {
+                    value = new_value;
+                    return true;
+                }
+                else
+                    return false;
+            }
         }
     }

     bool changeIfLess(const Self & to, Arena * arena)
     {
-        if (to.has() && (!has() || to.value < value))
-        {
-            change(to, arena);
-            return true;
-        }
-        else
-            return false;
+        if (!to.has())
+            return false;
+        if constexpr (is_nullable)
+        {
+            if (!has())
+            {
+                change(to, arena);
+                return true;
+            }
+            if (to.value.isNull() || (!value.isNull() && to.value < value))
+            {
+                value = to.value;
+                return true;
+            }
+            return false;
+        }
+        else
+        {
+            if (!has() || to.value < value)
+            {
+                change(to, arena);
+                return true;
+            }
+            else
+                return false;
+        }
     }

     bool changeIfGreater(const IColumn & column, size_t row_num, Arena * arena)
@@ -902,27 +944,55 @@ public:
         }
         else
         {
-            Field new_value;
-            column.get(row_num, new_value);
-            if (new_value > value)
-            {
-                value = new_value;
-                return true;
-            }
-            else
-                return false;
+            if constexpr (is_nullable)
+            {
+                Field new_value;
+                column.get(row_num, new_value);
+                if (!value.isNull() && (new_value.isNull() || value < new_value))
+                {
+                    value = new_value;
+                    return true;
+                }
+                return false;
+            }
+            else
+            {
+                Field new_value;
+                column.get(row_num, new_value);
+                if (new_value > value)
+                {
+                    value = new_value;
+                    return true;
+                }
+                else
+                    return false;
+            }
         }
     }

     bool changeIfGreater(const Self & to, Arena * arena)
     {
-        if (to.has() && (!has() || to.value > value))
-        {
-            change(to, arena);
-            return true;
-        }
-        else
-            return false;
+        if (!to.has())
+            return false;
+        if constexpr (is_nullable)
+        {
+            if (!value.isNull() && (to.value.isNull() || value < to.value))
+            {
+                value = to.value;
+                return true;
+            }
+            return false;
+        }
+        else
+        {
+            if (!has() || to.value > value)
+            {
+                change(to, arena);
+                return true;
+            }
+            else
+                return false;
+        }
     }

     bool isEqualTo(const IColumn & column, size_t row_num) const
@@ -1359,6 +1429,17 @@ public:
         this->data(place).insertResultInto(to);
     }

+    AggregateFunctionPtr getOwnNullAdapter(
+        const AggregateFunctionPtr & nested_function,
+        const DataTypes & /*arguments*/,
+        const Array & /*params*/,
+        const AggregateFunctionProperties & /*properties*/) const override
+    {
+        if (Data::is_nullable)
+            return nested_function;
+        return nullptr;
+    }
+
 #if USE_EMBEDDED_COMPILER

     bool isCompilable() const override

@@ -9,7 +9,6 @@
 #include <DataTypes/DataTypeDateTime.h>
 #include <DataTypes/DataTypeString.h>
-
 namespace DB
 {
 struct Settings;

@@ -22,7 +21,6 @@ static IAggregateFunction * createAggregateFunctionSingleValue(const String & na
     assertUnary(name, argument_types);
     const DataTypePtr & argument_type = argument_types[0];
-
     WhichDataType which(argument_type);

 #define DISPATCH(TYPE) \
     if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<TYPE>>>(argument_type); /// NOLINT
@@ -46,7 +44,28 @@ static IAggregateFunction * createAggregateFunctionSingleValue(const String & na
     if (which.idx == TypeIndex::String)
         return new AggregateFunctionTemplate<Data<SingleValueDataString>>(argument_type);

-    return new AggregateFunctionTemplate<Data<SingleValueDataGeneric>>(argument_type);
+    return new AggregateFunctionTemplate<Data<SingleValueDataGeneric<>>>(argument_type);
+}
+
+template <template <typename> class AggregateFunctionTemplate, template <typename> class Data, bool RespectNulls = false>
+static IAggregateFunction * createAggregateFunctionSingleNullableValue(const String & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings)
+{
+    assertNoParameters(name, parameters);
+    assertUnary(name, argument_types);
+    const DataTypePtr & argument_type = argument_types[0];
+    WhichDataType which(argument_type);
+
+    // If the result value could be null (excluding the case that no row is matched),
+    // use SingleValueDataGeneric.
+    if constexpr (!RespectNulls)
+    {
+        return createAggregateFunctionSingleValue<AggregateFunctionTemplate, Data>(name, argument_types, Array(), settings);
+    }
+    else
+    {
+        return new AggregateFunctionTemplate<Data<SingleValueDataGeneric<true>>>(argument_type);
+    }
+    UNREACHABLE();
 }
@@ -79,7 +98,7 @@ static IAggregateFunction * createAggregateFunctionArgMinMaxSecond(const DataTyp
     if (which.idx == TypeIndex::String)
         return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataString>>>(res_type, val_type);

-    return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataGeneric>>>(res_type, val_type);
+    return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataGeneric<>>>>(res_type, val_type);
 }

 template <template <typename> class MinMaxData>

@@ -115,7 +134,7 @@ static IAggregateFunction * createAggregateFunctionArgMinMax(const String & name
     if (which.idx == TypeIndex::String)
         return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataString>(res_type, val_type);

-    return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataGeneric>(res_type, val_type);
+    return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataGeneric<>>(res_type, val_type);
 }

 }

@@ -162,14 +162,13 @@
 class PushOrVisitor
 {
 public:
-    PushOrVisitor(ContextPtr context, size_t max_atoms_, size_t num_atoms_)
+    PushOrVisitor(ContextPtr context, size_t max_atoms_)
         : max_atoms(max_atoms_)
-        , num_atoms(num_atoms_)
         , and_resolver(FunctionFactory::instance().get("and", context))
         , or_resolver(FunctionFactory::instance().get("or", context))
     {}

-    bool visit(QueryTreeNodePtr & node)
+    bool visit(QueryTreeNodePtr & node, size_t num_atoms)
     {
         if (max_atoms && num_atoms > max_atoms)
             return false;

@@ -187,7 +186,10 @@
     {
         auto & arguments = function_node->getArguments().getNodes();
         for (auto & argument : arguments)
-            visit(argument);
+        {
+            if (!visit(argument, num_atoms))
+                return false;
+        }
     }

     if (name == "or")

@@ -217,7 +219,7 @@
         auto rhs = createFunctionNode(or_resolver, std::move(other_node), std::move(and_function_arguments[1]));

         node = createFunctionNode(and_resolver, std::move(lhs), std::move(rhs));
-        visit(node);
+        return visit(node, num_atoms);
     }

     return true;

@@ -225,7 +227,6 @@
 private:
     size_t max_atoms;
-    size_t num_atoms;

     const FunctionOverloadResolverPtr and_resolver;
     const FunctionOverloadResolverPtr or_resolver;

@@ -516,8 +517,8 @@ std::optional<CNF> CNF::tryBuildCNF(const QueryTreeNodePtr & node, ContextPtr co
         visitor.visit(node_cloned, false);
     }

-    if (PushOrVisitor visitor(context, max_atoms, atom_count);
-        !visitor.visit(node_cloned))
+    if (PushOrVisitor visitor(context, max_atoms);
+        !visitor.visit(node_cloned, atom_count))
         return std::nullopt;

     CollectGroupsVisitor collect_visitor;

@@ -353,6 +353,10 @@ target_link_libraries(clickhouse_common_io
         Poco::Foundation
 )

+if (TARGET ch_contrib::fiu)
+    target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::fiu)
+endif()
+
 if (TARGET ch_contrib::cpuid)
     target_link_libraries(clickhouse_common_io PRIVATE ch_contrib::cpuid)
 endif()

@@ -573,6 +573,13 @@ try
         CompressionMethod compression_method = chooseCompressionMethod(out_file, compression_method_string);
         UInt64 compression_level = 3;

+        if (query_with_output->is_outfile_append && compression_method != CompressionMethod::None)
+        {
+            throw Exception(
+                ErrorCodes::BAD_ARGUMENTS,
+                "Cannot append to compressed file. Please use uncompressed file or remove APPEND keyword.");
+        }
+
         if (query_with_output->compression_level)
         {
             const auto & compression_level_node = query_with_output->compression_level->as<ASTLiteral &>();

@@ -587,8 +594,14 @@ try
                     range.second);
         }

+        auto flags = O_WRONLY | O_EXCL;
+        if (query_with_output->is_outfile_append)
+            flags |= O_APPEND;
+        else
+            flags |= O_CREAT;
+
         out_file_buf = wrapWriteBufferWithCompressionMethod(
-            std::make_unique<WriteBufferFromFile>(out_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_EXCL | O_CREAT),
+            std::make_unique<WriteBufferFromFile>(out_file, DBMS_DEFAULT_BUFFER_SIZE, flags),
             compression_method,
             static_cast<int>(compression_level)
         );

@@ -264,7 +264,9 @@ void ColumnFunction::appendArgument(const ColumnWithTypeAndName & column)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot capture column {} because it has incompatible type: "
             "got {}, but {} is expected.", argument_types.size(), column.type->getName(), argument_types[index]->getName());

-    captured_columns.push_back(column);
+    auto captured_column = column;
+    captured_column.column = captured_column.column->convertToFullColumnIfSparse();
+    captured_columns.push_back(std::move(captured_column));
 }

 DataTypePtr ColumnFunction::getResultType() const

@@ -29,21 +29,14 @@
 M(13, SIZES_OF_COLUMNS_IN_TUPLE_DOESNT_MATCH) \
 M(15, DUPLICATE_COLUMN) \
 M(16, NO_SUCH_COLUMN_IN_TABLE) \
-M(17, DELIMITER_IN_STRING_LITERAL_DOESNT_MATCH) \
-M(18, CANNOT_INSERT_ELEMENT_INTO_CONSTANT_COLUMN) \
 M(19, SIZE_OF_FIXED_STRING_DOESNT_MATCH) \
 M(20, NUMBER_OF_COLUMNS_DOESNT_MATCH) \
-M(21, CANNOT_READ_ALL_DATA_FROM_TAB_SEPARATED_INPUT) \
-M(22, CANNOT_PARSE_ALL_VALUE_FROM_TAB_SEPARATED_INPUT) \
 M(23, CANNOT_READ_FROM_ISTREAM) \
 M(24, CANNOT_WRITE_TO_OSTREAM) \
 M(25, CANNOT_PARSE_ESCAPE_SEQUENCE) \
 M(26, CANNOT_PARSE_QUOTED_STRING) \
 M(27, CANNOT_PARSE_INPUT_ASSERTION_FAILED) \
 M(28, CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER) \
-M(29, CANNOT_PRINT_INTEGER) \
-M(30, CANNOT_READ_SIZE_OF_COMPRESSED_CHUNK) \
-M(31, CANNOT_READ_COMPRESSED_CHUNK) \
 M(32, ATTEMPT_TO_READ_AFTER_EOF) \
 M(33, CANNOT_READ_ALL_DATA) \
 M(34, TOO_MANY_ARGUMENTS_FOR_FUNCTION) \

@@ -57,7 +50,6 @@
 M(42, NUMBER_OF_ARGUMENTS_DOESNT_MATCH) \
 M(43, ILLEGAL_TYPE_OF_ARGUMENT) \
 M(44, ILLEGAL_COLUMN) \
-M(45, ILLEGAL_NUMBER_OF_RESULT_COLUMNS) \
 M(46, UNKNOWN_FUNCTION) \
 M(47, UNKNOWN_IDENTIFIER) \
 M(48, NOT_IMPLEMENTED) \
@@ -66,20 +58,14 @@
 M(51, EMPTY_LIST_OF_COLUMNS_QUERIED) \
 M(52, COLUMN_QUERIED_MORE_THAN_ONCE) \
 M(53, TYPE_MISMATCH) \
-M(54, STORAGE_DOESNT_ALLOW_PARAMETERS) \
 M(55, STORAGE_REQUIRES_PARAMETER) \
 M(56, UNKNOWN_STORAGE) \
 M(57, TABLE_ALREADY_EXISTS) \
 M(58, TABLE_METADATA_ALREADY_EXISTS) \
 M(59, ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER) \
 M(60, UNKNOWN_TABLE) \
-M(61, ONLY_FILTER_COLUMN_IN_BLOCK) \
 M(62, SYNTAX_ERROR) \
 M(63, UNKNOWN_AGGREGATE_FUNCTION) \
-M(64, CANNOT_READ_AGGREGATE_FUNCTION_FROM_TEXT) \
-M(65, CANNOT_WRITE_AGGREGATE_FUNCTION_AS_TEXT) \
-M(66, NOT_A_COLUMN) \
-M(67, ILLEGAL_KEY_OF_AGGREGATION) \
 M(68, CANNOT_GET_SIZE_OF_FIELD) \
 M(69, ARGUMENT_OUT_OF_BOUND) \
 M(70, CANNOT_CONVERT_TYPE) \

@@ -109,16 +95,11 @@
 M(94, CANNOT_MERGE_DIFFERENT_AGGREGATED_DATA_VARIANTS) \
 M(95, CANNOT_READ_FROM_SOCKET) \
 M(96, CANNOT_WRITE_TO_SOCKET) \
-M(97, CANNOT_READ_ALL_DATA_FROM_CHUNKED_INPUT) \
-M(98, CANNOT_WRITE_TO_EMPTY_BLOCK_OUTPUT_STREAM) \
 M(99, UNKNOWN_PACKET_FROM_CLIENT) \
 M(100, UNKNOWN_PACKET_FROM_SERVER) \
 M(101, UNEXPECTED_PACKET_FROM_CLIENT) \
 M(102, UNEXPECTED_PACKET_FROM_SERVER) \
-M(103, RECEIVED_DATA_FOR_WRONG_QUERY_ID) \
 M(104, TOO_SMALL_BUFFER_SIZE) \
-M(105, CANNOT_READ_HISTORY) \
-M(106, CANNOT_APPEND_HISTORY) \
 M(107, FILE_DOESNT_EXIST) \
 M(108, NO_DATA_TO_INSERT) \
 M(109, CANNOT_BLOCK_SIGNAL) \

@@ -137,7 +118,6 @@
 M(123, UNKNOWN_TYPE_OF_AST_NODE) \
 M(124, INCORRECT_ELEMENT_OF_SET) \
 M(125, INCORRECT_RESULT_OF_SCALAR_SUBQUERY) \
-M(126, CANNOT_GET_RETURN_TYPE) \
 M(127, ILLEGAL_INDEX) \
 M(128, TOO_LARGE_ARRAY_SIZE) \
 M(129, FUNCTION_IS_SPECIAL) \

@@ -149,30 +129,17 @@
 M(137, UNKNOWN_ELEMENT_IN_CONFIG) \
 M(138, EXCESSIVE_ELEMENT_IN_CONFIG) \
 M(139, NO_ELEMENTS_IN_CONFIG) \
-M(140, ALL_REQUESTED_COLUMNS_ARE_MISSING) \
 M(141, SAMPLING_NOT_SUPPORTED) \
 M(142, NOT_FOUND_NODE) \
-M(143, FOUND_MORE_THAN_ONE_NODE) \
-M(144, FIRST_DATE_IS_BIGGER_THAN_LAST_DATE) \
 M(145, UNKNOWN_OVERFLOW_MODE) \
-M(146, QUERY_SECTION_DOESNT_MAKE_SENSE) \
-M(147, NOT_FOUND_FUNCTION_ELEMENT_FOR_AGGREGATE) \
-M(148, NOT_FOUND_RELATION_ELEMENT_FOR_CONDITION) \
-M(149, NOT_FOUND_RHS_ELEMENT_FOR_CONDITION) \
-M(150, EMPTY_LIST_OF_ATTRIBUTES_PASSED) \
-M(151, INDEX_OF_COLUMN_IN_SORT_CLAUSE_IS_OUT_OF_RANGE) \
 M(152, UNKNOWN_DIRECTION_OF_SORTING) \
 M(153, ILLEGAL_DIVISION) \
-M(154, AGGREGATE_FUNCTION_NOT_APPLICABLE) \
-M(155, UNKNOWN_RELATION) \
 M(156, DICTIONARIES_WAS_NOT_LOADED) \
-M(157, ILLEGAL_OVERFLOW_MODE) \
 M(158, TOO_MANY_ROWS) \
 M(159, TIMEOUT_EXCEEDED) \
 M(160, TOO_SLOW) \
 M(161, TOO_MANY_COLUMNS) \
 M(162, TOO_DEEP_SUBQUERIES) \
-M(163, TOO_DEEP_PIPELINE) \
 M(164, READONLY) \
 M(165, TOO_MANY_TEMPORARY_COLUMNS) \
 M(166, TOO_MANY_TEMPORARY_NON_CONST_COLUMNS) \
@@ -183,20 +150,14 @@
 M(172, CANNOT_CREATE_DIRECTORY) \
 M(173, CANNOT_ALLOCATE_MEMORY) \
 M(174, CYCLIC_ALIASES) \
-M(176, CHUNK_NOT_FOUND) \
-M(177, DUPLICATE_CHUNK_NAME) \
-M(178, MULTIPLE_ALIASES_FOR_EXPRESSION) \
 M(179, MULTIPLE_EXPRESSIONS_FOR_ALIAS) \
 M(180, THERE_IS_NO_PROFILE) \
 M(181, ILLEGAL_FINAL) \
 M(182, ILLEGAL_PREWHERE) \
 M(183, UNEXPECTED_EXPRESSION) \
 M(184, ILLEGAL_AGGREGATION) \
-M(185, UNSUPPORTED_MYISAM_BLOCK_TYPE) \
 M(186, UNSUPPORTED_COLLATION_LOCALE) \
 M(187, COLLATION_COMPARISON_FAILED) \
-M(188, UNKNOWN_ACTION) \
-M(189, TABLE_MUST_NOT_BE_CREATED_MANUALLY) \
 M(190, SIZES_OF_ARRAYS_DONT_MATCH) \
 M(191, SET_SIZE_LIMIT_EXCEEDED) \
 M(192, UNKNOWN_USER) \

@@ -204,15 +165,12 @@
 M(194, REQUIRED_PASSWORD) \
 M(195, IP_ADDRESS_NOT_ALLOWED) \
 M(196, UNKNOWN_ADDRESS_PATTERN_TYPE) \
-M(197, SERVER_REVISION_IS_TOO_OLD) \
 M(198, DNS_ERROR) \
 M(199, UNKNOWN_QUOTA) \
-M(200, QUOTA_DOESNT_ALLOW_KEYS) \
 M(201, QUOTA_EXCEEDED) \
 M(202, TOO_MANY_SIMULTANEOUS_QUERIES) \
 M(203, NO_FREE_CONNECTION) \
 M(204, CANNOT_FSYNC) \
-M(205, NESTED_TYPE_TOO_DEEP) \
 M(206, ALIAS_REQUIRED) \
 M(207, AMBIGUOUS_IDENTIFIER) \
 M(208, EMPTY_NESTED_TABLE) \

@@ -229,7 +187,6 @@
 M(219, DATABASE_NOT_EMPTY) \
 M(220, DUPLICATE_INTERSERVER_IO_ENDPOINT) \
 M(221, NO_SUCH_INTERSERVER_IO_ENDPOINT) \
-M(222, ADDING_REPLICA_TO_NON_EMPTY_TABLE) \
 M(223, UNEXPECTED_AST_STRUCTURE) \
 M(224, REPLICA_IS_ALREADY_ACTIVE) \
 M(225, NO_ZOOKEEPER) \

@@ -253,9 +210,7 @@
 M(243, NOT_ENOUGH_SPACE) \
 M(244, UNEXPECTED_ZOOKEEPER_ERROR) \
 M(246, CORRUPTED_DATA) \
-M(247, INCORRECT_MARK) \
 M(248, INVALID_PARTITION_VALUE) \
-M(250, NOT_ENOUGH_BLOCK_NUMBERS) \
 M(251, NO_SUCH_REPLICA) \
 M(252, TOO_MANY_PARTS) \
 M(253, REPLICA_ALREADY_EXISTS) \
@@ -271,8 +226,6 @@
 M(264, INCOMPATIBLE_TYPE_OF_JOIN) \
 M(265, NO_AVAILABLE_REPLICA) \
 M(266, MISMATCH_REPLICAS_DATA_SOURCES) \
-M(267, STORAGE_DOESNT_SUPPORT_PARALLEL_REPLICAS) \
-M(268, CPUID_ERROR) \
 M(269, INFINITE_LOOP) \
 M(270, CANNOT_COMPRESS) \
 M(271, CANNOT_DECOMPRESS) \

@@ -295,9 +248,7 @@
 M(290, LIMIT_EXCEEDED) \
 M(291, DATABASE_ACCESS_DENIED) \
 M(293, MONGODB_CANNOT_AUTHENTICATE) \
-M(294, INVALID_BLOCK_EXTRA_INFO) \
 M(295, RECEIVED_EMPTY_DATA) \
-M(296, NO_REMOTE_SHARD_FOUND) \
 M(297, SHARD_HAS_NO_CONNECTIONS) \
 M(298, CANNOT_PIPE) \
 M(299, CANNOT_FORK) \

@@ -311,13 +262,10 @@
 M(307, TOO_MANY_BYTES) \
 M(308, UNEXPECTED_NODE_IN_ZOOKEEPER) \
 M(309, FUNCTION_CANNOT_HAVE_PARAMETERS) \
-M(317, INVALID_SHARD_WEIGHT) \
 M(318, INVALID_CONFIG_PARAMETER) \
 M(319, UNKNOWN_STATUS_OF_INSERT) \
 M(321, VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE) \
-M(335, BARRIER_TIMEOUT) \
 M(336, UNKNOWN_DATABASE_ENGINE) \
-M(337, DDL_GUARD_IS_ACTIVE) \
 M(341, UNFINISHED) \
 M(342, METADATA_MISMATCH) \
 M(344, SUPPORT_IS_DISABLED) \

@@ -325,14 +273,10 @@
 M(346, CANNOT_CONVERT_CHARSET) \
 M(347, CANNOT_LOAD_CONFIG) \
 M(349, CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN) \
-M(350, INCOMPATIBLE_SOURCE_TABLES) \
-M(351, AMBIGUOUS_TABLE_NAME) \
 M(352, AMBIGUOUS_COLUMN_NAME) \
 M(353, INDEX_OF_POSITIONAL_ARGUMENT_IS_OUT_OF_RANGE) \
 M(354, ZLIB_INFLATE_FAILED) \
 M(355, ZLIB_DEFLATE_FAILED) \
-M(356, BAD_LAMBDA) \
-M(357, RESERVED_IDENTIFIER_NAME) \
 M(358, INTO_OUTFILE_NOT_ALLOWED) \
 M(359, TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT) \
 M(360, CANNOT_CREATE_CHARSET_CONVERTER) \

@@ -341,7 +285,6 @@
 M(363, CANNOT_CREATE_IO_BUFFER) \
 M(364, RECEIVED_ERROR_TOO_MANY_REQUESTS) \
 M(366, SIZES_OF_NESTED_COLUMNS_ARE_INCONSISTENT) \
-M(367, TOO_MANY_FETCHES) \
 M(369, ALL_REPLICAS_ARE_STALE) \
 M(370, DATA_TYPE_CANNOT_BE_USED_IN_TABLES) \
 M(371, INCONSISTENT_CLUSTER_DEFINITION) \

@@ -352,7 +295,6 @@
 M(376, CANNOT_PARSE_UUID) \
 M(377, ILLEGAL_SYNTAX_FOR_DATA_TYPE) \
 M(378, DATA_TYPE_CANNOT_HAVE_ARGUMENTS) \
-M(379, UNKNOWN_STATUS_OF_DISTRIBUTED_DDL_TASK) \
 M(380, CANNOT_KILL) \
 M(381, HTTP_LENGTH_REQUIRED) \
 M(382, CANNOT_LOAD_CATBOOST_MODEL) \
@@ -378,11 +320,9 @@
 M(402, CANNOT_IOSETUP) \
 M(403, INVALID_JOIN_ON_EXPRESSION) \
 M(404, BAD_ODBC_CONNECTION_STRING) \
-M(405, PARTITION_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT) \
 M(406, TOP_AND_LIMIT_TOGETHER) \
 M(407, DECIMAL_OVERFLOW) \
 M(408, BAD_REQUEST_PARAMETER) \
-M(409, EXTERNAL_EXECUTABLE_NOT_FOUND) \
 M(410, EXTERNAL_SERVER_IS_NOT_RESPONDING) \
 M(411, PTHREAD_ERROR) \
 M(412, NETLINK_ERROR) \

@@ -399,7 +339,6 @@
 M(424, CANNOT_LINK) \
 M(425, SYSTEM_ERROR) \
 M(427, CANNOT_COMPILE_REGEXP) \
-M(428, UNKNOWN_LOG_LEVEL) \
 M(429, FAILED_TO_GETPWUID) \
 M(430, MISMATCHING_USERS_FOR_PROCESS_AND_DATA) \
 M(431, ILLEGAL_SYNTAX_FOR_CODEC_TYPE) \

@@ -433,7 +372,6 @@
 M(459, CANNOT_SET_THREAD_PRIORITY) \
 M(460, CANNOT_CREATE_TIMER) \
 M(461, CANNOT_SET_TIMER_PERIOD) \
-M(462, CANNOT_DELETE_TIMER) \
 M(463, CANNOT_FCNTL) \
 M(464, CANNOT_PARSE_ELF) \
 M(465, CANNOT_PARSE_DWARF) \

@@ -456,15 +394,12 @@
 M(482, DICTIONARY_ACCESS_DENIED) \
 M(483, TOO_MANY_REDIRECTS) \
 M(484, INTERNAL_REDIS_ERROR) \
-M(485, SCALAR_ALREADY_EXISTS) \
 M(487, CANNOT_GET_CREATE_DICTIONARY_QUERY) \
-M(488, UNKNOWN_DICTIONARY) \
 M(489, INCORRECT_DICTIONARY_DEFINITION) \
 M(490, CANNOT_FORMAT_DATETIME) \
 M(491, UNACCEPTABLE_URL) \
 M(492, ACCESS_ENTITY_NOT_FOUND) \
 M(493, ACCESS_ENTITY_ALREADY_EXISTS) \
-M(494, ACCESS_ENTITY_FOUND_DUPLICATES) \
 M(495, ACCESS_STORAGE_READONLY) \
 M(496, QUOTA_REQUIRES_CLIENT_KEY) \
 M(497, ACCESS_DENIED) \

@@ -475,8 +410,6 @@
 M(502, CANNOT_SIGQUEUE) \
 M(503, AGGREGATE_FUNCTION_THROW) \
 M(504, FILE_ALREADY_EXISTS) \
-M(505, CANNOT_DELETE_DIRECTORY) \
-M(506, UNEXPECTED_ERROR_CODE) \
 M(507, UNABLE_TO_SKIP_UNUSED_SHARDS) \
 M(508, UNKNOWN_ACCESS_TYPE) \
 M(509, INVALID_GRANT) \

@@ -501,8 +434,6 @@
 M(530, CANNOT_CONNECT_RABBITMQ) \
 M(531, CANNOT_FSTAT) \
 M(532, LDAP_ERROR) \
-M(533, INCONSISTENT_RESERVATIONS) \
-M(534, NO_RESERVATIONS_PROVIDED) \
 M(535, UNKNOWN_RAID_TYPE) \
 M(536, CANNOT_RESTORE_FROM_FIELD_DUMP) \
 M(537, ILLEGAL_MYSQL_VARIABLE) \

@@ -518,8 +449,6 @@
 M(547, INVALID_RAID_TYPE) \
 M(548, UNKNOWN_VOLUME) \
 M(549, DATA_TYPE_CANNOT_BE_USED_IN_KEY) \
-M(550, CONDITIONAL_TREE_PARENT_NOT_FOUND) \
-M(551, ILLEGAL_PROJECTION_MANIPULATOR) \
 M(552, UNRECOGNIZED_ARGUMENTS) \
 M(553, LZMA_STREAM_ENCODER_FAILED) \
 M(554, LZMA_STREAM_DECODER_FAILED) \

@@ -580,8 +509,6 @@
 M(609, FUNCTION_ALREADY_EXISTS) \
 M(610, CANNOT_DROP_FUNCTION) \
 M(611, CANNOT_CREATE_RECURSIVE_FUNCTION) \
-M(612, OBJECT_ALREADY_STORED_ON_DISK) \
-M(613, OBJECT_WAS_NOT_STORED_ON_DISK) \
 M(614, POSTGRESQL_CONNECTION_FAILURE) \
 M(615, CANNOT_ADVISE) \
 M(616, UNKNOWN_READ_METHOD) \

@@ -612,9 +539,7 @@
 M(641, CANNOT_APPEND_TO_FILE) \
 M(642, CANNOT_PACK_ARCHIVE) \
 M(643, CANNOT_UNPACK_ARCHIVE) \
-M(644, REMOTE_FS_OBJECT_CACHE_ERROR) \
 M(645, NUMBER_OF_DIMENSIONS_MISMATCHED) \
-M(646, CANNOT_BACKUP_DATABASE) \
 M(647, CANNOT_BACKUP_TABLE) \
 M(648, WRONG_DDL_RENAMING_SETTINGS) \
 M(649, INVALID_TRANSACTION) \

src/Common/FailPoint.cpp (new file)

@@ -0,0 +1,166 @@
#include <Common/Exception.h>
#include <Common/FailPoint.h>
#include <boost/core/noncopyable.hpp>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <optional>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
};
#if FIU_ENABLE
static struct InitFiu
{
InitFiu()
{
fiu_init(0);
}
} init_fiu;
#endif
/// We should define different types of failpoints here. There are four types of them:
/// - ONCE: the failpoint will only be triggered once.
/// - REGULAR: the failpoint will always be triggered until disableFailPoint is called.
/// - PAUSEABLE_ONCE: the failpoint will be blocked one time when pauseFailPoint is called, until disableFailPoint is called.
/// - PAUSEABLE: the failpoint will be blocked every time when pauseFailPoint is called, until disableFailPoint is called.
#define APPLY_FOR_FAILPOINTS(ONCE, REGULAR, PAUSEABLE_ONCE, PAUSEABLE) \
ONCE(replicated_merge_tree_commit_zk_fail_after_op) \
REGULAR(dummy_failpoint) \
PAUSEABLE_ONCE(dummy_pausable_failpoint_once) \
PAUSEABLE(dummy_pausable_failpoint)
namespace FailPoints
{
#define M(NAME) extern const char(NAME)[] = #NAME "";
APPLY_FOR_FAILPOINTS(M, M, M, M)
#undef M
}
std::unordered_map<String, std::shared_ptr<FailPointChannel>> FailPointInjection::fail_point_wait_channels;
std::mutex FailPointInjection::mu;
class FailPointChannel : private boost::noncopyable
{
public:
explicit FailPointChannel(UInt64 timeout_)
: timeout_ms(timeout_)
{}
FailPointChannel()
: timeout_ms(0)
{}
void wait()
{
std::unique_lock lock(m);
if (timeout_ms == 0)
cv.wait(lock);
else
cv.wait_for(lock, std::chrono::milliseconds(timeout_ms));
}
void notifyAll()
{
std::unique_lock lock(m);
cv.notify_all();
}
private:
UInt64 timeout_ms;
std::mutex m;
std::condition_variable cv;
};
void FailPointInjection::enablePauseFailPoint(const String & fail_point_name, UInt64 time_ms)
{
#define SUB_M(NAME, flags) \
if (fail_point_name == FailPoints::NAME) \
{ \
/* FIU_ONETIME -- Only fail once; the point of failure will be automatically disabled afterwards.*/ \
fiu_enable(FailPoints::NAME, 1, nullptr, flags); \
std::lock_guard lock(mu); \
fail_point_wait_channels.try_emplace(FailPoints::NAME, std::make_shared<FailPointChannel>(time_ms)); \
return; \
}
#define ONCE(NAME)
#define REGULAR(NAME)
#define PAUSEABLE_ONCE(NAME) SUB_M(NAME, FIU_ONETIME)
#define PAUSEABLE(NAME) SUB_M(NAME, 0)
APPLY_FOR_FAILPOINTS(ONCE, REGULAR, PAUSEABLE_ONCE, PAUSEABLE)
#undef SUB_M
#undef ONCE
#undef REGULAR
#undef PAUSEABLE_ONCE
#undef PAUSEABLE
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot find fail point {}", fail_point_name);
}
void FailPointInjection::pauseFailPoint(const String & fail_point_name)
{
fiu_do_on(fail_point_name.c_str(), FailPointInjection::wait(fail_point_name););
}
void FailPointInjection::enableFailPoint(const String & fail_point_name)
{
#if FIU_ENABLE
#define SUB_M(NAME, flags, pause) \
if (fail_point_name == FailPoints::NAME) \
{ \
/* FIU_ONETIME -- Only fail once; the point of failure will be automatically disabled afterwards.*/ \
fiu_enable(FailPoints::NAME, 1, nullptr, flags); \
if (pause) \
{ \
std::lock_guard lock(mu); \
fail_point_wait_channels.try_emplace(FailPoints::NAME, std::make_shared<FailPointChannel>()); \
} \
return; \
}
#define ONCE(NAME) SUB_M(NAME, FIU_ONETIME, 0)
#define REGULAR(NAME) SUB_M(NAME, 0, 0)
#define PAUSEABLE_ONCE(NAME) SUB_M(NAME, FIU_ONETIME, 1)
#define PAUSEABLE(NAME) SUB_M(NAME, 0, 1)
APPLY_FOR_FAILPOINTS(ONCE, REGULAR, PAUSEABLE_ONCE, PAUSEABLE)
#undef SUB_M
#undef ONCE
#undef REGULAR
#undef PAUSEABLE_ONCE
#undef PAUSEABLE
#endif
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot find fail point {}", fail_point_name);
}
void FailPointInjection::disableFailPoint(const String & fail_point_name)
{
std::lock_guard lock(mu);
if (auto iter = fail_point_wait_channels.find(fail_point_name); iter != fail_point_wait_channels.end())
{
/// cannot rely on destruction to do the notify_all, because
/// if someone is waiting on this, the destructor will never be called.
iter->second->notifyAll();
fail_point_wait_channels.erase(iter);
}
fiu_disable(fail_point_name.c_str());
}
void FailPointInjection::wait(const String & fail_point_name)
{
std::unique_lock lock(mu);
if (auto iter = fail_point_wait_channels.find(fail_point_name); iter == fail_point_wait_channels.end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Can not find channel for fail point {}", fail_point_name);
else
{
auto ptr = iter->second;
lock.unlock();
ptr->wait();
}
};
}

53
src/Common/FailPoint.h Normal file
View File

@ -0,0 +1,53 @@
#pragma once
#include "config.h"
#include <Common/Exception.h>
#include <Core/Types.h>
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdocumentation"
#pragma clang diagnostic ignored "-Wreserved-macro-identifier"
#endif
#include <fiu.h>
#include <fiu-control.h>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#include <any>
#include <unordered_map>
namespace DB
{
/// This is a simple named failpoint library inspired by https://github.com/pingcap/tiflash
/// The usage is simple:
/// 1. define failpoint with a 'failpoint_name' in FailPoint.cpp
/// 2. inject failpoint in normal code
/// 2.1 use fiu_do_on, which can inject arbitrary code blocks, when it is a regular-triggered / once-triggered failpoint
/// 2.2 use pauseFailPoint when it is a pausable failpoint
/// 3. in test file, we can use system failpoint enable/disable 'failpoint_name'
class FailPointChannel;
class FailPointInjection
{
public:
static void pauseFailPoint(const String & fail_point_name);
static void enableFailPoint(const String & fail_point_name);
static void enablePauseFailPoint(const String & fail_point_name, UInt64 time);
static void disableFailPoint(const String & fail_point_name);
static void wait(const String & fail_point_name);
private:
static std::mutex mu;
static std::unordered_map<String, std::shared_ptr<FailPointChannel>> fail_point_wait_channels;
};
}
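
As a quick orientation for step 3 above, here is a hedged sketch of how a test could toggle one of the dummy failpoints declared in FailPoint.cpp (assuming a server built with ENABLE_FIU):

    SYSTEM ENABLE FAILPOINT dummy_failpoint;
    -- code guarded by fiu_do_on("dummy_failpoint", ...) now triggers on every pass, until:
    SYSTEM DISABLE FAILPOINT dummy_failpoint;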

View File

@ -19,17 +19,26 @@ namespace ErrorCodes
class RandomFaultInjection class RandomFaultInjection
{ {
public: public:
bool must_fail_after_op = false;
bool must_fail_before_op = false;
RandomFaultInjection(double probability, UInt64 seed_) : rndgen(seed_), distribution(probability) { } RandomFaultInjection(double probability, UInt64 seed_) : rndgen(seed_), distribution(probability) { }
void beforeOperation() void beforeOperation()
{ {
if (distribution(rndgen)) if (distribution(rndgen) || must_fail_before_op)
{
must_fail_before_op = false;
throw zkutil::KeeperException("Fault injection before operation", Coordination::Error::ZSESSIONEXPIRED); throw zkutil::KeeperException("Fault injection before operation", Coordination::Error::ZSESSIONEXPIRED);
}
} }
void afterOperation() void afterOperation()
{ {
if (distribution(rndgen)) if (distribution(rndgen) || must_fail_after_op)
{
must_fail_after_op = false;
throw zkutil::KeeperException("Fault injection after operation", Coordination::Error::ZOPERATIONTIMEOUT); throw zkutil::KeeperException("Fault injection after operation", Coordination::Error::ZOPERATIONTIMEOUT);
}
} }
private: private:
@ -42,6 +51,9 @@ private:
/// ///
class ZooKeeperWithFaultInjection class ZooKeeperWithFaultInjection
{ {
template<bool async_insert>
friend class ReplicatedMergeTreeSinkImpl;
using zk = zkutil::ZooKeeper; using zk = zkutil::ZooKeeper;
zk::Ptr keeper; zk::Ptr keeper;

View File

@ -57,4 +57,5 @@
#cmakedefine01 USE_SKIM #cmakedefine01 USE_SKIM
#cmakedefine01 USE_OPENSSL_INTREE #cmakedefine01 USE_OPENSSL_INTREE
#cmakedefine01 USE_ULID #cmakedefine01 USE_ULID
#cmakedefine01 FIU_ENABLE
#cmakedefine01 USE_BCRYPT #cmakedefine01 USE_BCRYPT

View File

@ -560,6 +560,7 @@ class IColumn;
M(Bool, asterisk_include_alias_columns, false, "Include ALIAS columns for wildcard query", 0) \ M(Bool, asterisk_include_alias_columns, false, "Include ALIAS columns for wildcard query", 0) \
M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \ M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \
M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \ M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \
M(Bool, optimize_use_projections, true, "Automatically choose projections to perform SELECT query", 0) \
M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \ M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \
M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \ M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
M(Bool, async_query_sending_for_remote, true, "Asynchronously create connections and send query to shards in remote query", 0) \ M(Bool, async_query_sending_for_remote, true, "Asynchronously create connections and send query to shards in remote query", 0) \
@ -715,26 +716,12 @@ class IColumn;
M(Milliseconds, storage_system_stack_trace_pipe_read_timeout_ms, 100, "Maximum time to read from a pipe for receiving information from the threads when querying the `system.stack_trace` table. This setting is used for testing purposes and not meant to be changed by users.", 0) \ M(Milliseconds, storage_system_stack_trace_pipe_read_timeout_ms, 100, "Maximum time to read from a pipe for receiving information from the threads when querying the `system.stack_trace` table. This setting is used for testing purposes and not meant to be changed by users.", 0) \
\ \
M(Bool, parallelize_output_from_storages, true, "Parallelize output for reading step from storage. It allows parallelizing query processing right after reading from storage if possible", 0) \ M(Bool, parallelize_output_from_storages, true, "Parallelize output for reading step from storage. It allows parallelizing query processing right after reading from storage if possible", 0) \
\
/** Experimental functions */ \
M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \
M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \
M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions (hashid, etc)", 0) \
M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \
M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \ M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \
M(String, ann_index_select_query_params, "", "Parameters passed to ANN indexes in SELECT queries, the format is 'param1=x, param2=y, ...'", 0) \ M(String, ann_index_select_query_params, "", "Parameters passed to ANN indexes in SELECT queries, the format is 'param1=x, param2=y, ...'", 0) \
M(UInt64, max_limit_for_ann_queries, 1000000, "Maximum limit value for using ANN indexes is used to prevent memory overflow in search queries for indexes", 0) \
M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \
M(Bool, count_distinct_optimization, false, "Rewrite count distinct to subquery of group by", 0) \ M(Bool, count_distinct_optimization, false, "Rewrite count distinct to subquery of group by", 0) \
M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \
M(TransactionsWaitCSNMode, wait_changes_become_visible_after_commit_mode, TransactionsWaitCSNMode::WAIT_UNKNOWN, "Wait for committed changes to become actually visible in the latest snapshot", 0) \
M(Bool, implicit_transaction, false, "If enabled and not already inside a transaction, wraps the query inside a full transaction (begin + commit or rollback)", 0) \
M(Bool, throw_if_no_data_to_insert, true, "Enables or disables empty INSERTs, enabled by default", 0) \ M(Bool, throw_if_no_data_to_insert, true, "Enables or disables empty INSERTs, enabled by default", 0) \
M(Bool, compatibility_ignore_auto_increment_in_create_table, false, "Ignore AUTO_INCREMENT keyword in column declaration if true, otherwise return error. It simplifies migration from MySQL", 0) \ M(Bool, compatibility_ignore_auto_increment_in_create_table, false, "Ignore AUTO_INCREMENT keyword in column declaration if true, otherwise return error. It simplifies migration from MySQL", 0) \
M(Bool, multiple_joins_try_to_keep_original_names, false, "Do not add aliases to top level expression list on multiple joins rewrite", 0) \ M(Bool, multiple_joins_try_to_keep_original_names, false, "Do not add aliases to top level expression list on multiple joins rewrite", 0) \
M(UInt64, grace_hash_join_initial_buckets, 1, "Initial number of grace hash join buckets", 0) \
M(UInt64, grace_hash_join_max_buckets, 1024, "Limit on the number of grace hash join buckets", 0) \
M(Bool, optimize_distinct_in_order, false, "This optimization has a bug and it is disabled. Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \
M(Bool, optimize_sorting_by_input_stream_properties, true, "Optimize sorting by sorting properties of input stream", 0) \ M(Bool, optimize_sorting_by_input_stream_properties, true, "Optimize sorting by sorting properties of input stream", 0) \
M(UInt64, insert_keeper_max_retries, 20, "Max retries for keeper operations during insert", 0) \ M(UInt64, insert_keeper_max_retries, 20, "Max retries for keeper operations during insert", 0) \
M(UInt64, insert_keeper_retry_initial_backoff_ms, 100, "Initial backoff timeout for keeper operations during insert", 0) \ M(UInt64, insert_keeper_retry_initial_backoff_ms, 100, "Initial backoff timeout for keeper operations during insert", 0) \
@ -743,10 +730,24 @@ class IColumn;
M(UInt64, insert_keeper_fault_injection_seed, 0, "0 - random seed, otherwise the setting value", 0) \ M(UInt64, insert_keeper_fault_injection_seed, 0, "0 - random seed, otherwise the setting value", 0) \
M(Bool, force_aggregation_in_order, false, "Force use of aggregation in order on remote nodes during distributed aggregation. PLEASE, NEVER CHANGE THIS SETTING VALUE MANUALLY!", IMPORTANT) \ M(Bool, force_aggregation_in_order, false, "Force use of aggregation in order on remote nodes during distributed aggregation. PLEASE, NEVER CHANGE THIS SETTING VALUE MANUALLY!", IMPORTANT) \
M(UInt64, http_max_request_param_data_size, 10_MiB, "Limit on size of request data used as a query parameter in predefined HTTP requests.", 0) \ M(UInt64, http_max_request_param_data_size, 10_MiB, "Limit on size of request data used as a query parameter in predefined HTTP requests.", 0) \
M(Bool, function_json_value_return_type_allow_nullable, false, "Allow function JSON_VALUE to return nullable type.", 0) \
M(Bool, function_json_value_return_type_allow_complex, false, "Allow function JSON_VALUE to return complex type, such as: struct, array, map.", 0) \
\
/** Experimental functions */ \
M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \
M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \
M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions (hashid, etc)", 0) \
M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \
M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \
M(UInt64, max_limit_for_ann_queries, 1000000, "Maximum limit value for using ANN indexes is used to prevent memory overflow in search queries for indexes", 0) \
M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \
M(TransactionsWaitCSNMode, wait_changes_become_visible_after_commit_mode, TransactionsWaitCSNMode::WAIT_UNKNOWN, "Wait for committed changes to become actually visible in the latest snapshot", 0) \
M(Bool, implicit_transaction, false, "If enabled and not already inside a transaction, wraps the query inside a full transaction (begin + commit or rollback)", 0) \
M(UInt64, grace_hash_join_initial_buckets, 1, "Initial number of grace hash join buckets", 0) \
M(UInt64, grace_hash_join_max_buckets, 1024, "Limit on the number of grace hash join buckets", 0) \
M(Bool, optimize_distinct_in_order, false, "This optimization has a bug and it is disabled. Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \
M(Bool, allow_experimental_undrop_table_query, false, "Allow to use undrop query to restore dropped table in a limited time", 0) \ M(Bool, allow_experimental_undrop_table_query, false, "Allow to use undrop query to restore dropped table in a limited time", 0) \
M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \ M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \
M(Bool, function_json_value_return_type_allow_nullable, false, "Allow function to return nullable type.", 0) \
M(Bool, function_json_value_return_type_allow_complex, false, "Allow function to return complex type, such as: struct, array, map.", 0) \
// End of COMMON_SETTINGS // End of COMMON_SETTINGS
// Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS. // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS.
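
A hedged illustration of the two JSON_VALUE settings introduced in the block above (the JSON literal is made up):

    SELECT JSON_VALUE('{"a": null}', '$.a')
    SETTINGS function_json_value_return_type_allow_nullable = 1;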

View File

@ -246,7 +246,8 @@ void SerializationInfoByName::writeJSON(WriteBuffer & out) const
return writeString(oss.str(), out); return writeString(oss.str(), out);
} }
void SerializationInfoByName::readJSON(ReadBuffer & in) SerializationInfoByName SerializationInfoByName::readJSON(
const NamesAndTypesList & columns, const Settings & settings, ReadBuffer & in)
{ {
String json_str; String json_str;
readString(json_str, in); readString(json_str, in);
@ -262,8 +263,13 @@ void SerializationInfoByName::readJSON(ReadBuffer & in)
"Unknown version of serialization infos ({}). Should be less or equal than {}", "Unknown version of serialization infos ({}). Should be less or equal than {}",
object->getValue<size_t>(KEY_VERSION), SERIALIZATION_INFO_VERSION); object->getValue<size_t>(KEY_VERSION), SERIALIZATION_INFO_VERSION);
SerializationInfoByName infos;
if (object->has(KEY_COLUMNS)) if (object->has(KEY_COLUMNS))
{ {
std::unordered_map<std::string_view, const IDataType *> column_type_by_name;
for (const auto & [name, type] : columns)
column_type_by_name.emplace(name, type.get());
auto array = object->getArray(KEY_COLUMNS); auto array = object->getArray(KEY_COLUMNS);
for (const auto & elem : *array) for (const auto & elem : *array)
{ {
@ -271,13 +277,22 @@ void SerializationInfoByName::readJSON(ReadBuffer & in)
if (!elem_object->has(KEY_NAME)) if (!elem_object->has(KEY_NAME))
throw Exception(ErrorCodes::CORRUPTED_DATA, throw Exception(ErrorCodes::CORRUPTED_DATA,
"Missed field '{}' in SerializationInfo of columns", KEY_NAME); "Missed field '{}' in serialization infos", KEY_NAME);
auto name = elem_object->getValue<String>(KEY_NAME); auto name = elem_object->getValue<String>(KEY_NAME);
if (auto it = find(name); it != end()) auto it = column_type_by_name.find(name);
it->second->fromJSON(*elem_object);
if (it == column_type_by_name.end())
throw Exception(ErrorCodes::CORRUPTED_DATA,
"Found unexpected column '{}' in serialization infos", name);
auto info = it->second->createSerializationInfo(settings);
info->fromJSON(*elem_object);
infos.emplace(name, std::move(info));
} }
} }
return infos;
} }
} }

View File

@ -96,8 +96,10 @@ using MutableSerializationInfos = std::vector<MutableSerializationInfoPtr>;
class SerializationInfoByName : public std::map<String, MutableSerializationInfoPtr> class SerializationInfoByName : public std::map<String, MutableSerializationInfoPtr>
{ {
public: public:
using Settings = SerializationInfo::Settings;
SerializationInfoByName() = default; SerializationInfoByName() = default;
SerializationInfoByName(const NamesAndTypesList & columns, const SerializationInfo::Settings & settings); SerializationInfoByName(const NamesAndTypesList & columns, const Settings & settings);
void add(const Block & block); void add(const Block & block);
void add(const SerializationInfoByName & other); void add(const SerializationInfoByName & other);
@ -108,7 +110,9 @@ public:
void replaceData(const SerializationInfoByName & other); void replaceData(const SerializationInfoByName & other);
void writeJSON(WriteBuffer & out) const; void writeJSON(WriteBuffer & out) const;
void readJSON(ReadBuffer & in);
static SerializationInfoByName readJSON(
const NamesAndTypesList & columns, const Settings & settings, ReadBuffer & in);
}; };
} }
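
The readJSON signature change above means callers now pass the column list and SerializationInfo settings, so each info is created with the correct type before fromJSON is applied. The infos themselves originate from tables with sparse serialization enabled; a hedged example (table and column names hypothetical):

    CREATE TABLE t_sparse (id UInt64, v UInt64)
    ENGINE = MergeTree ORDER BY id
    SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9;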

View File

@ -580,7 +580,7 @@ private:
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
unalignedStoreLittleEndian<UInt64>(buf + 8, 0x00000000FFFF0000ull | (static_cast<UInt64>(ntohl(in)) << 32)); unalignedStoreLittleEndian<UInt64>(buf + 8, 0x00000000FFFF0000ull | (static_cast<UInt64>(ntohl(in)) << 32));
#else #else
unalignedStoreLittleEndian<UInt64>(buf + 8, 0x00000000FFFF0000ull | (static_cast<UInt64>(in)) << 32)); unalignedStoreLittleEndian<UInt64>(buf + 8, 0x00000000FFFF0000ull | (static_cast<UInt64>(__builtin_bswap32(in))) << 32));
#endif #endif
} }
}; };

View File

@ -239,11 +239,11 @@ namespace
// Multipart upload failed because it wasn't possible to schedule all the tasks. // Multipart upload failed because it wasn't possible to schedule all the tasks.
// To avoid execution of already scheduled tasks we abort MultipartUpload. // To avoid execution of already scheduled tasks we abort MultipartUpload.
abortMultipartUpload(); abortMultipartUpload();
waitForAllBackGroundTasks(); waitForAllBackgroundTasks();
throw; throw;
} }
waitForAllBackGroundTasks(); waitForAllBackgroundTasks();
completeMultipartUpload(); completeMultipartUpload();
} }
@ -381,7 +381,7 @@ namespace
virtual std::unique_ptr<Aws::AmazonWebServiceRequest> fillUploadPartRequest(size_t part_number, size_t part_offset, size_t part_size) = 0; virtual std::unique_ptr<Aws::AmazonWebServiceRequest> fillUploadPartRequest(size_t part_number, size_t part_offset, size_t part_size) = 0;
virtual String processUploadPartRequest(Aws::AmazonWebServiceRequest & request) = 0; virtual String processUploadPartRequest(Aws::AmazonWebServiceRequest & request) = 0;
void waitForAllBackGroundTasks() void waitForAllBackgroundTasks()
{ {
if (!schedule) if (!schedule)
return; return;

View File

@ -109,7 +109,7 @@ void WriteBufferFromS3::nextImpl()
else else
processWithDynamicParts(); processWithDynamicParts();
waitForReadyBackGroundTasks(); waitForReadyBackgroundTasks();
} }
void WriteBufferFromS3::processWithStrictParts() void WriteBufferFromS3::processWithStrictParts()
@ -225,7 +225,7 @@ void WriteBufferFromS3::finalizeImpl()
if (!is_prefinalized) if (!is_prefinalized)
preFinalize(); preFinalize();
waitForAllBackGroundTasks(); waitForAllBackgroundTasks();
if (!multipart_upload_id.empty()) if (!multipart_upload_id.empty())
completeMultipartUpload(); completeMultipartUpload();
@ -632,7 +632,7 @@ void WriteBufferFromS3::processPutRequest(const PutObjectTask & task)
max_retry, key, bucket); max_retry, key, bucket);
} }
void WriteBufferFromS3::waitForReadyBackGroundTasks() void WriteBufferFromS3::waitForReadyBackgroundTasks()
{ {
if (schedule) if (schedule)
{ {
@ -650,7 +650,7 @@ void WriteBufferFromS3::waitForReadyBackGroundTasks()
if (exception) if (exception)
{ {
waitForAllBackGroundTasksUnlocked(lock); waitForAllBackgroundTasksUnlocked(lock);
std::rethrow_exception(exception); std::rethrow_exception(exception);
} }
@ -659,16 +659,16 @@ void WriteBufferFromS3::waitForReadyBackGroundTasks()
} }
} }
void WriteBufferFromS3::waitForAllBackGroundTasks() void WriteBufferFromS3::waitForAllBackgroundTasks()
{ {
if (schedule) if (schedule)
{ {
std::unique_lock lock(bg_tasks_mutex); std::unique_lock lock(bg_tasks_mutex);
waitForAllBackGroundTasksUnlocked(lock); waitForAllBackgroundTasksUnlocked(lock);
} }
} }
void WriteBufferFromS3::waitForAllBackGroundTasksUnlocked(std::unique_lock<std::mutex> & bg_tasks_lock) void WriteBufferFromS3::waitForAllBackgroundTasksUnlocked(std::unique_lock<std::mutex> & bg_tasks_lock)
{ {
if (schedule) if (schedule)
{ {

View File

@ -79,9 +79,9 @@ private:
void fillPutRequest(S3::PutObjectRequest & req); void fillPutRequest(S3::PutObjectRequest & req);
void processPutRequest(const PutObjectTask & task); void processPutRequest(const PutObjectTask & task);
void waitForReadyBackGroundTasks(); void waitForReadyBackgroundTasks();
void waitForAllBackGroundTasks(); void waitForAllBackgroundTasks();
void waitForAllBackGroundTasksUnlocked(std::unique_lock<std::mutex> & bg_tasks_lock); void waitForAllBackgroundTasksUnlocked(std::unique_lock<std::mutex> & bg_tasks_lock);
const String bucket; const String bucket;
const String key; const String key;

View File

@ -533,6 +533,11 @@ bool FileCache::tryReserve(FileSegment & file_segment, size_t size)
assertInitialized(); assertInitialized();
auto cache_lock = cache_guard.lock(); auto cache_lock = cache_guard.lock();
LOG_TEST(
log, "Trying to reserve space ({} bytes) for {}:{}, current usage {}/{}",
size, file_segment.key(), file_segment.offset(),
main_priority->getSize(cache_lock), main_priority->getSizeLimit());
/// In case of per query cache limit (by default disabled), we add/remove entries from both /// In case of per query cache limit (by default disabled), we add/remove entries from both
/// (main_priority and query_priority) priority queues, but iterate entries in order of query_priority, /// (main_priority and query_priority) priority queues, but iterate entries in order of query_priority,
/// while checking the limits in both. /// while checking the limits in both.
@ -545,7 +550,17 @@ bool FileCache::tryReserve(FileSegment & file_segment, size_t size)
const bool query_limit_exceeded = query_priority->getSize(cache_lock) + size > query_priority->getSizeLimit(); const bool query_limit_exceeded = query_priority->getSize(cache_lock) + size > query_priority->getSizeLimit();
if (query_limit_exceeded && !query_context->recacheOnFileCacheQueryLimitExceeded()) if (query_limit_exceeded && !query_context->recacheOnFileCacheQueryLimitExceeded())
{
LOG_TEST(log, "Query limit exceeded, space reservation failed, "
"recache_on_query_limit_exceeded is disabled (while reserving for {}:{})",
file_segment.key(), file_segment.offset());
return false; return false;
}
LOG_TEST(
log, "Using query limit, current usage: {}/{} (while reserving for {}:{})",
query_priority->getSize(cache_lock), query_priority->getSizeLimit(),
file_segment.key(), file_segment.offset());
} }
size_t queue_size = main_priority->getElementsCount(cache_lock); size_t queue_size = main_priority->getElementsCount(cache_lock);
@ -630,6 +645,10 @@ bool FileCache::tryReserve(FileSegment & file_segment, size_t size)
if (is_query_priority_overflow()) if (is_query_priority_overflow())
return false; return false;
LOG_TEST(
log, "Query limits satisfied (while reserving for {}:{})",
file_segment.key(), file_segment.offset());
} }
auto is_main_priority_overflow = [&] auto is_main_priority_overflow = [&]

View File

@ -47,7 +47,7 @@ void FileCacheSettings::loadFromConfig(const Poco::Util::AbstractConfiguration &
else else
bypass_cache_threashold = FILECACHE_BYPASS_THRESHOLD; bypass_cache_threashold = FILECACHE_BYPASS_THRESHOLD;
do_not_evict_index_and_mark_files = config.getUInt64(config_prefix + ".do_not_evict_index_and_mark_files", false); do_not_evict_index_and_mark_files = config.getUInt64(config_prefix + ".do_not_evict_index_and_mark_files", true);
delayed_cleanup_interval_ms = config.getUInt64(config_prefix + ".delayed_cleanup_interval_ms", FILECACHE_DELAYED_CLEANUP_INTERVAL_MS); delayed_cleanup_interval_ms = config.getUInt64(config_prefix + ".delayed_cleanup_interval_ms", FILECACHE_DELAYED_CLEANUP_INTERVAL_MS);
} }

View File

@ -753,7 +753,7 @@ bool FileSegment::assertCorrectnessUnlocked(const FileSegmentGuard::Lock &) cons
} }
else else
{ {
if (download_state == State::DOWNLOADED) if (download_state == State::DOWNLOADING)
{ {
chassert(!downloader_id.empty()); chassert(!downloader_id.empty());
} }

View File

@ -32,7 +32,8 @@ IFileCachePriority::Iterator LRUFileCachePriority::add(
if (entry.size != 0 && entry.key == key && entry.offset == offset) if (entry.size != 0 && entry.key == key && entry.offset == offset)
throw Exception( throw Exception(
ErrorCodes::LOGICAL_ERROR, ErrorCodes::LOGICAL_ERROR,
"Attempt to add duplicate queue entry to queue. (Key: {}, offset: {}, size: {})", "Attempt to add duplicate queue entry to queue. "
"(Key: {}, offset: {}, size: {})",
entry.key, entry.offset, entry.size); entry.key, entry.offset, entry.size);
} }
#endif #endif
@ -46,14 +47,15 @@ IFileCachePriority::Iterator LRUFileCachePriority::add(
key, offset, size, current_size, getSizeLimit()); key, offset, size, current_size, getSizeLimit());
} }
current_size += size;
auto iter = queue.insert(queue.end(), Entry(key, offset, size, key_metadata)); auto iter = queue.insert(queue.end(), Entry(key, offset, size, key_metadata));
current_size += size;
CurrentMetrics::add(CurrentMetrics::FilesystemCacheSize, size); CurrentMetrics::add(CurrentMetrics::FilesystemCacheSize, size);
CurrentMetrics::add(CurrentMetrics::FilesystemCacheElements); CurrentMetrics::add(CurrentMetrics::FilesystemCacheElements);
LOG_TEST(log, "Added entry into LRU queue, key: {}, offset: {}, size: {}", key, offset, size); LOG_TEST(
log, "Added entry into LRU queue, key: {}, offset: {}, size: {}",
key, offset, size);
return std::make_shared<LRUFileCacheIterator>(this, iter); return std::make_shared<LRUFileCacheIterator>(this, iter);
} }
@ -81,13 +83,18 @@ LRUFileCachePriority::LRUQueueIterator LRUFileCachePriority::remove(LRUQueueIter
CurrentMetrics::sub(CurrentMetrics::FilesystemCacheSize, it->size); CurrentMetrics::sub(CurrentMetrics::FilesystemCacheSize, it->size);
CurrentMetrics::sub(CurrentMetrics::FilesystemCacheElements); CurrentMetrics::sub(CurrentMetrics::FilesystemCacheElements);
LOG_TEST(log, "Removed entry from LRU queue, key: {}, offset: {}", it->key, it->offset); LOG_TEST(
log, "Removed entry from LRU queue, key: {}, offset: {}, size: {}",
it->key, it->offset, it->size);
return queue.erase(it); return queue.erase(it);
} }
LRUFileCachePriority::LRUFileCacheIterator::LRUFileCacheIterator( LRUFileCachePriority::LRUFileCacheIterator::LRUFileCacheIterator(
LRUFileCachePriority * cache_priority_, LRUFileCachePriority::LRUQueueIterator queue_iter_) LRUFileCachePriority * cache_priority_,
: cache_priority(cache_priority_), queue_iter(queue_iter_) LRUFileCachePriority::LRUQueueIterator queue_iter_)
: cache_priority(cache_priority_)
, queue_iter(queue_iter_)
{ {
} }
@ -113,7 +120,8 @@ void LRUFileCachePriority::iterate(IterateFunc && func, const CacheGuard::Lock &
{ {
throw Exception( throw Exception(
ErrorCodes::LOGICAL_ERROR, ErrorCodes::LOGICAL_ERROR,
"Mismatch of file segment size in file segment metadata and priority queue: {} != {} ({})", "Mismatch of file segment size in file segment metadata "
"and priority queue: {} != {} ({})",
it->size, metadata->size(), metadata->file_segment->getInfoForLog()); it->size, metadata->size(), metadata->file_segment->getInfoForLog());
} }
@ -138,28 +146,26 @@ void LRUFileCachePriority::iterate(IterateFunc && func, const CacheGuard::Lock &
} }
} }
LRUFileCachePriority::Iterator LRUFileCachePriority::LRUFileCacheIterator::remove(const CacheGuard::Lock &) LRUFileCachePriority::Iterator
LRUFileCachePriority::LRUFileCacheIterator::remove(const CacheGuard::Lock &)
{ {
return std::make_shared<LRUFileCacheIterator>(cache_priority, cache_priority->remove(queue_iter)); return std::make_shared<LRUFileCacheIterator>(
cache_priority, cache_priority->remove(queue_iter));
} }
void LRUFileCachePriority::LRUFileCacheIterator::annul() void LRUFileCachePriority::LRUFileCacheIterator::annul()
{ {
cache_priority->current_size -= queue_iter->size; updateSize(-queue_iter->size);
queue_iter->size = 0; chassert(queue_iter->size == 0);
} }
void LRUFileCachePriority::LRUFileCacheIterator::updateSize(int64_t size) void LRUFileCachePriority::LRUFileCacheIterator::updateSize(int64_t size)
{ {
cache_priority->current_size += size; cache_priority->current_size += size;
if (size > 0)
CurrentMetrics::add(CurrentMetrics::FilesystemCacheSize, size);
else
CurrentMetrics::sub(CurrentMetrics::FilesystemCacheSize, size);
queue_iter->size += size; queue_iter->size += size;
CurrentMetrics::add(CurrentMetrics::FilesystemCacheSize, size);
chassert(cache_priority->current_size >= 0); chassert(cache_priority->current_size >= 0);
chassert(queue_iter->size >= 0); chassert(queue_iter->size >= 0);
} }

View File

@ -336,13 +336,18 @@ void LockedKey::removeAllReleasable()
KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset, const FileSegmentGuard::Lock & segment_lock) KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset, const FileSegmentGuard::Lock & segment_lock)
{ {
LOG_DEBUG(log, "Remove from cache. Key: {}, offset: {}", getKey(), offset);
auto it = key_metadata->find(offset); auto it = key_metadata->find(offset);
if (it == key_metadata->end()) if (it == key_metadata->end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no offset {}", offset); throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no offset {}", offset);
auto file_segment = it->second->file_segment; auto file_segment = it->second->file_segment;
LOG_DEBUG(
log, "Remove from cache. Key: {}, offset: {}, size: {}",
getKey(), offset, file_segment->reserved_size);
chassert(file_segment->assertCorrectnessUnlocked(segment_lock));
if (file_segment->queue_iterator) if (file_segment->queue_iterator)
file_segment->queue_iterator->annul(); file_segment->queue_iterator->annul();

View File

@ -669,13 +669,15 @@ SharedContextHolder Context::createShared()
ContextMutablePtr Context::createCopy(const ContextPtr & other) ContextMutablePtr Context::createCopy(const ContextPtr & other)
{ {
auto lock = other->getLock();
return std::shared_ptr<Context>(new Context(*other)); return std::shared_ptr<Context>(new Context(*other));
} }
ContextMutablePtr Context::createCopy(const ContextWeakPtr & other) ContextMutablePtr Context::createCopy(const ContextWeakPtr & other)
{ {
auto ptr = other.lock(); auto ptr = other.lock();
if (!ptr) throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't copy an expired context"); if (!ptr)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't copy an expired context");
return createCopy(ptr); return createCopy(ptr);
} }

View File

@ -8,6 +8,7 @@
#include <Common/escapeForFileName.h> #include <Common/escapeForFileName.h>
#include <Common/ShellCommand.h> #include <Common/ShellCommand.h>
#include <Common/CurrentMetrics.h> #include <Common/CurrentMetrics.h>
#include <Common/FailPoint.h>
#include <Interpreters/Cache/FileCacheFactory.h> #include <Interpreters/Cache/FileCacheFactory.h>
#include <Interpreters/Cache/FileCache.h> #include <Interpreters/Cache/FileCache.h>
#include <Interpreters/Context.h> #include <Interpreters/Context.h>
@ -582,6 +583,18 @@ BlockIO InterpreterSystemQuery::execute()
result = Unfreezer(getContext()).systemUnfreeze(query.backup_name); result = Unfreezer(getContext()).systemUnfreeze(query.backup_name);
break; break;
} }
case Type::ENABLE_FAILPOINT:
{
getContext()->checkAccess(AccessType::SYSTEM_FAILPOINT);
FailPointInjection::enableFailPoint(query.fail_point_name);
break;
}
case Type::DISABLE_FAILPOINT:
{
getContext()->checkAccess(AccessType::SYSTEM_FAILPOINT);
FailPointInjection::disableFailPoint(query.fail_point_name);
break;
}
default: default:
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown type of SYSTEM query"); throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown type of SYSTEM query");
} }
@ -1170,6 +1183,8 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster()
case Type::START_LISTEN_QUERIES: case Type::START_LISTEN_QUERIES:
case Type::STOP_THREAD_FUZZER: case Type::STOP_THREAD_FUZZER:
case Type::START_THREAD_FUZZER: case Type::START_THREAD_FUZZER:
case Type::ENABLE_FAILPOINT:
case Type::DISABLE_FAILPOINT:
case Type::UNKNOWN: case Type::UNKNOWN:
case Type::END: break; case Type::END: break;
} }

View File

@ -0,0 +1,49 @@
#include <Interpreters/Context.h>
#include <Common/tests/gtest_global_context.h>
#include <gtest/gtest.h>
using namespace DB;
template <typename Ptr>
void run(Ptr context)
{
for (size_t i = 0; i < 100; ++i)
{
std::thread t1([context]
{
if constexpr (std::is_same_v<ContextWeakPtr, Ptr>)
context.lock()->getAsyncReadCounters();
else
context->getAsyncReadCounters();
});
std::thread t2([context]
{
Context::createCopy(context);
});
t1.join();
t2.join();
}
}
TEST(Context, MutableRace)
{
auto context = Context::createCopy(getContext().context);
context->makeQueryContext();
run<ContextMutablePtr>(context);
}
TEST(Context, ConstRace)
{
auto context = Context::createCopy(getContext().context);
context->makeQueryContext();
run<ContextPtr>(context);
}
TEST(Context, WeakRace)
{
auto context = Context::createCopy(getContext().context);
context->makeQueryContext();
run<ContextWeakPtr>(context);
}

View File

@ -16,6 +16,7 @@ class ASTQueryWithOutput : public IAST
public: public:
ASTPtr out_file; ASTPtr out_file;
bool is_into_outfile_with_stdout; bool is_into_outfile_with_stdout;
bool is_outfile_append;
ASTPtr format; ASTPtr format;
ASTPtr settings_ast; ASTPtr settings_ast;
ASTPtr compression; ASTPtr compression;

View File

@ -77,6 +77,8 @@ public:
START_THREAD_FUZZER, START_THREAD_FUZZER,
STOP_THREAD_FUZZER, STOP_THREAD_FUZZER,
UNFREEZE, UNFREEZE,
ENABLE_FAILPOINT,
DISABLE_FAILPOINT,
END END
}; };
@ -110,6 +112,8 @@ public:
String schema_cache_storage; String schema_cache_storage;
String fail_point_name;
SyncReplicaMode sync_replica_mode = SyncReplicaMode::DEFAULT; SyncReplicaMode sync_replica_mode = SyncReplicaMode::DEFAULT;
String getID(char) const override { return "SYSTEM query"; } String getID(char) const override { return "SYSTEM query"; }

View File

@ -1,4 +1,5 @@
#include <string_view> #include <string_view>
#include <unordered_map>
#include <Parsers/ExpressionListParsers.h> #include <Parsers/ExpressionListParsers.h>
#include <Parsers/ParserSetQuery.h> #include <Parsers/ParserSetQuery.h>
@ -25,6 +26,7 @@
#include <Common/logger_useful.h> #include <Common/logger_useful.h>
#include <Parsers/queryToString.h> #include <Parsers/queryToString.h>
#include <Parsers/CommonParsers.h>
using namespace std::literals; using namespace std::literals;
@ -1085,6 +1087,8 @@ public:
ParserKeyword filter("FILTER"); ParserKeyword filter("FILTER");
ParserKeyword over("OVER"); ParserKeyword over("OVER");
ParserKeyword respect_nulls("RESPECT NULLS");
ParserKeyword ignore_nulls("IGNORE NULLS");
if (filter.ignore(pos, expected)) if (filter.ignore(pos, expected))
{ {
@ -1100,6 +1104,17 @@ public:
return false; return false;
} }
NullsAction nulls_action = NullsAction::EMPTY;
if (respect_nulls.ignore(pos, expected))
{
nulls_action = NullsAction::RESPECT_NULLS;
}
if (ignore_nulls.ignore(pos, expected))
{
nulls_action = NullsAction::IGNORE_NULLS;
}
function_node->name = transformFunctionNameForRespectNulls(function_node->name, nulls_action);
if (over.ignore(pos, expected)) if (over.ignore(pos, expected))
{ {
function_node->is_window_function = true; function_node->is_window_function = true;
@ -1132,6 +1147,30 @@ private:
bool allow_function_parameters; bool allow_function_parameters;
bool is_compound_name; bool is_compound_name;
enum NullsAction
{
EMPTY = 0,
RESPECT_NULLS = 1,
IGNORE_NULLS = 2,
};
static String transformFunctionNameForRespectNulls(const String & original_function_name, NullsAction nulls_action)
{
static std::unordered_map<String, std::vector<String>> renamed_functions_with_nulls = {
{"first_value", {"first_value", "first_value_respect_nulls", "first_value"}},
{"last_value", {"last_value", "last_value_respect_nulls", "last_value"}},
};
auto it = renamed_functions_with_nulls.find(original_function_name);
if (it == renamed_functions_with_nulls.end())
{
if (nulls_action == NullsAction::EMPTY)
return original_function_name;
else
throw Exception(
ErrorCodes::SYNTAX_ERROR, "Function {} does not support RESPECT NULLS or IGNORE NULLS", original_function_name);
}
return it->second[nulls_action];
}
}; };
/// Layer for priority brackets and tuple function /// Layer for priority brackets and tuple function
@ -2353,7 +2392,6 @@ bool ParserExpressionImpl::parse(std::unique_ptr<Layer> start, IParser::Pos & po
{ {
if (!layers.back()->parse(pos, expected, next)) if (!layers.back()->parse(pos, expected, next))
break; break;
if (layers.back()->isFinished()) if (layers.back()->isFinished())
{ {
if (layers.size() == 1) if (layers.size() == 1)

View File

@ -103,6 +103,12 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
if (!out_file_p.parse(pos, query_with_output.out_file, expected)) if (!out_file_p.parse(pos, query_with_output.out_file, expected))
return false; return false;
ParserKeyword s_append("APPEND");
if (s_append.ignore(pos, expected))
{
query_with_output.is_outfile_append = true;
}
ParserKeyword s_stdout("AND STDOUT"); ParserKeyword s_stdout("AND STDOUT");
if (s_stdout.ignore(pos, expected)) if (s_stdout.ignore(pos, expected))
{ {
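
A hedged example of the new APPEND modifier parsed above (file name hypothetical); without APPEND, INTO OUTFILE refuses to write to an existing file:

    SELECT number FROM numbers(10)
    INTO OUTFILE 'numbers.tsv' APPEND;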

View File

@ -258,6 +258,16 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
return false; return false;
break; break;
} }
case Type::ENABLE_FAILPOINT:
case Type::DISABLE_FAILPOINT:
{
ASTPtr ast;
if (ParserIdentifier{}.parse(pos, ast, expected))
res->fail_point_name = ast->as<ASTIdentifier &>().name();
else
return false;
break;
}
case Type::RESTART_REPLICA: case Type::RESTART_REPLICA:
case Type::SYNC_REPLICA: case Type::SYNC_REPLICA:

View File

@ -17,7 +17,7 @@ QueryPlanOptimizationSettings QueryPlanOptimizationSettings::fromSettings(const
settings.remove_redundant_sorting = from.query_plan_remove_redundant_sorting; settings.remove_redundant_sorting = from.query_plan_remove_redundant_sorting;
settings.aggregate_partitions_independently = from.allow_aggregate_partitions_independently; settings.aggregate_partitions_independently = from.allow_aggregate_partitions_independently;
settings.remove_redundant_distinct = from.query_plan_remove_redundant_distinct; settings.remove_redundant_distinct = from.query_plan_remove_redundant_distinct;
settings.optimize_projection = from.allow_experimental_projection_optimization && from.query_plan_optimize_projection; settings.optimize_projection = from.optimize_use_projections && from.query_plan_optimize_projection;
settings.force_use_projection = settings.optimize_projection && from.force_optimize_projection; settings.force_use_projection = settings.optimize_projection && from.force_optimize_projection;
return settings; return settings;
} }

View File

@ -170,7 +170,7 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s
if (optimization_settings.force_use_projection && has_reading_from_mt && num_applied_projection == 0) if (optimization_settings.force_use_projection && has_reading_from_mt && num_applied_projection == 0)
throw Exception( throw Exception(
ErrorCodes::PROJECTION_NOT_USED, ErrorCodes::PROJECTION_NOT_USED,
"No projection is used when allow_experimental_projection_optimization = 1 and force_optimize_projection = 1"); "No projection is used when optimize_use_projections = 1 and force_optimize_projection = 1");
} }
} }
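
The rename from allow_experimental_projection_optimization to optimize_use_projections recurs throughout the tests below; a hedged example using the tables from the projection test later in this diff:

    SELECT sum(i) FROM pd GROUP BY dt_m
    SETTINGS optimize_use_projections = 1, force_optimize_projection = 1;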

View File

@ -1341,11 +1341,11 @@ void IMergeTreeDataPart::loadColumns(bool require)
.choose_kind = false, .choose_kind = false,
}; };
SerializationInfoByName infos(loaded_columns, settings); SerializationInfoByName infos;
if (metadata_manager->exists(SERIALIZATION_FILE_NAME)) if (metadata_manager->exists(SERIALIZATION_FILE_NAME))
{ {
auto in = metadata_manager->read(SERIALIZATION_FILE_NAME); auto in = metadata_manager->read(SERIALIZATION_FILE_NAME);
infos.readJSON(*in); infos = SerializationInfoByName::readJSON(loaded_columns, settings, *in);
} }
int32_t loaded_metadata_version; int32_t loaded_metadata_version;

View File

@ -326,6 +326,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
if (!ctx->need_remove_expired_values) if (!ctx->need_remove_expired_values)
{ {
size_t expired_columns = 0; size_t expired_columns = 0;
auto part_serialization_infos = global_ctx->new_data_part->getSerializationInfos();
for (auto & [column_name, ttl] : global_ctx->new_data_part->ttl_infos.columns_ttl) for (auto & [column_name, ttl] : global_ctx->new_data_part->ttl_infos.columns_ttl)
{ {
@ -335,6 +336,8 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
LOG_TRACE(ctx->log, "Adding expired column {} for part {}", column_name, global_ctx->new_data_part->name); LOG_TRACE(ctx->log, "Adding expired column {} for part {}", column_name, global_ctx->new_data_part->name);
std::erase(global_ctx->gathering_column_names, column_name); std::erase(global_ctx->gathering_column_names, column_name);
std::erase(global_ctx->merging_column_names, column_name); std::erase(global_ctx->merging_column_names, column_name);
std::erase(global_ctx->all_column_names, column_name);
part_serialization_infos.erase(column_name);
++expired_columns; ++expired_columns;
} }
} }
@ -343,6 +346,12 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
{ {
global_ctx->gathering_columns = global_ctx->gathering_columns.filter(global_ctx->gathering_column_names); global_ctx->gathering_columns = global_ctx->gathering_columns.filter(global_ctx->gathering_column_names);
global_ctx->merging_columns = global_ctx->merging_columns.filter(global_ctx->merging_column_names); global_ctx->merging_columns = global_ctx->merging_columns.filter(global_ctx->merging_column_names);
global_ctx->storage_columns = global_ctx->storage_columns.filter(global_ctx->all_column_names);
global_ctx->new_data_part->setColumns(
global_ctx->storage_columns,
part_serialization_infos,
global_ctx->metadata_snapshot->getMetadataVersion());
} }
} }

View File

@ -4985,8 +4985,8 @@ Pipe MergeTreeData::alterPartition(
if (command.replace) if (command.replace)
checkPartitionCanBeDropped(command.partition, query_context); checkPartitionCanBeDropped(command.partition, query_context);
String from_database = query_context->resolveDatabase(command.from_database); auto resolved = query_context->resolveStorageID({command.from_database, command.from_table});
auto from_storage = DatabaseCatalog::instance().getTable({from_database, command.from_table}, query_context); auto from_storage = DatabaseCatalog::instance().getTable(resolved, query_context);
auto * from_storage_merge_tree = dynamic_cast<MergeTreeData *>(from_storage.get()); auto * from_storage_merge_tree = dynamic_cast<MergeTreeData *>(from_storage.get());
if (!from_storage_merge_tree) if (!from_storage_merge_tree)
@ -6242,7 +6242,7 @@ bool MergeTreeData::mayBenefitFromIndexForIn(
return true; return true;
} }
if (query_settings.allow_experimental_projection_optimization) if (query_settings.optimize_use_projections)
{ {
for (const auto & projection : metadata_snapshot->getProjections()) for (const auto & projection : metadata_snapshot->getProjections())
if (projection.isPrimaryKeyColumnPossiblyWrappedInFunctions(ast)) if (projection.isPrimaryKeyColumnPossiblyWrappedInFunctions(ast))
@ -6613,7 +6613,7 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
if (!query_info.syntax_analyzer_result) if (!query_info.syntax_analyzer_result)
return std::nullopt; return std::nullopt;
if (!settings.allow_experimental_projection_optimization || query_info.ignore_projections || query_info.is_projection_query if (!settings.optimize_use_projections || query_info.ignore_projections || query_info.is_projection_query
|| settings.aggregate_functions_null_for_empty /* projections don't work correctly with this setting */) || settings.aggregate_functions_null_for_empty /* projections don't work correctly with this setting */)
return std::nullopt; return std::nullopt;

View File

@ -175,10 +175,10 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
query_info.merge_tree_select_result_ptr, query_info.merge_tree_select_result_ptr,
enable_parallel_reading); enable_parallel_reading);
if (!step && settings.allow_experimental_projection_optimization && settings.force_optimize_projection if (!step && settings.optimize_use_projections && settings.force_optimize_projection
&& !metadata_for_reading->projections.empty() && !settings.query_plan_optimize_projection) && !metadata_for_reading->projections.empty() && !settings.query_plan_optimize_projection)
throw Exception(ErrorCodes::PROJECTION_NOT_USED, throw Exception(ErrorCodes::PROJECTION_NOT_USED,
"No projection is used when allow_experimental_projection_optimization = 1 and force_optimize_projection = 1"); "No projection is used when optimize_use_projections = 1 and force_optimize_projection = 1");
auto plan = std::make_unique<QueryPlan>(); auto plan = std::make_unique<QueryPlan>();
if (step) if (step)

View File

@ -2,6 +2,7 @@
#include <Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h> #include <Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h>
#include <Storages/MergeTree/ReplicatedMergeTreeSink.h> #include <Storages/MergeTree/ReplicatedMergeTreeSink.h>
#include <Interpreters/PartLog.h> #include <Interpreters/PartLog.h>
#include <Common/FailPoint.h>
#include <Common/ProfileEventsScope.h> #include <Common/ProfileEventsScope.h>
#include <Common/SipHash.h> #include <Common/SipHash.h>
#include <Common/ZooKeeper/KeeperException.h> #include <Common/ZooKeeper/KeeperException.h>
@ -20,6 +21,11 @@ namespace ProfileEvents
namespace DB namespace DB
{ {
namespace FailPoints
{
extern const char replicated_merge_tree_commit_zk_fail_after_op[];
}
namespace ErrorCodes namespace ErrorCodes
{ {
extern const int TOO_FEW_LIVE_REPLICAS; extern const int TOO_FEW_LIVE_REPLICAS;
@ -940,6 +946,16 @@ std::vector<String> ReplicatedMergeTreeSinkImpl<async_insert>::commitPart(
ThreadFuzzer::maybeInjectSleep(); ThreadFuzzer::maybeInjectSleep();
fiu_do_on(FailPoints::replicated_merge_tree_commit_zk_fail_after_op,
{
if (!zookeeper->fault_policy)
{
zookeeper->logger = log;
zookeeper->fault_policy = std::make_unique<RandomFaultInjection>(0, 0);
}
zookeeper->fault_policy->must_fail_after_op = true;
});
Coordination::Responses responses; Coordination::Responses responses;
Coordination::Error multi_code = zookeeper->tryMultiNoThrow(ops, responses); /// 1 RTT Coordination::Error multi_code = zookeeper->tryMultiNoThrow(ops, responses); /// 1 RTT
if (multi_code == Coordination::Error::ZOK) if (multi_code == Coordination::Error::ZOK)
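
A hedged sketch of driving the new replicated_merge_tree_commit_zk_fail_after_op failpoint from SQL (table name hypothetical). Since it is declared ONCE, fiu disables it automatically after the first trigger:

    SYSTEM ENABLE FAILPOINT replicated_merge_tree_commit_zk_fail_after_op;
    INSERT INTO rmt VALUES (1);  -- fault injection forces a ZOPERATIONTIMEOUT after the commit multi-op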

View File

@ -94,12 +94,13 @@ IMergeTreeDataPart::Checksums checkDataPart(
}; };
auto ratio_of_defaults = data_part->storage.getSettings()->ratio_of_defaults_for_sparse_serialization; auto ratio_of_defaults = data_part->storage.getSettings()->ratio_of_defaults_for_sparse_serialization;
SerializationInfoByName serialization_infos(columns_txt, SerializationInfo::Settings{ratio_of_defaults, false}); SerializationInfoByName serialization_infos;
if (data_part_storage.exists(IMergeTreeDataPart::SERIALIZATION_FILE_NAME)) if (data_part_storage.exists(IMergeTreeDataPart::SERIALIZATION_FILE_NAME))
{ {
auto serialization_file = data_part_storage.readFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, {}, std::nullopt, std::nullopt); auto serialization_file = data_part_storage.readFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, {}, std::nullopt, std::nullopt);
serialization_infos.readJSON(*serialization_file); SerializationInfo::Settings settings{ratio_of_defaults, false};
serialization_infos = SerializationInfoByName::readJSON(columns_txt, settings, *serialization_file);
} }
auto get_serialization = [&serialization_infos](const auto & column) auto get_serialization = [&serialization_infos](const auto & column)

View File

@ -156,3 +156,6 @@ endif ()
if (ENABLE_OPENSSL) if (ENABLE_OPENSSL)
set(USE_OPENSSL_INTREE 1) set(USE_OPENSSL_INTREE 1)
endif () endif ()
if (TARGET ch_contrib::fiu)
set(FIU_ENABLE 1)
endif()

View File

@ -124,7 +124,6 @@
02324_map_combinator_bug 02324_map_combinator_bug
02241_join_rocksdb_bs 02241_join_rocksdb_bs
02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET 02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET
01626_cnf_fuzz_long
01115_join_with_dictionary 01115_join_with_dictionary
01009_global_array_join_names 01009_global_array_join_names
00917_multiple_joins_denny_crane 00917_multiple_joins_denny_crane

View File

@ -2244,7 +2244,7 @@ def main(args):
"\nFound hung queries in processlist:", args, "red", attrs=["bold"] "\nFound hung queries in processlist:", args, "red", attrs=["bold"]
) )
) )
print(processlist) print(processlist.decode())
print(get_transactions_list(args)) print(get_transactions_list(args))
print_stacktraces() print_stacktraces()

View File

@ -1,7 +1,7 @@
<test> <test>
<settings> <settings>
<max_insert_threads>8</max_insert_threads> <max_insert_threads>8</max_insert_threads>
<allow_experimental_projection_optimization>0</allow_experimental_projection_optimization> <optimize_use_projections>0</optimize_use_projections>
</settings> </settings>
<create_query> <create_query>

View File

@ -1,7 +1,7 @@
<test> <test>
<settings> <settings>
<max_insert_threads>8</max_insert_threads> <max_insert_threads>8</max_insert_threads>
<allow_experimental_projection_optimization>0</allow_experimental_projection_optimization> <optimize_use_projections>0</optimize_use_projections>
</settings> </settings>
<substitutions> <substitutions>
@ -46,4 +46,4 @@
<query>select min(i1), min(i2) from t_nullable group by {key_type} format Null</query> <query>select min(i1), min(i2) from t_nullable group by {key_type} format Null</query>
<drop_query>drop table if exists t_nullable</drop_query> <drop_query>drop table if exists t_nullable</drop_query>
</test> </test>

View File

@ -136,6 +136,7 @@ SYSTEM FLUSH LOGS ['FLUSH LOGS'] GLOBAL SYSTEM FLUSH
SYSTEM FLUSH [] \N SYSTEM SYSTEM FLUSH [] \N SYSTEM
SYSTEM THREAD FUZZER ['SYSTEM START THREAD FUZZER','SYSTEM STOP THREAD FUZZER','START THREAD FUZZER','STOP THREAD FUZZER'] GLOBAL SYSTEM SYSTEM THREAD FUZZER ['SYSTEM START THREAD FUZZER','SYSTEM STOP THREAD FUZZER','START THREAD FUZZER','STOP THREAD FUZZER'] GLOBAL SYSTEM
SYSTEM UNFREEZE ['SYSTEM UNFREEZE'] GLOBAL SYSTEM SYSTEM UNFREEZE ['SYSTEM UNFREEZE'] GLOBAL SYSTEM
SYSTEM FAILPOINT ['SYSTEM ENABLE FAILPOINT','SYSTEM DISABLE FAILPOINT'] GLOBAL SYSTEM
SYSTEM [] \N ALL SYSTEM [] \N ALL
dictGet ['dictHas','dictGetHierarchy','dictIsIn'] DICTIONARY ALL dictGet ['dictHas','dictGetHierarchy','dictIsIn'] DICTIONARY ALL
displaySecretsInShowAndSelect [] GLOBAL ALL displaySecretsInShowAndSelect [] GLOBAL ALL
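
A hedged example of granting the new access type listed above (user name hypothetical):

    GRANT SYSTEM FAILPOINT ON *.* TO test_user;  -- covers SYSTEM ENABLE FAILPOINT and SYSTEM DISABLE FAILPOINT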

View File

@ -122,7 +122,7 @@ create table pl (dt DateTime, i int, projection p (select sum(i) group by toStar
insert into pl values ('2020-10-24', 1); insert into pl values ('2020-10-24', 1);
set max_rows_to_read = 2; set max_rows_to_read = 2;
select sum(i) from pd group by dt_m settings allow_experimental_projection_optimization = 1, force_optimize_projection = 1; select sum(i) from pd group by dt_m settings optimize_use_projections = 1, force_optimize_projection = 1;
drop table pd; drop table pd;
drop table pl; drop table pl;

View File

@ -1,4 +1,4 @@
set allow_experimental_projection_optimization = 1, force_optimize_projection = 1; set optimize_use_projections = 1, force_optimize_projection = 1;
drop table if exists tp; drop table if exists tp;

View File

@@ -8,22 +8,22 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 $CLICKHOUSE_CLIENT -q "CREATE TABLE test_agg_proj (x Int32, y Int32, PROJECTION x_plus_y (SELECT sum(x - y), argMax(x, y) group by x + y)) ENGINE = MergeTree ORDER BY tuple() settings index_granularity = 1"
 $CLICKHOUSE_CLIENT -q "insert into test_agg_proj select intDiv(number, 2), -intDiv(number,3) - 1 from numbers(100)"
-$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
-$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
-$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
-$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
-$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
-$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
 $CLICKHOUSE_CLIENT -q "drop table test_agg_proj"
View File
@@ -2,7 +2,7 @@ drop table if exists tp;
 create table tp (d1 Int32, d2 Int32, eventcnt Int64, projection p (select sum(eventcnt) group by d1)) engine = MergeTree order by (d1, d2);
-set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+set optimize_use_projections = 1, force_optimize_projection = 1;
 select sum(eventcnt) eventcnt, d1 from tp group by d1;
View File
@@ -4,7 +4,7 @@ create table d (i int, j int) engine MergeTree partition by i % 2 order by tuple
 insert into d select number, number from numbers(10000);
-set max_rows_to_read = 2, allow_experimental_projection_optimization = 1;
+set max_rows_to_read = 2, optimize_use_projections = 1;
 select min(i), max(i), count() from d;
 select min(i), max(i), count() from d group by _partition_id order by _partition_id;
View File
@@ -9,7 +9,7 @@ alter table t add projection x (select * order by j);
 insert into t values (1, 4);
 insert into t values (1, 5);
-set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+set optimize_use_projections = 1, force_optimize_projection = 1;
 select i from t prewhere j = 4;
View File
@@ -1,5 +1,5 @@
 select where x < 10
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 0, optimize_use_projections = 0
 0 4294967295
 1 4294967294
 2 4294967293
@@ -10,7 +10,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
 7 4294967288
 8 4294967287
 9 4294967286
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 0, optimize_use_projections = 1
 0 4294967295
 1 4294967294
 2 4294967293
@@ -21,7 +21,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
 7 4294967288
 8 4294967287
 9 4294967286
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 1, optimize_use_projections = 0
 0 4294967295
 1 4294967294
 2 4294967293
@@ -32,7 +32,7 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
 7 4294967288
 8 4294967287
 9 4294967286
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 1, optimize_use_projections = 1
 0 4294967295
 1 4294967294
 2 4294967293
@@ -43,16 +43,16 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
 7 4294967288
 8 4294967287
 9 4294967286
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 0, optimize_use_projections = 0
 "rows_read": 100,
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 0, optimize_use_projections = 1
 "rows_read": 100,
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 1, optimize_use_projections = 0
 "rows_read": 100,
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 1, optimize_use_projections = 1
 "rows_read": 100,
 select where y > 4294967286
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 0, optimize_use_projections = 0
 0 4294967295
 1 4294967294
 2 4294967293
@@ -62,7 +62,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
 6 4294967289
 7 4294967288
 8 4294967287
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 0, optimize_use_projections = 1
 0 4294967295
 1 4294967294
 2 4294967293
@@ -72,7 +72,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
 6 4294967289
 7 4294967288
 8 4294967287
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 1, optimize_use_projections = 0
 0 4294967295
 1 4294967294
 2 4294967293
@@ -82,7 +82,7 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
 6 4294967289
 7 4294967288
 8 4294967287
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 1, optimize_use_projections = 1
 0 4294967295
 1 4294967294
 2 4294967293
@@ -92,12 +92,12 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
 6 4294967289
 7 4294967288
 8 4294967287
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 0, optimize_use_projections = 0
 "rows_read": 100,
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 0, optimize_use_projections = 1
 "rows_read": 100,
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 1, optimize_use_projections = 0
 "rows_read": 100,
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 1, optimize_use_projections = 1
 "rows_read": 100,
 50
View File
@@ -9,73 +9,73 @@ $CLICKHOUSE_CLIENT -q "insert into test_sort_proj select number, toUInt32(-numbe
 echo "select where x < 10"
-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0" | grep rows_read
-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0" | grep rows_read
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1" | grep rows_read
-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1" | grep rows_read
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0" | grep rows_read
-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0" | grep rows_read
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1" | grep rows_read
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1" | grep rows_read
 echo "select where y > 4294967286"
-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0" | grep rows_read
-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0" | grep rows_read
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1" | grep rows_read
-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1" | grep rows_read
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0" | grep rows_read
-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0" | grep rows_read
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1" | grep rows_read
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1" | grep rows_read
 $CLICKHOUSE_CLIENT -q "ALTER TABLE test_sort_proj DELETE WHERE x % 2 = 0 SETTINGS mutations_sync=2;"
 $CLICKHOUSE_CLIENT -q "SELECT count() from test_sort_proj;"
View File
@@ -28,7 +28,7 @@ INSERT INTO normal SELECT
 number
 FROM numbers(100000);
-SET allow_experimental_projection_optimization=1, optimize_aggregation_in_order=1, force_optimize_projection=1;
+SET optimize_use_projections=1, optimize_aggregation_in_order=1, force_optimize_projection=1;
 WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM normal WHERE ts > '2021-12-06 22:00:00' GROUP BY a ORDER BY v LIMIT 5;
 WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM normal WHERE ts > '2021-12-06 22:00:00' GROUP BY toStartOfHour(ts), a ORDER BY v LIMIT 5;
@@ -60,7 +60,7 @@ INSERT INTO agg SELECT
 number
 FROM numbers(100000);
-SET allow_experimental_projection_optimization=1, optimize_aggregation_in_order=1, force_optimize_projection = 1;
+SET optimize_use_projections=1, optimize_aggregation_in_order=1, force_optimize_projection = 1;
 WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM agg WHERE ts > '2021-12-06 22:00:00' GROUP BY a ORDER BY v LIMIT 5;
 WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM agg WHERE ts > '2021-12-06 22:00:00' GROUP BY toStartOfHour(ts), a ORDER BY v LIMIT 5;
View File
@@ -1,4 +1,4 @@
-set allow_experimental_projection_optimization = 1;
+set optimize_use_projections = 1;
 drop table if exists x;
View File
@@ -1,4 +1,4 @@
-set allow_experimental_projection_optimization = 1;
+set optimize_use_projections = 1;
 drop table if exists t;
View File
@@ -4,7 +4,7 @@ create table t (i int, j int, k int, projection p (select * order by j)) engine
 insert into t select number, number, number from numbers(10);
-set allow_experimental_projection_optimization = 1, max_rows_to_read = 3;
+set optimize_use_projections = 1, max_rows_to_read = 3;
 select * from t where i < 5 and j in (1, 2);
View File
@@ -3,7 +3,7 @@ create table x (i UInt64, j UInt64, k UInt64, projection agg (select sum(j), avg
 insert into x values (1, 2, 3);
-set allow_experimental_projection_optimization = 1, use_index_for_in_with_subqueries = 0;
+set optimize_use_projections = 1, use_index_for_in_with_subqueries = 0;
 select sum(j), avg(k) from x where i in (select number from numbers(4));
View File
@@ -4,6 +4,6 @@ CREATE TABLE t (`key` UInt32, `created_at` Date, `value` UInt32, PROJECTION xxx
 INSERT INTO t SELECT 1 AS key, today() + (number % 30), number FROM numbers(1000);
-ALTER TABLE t UPDATE value = 0 WHERE (value > 0) AND (created_at >= '2021-12-21') SETTINGS allow_experimental_projection_optimization = 1;
+ALTER TABLE t UPDATE value = 0 WHERE (value > 0) AND (created_at >= '2021-12-21') SETTINGS optimize_use_projections = 1;
 DROP TABLE IF EXISTS t;
View File
@@ -12,7 +12,7 @@ optimize table t final;
 alter table t materialize projection p_norm settings mutations_sync = 1;
-set allow_experimental_projection_optimization = 1, max_rows_to_read = 3;
+set optimize_use_projections = 1, max_rows_to_read = 3;
 select c18 from t where c1 < 0;
View File
@@ -2,20 +2,20 @@ drop table if exists t;
 create table t (s UInt16, l UInt16, projection p (select s, l order by l)) engine MergeTree order by s;
-select s from t join (select toUInt16(1) as s) x using (s) order by s settings allow_experimental_projection_optimization = 1;
-select s from t join (select toUInt16(1) as s) x using (s) order by s settings allow_experimental_projection_optimization = 0;
+select s from t join (select toUInt16(1) as s) x using (s) order by s settings optimize_use_projections = 1;
+select s from t join (select toUInt16(1) as s) x using (s) order by s settings optimize_use_projections = 0;
 drop table t;
 drop table if exists mt;
 create table mt (id1 Int8, id2 Int8) Engine=MergeTree order by tuple();
-select alias1 from (select id1, id1 as alias1 from mt) as l all inner join (select id2 as alias1 from mt) as t using (alias1) order by l.id1 settings allow_experimental_projection_optimization = 1;
-select id1 from mt all inner join (select id2 as id1 from mt) as t using (id1) order by id1 settings allow_experimental_projection_optimization = 1;
-select id2 as id1 from mt all inner join (select id1 from mt) as t using (id1) order by id1 settings allow_experimental_projection_optimization = 1;
+select alias1 from (select id1, id1 as alias1 from mt) as l all inner join (select id2 as alias1 from mt) as t using (alias1) order by l.id1 settings optimize_use_projections = 1;
+select id1 from mt all inner join (select id2 as id1 from mt) as t using (id1) order by id1 settings optimize_use_projections = 1;
+select id2 as id1 from mt all inner join (select id1 from mt) as t using (id1) order by id1 settings optimize_use_projections = 1;
 drop table mt;
 drop table if exists j;
 create table j (id1 Int8, id2 Int8, projection p (select id1, id2 order by id2)) Engine=MergeTree order by id1 settings index_granularity = 1;
 insert into j select number, number from numbers(10);
-select alias1 from (select id1, id1 as alias1 from j) as l all inner join (select id2, id2 as alias1 from j where id2 in (1, 2, 3)) as t using (alias1) where id2 in (2, 3, 4) order by id1 settings allow_experimental_projection_optimization = 1;
+select alias1 from (select id1, id1 as alias1 from j) as l all inner join (select id2, id2 as alias1 from j where id2 in (1, 2, 3)) as t using (alias1) where id2 in (2, 3, 4) order by id1 settings optimize_use_projections = 1;
 drop table j;
View File
@@ -4,6 +4,6 @@ create table t (x UInt32) engine = MergeTree order by tuple() settings index_gra
 insert into t select number from numbers(100);
 alter table t add projection p (select uniqHLL12(x));
 insert into t select number + 100 from numbers(100);
-select uniqHLL12(x) from t settings allow_experimental_projection_optimization = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError 307 }
+select uniqHLL12(x) from t settings optimize_use_projections = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError 307 }
 drop table if exists t;
View File
@@ -4,7 +4,7 @@ create table projection_test (`sum(block_count)` UInt64, domain_alias UInt64 ali
 insert into projection_test with rowNumberInAllBlocks() as id select 1, toDateTime('2020-10-24 00:00:00') + (id / 20), toString(id % 100), * from generateRandom('x_id String, y_id String, block_count Int64, retry_count Int64, duration Int64, kbytes Int64, buffer_time Int64, first_time Int64, total_bytes Nullable(UInt64), valid_bytes Nullable(UInt64), completed_bytes Nullable(UInt64), fixed_bytes Nullable(UInt64), force_bytes Nullable(UInt64)', 10, 10, 1) limit 1000 settings max_threads = 1;
-set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+set optimize_use_projections = 1, force_optimize_projection = 1;
 select * from projection_test; -- { serverError 584 }
 select toStartOfMinute(datetime) dt_m, countIf(first_time = 0) from projection_test join (select 1) x on 1 where domain = '1' group by dt_m order by dt_m; -- { serverError 584 }
@@ -47,6 +47,6 @@ drop table if exists projection_test;
 drop table if exists projection_without_key;
 create table projection_without_key (key UInt32, PROJECTION x (SELECT max(key))) engine MergeTree order by key;
 insert into projection_without_key select number from numbers(1000);
-set force_optimize_projection = 1, allow_experimental_projection_optimization = 1;
+set force_optimize_projection = 1, optimize_use_projections = 1;
 select max(key) from projection_without_key;
 drop table projection_without_key;
View File
@@ -2,7 +2,7 @@ drop table if exists projection_without_key;
 create table projection_without_key (key UInt32, PROJECTION x (SELECT sum(key) group by key % 3)) engine MergeTree order by key;
 insert into projection_without_key select number from numbers(1000);
-select sum(key) from projection_without_key settings allow_experimental_projection_optimization = 1;
-select sum(key) from projection_without_key settings allow_experimental_projection_optimization = 0;
+select sum(key) from projection_without_key settings optimize_use_projections = 1;
+select sum(key) from projection_without_key settings optimize_use_projections = 0;
 drop table projection_without_key;
View File
@@ -8,7 +8,7 @@ create table projection_test (dt DateTime, cost Int64, projection p (select toSt
 insert into projection_test with rowNumberInAllBlocks() as id select toDateTime('2020-10-24 00:00:00') + (id / 20), * from generateRandom('cost Int64', 10, 10, 1) limit 1000 settings max_threads = 1;
-set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+set optimize_use_projections = 1, force_optimize_projection = 1;
 select toStartOfMinute(dt) dt_m, sum(cost) from projection_test group by dt_m;
 select sum(cost) from projection_test;
View File
@@ -38,7 +38,7 @@ function run_query()
 echo "$query"
 local opts=(
-    --allow_experimental_projection_optimization 1
+    --optimize_use_projections 1
     --force_optimize_projection 1
     --log_processors_profiles 1
     --query_id "$query_id"
View File
@@ -44,7 +44,7 @@ function run_query()
 echo "$query"
 local opts=(
-    --allow_experimental_projection_optimization 1
+    --optimize_use_projections 1
     --force_optimize_projection 1
     --log_processors_profiles 1
     --query_id "$query_id"
View File
@@ -0,0 +1,2 @@
+Hello, World! From client.
+Hello, World! From local.
View File
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+set -e
+[ -e "${CLICKHOUSE_TMP}"/test_append_to_output_file ] && rm "${CLICKHOUSE_TMP}"/test_append_to_output_file
+${CLICKHOUSE_CLIENT} --query "SELECT * FROM (SELECT 'Hello, World! From client.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_append_to_output_file'"
+${CLICKHOUSE_LOCAL} --query "SELECT * FROM (SELECT 'Hello, World! From local.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_append_to_output_file' APPEND"
+cat ${CLICKHOUSE_TMP}/test_append_to_output_file
+rm -f "${CLICKHOUSE_TMP}/test_append_to_output_file"
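The new test above exercises the APPEND clause of INTO OUTFILE: the first query creates the file and the second appends to it, where a plain INTO OUTFILE against an existing file would normally be rejected. A minimal SQL sketch (the path is illustrative):

SELECT 'first' INTO OUTFILE '/tmp/append_demo.txt';
SELECT 'second' INTO OUTFILE '/tmp/append_demo.txt' APPEND;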
View File
@@ -297,7 +297,7 @@ CREATE TABLE system.grants
 (
 `user_name` Nullable(String),
 `role_name` Nullable(String),
-`access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM' = 138, 'dictGet' = 139, 'displaySecretsInShowAndSelect' = 140, 'addressToLine' = 141, 'addressToLineWithInlines' = 142, 'addressToSymbol' = 143, 'demangle' = 144, 'INTROSPECTION' = 145, 'FILE' = 146, 'URL' = 147, 'REMOTE' = 148, 'MONGO' = 149, 'MEILISEARCH' = 150, 'MYSQL' = 151, 'POSTGRES' = 152, 'SQLITE' = 153, 'ODBC' = 154, 'JDBC' = 155, 'HDFS' = 156, 'S3' = 157, 'HIVE' = 158, 'SOURCES' = 159, 'CLUSTER' = 160, 'ALL' = 161, 'NONE' = 162),
+`access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'MEILISEARCH' = 151, 'MYSQL' = 152, 'POSTGRES' = 153, 'SQLITE' = 154, 'ODBC' = 155, 'JDBC' = 156, 'HDFS' = 157, 'S3' = 158, 'HIVE' = 159, 'SOURCES' = 160, 'CLUSTER' = 161, 'ALL' = 162, 'NONE' = 163),
 `database` Nullable(String),
 `table` Nullable(String),
 `column` Nullable(String),
@@ -581,10 +581,10 @@ ENGINE = SystemPartsColumns
 COMMENT 'SYSTEM TABLE is built on the fly.'
 CREATE TABLE system.privileges
 (
-`privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM' = 138, 'dictGet' = 139, 'displaySecretsInShowAndSelect' = 140, 'addressToLine' = 141, 'addressToLineWithInlines' = 142, 'addressToSymbol' = 143, 'demangle' = 144, 'INTROSPECTION' = 145, 'FILE' = 146, 'URL' = 147, 'REMOTE' = 148, 'MONGO' = 149, 'MEILISEARCH' = 150, 'MYSQL' = 151, 'POSTGRES' = 152, 'SQLITE' = 153, 'ODBC' = 154, 'JDBC' = 155, 'HDFS' = 156, 'S3' = 157, 'HIVE' = 158, 'SOURCES' = 159, 'CLUSTER' = 160, 'ALL' = 161, 'NONE' = 162),
+`privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'MEILISEARCH' = 151, 'MYSQL' = 152, 'POSTGRES' = 153, 'SQLITE' = 154, 'ODBC' = 155, 'JDBC' = 156, 'HDFS' = 157, 'S3' = 158, 'HIVE' = 159, 'SOURCES' = 160, 'CLUSTER' = 161, 'ALL' = 162, 'NONE' = 163),
 `aliases` Array(String),
 `level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5, 'NAMED_COLLECTION' = 6)),
-`parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM' = 138, 'dictGet' = 139, 'displaySecretsInShowAndSelect' = 140, 'addressToLine' = 141, 'addressToLineWithInlines' = 142, 'addressToSymbol' = 143, 'demangle' = 144, 'INTROSPECTION' = 145, 'FILE' = 146, 'URL' = 147, 'REMOTE' = 148, 'MONGO' = 149, 'MEILISEARCH' = 150, 'MYSQL' = 151, 'POSTGRES' = 152, 'SQLITE' = 153, 'ODBC' = 154, 'JDBC' = 155, 'HDFS' = 156, 'S3' = 157, 'HIVE' = 158, 'SOURCES' = 159, 'CLUSTER' = 160, 'ALL' = 161, 'NONE' = 162))
+`parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'MEILISEARCH' = 151, 'MYSQL' = 152, 'POSTGRES' = 153, 'SQLITE' = 154, 'ODBC' = 155, 'JDBC' = 156, 'HDFS' = 157, 'S3' = 158, 'HIVE' = 159, 'SOURCES' = 160, 'CLUSTER' = 161, 'ALL' = 162, 'NONE' = 163))
)
ENGINE = SystemPrivileges
COMMENT 'SYSTEM TABLE is built on the fly.'
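Note: the change above inserts 'SYSTEM FAILPOINT' into both privilege enums of system.privileges, which shifts every subsequent numeric value up by one. A minimal sketch for checking the new entry on a server built with this change; the `privilege` column name is assumed from the table's usual layout, only `parent_group` is visible in the definition above:

SELECT privilege, parent_group
FROM system.privileges
WHERE privilege = 'SYSTEM FAILPOINT';
-- should return a single row once the privilege is registered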

View File

@@ -72,8 +72,8 @@ SYSTEM SYNC REPLICA wikistat2;
-- Such condition will lead to successful queries.
SELECT 0 FROM numbers(5) WHERE sleepEachRow(1) = 1;
-select sum(hits), count() from wikistat1 GROUP BY project, subproject, path settings allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+select sum(hits), count() from wikistat1 GROUP BY project, subproject, path settings optimize_use_projections = 1, force_optimize_projection = 1;
-select sum(hits), count() from wikistat2 GROUP BY project, subproject, path settings allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+select sum(hits), count() from wikistat2 GROUP BY project, subproject, path settings optimize_use_projections = 1, force_optimize_projection = 1;
DROP TABLE wikistat1;
DROP TABLE wikistat2;
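For context: the queries above exercise projections, and the hunk only swaps the old setting name allow_experimental_projection_optimization for the new optimize_use_projections. A self-contained sketch of the renamed setting; the table and projection names here are hypothetical, not taken from this diff:

drop table if exists hits_sketch;
create table hits_sketch
(
    project String,
    hits UInt64,
    projection p_by_project (select project, sum(hits) group by project)
)
engine = MergeTree order by project;
insert into hits_sketch values ('a', 1), ('b', 2), ('a', 3);
-- force_optimize_projection makes the query fail if the projection is not used
select project, sum(hits) from hits_sketch group by project
settings optimize_use_projections = 1, force_optimize_projection = 1;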

View File

@@ -3,7 +3,7 @@
set max_threads = 16;
set allow_aggregate_partitions_independently = 1;
set force_aggregate_partitions_independently = 1;
-set allow_experimental_projection_optimization = 0;
+set optimize_use_projections = 0;
create table t1(a UInt32) engine=MergeTree order by tuple() partition by a % 4 settings index_granularity = 8192, index_granularity_bytes = 10485760;

View File

@@ -9,6 +9,6 @@ ENGINE = Memory;
INSERT INTO data_a_02187
SELECT *
FROM system.one
-SETTINGS max_block_size = '1', min_insert_block_size_rows = '65536', min_insert_block_size_bytes = '0', max_insert_threads = '0', max_threads = '3', receive_timeout = '10', receive_data_timeout_ms = '10000', connections_with_failover_max_tries = '0', extremes = '1', use_uncompressed_cache = '0', optimize_move_to_prewhere = '1', optimize_move_to_prewhere_if_final = '0', replication_alter_partitions_sync = '2', totals_mode = 'before_having', allow_suspicious_low_cardinality_types = '1', compile_expressions = '1', min_count_to_compile_expression = '0', group_by_two_level_threshold = '100', distributed_aggregation_memory_efficient = '0', distributed_group_by_no_merge = '1', optimize_distributed_group_by_sharding_key = '1', optimize_skip_unused_shards = '1', optimize_skip_unused_shards_rewrite_in = '1', force_optimize_skip_unused_shards = '2', optimize_skip_unused_shards_nesting = '1', force_optimize_skip_unused_shards_nesting = '2', merge_tree_min_rows_for_concurrent_read = '10000', force_primary_key = '1', network_compression_method = 'ZSTD', network_zstd_compression_level = '7', log_queries = '0', log_queries_min_type = 'QUERY_FINISH', distributed_product_mode = 'local', insert_quorum = '2', insert_quorum_timeout = '0', insert_quorum_parallel = '0', select_sequential_consistency = '1', join_use_nulls = '1', any_join_distinct_right_table_keys = '1', preferred_max_column_in_block_size_bytes = '32', insert_distributed_sync = '1', insert_allow_materialized_columns = '1', use_index_for_in_with_subqueries = '1', joined_subquery_requires_alias = '0', empty_result_for_aggregation_by_empty_set = '1', allow_suspicious_codecs = '1', query_profiler_real_time_period_ns = '0', query_profiler_cpu_time_period_ns = '0', opentelemetry_start_trace_probability = '1', max_rows_to_read = '1000000', read_overflow_mode = 'break', max_rows_to_group_by = '10', group_by_overflow_mode = 'any', max_rows_to_sort = '100', sort_overflow_mode = 'break', max_result_rows = '10', max_execution_time = '3', max_execution_speed = '1', max_bytes_in_join = '100', join_algorithm = 'partial_merge', max_memory_usage = '1099511627776', log_query_threads = '1', send_logs_level = 'fatal', enable_optimize_predicate_expression = '1', prefer_localhost_replica = '1', optimize_read_in_order = '1', optimize_aggregation_in_order = '1', read_in_order_two_level_merge_threshold = '1', allow_introspection_functions = '1', check_query_single_value_result = '1', allow_experimental_live_view = '1', default_table_engine = 'Memory', mutations_sync = '2', convert_query_to_cnf = '0', optimize_arithmetic_operations_in_aggregate_functions = '1', optimize_duplicate_order_by_and_distinct = '0', optimize_multiif_to_if = '0', optimize_monotonous_functions_in_order_by = '1', optimize_functions_to_subcolumns = '1', optimize_using_constraints = '1', optimize_substitute_columns = '1', optimize_append_index = '1', transform_null_in = '1', allow_experimental_geo_types = '1', data_type_default_nullable = '1', cast_keep_nullable = '1', cast_ipv4_ipv6_default_on_conversion_error = '0', system_events_show_zero_values = '1', enable_global_with_statement = '1', optimize_on_insert = '0', optimize_rewrite_sum_if_to_count_if = '1', distributed_ddl_output_mode = 'throw', union_default_mode = 'ALL', optimize_aggregators_of_group_by_keys = '1', optimize_group_by_function_keys = '1', short_circuit_function_evaluation = 'enable', async_insert = '1', enable_filesystem_cache = '0', allow_deprecated_database_ordinary = '1', allow_deprecated_syntax_for_merge_tree = '1', allow_experimental_nlp_functions = '1', allow_experimental_object_type = '1', allow_experimental_map_type = '1', allow_experimental_projection_optimization = '1', input_format_null_as_default = '1', input_format_ipv4_default_on_conversion_error = '0', input_format_ipv6_default_on_conversion_error = '0', output_format_json_named_tuples_as_objects = '1', output_format_write_statistics = '0', output_format_pretty_row_numbers = '1';
+SETTINGS max_block_size = '1', min_insert_block_size_rows = '65536', min_insert_block_size_bytes = '0', max_insert_threads = '0', max_threads = '3', receive_timeout = '10', receive_data_timeout_ms = '10000', connections_with_failover_max_tries = '0', extremes = '1', use_uncompressed_cache = '0', optimize_move_to_prewhere = '1', optimize_move_to_prewhere_if_final = '0', replication_alter_partitions_sync = '2', totals_mode = 'before_having', allow_suspicious_low_cardinality_types = '1', compile_expressions = '1', min_count_to_compile_expression = '0', group_by_two_level_threshold = '100', distributed_aggregation_memory_efficient = '0', distributed_group_by_no_merge = '1', optimize_distributed_group_by_sharding_key = '1', optimize_skip_unused_shards = '1', optimize_skip_unused_shards_rewrite_in = '1', force_optimize_skip_unused_shards = '2', optimize_skip_unused_shards_nesting = '1', force_optimize_skip_unused_shards_nesting = '2', merge_tree_min_rows_for_concurrent_read = '10000', force_primary_key = '1', network_compression_method = 'ZSTD', network_zstd_compression_level = '7', log_queries = '0', log_queries_min_type = 'QUERY_FINISH', distributed_product_mode = 'local', insert_quorum = '2', insert_quorum_timeout = '0', insert_quorum_parallel = '0', select_sequential_consistency = '1', join_use_nulls = '1', any_join_distinct_right_table_keys = '1', preferred_max_column_in_block_size_bytes = '32', insert_distributed_sync = '1', insert_allow_materialized_columns = '1', use_index_for_in_with_subqueries = '1', joined_subquery_requires_alias = '0', empty_result_for_aggregation_by_empty_set = '1', allow_suspicious_codecs = '1', query_profiler_real_time_period_ns = '0', query_profiler_cpu_time_period_ns = '0', opentelemetry_start_trace_probability = '1', max_rows_to_read = '1000000', read_overflow_mode = 'break', max_rows_to_group_by = '10', group_by_overflow_mode = 'any', max_rows_to_sort = '100', sort_overflow_mode = 'break', max_result_rows = '10', max_execution_time = '3', max_execution_speed = '1', max_bytes_in_join = '100', join_algorithm = 'partial_merge', max_memory_usage = '1099511627776', log_query_threads = '1', send_logs_level = 'fatal', enable_optimize_predicate_expression = '1', prefer_localhost_replica = '1', optimize_read_in_order = '1', optimize_aggregation_in_order = '1', read_in_order_two_level_merge_threshold = '1', allow_introspection_functions = '1', check_query_single_value_result = '1', allow_experimental_live_view = '1', default_table_engine = 'Memory', mutations_sync = '2', convert_query_to_cnf = '0', optimize_arithmetic_operations_in_aggregate_functions = '1', optimize_duplicate_order_by_and_distinct = '0', optimize_multiif_to_if = '0', optimize_monotonous_functions_in_order_by = '1', optimize_functions_to_subcolumns = '1', optimize_using_constraints = '1', optimize_substitute_columns = '1', optimize_append_index = '1', transform_null_in = '1', allow_experimental_geo_types = '1', data_type_default_nullable = '1', cast_keep_nullable = '1', cast_ipv4_ipv6_default_on_conversion_error = '0', system_events_show_zero_values = '1', enable_global_with_statement = '1', optimize_on_insert = '0', optimize_rewrite_sum_if_to_count_if = '1', distributed_ddl_output_mode = 'throw', union_default_mode = 'ALL', optimize_aggregators_of_group_by_keys = '1', optimize_group_by_function_keys = '1', short_circuit_function_evaluation = 'enable', async_insert = '1', enable_filesystem_cache = '0', allow_deprecated_database_ordinary = '1', allow_deprecated_syntax_for_merge_tree = '1', allow_experimental_nlp_functions = '1', allow_experimental_object_type = '1', allow_experimental_map_type = '1', optimize_use_projections = '1', input_format_null_as_default = '1', input_format_ipv4_default_on_conversion_error = '0', input_format_ipv6_default_on_conversion_error = '0', output_format_json_named_tuples_as_objects = '1', output_format_write_statistics = '0', output_format_pretty_row_numbers = '1';
DROP TABLE data_a_02187;

View File

@@ -0,0 +1,20 @@
-- { echo }
-- create table
drop table if exists test;
create table test(`a` Nullable(Int32), `b` Nullable(Int32)) ENGINE = Memory;
insert into test (a,b) values (1,null), (2,3), (4, 5), (6,null);
-- first value
select first_value(b) from test;
3
select first_value(b) ignore nulls from test;
3
select first_value(b) respect nulls from test;
\N
-- last value
select last_value(b) from test;
5
select last_value(b) ignore nulls from test;
5
select last_value(b) respect nulls from test;
\N

View File

@@ -0,0 +1,16 @@
-- { echo }
-- create table
drop table if exists test;
create table test(`a` Nullable(Int32), `b` Nullable(Int32)) ENGINE = Memory;
insert into test (a,b) values (1,null), (2,3), (4, 5), (6,null);
-- first value
select first_value(b) from test;
select first_value(b) ignore nulls from test;
select first_value(b) respect nulls from test;
-- last value
select last_value(b) from test;
select last_value(b) ignore nulls from test;
select last_value(b) respect nulls from test;
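The pair of new files above pins down the IGNORE NULLS / RESPECT NULLS semantics: by default first_value and last_value skip NULLs (hence 3 and 5 in the reference file), while RESPECT NULLS takes the value of the first or last row as-is, and b is NULL on both the first and the last inserted row (hence \N). A one-line restatement of the same behavior; it assumes the values() table function accepts single-column rows written this way:

select first_value(b), first_value(b) respect nulls
from values('b Nullable(Int32)', NULL, 3, 5, NULL);
-- expected: 3 and \N, matching the reference output above (row order is
-- preserved here only because the read is small and single-threaded)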

View File

@@ -62,8 +62,8 @@ def test_ks_all_alternatives(rvs1, rvs2):
def test_kolmogorov_smirnov():
-    rvs1 = np.round(stats.norm.rvs(loc=1, scale=5, size=10), 2)
+    rvs1 = np.round(stats.norm.rvs(loc=1, scale=5, size=100), 2)
-    rvs2 = np.round(stats.norm.rvs(loc=1.5, scale=5, size=20), 2)
+    rvs2 = np.round(stats.norm.rvs(loc=1.5, scale=5, size=200), 2)
    test_ks_all_alternatives(rvs1, rvs2)
    rvs1 = np.round(stats.norm.rvs(loc=13, scale=1, size=100), 2)
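The hunk above only enlarges the random samples (10 to 100 and 20 to 200), which makes the comparison against scipy's reference results far less noisy, since the Kolmogorov-Smirnov statistic stabilizes as sample size grows. For reference, the server-side aggregate this test appears to exercise can be invoked directly; the signature below, kolmogorovSmirnovTest([alternative])(sample_data, sample_index), is recalled from the ClickHouse docs and should be treated as an assumption rather than a guarantee:

select kolmogorovSmirnovTest('two-sided')(value, num)
from
(
    -- two normal samples with slightly different means, as in the test
    select randNormal(1, 5) as value, 0 as num from numbers(100)
    union all
    select randNormal(1.5, 5), 1 from numbers(200)
);
-- returns a tuple (d_statistic, p_value)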

Some files were not shown because too many files have changed in this diff.