Make projections production-ready

Alexey Milovidov 2023-05-10 03:35:13 +02:00
parent c698d2af99
commit 8a6e07f0ea
36 changed files with 123 additions and 122 deletions

View File

@ -3562,7 +3562,7 @@ Default value: `1`.
If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays.
## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
## optimize_use_projections {#optimize_use_projections}
Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md/#projections) optimization when processing `SELECT` queries.
@ -3575,7 +3575,7 @@ Default value: `1`.
## force_optimize_projection {#force-optimize-projection}
Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md/#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md/#projections) in `SELECT` queries, when projection optimization is enabled (see [optimize_use_projections](#optimize_use_projections) setting).
Possible values:

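For context, a minimal sketch of how these two settings are typically combined, using an illustrative table and projection that are not part of the change above:

```sql
-- Hypothetical table with an aggregate projection (names are illustrative).
CREATE TABLE page_hits
(
    dt DateTime,
    path String,
    hits UInt64,
    PROJECTION p_daily (SELECT toStartOfDay(dt), sum(hits) GROUP BY toStartOfDay(dt))
)
ENGINE = MergeTree
ORDER BY (path, dt);

-- Ask ClickHouse to use projections, and fail the query if none can be applied.
SELECT toStartOfDay(dt), sum(hits)
FROM page_hits
GROUP BY toStartOfDay(dt)
SETTINGS optimize_use_projections = 1, force_optimize_projection = 1;
```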
View File

@ -391,7 +391,7 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT
## Projections {#projections}
Projections are similar to [materialized views](../../../sql-reference/statements/create/view.md#materialized), but they are defined at the level of data parts. This provides data consistency guarantees along with automatic use in queries.
Projections are an experimental feature. To enable projection support, set the [allow_experimental_projection_optimization](../../../operations/settings/settings.md#allow-experimental-projection-optimization) setting to `1`. See also the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting.
Projections are an experimental feature. To enable projection support, set the [optimize_use_projections](../../../operations/settings/settings.md#optimize_use_projections) setting to `1`. See also the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting.
Projections are not supported in `SELECT` queries with the [FINAL](../../../sql-reference/statements/select/from.md#select-from-final) modifier.
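As a brief illustration of the workflow described above (the table and column names here are hypothetical), a projection can also be added to an existing table and backfilled for parts that were written before it existed:

```sql
-- Add a projection to an existing MergeTree table (illustrative names).
ALTER TABLE events ADD PROJECTION p_by_user (SELECT * ORDER BY user_id);

-- New parts get the projection automatically; already existing parts need an explicit backfill.
ALTER TABLE events MATERIALIZE PROJECTION p_by_user SETTINGS mutations_sync = 1;
```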

View File

@ -3588,7 +3588,7 @@ SETTINGS index_granularity = 8192 │
A string with the identifier of the snapshot from which the [initial dump of PostgreSQL tables](../../engines/database-engines/materialized-postgresql.md) will be performed. This setting must be used together with [materialized_postgresql_replication_slot](#materialized-postgresql-replication-slot).
## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
## optimize_use_projections {#optimize_use_projections}
Enables or disables support for [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) when processing `SELECT` queries.
@ -3601,7 +3601,7 @@ SETTINGS index_granularity = 8192 │
## force_optimize_projection {#force-optimize-projection}
Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries when projection support is enabled (see the [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries when projection support is enabled (see the [optimize_use_projections](#optimize_use_projections) setting).
Possible values:

View File

@ -1074,7 +1074,7 @@ The corresponding trace log entries in the ClickHouse server log file confirm that ClickHouse is
<a href="https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#projections" target="_blank">Projections</a> are currently an experimental feature, so we need to tell ClickHouse:
```sql
SET allow_experimental_projection_optimization = 1;
SET optimize_use_projections = 1;
```

View File

@ -560,6 +560,7 @@ class IColumn;
M(Bool, asterisk_include_alias_columns, false, "Include ALIAS columns for wildcard query", 0) \
M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \
M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \
M(Bool, optimize_use_projections, true, "Automatically choose projections to perform SELECT query", 0) \
M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \
M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
M(Bool, async_query_sending_for_remote, true, "Asynchronously create connections and send query to shards in remote query", 0) \
@ -715,26 +716,12 @@ class IColumn;
M(Milliseconds, storage_system_stack_trace_pipe_read_timeout_ms, 100, "Maximum time to read from a pipe for receiving information from the threads when querying the `system.stack_trace` table. This setting is used for testing purposes and not meant to be changed by users.", 0) \
\
M(Bool, parallelize_output_from_storages, true, "Parallelize output for reading step from storage. It allows parallelizing query processing right after reading from storage if possible", 0) \
\
/** Experimental functions */ \
M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \
M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \
M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions (hashid, etc)", 0) \
M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \
M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \
M(String, ann_index_select_query_params, "", "Parameters passed to ANN indexes in SELECT queries, the format is 'param1=x, param2=y, ...'", 0) \
M(UInt64, max_limit_for_ann_queries, 1000000, "Maximum limit value for using ANN indexes is used to prevent memory overflow in search queries for indexes", 0) \
M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \
M(Bool, count_distinct_optimization, false, "Rewrite count distinct to subquery of group by", 0) \
M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \
M(TransactionsWaitCSNMode, wait_changes_become_visible_after_commit_mode, TransactionsWaitCSNMode::WAIT_UNKNOWN, "Wait for committed changes to become actually visible in the latest snapshot", 0) \
M(Bool, implicit_transaction, false, "If enabled and not already inside a transaction, wraps the query inside a full transaction (begin + commit or rollback)", 0) \
M(Bool, throw_if_no_data_to_insert, true, "Enables or disables empty INSERTs, enabled by default", 0) \
M(Bool, compatibility_ignore_auto_increment_in_create_table, false, "Ignore AUTO_INCREMENT keyword in column declaration if true, otherwise return error. It simplifies migration from MySQL", 0) \
M(Bool, multiple_joins_try_to_keep_original_names, false, "Do not add aliases to top level expression list on multiple joins rewrite", 0) \
M(UInt64, grace_hash_join_initial_buckets, 1, "Initial number of grace hash join buckets", 0) \
M(UInt64, grace_hash_join_max_buckets, 1024, "Limit on the number of grace hash join buckets", 0) \
M(Bool, optimize_distinct_in_order, false, "This optimization has a bug and it is disabled. Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \
M(Bool, optimize_sorting_by_input_stream_properties, true, "Optimize sorting by sorting properties of input stream", 0) \
M(UInt64, insert_keeper_max_retries, 20, "Max retries for keeper operations during insert", 0) \
M(UInt64, insert_keeper_retry_initial_backoff_ms, 100, "Initial backoff timeout for keeper operations during insert", 0) \
@ -743,10 +730,24 @@ class IColumn;
M(UInt64, insert_keeper_fault_injection_seed, 0, "0 - random seed, otherwise the setting value", 0) \
M(Bool, force_aggregation_in_order, false, "Force use of aggregation in order on remote nodes during distributed aggregation. PLEASE, NEVER CHANGE THIS SETTING VALUE MANUALLY!", IMPORTANT) \
M(UInt64, http_max_request_param_data_size, 10_MiB, "Limit on size of request data used as a query parameter in predefined HTTP requests.", 0) \
M(Bool, function_json_value_return_type_allow_nullable, false, "Allow function JSON_VALUE to return nullable type.", 0) \
M(Bool, function_json_value_return_type_allow_complex, false, "Allow function JSON_VALUE to return complex type, such as: struct, array, map.", 0) \
\
/** Experimental functions */ \
M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \
M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \
M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions (hashid, etc)", 0) \
M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \
M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \
M(UInt64, max_limit_for_ann_queries, 1000000, "Maximum limit value for using ANN indexes is used to prevent memory overflow in search queries for indexes", 0) \
M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \
M(TransactionsWaitCSNMode, wait_changes_become_visible_after_commit_mode, TransactionsWaitCSNMode::WAIT_UNKNOWN, "Wait for committed changes to become actually visible in the latest snapshot", 0) \
M(Bool, implicit_transaction, false, "If enabled and not already inside a transaction, wraps the query inside a full transaction (begin + commit or rollback)", 0) \
M(UInt64, grace_hash_join_initial_buckets, 1, "Initial number of grace hash join buckets", 0) \
M(UInt64, grace_hash_join_max_buckets, 1024, "Limit on the number of grace hash join buckets", 0) \
M(Bool, optimize_distinct_in_order, false, "This optimization has a bug and it is disabled. Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \
M(Bool, allow_experimental_undrop_table_query, false, "Allow to use undrop query to restore dropped table in a limited time", 0) \
M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \
M(Bool, function_json_value_return_type_allow_nullable, false, "Allow function to return nullable type.", 0) \
M(Bool, function_json_value_return_type_allow_complex, false, "Allow function to return complex type, such as: struct, array, map.", 0) \
// End of COMMON_SETTINGS
// Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS.
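Not part of the diff, but a quick way to read the resulting defaults back at runtime is to query `system.settings`:

```sql
-- Inspect the current values and descriptions of the renamed setting and its companion.
SELECT name, value, description
FROM system.settings
WHERE name IN ('optimize_use_projections', 'force_optimize_projection');
```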

View File

@ -17,7 +17,7 @@ QueryPlanOptimizationSettings QueryPlanOptimizationSettings::fromSettings(const
settings.remove_redundant_sorting = from.query_plan_remove_redundant_sorting;
settings.aggregate_partitions_independently = from.allow_aggregate_partitions_independently;
settings.remove_redundant_distinct = from.query_plan_remove_redundant_distinct;
settings.optimize_projection = from.allow_experimental_projection_optimization && from.query_plan_optimize_projection;
settings.optimize_projection = from.optimize_use_projections && from.query_plan_optimize_projection;
settings.force_use_projection = settings.optimize_projection && from.force_optimize_projection;
return settings;
}

View File

@ -170,7 +170,7 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s
if (optimization_settings.force_use_projection && has_reading_from_mt && num_applied_projection == 0)
throw Exception(
ErrorCodes::PROJECTION_NOT_USED,
"No projection is used when allow_experimental_projection_optimization = 1 and force_optimize_projection = 1");
"No projection is used when optimize_use_projections = 1 and force_optimize_projection = 1");
}
}
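To make the failure mode concrete, here is a sketch of the kind of query this check rejects (it mirrors one of the tests further down in this diff; error code 584 is assumed to map to PROJECTION_NOT_USED):

```sql
SET optimize_use_projections = 1, force_optimize_projection = 1;

-- If the table only defines aggregate projections, a plain SELECT * cannot be served by any of them,
-- so instead of silently falling back to a normal read the server throws PROJECTION_NOT_USED.
SELECT * FROM projection_test; -- { serverError 584 }
```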

View File

@ -6242,7 +6242,7 @@ bool MergeTreeData::mayBenefitFromIndexForIn(
return true;
}
if (query_settings.allow_experimental_projection_optimization)
if (query_settings.optimize_use_projections)
{
for (const auto & projection : metadata_snapshot->getProjections())
if (projection.isPrimaryKeyColumnPossiblyWrappedInFunctions(ast))
@ -6613,7 +6613,7 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
if (!query_info.syntax_analyzer_result)
return std::nullopt;
if (!settings.allow_experimental_projection_optimization || query_info.ignore_projections || query_info.is_projection_query
if (!settings.optimize_use_projections || query_info.ignore_projections || query_info.is_projection_query
|| settings.aggregate_functions_null_for_empty /* projections don't work correctly with this setting */)
return std::nullopt;

View File

@ -175,10 +175,10 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
query_info.merge_tree_select_result_ptr,
enable_parallel_reading);
if (!step && settings.allow_experimental_projection_optimization && settings.force_optimize_projection
if (!step && settings.optimize_use_projections && settings.force_optimize_projection
&& !metadata_for_reading->projections.empty() && !settings.query_plan_optimize_projection)
throw Exception(ErrorCodes::PROJECTION_NOT_USED,
"No projection is used when allow_experimental_projection_optimization = 1 and force_optimize_projection = 1");
"No projection is used when optimize_use_projections = 1 and force_optimize_projection = 1");
auto plan = std::make_unique<QueryPlan>();
if (step)

View File

@ -1,7 +1,7 @@
<test>
<settings>
<max_insert_threads>8</max_insert_threads>
<allow_experimental_projection_optimization>0</allow_experimental_projection_optimization>
<optimize_use_projections>0</optimize_use_projections>
</settings>
<create_query>

View File

@ -1,7 +1,7 @@
<test>
<settings>
<max_insert_threads>8</max_insert_threads>
<allow_experimental_projection_optimization>0</allow_experimental_projection_optimization>
<optimize_use_projections>0</optimize_use_projections>
</settings>
<substitutions>
@ -46,4 +46,4 @@
<query>select min(i1), min(i2) from t_nullable group by {key_type} format Null</query>
<drop_query>drop table if exists t_nullable</drop_query>
</test>
</test>

View File

@ -122,7 +122,7 @@ create table pl (dt DateTime, i int, projection p (select sum(i) group by toStar
insert into pl values ('2020-10-24', 1);
set max_rows_to_read = 2;
select sum(i) from pd group by dt_m settings allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
select sum(i) from pd group by dt_m settings optimize_use_projections = 1, force_optimize_projection = 1;
drop table pd;
drop table pl;

View File

@ -1,4 +1,4 @@
set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
set optimize_use_projections = 1, force_optimize_projection = 1;
drop table if exists tp;

View File

@ -8,22 +8,22 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT -q "CREATE TABLE test_agg_proj (x Int32, y Int32, PROJECTION x_plus_y (SELECT sum(x - y), argMax(x, y) group by x + y)) ENGINE = MergeTree ORDER BY tuple() settings index_granularity = 1"
$CLICKHOUSE_CLIENT -q "insert into test_agg_proj select intDiv(number, 2), -intDiv(number,3) - 1 from numbers(100)"
$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1"
$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1"
$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1"
$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1"
$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings allow_experimental_projection_optimization=1"
$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings optimize_use_projections=1"
$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1"
$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1"
$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1"
$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1"
$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1"
$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings optimize_use_projections=1"
$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -q "drop table test_agg_proj"

View File

@ -2,7 +2,7 @@ drop table if exists tp;
create table tp (d1 Int32, d2 Int32, eventcnt Int64, projection p (select sum(eventcnt) group by d1)) engine = MergeTree order by (d1, d2);
set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
set optimize_use_projections = 1, force_optimize_projection = 1;
select sum(eventcnt) eventcnt, d1 from tp group by d1;

View File

@ -4,7 +4,7 @@ create table d (i int, j int) engine MergeTree partition by i % 2 order by tuple
insert into d select number, number from numbers(10000);
set max_rows_to_read = 2, allow_experimental_projection_optimization = 1;
set max_rows_to_read = 2, optimize_use_projections = 1;
select min(i), max(i), count() from d;
select min(i), max(i), count() from d group by _partition_id order by _partition_id;

View File

@ -9,7 +9,7 @@ alter table t add projection x (select * order by j);
insert into t values (1, 4);
insert into t values (1, 5);
set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
set optimize_use_projections = 1, force_optimize_projection = 1;
select i from t prewhere j = 4;

View File

@ -1,5 +1,5 @@
select where x < 10
optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
optimize_move_to_prewhere = 0, optimize_use_projections = 0
0 4294967295
1 4294967294
2 4294967293
@ -10,7 +10,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
7 4294967288
8 4294967287
9 4294967286
optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
optimize_move_to_prewhere = 0, optimize_use_projections = 1
0 4294967295
1 4294967294
2 4294967293
@ -21,7 +21,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
7 4294967288
8 4294967287
9 4294967286
optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
optimize_move_to_prewhere = 1, optimize_use_projections = 0
0 4294967295
1 4294967294
2 4294967293
@ -32,7 +32,7 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
7 4294967288
8 4294967287
9 4294967286
optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
optimize_move_to_prewhere = 1, optimize_use_projections = 1
0 4294967295
1 4294967294
2 4294967293
@ -43,16 +43,16 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
7 4294967288
8 4294967287
9 4294967286
optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
optimize_move_to_prewhere = 0, optimize_use_projections = 0
"rows_read": 100,
optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
optimize_move_to_prewhere = 0, optimize_use_projections = 1
"rows_read": 100,
optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
optimize_move_to_prewhere = 1, optimize_use_projections = 0
"rows_read": 100,
optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
optimize_move_to_prewhere = 1, optimize_use_projections = 1
"rows_read": 100,
select where y > 4294967286
optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
optimize_move_to_prewhere = 0, optimize_use_projections = 0
0 4294967295
1 4294967294
2 4294967293
@ -62,7 +62,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
6 4294967289
7 4294967288
8 4294967287
optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
optimize_move_to_prewhere = 0, optimize_use_projections = 1
0 4294967295
1 4294967294
2 4294967293
@ -72,7 +72,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
6 4294967289
7 4294967288
8 4294967287
optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
optimize_move_to_prewhere = 1, optimize_use_projections = 0
0 4294967295
1 4294967294
2 4294967293
@ -82,7 +82,7 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
6 4294967289
7 4294967288
8 4294967287
optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
optimize_move_to_prewhere = 1, optimize_use_projections = 1
0 4294967295
1 4294967294
2 4294967293
@ -92,12 +92,12 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
6 4294967289
7 4294967288
8 4294967287
optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
optimize_move_to_prewhere = 0, optimize_use_projections = 0
"rows_read": 100,
optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
optimize_move_to_prewhere = 0, optimize_use_projections = 1
"rows_read": 100,
optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
optimize_move_to_prewhere = 1, optimize_use_projections = 0
"rows_read": 100,
optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
optimize_move_to_prewhere = 1, optimize_use_projections = 1
"rows_read": 100,
50

View File

@ -9,73 +9,73 @@ $CLICKHOUSE_CLIENT -q "insert into test_sort_proj select number, toUInt32(-numbe
echo "select where x < 10"
echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0"
echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1"
echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0"
echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1"
echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0" | grep rows_read
SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0" | grep rows_read
echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1" | grep rows_read
SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1" | grep rows_read
echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0" | grep rows_read
SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0" | grep rows_read
echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1" | grep rows_read
SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1" | grep rows_read
echo "select where y > 4294967286"
echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0"
echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1"
echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0"
echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1"
echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0" | grep rows_read
SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0" | grep rows_read
echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1" | grep rows_read
SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1" | grep rows_read
echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0" | grep rows_read
SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0" | grep rows_read
echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1" | grep rows_read
SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1" | grep rows_read
$CLICKHOUSE_CLIENT -q "ALTER TABLE test_sort_proj DELETE WHERE x % 2 = 0 SETTINGS mutations_sync=2;"
$CLICKHOUSE_CLIENT -q "SELECT count() from test_sort_proj;"

View File

@ -28,7 +28,7 @@ INSERT INTO normal SELECT
number
FROM numbers(100000);
SET allow_experimental_projection_optimization=1, optimize_aggregation_in_order=1, force_optimize_projection=1;
SET optimize_use_projections=1, optimize_aggregation_in_order=1, force_optimize_projection=1;
WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM normal WHERE ts > '2021-12-06 22:00:00' GROUP BY a ORDER BY v LIMIT 5;
WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM normal WHERE ts > '2021-12-06 22:00:00' GROUP BY toStartOfHour(ts), a ORDER BY v LIMIT 5;
@ -60,7 +60,7 @@ INSERT INTO agg SELECT
number
FROM numbers(100000);
SET allow_experimental_projection_optimization=1, optimize_aggregation_in_order=1, force_optimize_projection = 1;
SET optimize_use_projections=1, optimize_aggregation_in_order=1, force_optimize_projection = 1;
WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM agg WHERE ts > '2021-12-06 22:00:00' GROUP BY a ORDER BY v LIMIT 5;
WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM agg WHERE ts > '2021-12-06 22:00:00' GROUP BY toStartOfHour(ts), a ORDER BY v LIMIT 5;

View File

@ -1,4 +1,4 @@
set allow_experimental_projection_optimization = 1;
set optimize_use_projections = 1;
drop table if exists x;

View File

@ -1,4 +1,4 @@
set allow_experimental_projection_optimization = 1;
set optimize_use_projections = 1;
drop table if exists t;

View File

@ -4,7 +4,7 @@ create table t (i int, j int, k int, projection p (select * order by j)) engine
insert into t select number, number, number from numbers(10);
set allow_experimental_projection_optimization = 1, max_rows_to_read = 3;
set optimize_use_projections = 1, max_rows_to_read = 3;
select * from t where i < 5 and j in (1, 2);

View File

@ -3,7 +3,7 @@ create table x (i UInt64, j UInt64, k UInt64, projection agg (select sum(j), avg
insert into x values (1, 2, 3);
set allow_experimental_projection_optimization = 1, use_index_for_in_with_subqueries = 0;
set optimize_use_projections = 1, use_index_for_in_with_subqueries = 0;
select sum(j), avg(k) from x where i in (select number from numbers(4));

View File

@ -4,6 +4,6 @@ CREATE TABLE t (`key` UInt32, `created_at` Date, `value` UInt32, PROJECTION xxx
INSERT INTO t SELECT 1 AS key, today() + (number % 30), number FROM numbers(1000);
ALTER TABLE t UPDATE value = 0 WHERE (value > 0) AND (created_at >= '2021-12-21') SETTINGS allow_experimental_projection_optimization = 1;
ALTER TABLE t UPDATE value = 0 WHERE (value > 0) AND (created_at >= '2021-12-21') SETTINGS optimize_use_projections = 1;
DROP TABLE IF EXISTS t;

View File

@ -12,7 +12,7 @@ optimize table t final;
alter table t materialize projection p_norm settings mutations_sync = 1;
set allow_experimental_projection_optimization = 1, max_rows_to_read = 3;
set optimize_use_projections = 1, max_rows_to_read = 3;
select c18 from t where c1 < 0;

View File

@ -2,20 +2,20 @@ drop table if exists t;
create table t (s UInt16, l UInt16, projection p (select s, l order by l)) engine MergeTree order by s;
select s from t join (select toUInt16(1) as s) x using (s) order by s settings allow_experimental_projection_optimization = 1;
select s from t join (select toUInt16(1) as s) x using (s) order by s settings allow_experimental_projection_optimization = 0;
select s from t join (select toUInt16(1) as s) x using (s) order by s settings optimize_use_projections = 1;
select s from t join (select toUInt16(1) as s) x using (s) order by s settings optimize_use_projections = 0;
drop table t;
drop table if exists mt;
create table mt (id1 Int8, id2 Int8) Engine=MergeTree order by tuple();
select alias1 from (select id1, id1 as alias1 from mt) as l all inner join (select id2 as alias1 from mt) as t using (alias1) order by l.id1 settings allow_experimental_projection_optimization = 1;
select id1 from mt all inner join (select id2 as id1 from mt) as t using (id1) order by id1 settings allow_experimental_projection_optimization = 1;
select id2 as id1 from mt all inner join (select id1 from mt) as t using (id1) order by id1 settings allow_experimental_projection_optimization = 1;
select alias1 from (select id1, id1 as alias1 from mt) as l all inner join (select id2 as alias1 from mt) as t using (alias1) order by l.id1 settings optimize_use_projections = 1;
select id1 from mt all inner join (select id2 as id1 from mt) as t using (id1) order by id1 settings optimize_use_projections = 1;
select id2 as id1 from mt all inner join (select id1 from mt) as t using (id1) order by id1 settings optimize_use_projections = 1;
drop table mt;
drop table if exists j;
create table j (id1 Int8, id2 Int8, projection p (select id1, id2 order by id2)) Engine=MergeTree order by id1 settings index_granularity = 1;
insert into j select number, number from numbers(10);
select alias1 from (select id1, id1 as alias1 from j) as l all inner join (select id2, id2 as alias1 from j where id2 in (1, 2, 3)) as t using (alias1) where id2 in (2, 3, 4) order by id1 settings allow_experimental_projection_optimization = 1;
select alias1 from (select id1, id1 as alias1 from j) as l all inner join (select id2, id2 as alias1 from j where id2 in (1, 2, 3)) as t using (alias1) where id2 in (2, 3, 4) order by id1 settings optimize_use_projections = 1;
drop table j;

View File

@ -4,6 +4,6 @@ create table t (x UInt32) engine = MergeTree order by tuple() settings index_gra
insert into t select number from numbers(100);
alter table t add projection p (select uniqHLL12(x));
insert into t select number + 100 from numbers(100);
select uniqHLL12(x) from t settings allow_experimental_projection_optimization = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError 307 }
select uniqHLL12(x) from t settings optimize_use_projections = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError 307 }
drop table if exists t;

View File

@ -4,7 +4,7 @@ create table projection_test (`sum(block_count)` UInt64, domain_alias UInt64 ali
insert into projection_test with rowNumberInAllBlocks() as id select 1, toDateTime('2020-10-24 00:00:00') + (id / 20), toString(id % 100), * from generateRandom('x_id String, y_id String, block_count Int64, retry_count Int64, duration Int64, kbytes Int64, buffer_time Int64, first_time Int64, total_bytes Nullable(UInt64), valid_bytes Nullable(UInt64), completed_bytes Nullable(UInt64), fixed_bytes Nullable(UInt64), force_bytes Nullable(UInt64)', 10, 10, 1) limit 1000 settings max_threads = 1;
set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
set optimize_use_projections = 1, force_optimize_projection = 1;
select * from projection_test; -- { serverError 584 }
select toStartOfMinute(datetime) dt_m, countIf(first_time = 0) from projection_test join (select 1) x on 1 where domain = '1' group by dt_m order by dt_m; -- { serverError 584 }
@ -47,6 +47,6 @@ drop table if exists projection_test;
drop table if exists projection_without_key;
create table projection_without_key (key UInt32, PROJECTION x (SELECT max(key))) engine MergeTree order by key;
insert into projection_without_key select number from numbers(1000);
set force_optimize_projection = 1, allow_experimental_projection_optimization = 1;
set force_optimize_projection = 1, optimize_use_projections = 1;
select max(key) from projection_without_key;
drop table projection_without_key;

View File

@ -2,7 +2,7 @@ drop table if exists projection_without_key;
create table projection_without_key (key UInt32, PROJECTION x (SELECT sum(key) group by key % 3)) engine MergeTree order by key;
insert into projection_without_key select number from numbers(1000);
select sum(key) from projection_without_key settings allow_experimental_projection_optimization = 1;
select sum(key) from projection_without_key settings allow_experimental_projection_optimization = 0;
select sum(key) from projection_without_key settings optimize_use_projections = 1;
select sum(key) from projection_without_key settings optimize_use_projections = 0;
drop table projection_without_key;

View File

@ -8,7 +8,7 @@ create table projection_test (dt DateTime, cost Int64, projection p (select toSt
insert into projection_test with rowNumberInAllBlocks() as id select toDateTime('2020-10-24 00:00:00') + (id / 20), * from generateRandom('cost Int64', 10, 10, 1) limit 1000 settings max_threads = 1;
set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
set optimize_use_projections = 1, force_optimize_projection = 1;
select toStartOfMinute(dt) dt_m, sum(cost) from projection_test group by dt_m;
select sum(cost) from projection_test;

View File

@ -38,7 +38,7 @@ function run_query()
echo "$query"
local opts=(
--allow_experimental_projection_optimization 1
--optimize_use_projections 1
--force_optimize_projection 1
--log_processors_profiles 1
--query_id "$query_id"

View File

@ -44,7 +44,7 @@ function run_query()
echo "$query"
local opts=(
--allow_experimental_projection_optimization 1
--optimize_use_projections 1
--force_optimize_projection 1
--log_processors_profiles 1
--query_id "$query_id"

View File

@ -72,8 +72,8 @@ SYSTEM SYNC REPLICA wikistat2;
-- Such condition will lead to successful queries.
SELECT 0 FROM numbers(5) WHERE sleepEachRow(1) = 1;
select sum(hits), count() from wikistat1 GROUP BY project, subproject, path settings allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
select sum(hits), count() from wikistat2 GROUP BY project, subproject, path settings allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
select sum(hits), count() from wikistat1 GROUP BY project, subproject, path settings optimize_use_projections = 1, force_optimize_projection = 1;
select sum(hits), count() from wikistat2 GROUP BY project, subproject, path settings optimize_use_projections = 1, force_optimize_projection = 1;
DROP TABLE wikistat1;
DROP TABLE wikistat2;

View File

@ -3,7 +3,7 @@
set max_threads = 16;
set allow_aggregate_partitions_independently = 1;
set force_aggregate_partitions_independently = 1;
set allow_experimental_projection_optimization = 0;
set optimize_use_projections = 0;
create table t1(a UInt32) engine=MergeTree order by tuple() partition by a % 4 settings index_granularity = 8192, index_granularity_bytes = 10485760;

View File

@ -9,6 +9,6 @@ ENGINE = Memory;
INSERT INTO data_a_02187
SELECT *
FROM system.one
SETTINGS max_block_size = '1', min_insert_block_size_rows = '65536', min_insert_block_size_bytes = '0', max_insert_threads = '0', max_threads = '3', receive_timeout = '10', receive_data_timeout_ms = '10000', connections_with_failover_max_tries = '0', extremes = '1', use_uncompressed_cache = '0', optimize_move_to_prewhere = '1', optimize_move_to_prewhere_if_final = '0', replication_alter_partitions_sync = '2', totals_mode = 'before_having', allow_suspicious_low_cardinality_types = '1', compile_expressions = '1', min_count_to_compile_expression = '0', group_by_two_level_threshold = '100', distributed_aggregation_memory_efficient = '0', distributed_group_by_no_merge = '1', optimize_distributed_group_by_sharding_key = '1', optimize_skip_unused_shards = '1', optimize_skip_unused_shards_rewrite_in = '1', force_optimize_skip_unused_shards = '2', optimize_skip_unused_shards_nesting = '1', force_optimize_skip_unused_shards_nesting = '2', merge_tree_min_rows_for_concurrent_read = '10000', force_primary_key = '1', network_compression_method = 'ZSTD', network_zstd_compression_level = '7', log_queries = '0', log_queries_min_type = 'QUERY_FINISH', distributed_product_mode = 'local', insert_quorum = '2', insert_quorum_timeout = '0', insert_quorum_parallel = '0', select_sequential_consistency = '1', join_use_nulls = '1', any_join_distinct_right_table_keys = '1', preferred_max_column_in_block_size_bytes = '32', insert_distributed_sync = '1', insert_allow_materialized_columns = '1', use_index_for_in_with_subqueries = '1', joined_subquery_requires_alias = '0', empty_result_for_aggregation_by_empty_set = '1', allow_suspicious_codecs = '1', query_profiler_real_time_period_ns = '0', query_profiler_cpu_time_period_ns = '0', opentelemetry_start_trace_probability = '1', max_rows_to_read = '1000000', read_overflow_mode = 'break', max_rows_to_group_by = '10', group_by_overflow_mode = 'any', max_rows_to_sort = '100', sort_overflow_mode = 'break', max_result_rows = '10', max_execution_time = '3', max_execution_speed = '1', max_bytes_in_join = '100', join_algorithm = 'partial_merge', max_memory_usage = '1099511627776', log_query_threads = '1', send_logs_level = 'fatal', enable_optimize_predicate_expression = '1', prefer_localhost_replica = '1', optimize_read_in_order = '1', optimize_aggregation_in_order = '1', read_in_order_two_level_merge_threshold = '1', allow_introspection_functions = '1', check_query_single_value_result = '1', allow_experimental_live_view = '1', default_table_engine = 'Memory', mutations_sync = '2', convert_query_to_cnf = '0', optimize_arithmetic_operations_in_aggregate_functions = '1', optimize_duplicate_order_by_and_distinct = '0', optimize_multiif_to_if = '0', optimize_monotonous_functions_in_order_by = '1', optimize_functions_to_subcolumns = '1', optimize_using_constraints = '1', optimize_substitute_columns = '1', optimize_append_index = '1', transform_null_in = '1', allow_experimental_geo_types = '1', data_type_default_nullable = '1', cast_keep_nullable = '1', cast_ipv4_ipv6_default_on_conversion_error = '0', system_events_show_zero_values = '1', enable_global_with_statement = '1', optimize_on_insert = '0', optimize_rewrite_sum_if_to_count_if = '1', distributed_ddl_output_mode = 'throw', union_default_mode = 'ALL', optimize_aggregators_of_group_by_keys = '1', optimize_group_by_function_keys = '1', short_circuit_function_evaluation = 'enable', async_insert = '1', enable_filesystem_cache = '0', allow_deprecated_database_ordinary = '1', allow_deprecated_syntax_for_merge_tree = '1', 
allow_experimental_nlp_functions = '1', allow_experimental_object_type = '1', allow_experimental_map_type = '1', allow_experimental_projection_optimization = '1', input_format_null_as_default = '1', input_format_ipv4_default_on_conversion_error = '0', input_format_ipv6_default_on_conversion_error = '0', output_format_json_named_tuples_as_objects = '1', output_format_write_statistics = '0', output_format_pretty_row_numbers = '1';
SETTINGS max_block_size = '1', min_insert_block_size_rows = '65536', min_insert_block_size_bytes = '0', max_insert_threads = '0', max_threads = '3', receive_timeout = '10', receive_data_timeout_ms = '10000', connections_with_failover_max_tries = '0', extremes = '1', use_uncompressed_cache = '0', optimize_move_to_prewhere = '1', optimize_move_to_prewhere_if_final = '0', replication_alter_partitions_sync = '2', totals_mode = 'before_having', allow_suspicious_low_cardinality_types = '1', compile_expressions = '1', min_count_to_compile_expression = '0', group_by_two_level_threshold = '100', distributed_aggregation_memory_efficient = '0', distributed_group_by_no_merge = '1', optimize_distributed_group_by_sharding_key = '1', optimize_skip_unused_shards = '1', optimize_skip_unused_shards_rewrite_in = '1', force_optimize_skip_unused_shards = '2', optimize_skip_unused_shards_nesting = '1', force_optimize_skip_unused_shards_nesting = '2', merge_tree_min_rows_for_concurrent_read = '10000', force_primary_key = '1', network_compression_method = 'ZSTD', network_zstd_compression_level = '7', log_queries = '0', log_queries_min_type = 'QUERY_FINISH', distributed_product_mode = 'local', insert_quorum = '2', insert_quorum_timeout = '0', insert_quorum_parallel = '0', select_sequential_consistency = '1', join_use_nulls = '1', any_join_distinct_right_table_keys = '1', preferred_max_column_in_block_size_bytes = '32', insert_distributed_sync = '1', insert_allow_materialized_columns = '1', use_index_for_in_with_subqueries = '1', joined_subquery_requires_alias = '0', empty_result_for_aggregation_by_empty_set = '1', allow_suspicious_codecs = '1', query_profiler_real_time_period_ns = '0', query_profiler_cpu_time_period_ns = '0', opentelemetry_start_trace_probability = '1', max_rows_to_read = '1000000', read_overflow_mode = 'break', max_rows_to_group_by = '10', group_by_overflow_mode = 'any', max_rows_to_sort = '100', sort_overflow_mode = 'break', max_result_rows = '10', max_execution_time = '3', max_execution_speed = '1', max_bytes_in_join = '100', join_algorithm = 'partial_merge', max_memory_usage = '1099511627776', log_query_threads = '1', send_logs_level = 'fatal', enable_optimize_predicate_expression = '1', prefer_localhost_replica = '1', optimize_read_in_order = '1', optimize_aggregation_in_order = '1', read_in_order_two_level_merge_threshold = '1', allow_introspection_functions = '1', check_query_single_value_result = '1', allow_experimental_live_view = '1', default_table_engine = 'Memory', mutations_sync = '2', convert_query_to_cnf = '0', optimize_arithmetic_operations_in_aggregate_functions = '1', optimize_duplicate_order_by_and_distinct = '0', optimize_multiif_to_if = '0', optimize_monotonous_functions_in_order_by = '1', optimize_functions_to_subcolumns = '1', optimize_using_constraints = '1', optimize_substitute_columns = '1', optimize_append_index = '1', transform_null_in = '1', allow_experimental_geo_types = '1', data_type_default_nullable = '1', cast_keep_nullable = '1', cast_ipv4_ipv6_default_on_conversion_error = '0', system_events_show_zero_values = '1', enable_global_with_statement = '1', optimize_on_insert = '0', optimize_rewrite_sum_if_to_count_if = '1', distributed_ddl_output_mode = 'throw', union_default_mode = 'ALL', optimize_aggregators_of_group_by_keys = '1', optimize_group_by_function_keys = '1', short_circuit_function_evaluation = 'enable', async_insert = '1', enable_filesystem_cache = '0', allow_deprecated_database_ordinary = '1', allow_deprecated_syntax_for_merge_tree = '1', 
allow_experimental_nlp_functions = '1', allow_experimental_object_type = '1', allow_experimental_map_type = '1', optimize_use_projections = '1', input_format_null_as_default = '1', input_format_ipv4_default_on_conversion_error = '0', input_format_ipv6_default_on_conversion_error = '0', output_format_json_named_tuples_as_objects = '1', output_format_write_statistics = '0', output_format_pretty_row_numbers = '1';
DROP TABLE data_a_02187;