Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-12-05 14:02:21 +00:00)
Merge remote-tracking branch 'origin/master' into fill_with_by_sorting_prefix_2
Commit: 268d7d70fc
README.md (12 changed lines)
@@ -21,11 +21,17 @@ curl https://clickhouse.com/ | sh
 * [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.

 ## Upcoming Events

-* [**ClickHouse Spring Meetup in Manhattan**](https://www.meetup.com/clickhouse-new-york-user-group/events/292517734) - April 26 - It's spring, and it's time to meet again in the city! Talks include: "Building a domain specific query language on top of Clickhouse", "A Galaxy of Information", "Our Journey to ClickHouse Cloud from Redshift", and a ClickHouse update!
 * [**v23.4 Release Webinar**](https://clickhouse.com/company/events/v23-4-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-04) - April 26 - 23.4 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
-* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - May 16 - Save the date! ClickHouse is coming back to Berlin. We're excited to announce an upcoming ClickHouse Meetup that you won't want to miss. Join us as we gather together to discuss the latest in the world of ClickHouse and share user stories.
+* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - May 16
+* [**ClickHouse Meetup in Barcelona**](https://www.meetup.com/clickhouse-barcelona-user-group/events/292892669) - May 25
+* [**ClickHouse Meetup in London**](https://www.meetup.com/clickhouse-london-user-group/events/292892824) - May 25
+* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/293426725/) - Jun 7
+* [**ClickHouse Meetup in Stockholm**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - Jun 13

+Also, keep an eye out for upcoming meetups in Amsterdam, Boston, NYC, Beijing, and Toronto. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.

 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
-* **Recording available**: [**v23.3 Release Webinar**](https://www.youtube.com/watch?v=ISaGUjvBNao) UNDROP TABLE, server settings introspection, nested dynamic disks, MySQL compatibility, parseDateTime, Lightweight Deletes, Parallel Replicas, integrations updates, and so much more! Watch it now!
+* **Recording available**: [**v23.4 Release Webinar**](https://www.youtube.com/watch?v=4rrf6bk_mOg) UNDROP TABLE, server settings introspection, nested dynamic disks, MySQL compatibility, parseDateTime, Lightweight Deletes, Parallel Replicas, integrations updates, and so much more! Watch it now!
 * **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
@@ -1125,6 +1125,12 @@ If unsuccessful, several attempts are made to connect to various replicas.

 Default value: 1000.

+## connect_timeout_with_failover_secure_ms
+
+Connection timeout for selecting first healthy replica (for secure connections)
+
+Default value: 1000.
+
 ## connection_pool_max_wait_ms {#connection-pool-max-wait-ms}

 The wait time in milliseconds for a connection when the connection pool is full.
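For context: both of the timeouts documented above are ordinary query-level settings, so they can also be overridden for a single query. A minimal sketch (the values here are arbitrary):

```sql
-- Arbitrary values for illustration; both settings default to 1000 (ms).
SELECT 1
SETTINGS connect_timeout_with_failover_secure_ms = 3000,
         connection_pool_max_wait_ms = 5000;
```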
@@ -3562,7 +3568,7 @@ Default value: `1`.

 If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays.

-## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
+## optimize_use_projections {#optimize_use_projections}

 Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md/#projections) optimization when processing `SELECT` queries.

@@ -3575,7 +3581,7 @@ Default value: `1`.

 ## force_optimize_projection {#force-optimize-projection}

-Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md/#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
+Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md/#projections) in `SELECT` queries, when projection optimization is enabled (see [optimize_use_projections](#optimize_use_projections) setting).

 Possible values:

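For context: a minimal sketch of how the two settings above interact, on a hypothetical table with a single projection (it mirrors the functional tests touched later in this commit). With `optimize_use_projections = 1` the planner may serve a query from a projection; with `force_optimize_projection = 1` a query that ends up using no projection fails with a `PROJECTION_NOT_USED` error.

```sql
-- Hypothetical table and projection, for illustration only.
create table t (i int, j int) engine = MergeTree order by i;
alter table t add projection x (select * order by j);
insert into t values (1, 4);

set optimize_use_projections = 1, force_optimize_projection = 1;

-- Can be answered from projection x; if no projection were usable,
-- the query would throw PROJECTION_NOT_USED instead.
select i from t prewhere j = 4;
```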
@@ -391,7 +391,7 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT
 ## Projections {#projections}
 Projections are similar to [materialized views](../../../sql-reference/statements/create/view.md#materialized) but are defined at the level of data parts. This provides consistency guarantees along with automatic use in queries.

-Projections are an experimental feature. To enable support for projections, set the [allow_experimental_projection_optimization](../../../operations/settings/settings.md#allow-experimental-projection-optimization) setting to `1`. See also the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting.
+Projections are an experimental feature. To enable support for projections, set the [optimize_use_projections](../../../operations/settings/settings.md#optimize_use_projections) setting to `1`. See also the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting.

 Projections are not supported in `SELECT` queries with the [FINAL](../../../sql-reference/statements/select/from.md#select-from-final) modifier.

@@ -3588,7 +3588,7 @@ SETTINGS index_granularity = 8192 │

 A string with the identifier of the snapshot from which the [initial dump of PostgreSQL tables](../../engines/database-engines/materialized-postgresql.md) will be performed. This setting must be used together with [materialized_postgresql_replication_slot](#materialized-postgresql-replication-slot).

-## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
+## optimize_use_projections {#optimize_use_projections}

 Enables or disables support for [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) when processing `SELECT` queries.

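For context: `materialized_postgresql_snapshot` is given when creating a MaterializedPostgreSQL database. A hedged sketch, in which the host, credentials, slot name, and snapshot identifier are all made up:

```sql
-- Every name, credential, and the snapshot id below is hypothetical.
CREATE DATABASE pg_mirror
ENGINE = MaterializedPostgreSQL('postgres-host:5432', 'source_db', 'user', 'password')
SETTINGS materialized_postgresql_replication_slot = 'clickhouse_slot',
         materialized_postgresql_snapshot = '0000000A-0000002B-1';
```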
@@ -3601,7 +3601,7 @@ SETTINGS index_granularity = 8192 │

 ## force_optimize_projection {#force-optimize-projection}

-Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries when projection support is enabled (see the [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
+Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries when projection support is enabled (see the [optimize_use_projections](#optimize_use_projections) setting).

 Possible values:

@@ -1074,7 +1074,7 @@ The corresponding trace log in the ClickHouse server log file confirms that ClickHouse is
 <a href="https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#projections" target="_blank">Projections</a> are currently an experimental feature, so we need to tell ClickHouse:

 ```sql
-SET allow_experimental_projection_optimization = 1;
+SET optimize_use_projections = 1;
 ```

@@ -264,7 +264,9 @@ void ColumnFunction::appendArgument(const ColumnWithTypeAndName & column)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot capture column {} because it has incompatible type: "
             "got {}, but {} is expected.", argument_types.size(), column.type->getName(), argument_types[index]->getName());

-    captured_columns.push_back(column);
+    auto captured_column = column;
+    captured_column.column = captured_column.column->convertToFullColumnIfSparse();
+    captured_columns.push_back(std::move(captured_column));
 }

 DataTypePtr ColumnFunction::getResultType() const

@@ -560,6 +560,7 @@ class IColumn;
     M(Bool, asterisk_include_alias_columns, false, "Include ALIAS columns for wildcard query", 0) \
     M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \
     M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \
+    M(Bool, optimize_use_projections, true, "Automatically choose projections to perform SELECT query", 0) \
     M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \
     M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
     M(Bool, async_query_sending_for_remote, true, "Asynchronously create connections and send query to shards in remote query", 0) \
@@ -715,27 +716,12 @@ class IColumn;
     M(Milliseconds, storage_system_stack_trace_pipe_read_timeout_ms, 100, "Maximum time to read from a pipe for receiving information from the threads when querying the `system.stack_trace` table. This setting is used for testing purposes and not meant to be changed by users.", 0) \
     \
     M(Bool, parallelize_output_from_storages, true, "Parallelize output for reading step from storage. It allows parallelizing query processing right after reading from storage if possible", 0) \
-    \
-    M(Bool, use_with_fill_by_sorting_prefix, false, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. Rows with the same values in sorting prefix will be filled separately", 0) \
-    /** Experimental functions */ \
-    M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \
-    M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \
-    M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions (hashid, etc)", 0) \
-    M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \
     M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \
     M(String, ann_index_select_query_params, "", "Parameters passed to ANN indexes in SELECT queries, the format is 'param1=x, param2=y, ...'", 0) \
-    M(UInt64, max_limit_for_ann_queries, 1000000, "Maximum limit value for using ANN indexes is used to prevent memory overflow in search queries for indexes", 0) \
-    M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \
     M(Bool, count_distinct_optimization, false, "Rewrite count distinct to subquery of group by", 0) \
-    M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \
-    M(TransactionsWaitCSNMode, wait_changes_become_visible_after_commit_mode, TransactionsWaitCSNMode::WAIT_UNKNOWN, "Wait for committed changes to become actually visible in the latest snapshot", 0) \
-    M(Bool, implicit_transaction, false, "If enabled and not already inside a transaction, wraps the query inside a full transaction (begin + commit or rollback)", 0) \
     M(Bool, throw_if_no_data_to_insert, true, "Enables or disables empty INSERTs, enabled by default", 0) \
     M(Bool, compatibility_ignore_auto_increment_in_create_table, false, "Ignore AUTO_INCREMENT keyword in column declaration if true, otherwise return error. It simplifies migration from MySQL", 0) \
     M(Bool, multiple_joins_try_to_keep_original_names, false, "Do not add aliases to top level expression list on multiple joins rewrite", 0) \
-    M(UInt64, grace_hash_join_initial_buckets, 1, "Initial number of grace hash join buckets", 0) \
-    M(UInt64, grace_hash_join_max_buckets, 1024, "Limit on the number of grace hash join buckets", 0) \
-    M(Bool, optimize_distinct_in_order, false, "This optimization has a bug and it is disabled. Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \
     M(Bool, optimize_sorting_by_input_stream_properties, true, "Optimize sorting by sorting properties of input stream", 0) \
     M(UInt64, insert_keeper_max_retries, 20, "Max retries for keeper operations during insert", 0) \
     M(UInt64, insert_keeper_retry_initial_backoff_ms, 100, "Initial backoff timeout for keeper operations during insert", 0) \
@@ -744,10 +730,25 @@ class IColumn;
     M(UInt64, insert_keeper_fault_injection_seed, 0, "0 - random seed, otherwise the setting value", 0) \
     M(Bool, force_aggregation_in_order, false, "Force use of aggregation in order on remote nodes during distributed aggregation. PLEASE, NEVER CHANGE THIS SETTING VALUE MANUALLY!", IMPORTANT) \
     M(UInt64, http_max_request_param_data_size, 10_MiB, "Limit on size of request data used as a query parameter in predefined HTTP requests.", 0) \
+    M(Bool, function_json_value_return_type_allow_nullable, false, "Allow function JSON_VALUE to return nullable type.", 0) \
+    M(Bool, function_json_value_return_type_allow_complex, false, "Allow function JSON_VALUE to return complex type, such as: struct, array, map.", 0) \
+    M(Bool, use_with_fill_by_sorting_prefix, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. Gaps for rows with the different values in sorting prefix will be filled independently", 0) \
+    \
+    /** Experimental functions */ \
+    M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \
+    M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \
+    M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions (hashid, etc)", 0) \
+    M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \
+    M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \
+    M(UInt64, max_limit_for_ann_queries, 1000000, "Maximum limit value for using ANN indexes is used to prevent memory overflow in search queries for indexes", 0) \
+    M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \
+    M(TransactionsWaitCSNMode, wait_changes_become_visible_after_commit_mode, TransactionsWaitCSNMode::WAIT_UNKNOWN, "Wait for committed changes to become actually visible in the latest snapshot", 0) \
+    M(Bool, implicit_transaction, false, "If enabled and not already inside a transaction, wraps the query inside a full transaction (begin + commit or rollback)", 0) \
+    M(UInt64, grace_hash_join_initial_buckets, 1, "Initial number of grace hash join buckets", 0) \
+    M(UInt64, grace_hash_join_max_buckets, 1024, "Limit on the number of grace hash join buckets", 0) \
+    M(Bool, optimize_distinct_in_order, false, "This optimization has a bug and it is disabled. Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \
     M(Bool, allow_experimental_undrop_table_query, false, "Allow to use undrop query to restore dropped table in a limited time", 0) \
     M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \
-    M(Bool, function_json_value_return_type_allow_nullable, false, "Allow function to return nullable type.", 0) \
-    M(Bool, function_json_value_return_type_allow_complex, false, "Allow function to return complex type, such as: struct, array, map.", 0) \
     // End of COMMON_SETTINGS
     // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS.

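The newly added `use_with_fill_by_sorting_prefix` setting is the subject of this branch. Going by its description above, columns that precede the WITH FILL columns in ORDER BY form a sorting prefix, and gaps are filled independently per prefix value. A sketch of that behavior on hypothetical data:

```sql
-- Hypothetical data: two sensors, each with gaps between timestamps.
CREATE TABLE events (sensor_id UInt64, ts UInt64, value Float64)
ENGINE = MergeTree ORDER BY (sensor_id, ts);

INSERT INTO events VALUES (1, 1, 0.1), (1, 4, 0.4), (2, 2, 2.2), (2, 5, 2.5);

-- sensor_id precedes the WITH FILL column, so it forms the sorting prefix;
-- with the setting enabled, gaps in ts should be filled per sensor_id
-- (1..4 for sensor 1, 2..5 for sensor 2) rather than across the whole result.
SELECT sensor_id, ts, value
FROM events
ORDER BY sensor_id, ts WITH FILL
SETTINGS use_with_fill_by_sorting_prefix = 1;
```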
@@ -246,7 +246,8 @@ void SerializationInfoByName::writeJSON(WriteBuffer & out) const
     return writeString(oss.str(), out);
 }

-void SerializationInfoByName::readJSON(ReadBuffer & in)
+SerializationInfoByName SerializationInfoByName::readJSON(
+    const NamesAndTypesList & columns, const Settings & settings, ReadBuffer & in)
 {
     String json_str;
     readString(json_str, in);
@@ -262,8 +263,13 @@ void SerializationInfoByName::readJSON(ReadBuffer & in)
             "Unknown version of serialization infos ({}). Should be less or equal than {}",
             object->getValue<size_t>(KEY_VERSION), SERIALIZATION_INFO_VERSION);

+    SerializationInfoByName infos;
     if (object->has(KEY_COLUMNS))
     {
+        std::unordered_map<std::string_view, const IDataType *> column_type_by_name;
+        for (const auto & [name, type] : columns)
+            column_type_by_name.emplace(name, type.get());
+
         auto array = object->getArray(KEY_COLUMNS);
         for (const auto & elem : *array)
         {
@@ -271,13 +277,22 @@ void SerializationInfoByName::readJSON(ReadBuffer & in)

             if (!elem_object->has(KEY_NAME))
                 throw Exception(ErrorCodes::CORRUPTED_DATA,
-                    "Missed field '{}' in SerializationInfo of columns", KEY_NAME);
+                    "Missed field '{}' in serialization infos", KEY_NAME);

             auto name = elem_object->getValue<String>(KEY_NAME);
-            if (auto it = find(name); it != end())
-                it->second->fromJSON(*elem_object);
+            auto it = column_type_by_name.find(name);
+
+            if (it == column_type_by_name.end())
+                throw Exception(ErrorCodes::CORRUPTED_DATA,
+                    "Found unexpected column '{}' in serialization infos", name);
+
+            auto info = it->second->createSerializationInfo(settings);
+            info->fromJSON(*elem_object);
+            infos.emplace(name, std::move(info));
         }
     }

+    return infos;
 }

 }
@@ -96,8 +96,10 @@ using MutableSerializationInfos = std::vector<MutableSerializationInfoPtr>;
 class SerializationInfoByName : public std::map<String, MutableSerializationInfoPtr>
 {
 public:
+    using Settings = SerializationInfo::Settings;
+
     SerializationInfoByName() = default;
-    SerializationInfoByName(const NamesAndTypesList & columns, const SerializationInfo::Settings & settings);
+    SerializationInfoByName(const NamesAndTypesList & columns, const Settings & settings);

     void add(const Block & block);
     void add(const SerializationInfoByName & other);
@@ -108,7 +110,9 @@ public:
     void replaceData(const SerializationInfoByName & other);

     void writeJSON(WriteBuffer & out) const;
-    void readJSON(ReadBuffer & in);
+
+    static SerializationInfoByName readJSON(
+        const NamesAndTypesList & columns, const Settings & settings, ReadBuffer & in);
 };

 }
@@ -17,7 +17,7 @@ QueryPlanOptimizationSettings QueryPlanOptimizationSettings::fromSettings(const
     settings.remove_redundant_sorting = from.query_plan_remove_redundant_sorting;
     settings.aggregate_partitions_independently = from.allow_aggregate_partitions_independently;
     settings.remove_redundant_distinct = from.query_plan_remove_redundant_distinct;
-    settings.optimize_projection = from.allow_experimental_projection_optimization && from.query_plan_optimize_projection;
+    settings.optimize_projection = from.optimize_use_projections && from.query_plan_optimize_projection;
     settings.force_use_projection = settings.optimize_projection && from.force_optimize_projection;
     return settings;
 }
@@ -170,7 +170,7 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s
     if (optimization_settings.force_use_projection && has_reading_from_mt && num_applied_projection == 0)
         throw Exception(
             ErrorCodes::PROJECTION_NOT_USED,
-            "No projection is used when allow_experimental_projection_optimization = 1 and force_optimize_projection = 1");
+            "No projection is used when optimize_use_projections = 1 and force_optimize_projection = 1");
 }

 }
@@ -1341,11 +1341,11 @@ void IMergeTreeDataPart::loadColumns(bool require)
         .choose_kind = false,
     };

-    SerializationInfoByName infos(loaded_columns, settings);
+    SerializationInfoByName infos;
     if (metadata_manager->exists(SERIALIZATION_FILE_NAME))
     {
         auto in = metadata_manager->read(SERIALIZATION_FILE_NAME);
-        infos.readJSON(*in);
+        infos = SerializationInfoByName::readJSON(loaded_columns, settings, *in);
     }

     int32_t loaded_metadata_version;
@@ -326,6 +326,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
     if (!ctx->need_remove_expired_values)
     {
         size_t expired_columns = 0;
+        auto part_serialization_infos = global_ctx->new_data_part->getSerializationInfos();

         for (auto & [column_name, ttl] : global_ctx->new_data_part->ttl_infos.columns_ttl)
         {
@@ -335,6 +336,8 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
                 LOG_TRACE(ctx->log, "Adding expired column {} for part {}", column_name, global_ctx->new_data_part->name);
                 std::erase(global_ctx->gathering_column_names, column_name);
                 std::erase(global_ctx->merging_column_names, column_name);
+                std::erase(global_ctx->all_column_names, column_name);
+                part_serialization_infos.erase(column_name);
                 ++expired_columns;
             }
         }
@@ -343,6 +346,12 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
         {
             global_ctx->gathering_columns = global_ctx->gathering_columns.filter(global_ctx->gathering_column_names);
             global_ctx->merging_columns = global_ctx->merging_columns.filter(global_ctx->merging_column_names);
+            global_ctx->storage_columns = global_ctx->storage_columns.filter(global_ctx->all_column_names);
+
+            global_ctx->new_data_part->setColumns(
+                global_ctx->storage_columns,
+                part_serialization_infos,
+                global_ctx->metadata_snapshot->getMetadataVersion());
         }
     }

@@ -6242,7 +6242,7 @@ bool MergeTreeData::mayBenefitFromIndexForIn(
                 return true;
     }

-    if (query_settings.allow_experimental_projection_optimization)
+    if (query_settings.optimize_use_projections)
     {
         for (const auto & projection : metadata_snapshot->getProjections())
             if (projection.isPrimaryKeyColumnPossiblyWrappedInFunctions(ast))
@@ -6613,7 +6613,7 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
     if (!query_info.syntax_analyzer_result)
         return std::nullopt;

-    if (!settings.allow_experimental_projection_optimization || query_info.ignore_projections || query_info.is_projection_query
+    if (!settings.optimize_use_projections || query_info.ignore_projections || query_info.is_projection_query
         || settings.aggregate_functions_null_for_empty /* projections don't work correctly with this setting */)
         return std::nullopt;

@@ -175,10 +175,10 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
             query_info.merge_tree_select_result_ptr,
             enable_parallel_reading);

-    if (!step && settings.allow_experimental_projection_optimization && settings.force_optimize_projection
+    if (!step && settings.optimize_use_projections && settings.force_optimize_projection
         && !metadata_for_reading->projections.empty() && !settings.query_plan_optimize_projection)
         throw Exception(ErrorCodes::PROJECTION_NOT_USED,
-                        "No projection is used when allow_experimental_projection_optimization = 1 and force_optimize_projection = 1");
+                        "No projection is used when optimize_use_projections = 1 and force_optimize_projection = 1");

     auto plan = std::make_unique<QueryPlan>();
     if (step)
@@ -94,12 +94,13 @@ IMergeTreeDataPart::Checksums checkDataPart(
     };

     auto ratio_of_defaults = data_part->storage.getSettings()->ratio_of_defaults_for_sparse_serialization;
-    SerializationInfoByName serialization_infos(columns_txt, SerializationInfo::Settings{ratio_of_defaults, false});
+    SerializationInfoByName serialization_infos;

     if (data_part_storage.exists(IMergeTreeDataPart::SERIALIZATION_FILE_NAME))
     {
         auto serialization_file = data_part_storage.readFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, {}, std::nullopt, std::nullopt);
-        serialization_infos.readJSON(*serialization_file);
+        SerializationInfo::Settings settings{ratio_of_defaults, false};
+        serialization_infos = SerializationInfoByName::readJSON(columns_txt, settings, *serialization_file);
     }

     auto get_serialization = [&serialization_infos](const auto & column)
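For context: the `serialization.json` file read above records, for each part, which columns were written with sparse serialization; the threshold comes from the `ratio_of_defaults_for_sparse_serialization` MergeTree setting seen in this hunk. A hypothetical illustration:

```sql
-- Hypothetical table; a column whose share of default values exceeds
-- the threshold below may be stored with sparse serialization.
CREATE TABLE sparse_demo (id UInt64, v UInt64)
ENGINE = MergeTree ORDER BY id
SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9;

INSERT INTO sparse_demo SELECT number, 0 FROM numbers(1000);

-- The serialization kind chosen per column should be visible here:
SELECT column, serialization_kind
FROM system.parts_columns
WHERE table = 'sparse_demo' AND active;
```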
@@ -1,7 +1,7 @@
 <test>
     <settings>
         <max_insert_threads>8</max_insert_threads>
-        <allow_experimental_projection_optimization>0</allow_experimental_projection_optimization>
+        <optimize_use_projections>0</optimize_use_projections>
     </settings>

     <create_query>
@@ -1,7 +1,7 @@
 <test>
     <settings>
         <max_insert_threads>8</max_insert_threads>
-        <allow_experimental_projection_optimization>0</allow_experimental_projection_optimization>
+        <optimize_use_projections>0</optimize_use_projections>
     </settings>

     <substitutions>
@@ -122,7 +122,7 @@ create table pl (dt DateTime, i int, projection p (select sum(i) group by toStar
 insert into pl values ('2020-10-24', 1);

 set max_rows_to_read = 2;
-select sum(i) from pd group by dt_m settings allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+select sum(i) from pd group by dt_m settings optimize_use_projections = 1, force_optimize_projection = 1;

 drop table pd;
 drop table pl;
@@ -1,4 +1,4 @@
-set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+set optimize_use_projections = 1, force_optimize_projection = 1;

 drop table if exists tp;

@@ -8,22 +8,22 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 $CLICKHOUSE_CLIENT -q "CREATE TABLE test_agg_proj (x Int32, y Int32, PROJECTION x_plus_y (SELECT sum(x - y), argMax(x, y) group by x + y)) ENGINE = MergeTree ORDER BY tuple() settings index_granularity = 1"
 $CLICKHOUSE_CLIENT -q "insert into test_agg_proj select intDiv(number, 2), -intDiv(number,3) - 1 from numbers(100)"

-$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"

-$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select (x + y) * 2, sum(x - y) * 2 as s from test_agg_proj group by x + y order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"

-$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select intDiv(x + y, 2) as v, intDiv(x + y, 3), sum(x - y) as s from test_agg_proj group by intDiv(x + y, 2), intDiv(x + y, 3) order by s desc, v limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"

-$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(x, y) * sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"

-$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select x + y + 1, argMax(y, x), sum(x - y) as s from test_agg_proj group by x + y + 1 order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"

-$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1"
-$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1 format JSON" | grep "rows_read"
+$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings optimize_use_projections=1"
+$CLICKHOUSE_CLIENT -q "select x + y, sum(x - y) as s from test_agg_proj prewhere (x + y) % 2 = 1 group by x + y order by s desc limit 5 settings optimize_use_projections=1 format JSON" | grep "rows_read"

 $CLICKHOUSE_CLIENT -q "drop table test_agg_proj"
@@ -2,7 +2,7 @@ drop table if exists tp;

 create table tp (d1 Int32, d2 Int32, eventcnt Int64, projection p (select sum(eventcnt) group by d1)) engine = MergeTree order by (d1, d2);

-set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+set optimize_use_projections = 1, force_optimize_projection = 1;

 select sum(eventcnt) eventcnt, d1 from tp group by d1;

@@ -4,7 +4,7 @@ create table d (i int, j int) engine MergeTree partition by i % 2 order by tuple

 insert into d select number, number from numbers(10000);

-set max_rows_to_read = 2, allow_experimental_projection_optimization = 1;
+set max_rows_to_read = 2, optimize_use_projections = 1;

 select min(i), max(i), count() from d;
 select min(i), max(i), count() from d group by _partition_id order by _partition_id;
@@ -9,7 +9,7 @@ alter table t add projection x (select * order by j);
 insert into t values (1, 4);
 insert into t values (1, 5);

-set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+set optimize_use_projections = 1, force_optimize_projection = 1;

 select i from t prewhere j = 4;

@@ -1,5 +1,5 @@
 select where x < 10
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 0, optimize_use_projections = 0
 0 4294967295
 1 4294967294
 2 4294967293
@@ -10,7 +10,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
 7 4294967288
 8 4294967287
 9 4294967286
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 0, optimize_use_projections = 1
 0 4294967295
 1 4294967294
 2 4294967293
@@ -21,7 +21,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
 7 4294967288
 8 4294967287
 9 4294967286
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 1, optimize_use_projections = 0
 0 4294967295
 1 4294967294
 2 4294967293
@@ -32,7 +32,7 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
 7 4294967288
 8 4294967287
 9 4294967286
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 1, optimize_use_projections = 1
 0 4294967295
 1 4294967294
 2 4294967293
@@ -43,16 +43,16 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
 7 4294967288
 8 4294967287
 9 4294967286
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 0, optimize_use_projections = 0
     "rows_read": 100,
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 0, optimize_use_projections = 1
     "rows_read": 100,
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 1, optimize_use_projections = 0
     "rows_read": 100,
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 1, optimize_use_projections = 1
     "rows_read": 100,
 select where y > 4294967286
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 0, optimize_use_projections = 0
 0 4294967295
 1 4294967294
 2 4294967293
@@ -62,7 +62,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
 6 4294967289
 7 4294967288
 8 4294967287
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 0, optimize_use_projections = 1
 0 4294967295
 1 4294967294
 2 4294967293
@@ -72,7 +72,7 @@ optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
 6 4294967289
 7 4294967288
 8 4294967287
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 1, optimize_use_projections = 0
 0 4294967295
 1 4294967294
 2 4294967293
@@ -82,7 +82,7 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
 6 4294967289
 7 4294967288
 8 4294967287
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 1, optimize_use_projections = 1
 0 4294967295
 1 4294967294
 2 4294967293
@@ -92,12 +92,12 @@ optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
 6 4294967289
 7 4294967288
 8 4294967287
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 0, optimize_use_projections = 0
     "rows_read": 100,
-optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 0, optimize_use_projections = 1
     "rows_read": 100,
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0
+optimize_move_to_prewhere = 1, optimize_use_projections = 0
     "rows_read": 100,
-optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1
+optimize_move_to_prewhere = 1, optimize_use_projections = 1
     "rows_read": 100,
 50
@@ -9,73 +9,73 @@ $CLICKHOUSE_CLIENT -q "insert into test_sort_proj select number, toUInt32(-numbe

 echo "select where x < 10"

-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0"

-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1"

-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0"

-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1"

-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0" | grep rows_read
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0" | grep rows_read

-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1" | grep rows_read
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1" | grep rows_read

-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0" | grep rows_read
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0" | grep rows_read

-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE x < 10 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1" | grep rows_read
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1" | grep rows_read


 echo "select where y > 4294967286"

-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0"

-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1"

-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0"

-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1"


-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 0" | grep rows_read
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 0" | grep rows_read

-echo "optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1"
+echo "optimize_move_to_prewhere = 0, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 0, allow_experimental_projection_optimization = 1" | grep rows_read
+SETTINGS optimize_move_to_prewhere = 0, optimize_use_projections = 1" | grep rows_read

-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 0"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 0" | grep rows_read
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 0" | grep rows_read

-echo "optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1"
+echo "optimize_move_to_prewhere = 1, optimize_use_projections = 1"
 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_sort_proj WHERE y > 4294967286 order by x FORMAT JSON
-SETTINGS optimize_move_to_prewhere = 1, allow_experimental_projection_optimization = 1" | grep rows_read
+SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1" | grep rows_read
|
SETTINGS optimize_move_to_prewhere = 1, optimize_use_projections = 1" | grep rows_read
|
||||||
|
|
||||||
$CLICKHOUSE_CLIENT -q "ALTER TABLE test_sort_proj DELETE WHERE x % 2 = 0 SETTINGS mutations_sync=2;"
|
$CLICKHOUSE_CLIENT -q "ALTER TABLE test_sort_proj DELETE WHERE x % 2 = 0 SETTINGS mutations_sync=2;"
|
||||||
$CLICKHOUSE_CLIENT -q "SELECT count() from test_sort_proj;"
|
$CLICKHOUSE_CLIENT -q "SELECT count() from test_sort_proj;"
|
||||||
|
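The CREATE TABLE for test_sort_proj sits outside this hunk. As a minimal sketch, assuming a two-column table whose projection re-sorts the data by y (all names and types here are guesses, not the actual test DDL):

DROP TABLE IF EXISTS test_sort_proj;
CREATE TABLE test_sort_proj
(
    x UInt32,
    y UInt32,
    -- hypothetical projection: the same rows, sorted by y instead of x
    PROJECTION p_y (SELECT x, y ORDER BY y)
)
ENGINE = MergeTree
ORDER BY x;

With a projection of this shape, the WHERE y > 4294967286 queries can be answered from data sorted by y, which is why the test greps rows_read from the FORMAT JSON output: it should shrink when optimize_use_projections = 1 and stay at the full row count when it is 0.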
@@ -28,7 +28,7 @@ INSERT INTO normal SELECT
number
FROM numbers(100000);

-SET allow_experimental_projection_optimization=1, optimize_aggregation_in_order=1, force_optimize_projection=1;
+SET optimize_use_projections=1, optimize_aggregation_in_order=1, force_optimize_projection=1;

WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM normal WHERE ts > '2021-12-06 22:00:00' GROUP BY a ORDER BY v LIMIT 5;
WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM normal WHERE ts > '2021-12-06 22:00:00' GROUP BY toStartOfHour(ts), a ORDER BY v LIMIT 5;
@@ -60,7 +60,7 @@ INSERT INTO agg SELECT
number
FROM numbers(100000);

-SET allow_experimental_projection_optimization=1, optimize_aggregation_in_order=1, force_optimize_projection = 1;
+SET optimize_use_projections=1, optimize_aggregation_in_order=1, force_optimize_projection = 1;

WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM agg WHERE ts > '2021-12-06 22:00:00' GROUP BY a ORDER BY v LIMIT 5;
WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM agg WHERE ts > '2021-12-06 22:00:00' GROUP BY toStartOfHour(ts), a ORDER BY v LIMIT 5;
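Both of these hunks run with force_optimize_projection = 1, which makes a query fail outright when no projection is used, so each GROUP BY spelling (the alias a as well as the explicit toStartOfHour(ts)) has to be recognized as matching the projection key. The DDL is outside the hunk; a plausible shape, purely for illustration:

CREATE TABLE normal
(
    ts DateTime,
    value UInt64,
    -- hypothetical aggregate projection keyed by the hour
    PROJECTION p_hourly (SELECT toStartOfHour(ts), sum(value) GROUP BY toStartOfHour(ts))
)
ENGINE = MergeTree
ORDER BY ts;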
@@ -1,4 +1,4 @@
-set allow_experimental_projection_optimization = 1;
+set optimize_use_projections = 1;

drop table if exists x;

@@ -1,4 +1,4 @@
-set allow_experimental_projection_optimization = 1;
+set optimize_use_projections = 1;

drop table if exists t;

@@ -4,7 +4,7 @@ create table t (i int, j int, k int, projection p (select * order by j)) engine

insert into t select number, number, number from numbers(10);

-set allow_experimental_projection_optimization = 1, max_rows_to_read = 3;
+set optimize_use_projections = 1, max_rows_to_read = 3;

select * from t where i < 5 and j in (1, 2);

@@ -3,7 +3,7 @@ create table x (i UInt64, j UInt64, k UInt64, projection agg (select sum(j), avg

insert into x values (1, 2, 3);

-set allow_experimental_projection_optimization = 1, use_index_for_in_with_subqueries = 0;
+set optimize_use_projections = 1, use_index_for_in_with_subqueries = 0;

select sum(j), avg(k) from x where i in (select number from numbers(4));

@@ -4,6 +4,6 @@ CREATE TABLE t (`key` UInt32, `created_at` Date, `value` UInt32, PROJECTION xxx

INSERT INTO t SELECT 1 AS key, today() + (number % 30), number FROM numbers(1000);

-ALTER TABLE t UPDATE value = 0 WHERE (value > 0) AND (created_at >= '2021-12-21') SETTINGS allow_experimental_projection_optimization = 1;
+ALTER TABLE t UPDATE value = 0 WHERE (value > 0) AND (created_at >= '2021-12-21') SETTINGS optimize_use_projections = 1;

DROP TABLE IF EXISTS t;
@@ -12,7 +12,7 @@ optimize table t final;

alter table t materialize projection p_norm settings mutations_sync = 1;

-set allow_experimental_projection_optimization = 1, max_rows_to_read = 3;
+set optimize_use_projections = 1, max_rows_to_read = 3;

select c18 from t where c1 < 0;

@@ -2,20 +2,20 @@ drop table if exists t;

create table t (s UInt16, l UInt16, projection p (select s, l order by l)) engine MergeTree order by s;

-select s from t join (select toUInt16(1) as s) x using (s) order by s settings allow_experimental_projection_optimization = 1;
+select s from t join (select toUInt16(1) as s) x using (s) order by s settings optimize_use_projections = 1;
-select s from t join (select toUInt16(1) as s) x using (s) order by s settings allow_experimental_projection_optimization = 0;
+select s from t join (select toUInt16(1) as s) x using (s) order by s settings optimize_use_projections = 0;

drop table t;

drop table if exists mt;
create table mt (id1 Int8, id2 Int8) Engine=MergeTree order by tuple();
-select alias1 from (select id1, id1 as alias1 from mt) as l all inner join (select id2 as alias1 from mt) as t using (alias1) order by l.id1 settings allow_experimental_projection_optimization = 1;
+select alias1 from (select id1, id1 as alias1 from mt) as l all inner join (select id2 as alias1 from mt) as t using (alias1) order by l.id1 settings optimize_use_projections = 1;
-select id1 from mt all inner join (select id2 as id1 from mt) as t using (id1) order by id1 settings allow_experimental_projection_optimization = 1;
+select id1 from mt all inner join (select id2 as id1 from mt) as t using (id1) order by id1 settings optimize_use_projections = 1;
-select id2 as id1 from mt all inner join (select id1 from mt) as t using (id1) order by id1 settings allow_experimental_projection_optimization = 1;
+select id2 as id1 from mt all inner join (select id1 from mt) as t using (id1) order by id1 settings optimize_use_projections = 1;
drop table mt;

drop table if exists j;
create table j (id1 Int8, id2 Int8, projection p (select id1, id2 order by id2)) Engine=MergeTree order by id1 settings index_granularity = 1;
insert into j select number, number from numbers(10);
-select alias1 from (select id1, id1 as alias1 from j) as l all inner join (select id2, id2 as alias1 from j where id2 in (1, 2, 3)) as t using (alias1) where id2 in (2, 3, 4) order by id1 settings allow_experimental_projection_optimization = 1;
+select alias1 from (select id1, id1 as alias1 from j) as l all inner join (select id2, id2 as alias1 from j where id2 in (1, 2, 3)) as t using (alias1) where id2 in (2, 3, 4) order by id1 settings optimize_use_projections = 1;
drop table j;
@@ -4,6 +4,6 @@ create table t (x UInt32) engine = MergeTree order by tuple() settings index_gra
insert into t select number from numbers(100);
alter table t add projection p (select uniqHLL12(x));
insert into t select number + 100 from numbers(100);
-select uniqHLL12(x) from t settings allow_experimental_projection_optimization = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError 307 }
+select uniqHLL12(x) from t settings optimize_use_projections = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError 307 }

drop table if exists t;
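A note on why serverError 307 is the expectation here: the projection is added between the two inserts, so only the second part carries a pre-aggregated uniqHLL12 state, while the first part has to be scanned raw and trips max_bytes_to_read = 400 (error code 307 should be TOO_MANY_BYTES, though the name is from memory). The usual remedy, following the materialize statement used elsewhere in this commit, would be:

alter table t materialize projection p settings mutations_sync = 1;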
@@ -4,7 +4,7 @@ create table projection_test (`sum(block_count)` UInt64, domain_alias UInt64 ali

insert into projection_test with rowNumberInAllBlocks() as id select 1, toDateTime('2020-10-24 00:00:00') + (id / 20), toString(id % 100), * from generateRandom('x_id String, y_id String, block_count Int64, retry_count Int64, duration Int64, kbytes Int64, buffer_time Int64, first_time Int64, total_bytes Nullable(UInt64), valid_bytes Nullable(UInt64), completed_bytes Nullable(UInt64), fixed_bytes Nullable(UInt64), force_bytes Nullable(UInt64)', 10, 10, 1) limit 1000 settings max_threads = 1;

-set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+set optimize_use_projections = 1, force_optimize_projection = 1;

select * from projection_test; -- { serverError 584 }
select toStartOfMinute(datetime) dt_m, countIf(first_time = 0) from projection_test join (select 1) x on 1 where domain = '1' group by dt_m order by dt_m; -- { serverError 584 }
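For context on the two expected failures: under force_optimize_projection = 1 the server rejects any query it cannot answer from a projection (error 584 should correspond to PROJECTION_NOT_USED), and neither SELECT * nor the JOIN query qualifies. The same statements would be expected to succeed once the forcing is dropped, e.g. (illustrative only):

select * from projection_test settings force_optimize_projection = 0;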
@@ -47,6 +47,6 @@ drop table if exists projection_test;
drop table if exists projection_without_key;
create table projection_without_key (key UInt32, PROJECTION x (SELECT max(key))) engine MergeTree order by key;
insert into projection_without_key select number from numbers(1000);
-set force_optimize_projection = 1, allow_experimental_projection_optimization = 1;
+set force_optimize_projection = 1, optimize_use_projections = 1;
select max(key) from projection_without_key;
drop table projection_without_key;
@@ -2,7 +2,7 @@ drop table if exists projection_without_key;

create table projection_without_key (key UInt32, PROJECTION x (SELECT sum(key) group by key % 3)) engine MergeTree order by key;
insert into projection_without_key select number from numbers(1000);
-select sum(key) from projection_without_key settings allow_experimental_projection_optimization = 1;
+select sum(key) from projection_without_key settings optimize_use_projections = 1;
-select sum(key) from projection_without_key settings allow_experimental_projection_optimization = 0;
+select sum(key) from projection_without_key settings optimize_use_projections = 0;

drop table projection_without_key;
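The expected output is verifiable by hand: sum(key) over numbers(1000) is 0 + 1 + ... + 999 = 999 * 1000 / 2 = 499500, and the point of the pair of queries is that the projection-backed run and the plain run print the identical value. A quick sanity check:

SELECT intDiv(999 * 1000, 2); -- 499500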
@@ -8,7 +8,7 @@ create table projection_test (dt DateTime, cost Int64, projection p (select toSt

insert into projection_test with rowNumberInAllBlocks() as id select toDateTime('2020-10-24 00:00:00') + (id / 20), * from generateRandom('cost Int64', 10, 10, 1) limit 1000 settings max_threads = 1;

-set allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+set optimize_use_projections = 1, force_optimize_projection = 1;

select toStartOfMinute(dt) dt_m, sum(cost) from projection_test group by dt_m;
select sum(cost) from projection_test;
@@ -38,7 +38,7 @@ function run_query()

echo "$query"
local opts=(
-    --allow_experimental_projection_optimization 1
+    --optimize_use_projections 1
    --force_optimize_projection 1
    --log_processors_profiles 1
    --query_id "$query_id"
@@ -44,7 +44,7 @@ function run_query()

echo "$query"
local opts=(
-    --allow_experimental_projection_optimization 1
+    --optimize_use_projections 1
    --force_optimize_projection 1
    --log_processors_profiles 1
    --query_id "$query_id"
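The flag form used by these bash helpers is just the command-line spelling of a per-query setting; inside SQL the equivalent would be a SETTINGS clause, for instance:

SELECT 1 SETTINGS optimize_use_projections = 1;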
@@ -72,8 +72,8 @@ SYSTEM SYNC REPLICA wikistat2;
-- Such condition will lead to successful queries.
SELECT 0 FROM numbers(5) WHERE sleepEachRow(1) = 1;

-select sum(hits), count() from wikistat1 GROUP BY project, subproject, path settings allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+select sum(hits), count() from wikistat1 GROUP BY project, subproject, path settings optimize_use_projections = 1, force_optimize_projection = 1;
-select sum(hits), count() from wikistat2 GROUP BY project, subproject, path settings allow_experimental_projection_optimization = 1, force_optimize_projection = 1;
+select sum(hits), count() from wikistat2 GROUP BY project, subproject, path settings optimize_use_projections = 1, force_optimize_projection = 1;

DROP TABLE wikistat1;
DROP TABLE wikistat2;
@@ -3,7 +3,7 @@
set max_threads = 16;
set allow_aggregate_partitions_independently = 1;
set force_aggregate_partitions_independently = 1;
-set allow_experimental_projection_optimization = 0;
+set optimize_use_projections = 0;

create table t1(a UInt32) engine=MergeTree order by tuple() partition by a % 4 settings index_granularity = 8192, index_granularity_bytes = 10485760;

@@ -9,6 +9,6 @@ ENGINE = Memory;
INSERT INTO data_a_02187
SELECT *
FROM system.one
-SETTINGS max_block_size = '1', min_insert_block_size_rows = '65536', min_insert_block_size_bytes = '0', max_insert_threads = '0', max_threads = '3', receive_timeout = '10', receive_data_timeout_ms = '10000', connections_with_failover_max_tries = '0', extremes = '1', use_uncompressed_cache = '0', optimize_move_to_prewhere = '1', optimize_move_to_prewhere_if_final = '0', replication_alter_partitions_sync = '2', totals_mode = 'before_having', allow_suspicious_low_cardinality_types = '1', compile_expressions = '1', min_count_to_compile_expression = '0', group_by_two_level_threshold = '100', distributed_aggregation_memory_efficient = '0', distributed_group_by_no_merge = '1', optimize_distributed_group_by_sharding_key = '1', optimize_skip_unused_shards = '1', optimize_skip_unused_shards_rewrite_in = '1', force_optimize_skip_unused_shards = '2', optimize_skip_unused_shards_nesting = '1', force_optimize_skip_unused_shards_nesting = '2', merge_tree_min_rows_for_concurrent_read = '10000', force_primary_key = '1', network_compression_method = 'ZSTD', network_zstd_compression_level = '7', log_queries = '0', log_queries_min_type = 'QUERY_FINISH', distributed_product_mode = 'local', insert_quorum = '2', insert_quorum_timeout = '0', insert_quorum_parallel = '0', select_sequential_consistency = '1', join_use_nulls = '1', any_join_distinct_right_table_keys = '1', preferred_max_column_in_block_size_bytes = '32', insert_distributed_sync = '1', insert_allow_materialized_columns = '1', use_index_for_in_with_subqueries = '1', joined_subquery_requires_alias = '0', empty_result_for_aggregation_by_empty_set = '1', allow_suspicious_codecs = '1', query_profiler_real_time_period_ns = '0', query_profiler_cpu_time_period_ns = '0', opentelemetry_start_trace_probability = '1', max_rows_to_read = '1000000', read_overflow_mode = 'break', max_rows_to_group_by = '10', group_by_overflow_mode = 'any', max_rows_to_sort = '100', sort_overflow_mode = 'break', max_result_rows = '10', max_execution_time = '3', max_execution_speed = '1', max_bytes_in_join = '100', join_algorithm = 'partial_merge', max_memory_usage = '1099511627776', log_query_threads = '1', send_logs_level = 'fatal', enable_optimize_predicate_expression = '1', prefer_localhost_replica = '1', optimize_read_in_order = '1', optimize_aggregation_in_order = '1', read_in_order_two_level_merge_threshold = '1', allow_introspection_functions = '1', check_query_single_value_result = '1', allow_experimental_live_view = '1', default_table_engine = 'Memory', mutations_sync = '2', convert_query_to_cnf = '0', optimize_arithmetic_operations_in_aggregate_functions = '1', optimize_duplicate_order_by_and_distinct = '0', optimize_multiif_to_if = '0', optimize_monotonous_functions_in_order_by = '1', optimize_functions_to_subcolumns = '1', optimize_using_constraints = '1', optimize_substitute_columns = '1', optimize_append_index = '1', transform_null_in = '1', allow_experimental_geo_types = '1', data_type_default_nullable = '1', cast_keep_nullable = '1', cast_ipv4_ipv6_default_on_conversion_error = '0', system_events_show_zero_values = '1', enable_global_with_statement = '1', optimize_on_insert = '0', optimize_rewrite_sum_if_to_count_if = '1', distributed_ddl_output_mode = 'throw', union_default_mode = 'ALL', optimize_aggregators_of_group_by_keys = '1', optimize_group_by_function_keys = '1', short_circuit_function_evaluation = 'enable', async_insert = '1', enable_filesystem_cache = '0', allow_deprecated_database_ordinary = '1', allow_deprecated_syntax_for_merge_tree = '1', allow_experimental_nlp_functions = '1', allow_experimental_object_type = '1', allow_experimental_map_type = '1', allow_experimental_projection_optimization = '1', input_format_null_as_default = '1', input_format_ipv4_default_on_conversion_error = '0', input_format_ipv6_default_on_conversion_error = '0', output_format_json_named_tuples_as_objects = '1', output_format_write_statistics = '0', output_format_pretty_row_numbers = '1';
+SETTINGS max_block_size = '1', min_insert_block_size_rows = '65536', min_insert_block_size_bytes = '0', max_insert_threads = '0', max_threads = '3', receive_timeout = '10', receive_data_timeout_ms = '10000', connections_with_failover_max_tries = '0', extremes = '1', use_uncompressed_cache = '0', optimize_move_to_prewhere = '1', optimize_move_to_prewhere_if_final = '0', replication_alter_partitions_sync = '2', totals_mode = 'before_having', allow_suspicious_low_cardinality_types = '1', compile_expressions = '1', min_count_to_compile_expression = '0', group_by_two_level_threshold = '100', distributed_aggregation_memory_efficient = '0', distributed_group_by_no_merge = '1', optimize_distributed_group_by_sharding_key = '1', optimize_skip_unused_shards = '1', optimize_skip_unused_shards_rewrite_in = '1', force_optimize_skip_unused_shards = '2', optimize_skip_unused_shards_nesting = '1', force_optimize_skip_unused_shards_nesting = '2', merge_tree_min_rows_for_concurrent_read = '10000', force_primary_key = '1', network_compression_method = 'ZSTD', network_zstd_compression_level = '7', log_queries = '0', log_queries_min_type = 'QUERY_FINISH', distributed_product_mode = 'local', insert_quorum = '2', insert_quorum_timeout = '0', insert_quorum_parallel = '0', select_sequential_consistency = '1', join_use_nulls = '1', any_join_distinct_right_table_keys = '1', preferred_max_column_in_block_size_bytes = '32', insert_distributed_sync = '1', insert_allow_materialized_columns = '1', use_index_for_in_with_subqueries = '1', joined_subquery_requires_alias = '0', empty_result_for_aggregation_by_empty_set = '1', allow_suspicious_codecs = '1', query_profiler_real_time_period_ns = '0', query_profiler_cpu_time_period_ns = '0', opentelemetry_start_trace_probability = '1', max_rows_to_read = '1000000', read_overflow_mode = 'break', max_rows_to_group_by = '10', group_by_overflow_mode = 'any', max_rows_to_sort = '100', sort_overflow_mode = 'break', max_result_rows = '10', max_execution_time = '3', max_execution_speed = '1', max_bytes_in_join = '100', join_algorithm = 'partial_merge', max_memory_usage = '1099511627776', log_query_threads = '1', send_logs_level = 'fatal', enable_optimize_predicate_expression = '1', prefer_localhost_replica = '1', optimize_read_in_order = '1', optimize_aggregation_in_order = '1', read_in_order_two_level_merge_threshold = '1', allow_introspection_functions = '1', check_query_single_value_result = '1', allow_experimental_live_view = '1', default_table_engine = 'Memory', mutations_sync = '2', convert_query_to_cnf = '0', optimize_arithmetic_operations_in_aggregate_functions = '1', optimize_duplicate_order_by_and_distinct = '0', optimize_multiif_to_if = '0', optimize_monotonous_functions_in_order_by = '1', optimize_functions_to_subcolumns = '1', optimize_using_constraints = '1', optimize_substitute_columns = '1', optimize_append_index = '1', transform_null_in = '1', allow_experimental_geo_types = '1', data_type_default_nullable = '1', cast_keep_nullable = '1', cast_ipv4_ipv6_default_on_conversion_error = '0', system_events_show_zero_values = '1', enable_global_with_statement = '1', optimize_on_insert = '0', optimize_rewrite_sum_if_to_count_if = '1', distributed_ddl_output_mode = 'throw', union_default_mode = 'ALL', optimize_aggregators_of_group_by_keys = '1', optimize_group_by_function_keys = '1', short_circuit_function_evaluation = 'enable', async_insert = '1', enable_filesystem_cache = '0', allow_deprecated_database_ordinary = '1', allow_deprecated_syntax_for_merge_tree = '1', allow_experimental_nlp_functions = '1', allow_experimental_object_type = '1', allow_experimental_map_type = '1', optimize_use_projections = '1', input_format_null_as_default = '1', input_format_ipv4_default_on_conversion_error = '0', input_format_ipv6_default_on_conversion_error = '0', output_format_json_named_tuples_as_objects = '1', output_format_write_statistics = '0', output_format_pretty_row_numbers = '1';

DROP TABLE data_a_02187;
@@ -0,0 +1,2 @@
+100000
+100000
tests/queries/0_stateless/02733_sparse_columns_reload.sql (new file, 18 lines)
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t_sparse_reload;
+
+CREATE TABLE t_sparse_reload (id UInt64, v UInt64)
+ENGINE = MergeTree ORDER BY id
+SETTINGS ratio_of_defaults_for_sparse_serialization = 0.95;
+
+INSERT INTO t_sparse_reload SELECT number, 0 FROM numbers(100000);
+
+SELECT count() FROM t_sparse_reload WHERE NOT ignore(*);
+
+ALTER TABLE t_sparse_reload MODIFY SETTING ratio_of_defaults_for_sparse_serialization = 1.0;
+
+DETACH TABLE t_sparse_reload;
+ATTACH TABLE t_sparse_reload;
+
+SELECT count() FROM t_sparse_reload WHERE NOT ignore(*);
+
+DROP TABLE t_sparse_reload;
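What the new test guards: with ratio_of_defaults_for_sparse_serialization = 0.95 and v entirely zero, v is written with sparse serialization; raising the ratio to 1.0 and then doing DETACH/ATTACH forces the table metadata to be reloaded against parts that still hold the sparse column, and both counts must remain 100000. To inspect which serialization a part actually used, a query along these lines should work (assuming the serialization_kind column of system.parts_columns):

SELECT column, serialization_kind
FROM system.parts_columns
WHERE database = currentDatabase() AND table = 't_sparse_reload' AND active;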
@@ -0,0 +1,5 @@
+5000 1189524
+1
+3333 0
+0
+3333 0
tests/queries/0_stateless/02734_sparse_columns_mutation.sql (new file, 30 lines)
@@ -0,0 +1,30 @@
+DROP TABLE IF EXISTS t_sparse_mutation;
+
+CREATE TABLE t_sparse_mutation (id UInt64, v UInt64)
+ENGINE = MergeTree ORDER BY id
+SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9;
+
+INSERT INTO t_sparse_mutation select number, if (number % 21 = 0, number, 0) FROM numbers(10000);
+
+SET mutations_sync = 2;
+
+DELETE FROM t_sparse_mutation WHERE id % 2 = 0;
+
+SELECT count(), sum(v) FROM t_sparse_mutation;
+
+SELECT sum(has_lightweight_delete) FROM system.parts
+WHERE database = currentDatabase() AND table = 't_sparse_mutation' AND active;
+
+ALTER TABLE t_sparse_mutation UPDATE v = v * 2 WHERE id % 5 = 0;
+ALTER TABLE t_sparse_mutation DELETE WHERE id % 3 = 0;
+
+SELECT count(), sum(v) FROM t_sparse_mutation;
+
+OPTIMIZE TABLE t_sparse_mutation FINAL;
+
+SELECT sum(has_lightweight_delete) FROM system.parts
+WHERE database = currentDatabase() AND table = 't_sparse_mutation' AND active;
+
+SELECT count(), sum(v) FROM t_sparse_mutation;
+
+DROP TABLE t_sparse_mutation;
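The reference values for this test can be re-derived by hand, which makes regressions easy to spot. v = number only when number % 21 = 0, so after DELETE WHERE id % 2 = 0 the surviving non-zero values are the odd multiples of 21 up to 9996, that is 21*1, 21*3, ..., 21*475: 238 terms summing to 21 * (1 + 3 + ... + 475) = 21 * 238^2 = 1189524, hence the first line "5000 1189524". Every multiple of 21 is also a multiple of 3, so DELETE WHERE id % 3 = 0 removes all remaining non-zero values (the UPDATE ... WHERE id % 5 = 0 therefore cannot affect the sum), and the count drops to 5000 - 1667 = 3333, giving "3333 0" both before and after OPTIMIZE FINAL.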
@@ -0,0 +1 @@
+477
@@ -0,0 +1,13 @@
+DROP TABLE IF EXISTS t_sparse_short_circuit;
+
+SET short_circuit_function_evaluation = 'force_enable';
+
+CREATE TABLE t_sparse_short_circuit (a UInt64, b UInt64)
+ENGINE = MergeTree ORDER BY tuple()
+SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9;
+
+INSERT INTO t_sparse_short_circuit select number, if (number % 21 = 0, number % 10 + 1, 0) FROM numbers(100000);
+
+SELECT sum(if(a % 10 = 0, CAST(b, 'UInt8'), 0)) FROM t_sparse_short_circuit;
+
+DROP TABLE t_sparse_short_circuit;
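The single reference value 477 is likewise checkable by hand: b is non-zero only when number % 21 = 0 (then b = number % 10 + 1), and the summand is taken only when a % 10 = 0, so both conditions hold exactly at multiples of lcm(10, 21) = 210, where b = 0 + 1 = 1. There are floor(99999 / 210) + 1 = 477 such numbers in [0, 100000), and short-circuit evaluation has to skip the CAST on every other row of the sparse column b without disturbing that total.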