Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 15:12:02 +00:00)

Move analyzer to Beta stage

commit 4e9c3baa25 (parent 8af682f748)
@@ -28,9 +28,9 @@
         </table_function_remote_max_addresses>

         <!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
-        <allow_experimental_analyzer>
+        <enable_analyzer>
             <readonly/>
-        </allow_experimental_analyzer>
+        </enable_analyzer>

         <!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
         <allow_experimental_object_type>
@@ -139,9 +139,9 @@ EOL
         </table_function_remote_max_addresses>

         <!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
-        <allow_experimental_analyzer>
+        <enable_analyzer>
             <readonly/>
-        </allow_experimental_analyzer>
+        </enable_analyzer>

         <!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
         <allow_experimental_object_type>
@@ -118,7 +118,7 @@ And the result of interpreting the `INSERT SELECT` query is a "completed" `Query

 `InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow for modular transformations of the query.

-To address current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not production-ready yet, but it can be tested with the `allow_experimental_analyzer` flag.
+To address current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not production-ready yet, but it can be tested with the `enable_analyzer` flag.

 ## Functions {#functions}

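The flag can be flipped per query, which makes it easy to compare the two interpreters side by side. A minimal SQL sketch (using only the setting renamed in this commit):

```sql
-- Compare the plans produced by the new analyzer and the old interpreter
-- for the same query.
EXPLAIN PLAN SELECT number FROM numbers(3) SETTINGS enable_analyzer = 1;
EXPLAIN PLAN SELECT number FROM numbers(3) SETTINGS enable_analyzer = 0;
```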
@@ -123,7 +123,7 @@ To ensure consistent and expected results, especially when migrating old queries
 In the new version of the analyzer, the rules for determining the common supertype for columns specified in the `USING` clause have been standardized to produce more predictable outcomes, especially when dealing with type modifiers like `LowCardinality` and `Nullable`.

 - `LowCardinality(T)` and `T`: When a column of type `LowCardinality(T)` is joined with a column of type `T`, the resulting common supertype will be `T`, effectively discarding the `LowCardinality` modifier.

 - `Nullable(T)` and `T`: When a column of type `Nullable(T)` is joined with a column of type `T`, the resulting common supertype will be `Nullable(T)`, ensuring that the nullable property is preserved.

 **Example:**
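A minimal SQL sketch consistent with the two rules above; the table and column names are hypothetical, not taken from the documentation:

```sql
-- LowCardinality(T) joined with T yields T; Nullable(T) joined with T yields Nullable(T).
CREATE TABLE t_lc (id LowCardinality(String)) ENGINE = Memory;
CREATE TABLE t_plain (id String) ENGINE = Memory;
CREATE TABLE t_null (id Nullable(String)) ENGINE = Memory;

SELECT toTypeName(id) FROM t_lc JOIN t_plain USING (id);   -- common supertype: String
SELECT toTypeName(id) FROM t_null JOIN t_plain USING (id); -- common supertype: Nullable(String)
```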
@@ -144,7 +144,7 @@ During projection names computation, aliases are not substituted.
 SELECT
     1 + 1 AS x,
     x + 1
-SETTINGS allow_experimental_analyzer = 0
+SETTINGS enable_analyzer = 0
 FORMAT PrettyCompact

 ┌─x─┬─plus(plus(1, 1), 1)─┐
@@ -154,7 +154,7 @@ FORMAT PrettyCompact
 SELECT
     1 + 1 AS x,
     x + 1
-SETTINGS allow_experimental_analyzer = 1
+SETTINGS enable_analyzer = 1
 FORMAT PrettyCompact

 ┌─x─┬─plus(x, 1)─┐
@@ -177,7 +177,7 @@ SELECT toTypeName(if(0, [2, 3, 4], 'String'))

 ### Heterogeneous clusters

-The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `allow_experimental_analyzer` setting values.
+The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `enable_analyzer` setting values.

 ### Mutations are interpreted by previous analyzer

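Because the initiator forwards its analyzer setting to every remote server (see the connection code later in this commit), the practical rule is to pin one value for the whole cluster. A hedged sketch, assuming a hypothetical cluster name:

```sql
-- All replicas inherit the initiator's value, so the query fails on servers
-- that cannot honor it rather than silently mixing both interpreters.
SELECT count()
FROM clusterAllReplicas('my_cluster', system.one)
SETTINGS enable_analyzer = 1;
```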
@@ -4051,7 +4051,7 @@ Rewrite aggregate functions with if expression as argument when logically equivalent
 For example, `avg(if(cond, col, null))` can be rewritten to `avgOrNullIf(cond, col)`. It may improve performance.

 :::note
-Supported only with experimental analyzer (`allow_experimental_analyzer = 1`).
+Supported only with experimental analyzer (`enable_analyzer = 1`).
 :::

 ## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}
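A sketch of the rewrite on a toy query, assuming the rewrite setting documented in this section is enabled together with the analyzer:

```sql
SET enable_analyzer = 1;
-- The aggregate below feeds `avg` through `if(cond, col, NULL)` ...
SELECT avg(if(number % 2 = 0, number, NULL)) FROM numbers(10);
-- ... and is logically equivalent to the combinator form it can be rewritten to:
SELECT avgOrNullIf(number % 2 = 0, number) FROM numbers(10);
```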
@@ -115,7 +115,7 @@ ClickHouse is a full-fledged column-oriented DBMS. Data

 `InterpreterSelectQuery` uses the `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is written rather messily and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow for modular transformations of the query.

-To address the current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not yet production-ready, but it can be tested with the `allow_experimental_analyzer` flag.
+To address the current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not yet production-ready, but it can be tested with the `enable_analyzer` flag.

 ## Functions {#functions}

@@ -196,11 +196,11 @@ void HedgedConnections::sendQuery(
             modified_settings.parallel_replica_offset = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset;
         }

-        /// FIXME: Remove once we will make `allow_experimental_analyzer` obsolete setting.
+        /// FIXME: Remove once we make `enable_analyzer` an obsolete setting.
        /// Make sure the analyzer setting is always set explicitly, so it will be effectively applied on the remote server.
        /// In other words, the initiator always controls whether the analyzer is enabled or not for
        /// all servers involved in the distributed query processing.
-        modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings.allow_experimental_analyzer));
+        modified_settings.set("enable_analyzer", static_cast<bool>(modified_settings.enable_analyzer));

         replica.connection->sendQuery(
             timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
@@ -150,11 +150,11 @@ void MultiplexedConnections::sendQuery(
         client_info.number_of_current_replica = replica_info->number_of_current_replica;
     }

-    /// FIXME: Remove once we will make `allow_experimental_analyzer` obsolete setting.
+    /// FIXME: Remove once we make `enable_analyzer` an obsolete setting.
    /// Make sure the analyzer setting is always set explicitly, so it will be effectively applied on the remote server.
    /// In other words, the initiator always controls whether the analyzer is enabled or not for
    /// all servers involved in the distributed query processing.
-    modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings.allow_experimental_analyzer));
+    modified_settings.set("enable_analyzer", static_cast<bool>(modified_settings.enable_analyzer));

     const bool enable_offset_parallel_processing = context->canUseOffsetParallelReplicas();

@@ -638,7 +638,7 @@ class IColumn;
     M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \
     M(Bool, enable_global_with_statement, true, "Propagate WITH statements to UNION queries and all subqueries", 0) \
     M(Bool, aggregate_functions_null_for_empty, false, "Rewrite all aggregate functions in a query, adding -OrNull suffix to them", 0) \
-    M(Bool, optimize_syntax_fuse_functions, false, "Allow apply fuse aggregating function. Available only with `allow_experimental_analyzer`", 0) \
+    M(Bool, optimize_syntax_fuse_functions, false, "Allow apply fuse aggregating function. Available only with `enable_analyzer`", 0) \
     M(Bool, flatten_nested, true, "If true, columns of type Nested will be flatten to separate array columns instead of one array of tuples", 0) \
     M(Bool, asterisk_include_materialized_columns, false, "Include MATERIALIZED columns for wildcard query", 0) \
     M(Bool, asterisk_include_alias_columns, false, "Include ALIAS columns for wildcard query", 0) \
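A sketch of what the fused form enables, using only the setting names from the macro above:

```sql
-- With fusing on, sum/count/avg over the same argument can be computed by a
-- single internal sumCount aggregation instead of three separate ones.
SET enable_analyzer = 1, optimize_syntax_fuse_functions = 1;
SELECT sum(number), count(number), avg(number) FROM numbers(100);
```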
@@ -943,8 +943,7 @@ class IColumn;
     \
     M(Bool, allow_experimental_join_condition, false, "Support join with unequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y.", 0) \
     \
-    /* Analyzer: It's not experimental anymore (WIP) */ \
-    M(Bool, allow_experimental_analyzer, true, "Allow new query analyzer.", IMPORTANT) \
+    M(Bool, enable_analyzer, true, "Allow new query analyzer.", IMPORTANT) ALIAS(allow_experimental_analyzer) \
     M(Bool, analyzer_compatibility_join_using_top_level_identifier, false, "Force to resolve identifier in JOIN USING from projection (for example, in `SELECT a + 1 AS b FROM t1 JOIN t2 USING (b)` join will be performed by `t1.a + 1 = t2.b`, rather than `t1.b = t2.b`).", 0) \
     \
     M(Bool, allow_experimental_live_view, false, "Enable LIVE VIEW. Not mature enough.", 0) \
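Because the new name is declared with `ALIAS(allow_experimental_analyzer)`, both spellings keep working; a quick sketch:

```sql
SET allow_experimental_analyzer = 1;  -- old name, still accepted as an alias
SET enable_analyzer = 1;              -- new canonical name
SELECT getSetting('enable_analyzer'); -- 1 either way
```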
@@ -500,6 +500,265 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
             {"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}
         }
     },
{"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."},
|
||||
{"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"},
|
||||
{"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"},
|
||||
{"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"},
|
||||
{"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"},
|
||||
{"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"},
|
||||
{"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."},
|
||||
{"input_format_json_case_insensitive_column_matching", false, false, "Ignore case when matching JSON keys with CH columns."},
|
||||
{"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."},
|
||||
{"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."},
|
||||
{"collect_hash_table_stats_during_joins", false, true, "New setting."},
|
||||
{"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."},
|
||||
{"input_format_orc_reader_time_zone_name", "GMT", "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT."},
|
||||
{"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."},
|
||||
{"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"},
|
||||
{"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"},
|
||||
{"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"},
|
||||
{"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"},
|
||||
{"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"},
|
||||
{"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"},
|
||||
{"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."},
|
||||
{"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore."},
|
||||
{"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."},
|
||||
{"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."}
|
||||
}},
|
||||
{"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"},
|
||||
{"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"},
|
||||
{"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."},
|
||||
{"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"},
|
||||
{"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"},
|
||||
{"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"},
|
||||
{"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"},
|
||||
{"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"},
|
||||
{"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"},
|
||||
{"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"},
|
||||
{"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"},
|
||||
{"s3_max_part_number", 10000, 10000, "Maximum part number number for s3 upload part"},
|
||||
{"s3_max_single_operation_copy_size", 32 * 1024 * 1024, 32 * 1024 * 1024, "Maximum size for a single copy operation in s3"},
|
||||
{"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."},
|
||||
{"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."},
|
||||
{"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"},
|
||||
{"allow_deprecated_snowflake_conversion_functions", true, false, "Disabled deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake."},
|
||||
{"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."},
|
||||
{"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."},
|
||||
{"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."},
|
||||
{"allow_experimental_statistics", false, false, "The setting was renamed. The previous name is `allow_experimental_statistic`."},
|
||||
{"enable_vertical_final", false, true, "Enable vertical final by default again after fixing bug"},
|
||||
{"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"},
|
||||
{"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. A value of 0 disables the upper limit"},
|
||||
{"output_format_pretty_display_footer_column_names", 0, 1, "Add a setting to display column names in the footer if there are many rows. Threshold value is controlled by output_format_pretty_display_footer_column_names_min_rows."},
|
||||
{"output_format_pretty_display_footer_column_names_min_rows", 0, 50, "Add a setting to control the threshold value for setting output_format_pretty_display_footer_column_names_min_rows. Default 50."},
|
||||
{"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."},
|
||||
{"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."},
|
||||
{"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."},
|
||||
}},
|
||||
{"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"},
|
||||
{"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."},
|
||||
{"input_format_tsv_crlf_end_of_line", false, false, "Enables reading of CRLF line endings with TSV formats"},
|
||||
{"output_format_parquet_use_custom_encoder", false, true, "Enable custom Parquet encoder."},
|
||||
{"cross_join_min_rows_to_compress", 0, 10000000, "Minimal count of rows to compress block in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."},
|
||||
{"cross_join_min_bytes_to_compress", 0, 1_GiB, "Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."},
|
||||
{"http_max_chunk_size", 0, 0, "Internal limitation"},
|
||||
{"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."},
|
||||
{"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"},
|
||||
{"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"},
|
||||
{"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"},
|
||||
{"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."},
|
||||
}},
|
||||
{"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"},
|
||||
{"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"},
|
||||
{"ignore_drop_queries_probability", 0, 0, "Allow to ignore drop queries in server with specified probability for testing purposes"},
|
||||
{"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"},
|
||||
{"query_cache_system_table_handling", "save", "throw", "The query cache no longer caches results of queries against system tables"},
|
||||
{"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"},
|
||||
{"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."},
|
||||
{"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"},
|
||||
{"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"},
|
||||
{"optimize_rewrite_sum_if_to_count_if", false, true, "Only available for the analyzer, where it works correctly"},
|
||||
{"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."},
|
||||
{"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"},
|
||||
{"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"},
|
||||
}},
|
||||
{"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"},
|
||||
{"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"},
|
||||
{"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"},
|
||||
{"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"},
|
||||
{"page_cache_inject_eviction", false, false, "Added userspace page cache"},
|
||||
{"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"},
|
||||
{"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"},
|
||||
{"traverse_shadow_remote_data_paths", false, false, "Traverse shadow directory when query system.remote_data_paths."},
|
||||
{"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication in dependent materialized view cannot work together with async inserts."},
|
||||
{"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"},
|
||||
{"log_processors_profiles", false, true, "Enable by default"},
|
||||
{"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."},
|
||||
{"allow_suspicious_primary_key", true, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. SimpleAggregateFunction)"},
|
||||
{"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"},
|
||||
{"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"},
|
||||
{"analyzer_compatibility_join_using_top_level_identifier", false, false, "Force to resolve identifier in JOIN USING from projection"},
|
||||
{"distributed_insert_skip_read_only_replicas", false, false, "If true, INSERT into Distributed will skip read-only replicas"},
|
||||
{"keeper_max_retries", 10, 10, "Max retries for general keeper operations"},
|
||||
{"keeper_retry_initial_backoff_ms", 100, 100, "Initial backoff timeout for general keeper operations"},
|
||||
{"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"},
|
||||
{"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"},
|
||||
{"enable_analyzer", false, true, "Enable analyzer and planner by default."},
|
||||
{"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."},
|
||||
{"allow_get_client_http_header", false, false, "Introduced a new function."},
|
||||
{"output_format_pretty_row_numbers", false, true, "It is better for usability."},
|
||||
{"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."},
|
||||
{"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."},
|
||||
{"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."},
|
||||
{"output_format_arrow_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."},
|
||||
{"output_format_parquet_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."},
|
||||
{"output_format_orc_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."},
|
||||
{"output_format_pretty_highlight_digit_groups", false, true, "If enabled and if output is a terminal, highlight every digit corresponding to the number of thousands, millions, etc. with underline."},
|
||||
{"geo_distance_returns_float64_on_float64_arguments", false, true, "Increase the default precision."},
|
||||
{"azure_max_inflight_parts_for_one_file", 20, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited."},
|
||||
{"azure_strict_upload_part_size", 0, 0, "The exact size of part to upload during multipart upload to Azure blob storage."},
|
||||
{"azure_min_upload_part_size", 16*1024*1024, 16*1024*1024, "The minimum size of part to upload during multipart upload to Azure blob storage."},
|
||||
{"azure_max_upload_part_size", 5ull*1024*1024*1024, 5ull*1024*1024*1024, "The maximum size of part to upload during multipart upload to Azure blob storage."},
|
||||
{"azure_upload_part_size_multiply_factor", 2, 2, "Multiply azure_min_upload_part_size by this factor each time azure_multiply_parts_count_threshold parts were uploaded from a single write to Azure blob storage."},
|
||||
{"azure_upload_part_size_multiply_parts_count_threshold", 500, 500, "Each time this number of parts was uploaded to Azure blob storage, azure_min_upload_part_size is multiplied by azure_upload_part_size_multiply_factor."},
|
||||
{"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."},
|
||||
{"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."},
|
||||
{"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."},
|
||||
}},
|
||||
{"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"},
|
||||
{"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"},
|
||||
{"output_format_values_escape_quote_with_quote", false, false, "If true escape ' with '', otherwise quoted with \\'"},
|
||||
{"output_format_pretty_single_large_number_tip_threshold", 0, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0)"},
|
||||
{"input_format_try_infer_exponent_floats", true, false, "Don't infer floats in exponential notation by default"},
|
||||
{"query_plan_optimize_prewhere", true, true, "Allow to push down filter to PREWHERE expression for supported storages"},
|
||||
{"async_insert_max_data_size", 1000000, 10485760, "The previous value appeared to be too small."},
|
||||
{"async_insert_poll_timeout_ms", 10, 10, "Timeout in milliseconds for polling data from asynchronous insert queue"},
|
||||
{"async_insert_use_adaptive_busy_timeout", false, true, "Use adaptive asynchronous insert timeout"},
|
||||
{"async_insert_busy_timeout_min_ms", 50, 50, "The minimum value of the asynchronous insert timeout in milliseconds; it also serves as the initial value, which may be increased later by the adaptive algorithm"},
|
||||
{"async_insert_busy_timeout_max_ms", 200, 200, "The minimum value of the asynchronous insert timeout in milliseconds; async_insert_busy_timeout_ms is aliased to async_insert_busy_timeout_max_ms"},
|
||||
{"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout increases"},
|
||||
{"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"},
|
||||
{"format_template_row_format", "", "", "Template row format string can be set directly in query"},
|
||||
{"format_template_resultset_format", "", "", "Template result set format string can be set in query"},
|
||||
{"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"},
|
||||
{"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"},
|
||||
{"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."},
|
||||
{"min_external_table_block_size_rows", DEFAULT_INSERT_BLOCK_SIZE, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to external table to specified size in rows, if blocks are not big enough"},
|
||||
{"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."},
|
||||
{"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."},
|
||||
{"optimize_time_filter_with_preimage", true, true, "Optimize Date and DateTime predicates by converting functions into equivalent comparisons without conversions (e.g. toYear(col) = 2023 -> col >= '2023-01-01' AND col <= '2023-12-31')"},
|
||||
{"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory."},
|
||||
{"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"},
|
||||
{"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"},
|
||||
{"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"},
|
||||
{"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."},
|
||||
{"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."},
|
||||
}},
|
||||
{"24.1", {{"print_pretty_type_names", false, true, "Better user experience."},
|
||||
{"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"},
|
||||
{"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"},
|
||||
{"allow_experimental_variant_type", false, false, "Add new experimental Variant type"},
|
||||
{"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"},
|
||||
{"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"},
|
||||
{"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"},
|
||||
{"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"},
|
||||
{"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"},
|
||||
{"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"},
|
||||
{"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"},
|
||||
{"enable_vertical_final", false, true, "Use vertical final by default"},
|
||||
{"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"},
|
||||
{"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"},
|
||||
{"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"},
|
||||
{"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"},
|
||||
{"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"},
|
||||
{"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"},
|
||||
{"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"},
|
||||
{"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"},
|
||||
{"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"},
|
||||
{"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"},
|
||||
{"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}},
|
||||
{"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."},
|
||||
{"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"},
|
||||
{"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"},
|
||||
{"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}},
|
||||
{"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}},
|
||||
{"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"},
|
||||
{"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"},
|
||||
{"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"},
|
||||
{"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"},
|
||||
{"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"},
|
||||
{"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"},
|
||||
{"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}},
|
||||
{"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}},
|
||||
{"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}},
|
||||
{"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."},
|
||||
{"http_receive_timeout", 180, 30, "See http_send_timeout."}}},
|
||||
{"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."},
|
||||
{"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."},
|
||||
{"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. Rows with different values in sorting prefix are filled independently"},
|
||||
{"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}},
|
||||
{"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"},
|
||||
{"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."},
|
||||
{"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"},
|
||||
{"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"},
|
||||
{"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"},
|
||||
{"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"},
|
||||
{"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}},
|
||||
{"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"},
|
||||
{"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"},
|
||||
{"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"},
|
||||
{"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"},
|
||||
{"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"},
|
||||
{"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"},
|
||||
{"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}},
|
||||
{"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"},
|
||||
{"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"},
|
||||
{"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"},
|
||||
{"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"},
|
||||
{"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}},
|
||||
{"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"},
|
||||
{"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"},
|
||||
{"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"},
|
||||
{"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"},
|
||||
{"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"},
|
||||
{"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. For example, sorting steps related to ORDER BY clauses in subqueries"}}},
|
||||
{"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"},
|
||||
{"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"},
|
||||
{"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}},
|
||||
{"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}},
|
||||
{"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}},
|
||||
{"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"},
|
||||
{"enable_positional_arguments", false, true, "Enable positional arguments feature by default"},
|
||||
{"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}},
|
||||
{"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"},
|
||||
{"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}},
|
||||
{"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"},
|
||||
{"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}},
|
||||
{"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}},
|
||||
{"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}},
|
||||
{"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}},
|
||||
{"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"},
|
||||
{"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}},
|
||||
{"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}},
|
||||
{"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}},
|
||||
{"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"},
|
||||
{"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"},
|
||||
{"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}},
|
||||
{"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}},
|
||||
{"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. It is significantly more convenient to use than sequential quorum inserts"},
|
||||
{"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"},
|
||||
{"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"},
|
||||
{"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}},
|
||||
{"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}},
|
||||
{"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}},
|
||||
{"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"},
|
||||
{"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}},
|
||||
{"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}},
|
||||
{"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}},
|
||||
{"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}},
|
||||
{"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}},
|
||||
{"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}},
|
||||
{"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}},
|
||||
};
|
||||
|
||||
|
||||
|
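This history is what the `compatibility` setting consults: pinning an older version restores that version's defaults, including the analyzer default recorded under "24.3" above. A hedged sketch, assuming a server new enough to carry that entry:

```sql
SET compatibility = '24.2';           -- rolls enable_analyzer back to its pre-24.3 default (false)
SELECT getSetting('enable_analyzer');
```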
@@ -1411,7 +1411,7 @@ FutureSetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool
         return {};

     PreparedSets::Hash set_key;
-    if (data.getContext()->getSettingsRef().allow_experimental_analyzer && !identifier)
+    if (data.getContext()->getSettingsRef().enable_analyzer && !identifier)
     {
         /// Here we can be only from the mutation interpreter. Normal selects with the analyzer use another interpreter.
         /// This is a hacky way to allow reusing cache for prepared sets.
@@ -68,7 +68,7 @@ ASTPtr rewriteSelectQuery(
     // are written into the query context and will be sent by the query pipeline.
     select_query.setExpression(ASTSelectQuery::Expression::SETTINGS, {});

-    if (!context->getSettingsRef().allow_experimental_analyzer)
+    if (!context->getSettingsRef().enable_analyzer)
     {
         if (table_function_ptr)
             select_query.addTableFunction(table_function_ptr);
@@ -165,7 +165,7 @@ void SelectStreamFactory::createForShardImpl(
     auto emplace_remote_stream = [&](bool lazy = false, time_t local_delay = 0)
     {
         Block shard_header;
-        if (context->getSettingsRef().allow_experimental_analyzer)
+        if (context->getSettingsRef().enable_analyzer)
             shard_header = InterpreterSelectQueryAnalyzer::getSampleBlock(query_tree, context, SelectQueryOptions(processed_stage).analyze());
         else
             shard_header = header;
@@ -300,7 +300,7 @@ void executeQuery(

     const size_t shards = cluster->getShardCount();

-    if (context->getSettingsRef().allow_experimental_analyzer)
+    if (context->getSettingsRef().enable_analyzer)
     {
         for (size_t i = 0, s = cluster->getShardsInfo().size(); i < s; ++i)
         {
@@ -581,7 +581,7 @@ void executeQueryWithParallelReplicasCustomKey(
     /// Return directly (with correct header) if no shard to query.
     if (query_info.getCluster()->getShardsInfo().empty())
     {
-        if (context->getSettingsRef().allow_experimental_analyzer)
+        if (context->getSettingsRef().enable_analyzer)
             return;

         Pipe pipe(std::make_shared<NullSource>(header));
@@ -834,7 +834,7 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti

         Block as_select_sample;

-        if (getContext()->getSettingsRef().allow_experimental_analyzer)
+        if (getContext()->getSettingsRef().enable_analyzer)
         {
             as_select_sample = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext());
         }
@@ -1327,7 +1327,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
     {
         Block input_block;

-        if (getContext()->getSettingsRef().allow_experimental_analyzer)
+        if (getContext()->getSettingsRef().enable_analyzer)
         {
             input_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext());
         }
@@ -129,7 +129,7 @@ void InterpreterDescribeQuery::fillColumnsFromSubquery(const ASTTableExpression
     auto select_query = table_expression.subquery->children.at(0);
     auto current_context = getContext();

-    if (settings.allow_experimental_analyzer)
+    if (settings.enable_analyzer)
     {
         SelectQueryOptions select_query_options;
         sample_block = InterpreterSelectQueryAnalyzer(select_query, current_context, select_query_options).getSampleBlock();
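`fillColumnsFromSubquery` is the path that backs `DESCRIBE` over a subquery, so the change is easy to observe; a sketch:

```sql
SET enable_analyzer = 1;
-- The returned columns come from the subquery's sample block, computed by
-- whichever interpreter the setting selects.
DESCRIBE (SELECT 1 AS x, 'a' AS s);
```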
@@ -394,9 +394,9 @@ QueryPipeline InterpreterExplainQuery::executeImpl()
         }
         case ASTExplainQuery::QueryTree:
         {
-            if (!getContext()->getSettingsRef().allow_experimental_analyzer)
+            if (!getContext()->getSettingsRef().enable_analyzer)
                 throw Exception(ErrorCodes::NOT_IMPLEMENTED,
-                    "EXPLAIN QUERY TREE is only supported with a new analyzer. Set allow_experimental_analyzer = 1.");
+                    "EXPLAIN QUERY TREE is only supported with a new analyzer. Set enable_analyzer = 1.");

             if (ast.getExplainedQuery()->as<ASTSelectWithUnionQuery>() == nullptr)
                 throw Exception(ErrorCodes::INCORRECT_QUERY, "Only SELECT is supported for EXPLAIN QUERY TREE query");
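The guard above is observable directly from SQL; a sketch:

```sql
SET enable_analyzer = 0;
EXPLAIN QUERY TREE SELECT 1;  -- throws NOT_IMPLEMENTED with the message above

SET enable_analyzer = 1;
EXPLAIN QUERY TREE SELECT 1;  -- prints the resolved query tree
```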
@@ -453,7 +453,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl()

             ContextPtr context;

-            if (getContext()->getSettingsRef().allow_experimental_analyzer)
+            if (getContext()->getSettingsRef().enable_analyzer)
             {
                 InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), getContext(), options);
                 context = interpreter.getContext();
@@ -499,7 +499,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl()
             QueryPlan plan;
             ContextPtr context;

-            if (getContext()->getSettingsRef().allow_experimental_analyzer)
+            if (getContext()->getSettingsRef().enable_analyzer)
             {
                 InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), getContext(), options);
                 context = interpreter.getContext();
@@ -558,7 +558,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl()
             QueryPlan plan;
             ContextPtr context = getContext();

-            if (context->getSettingsRef().allow_experimental_analyzer)
+            if (context->getSettingsRef().enable_analyzer)
             {
                 InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), getContext(), SelectQueryOptions());
                 context = interpreter.getContext();
@@ -118,7 +118,7 @@ InterpreterFactory::InterpreterPtr InterpreterFactory::get(ASTPtr & query, Conte

     if (query->as<ASTSelectQuery>())
     {
-        if (context->getSettingsRef().allow_experimental_analyzer)
+        if (context->getSettingsRef().enable_analyzer)
             interpreter_name = "InterpreterSelectQueryAnalyzer";
         /// This is internal part of ASTSelectWithUnionQuery.
         /// Even if there is SELECT without union, it is represented by ASTSelectWithUnionQuery with single ASTSelectQuery as a child.
@@ -129,7 +129,7 @@ InterpreterFactory::InterpreterPtr InterpreterFactory::get(ASTPtr & query, Conte
     {
         ProfileEvents::increment(ProfileEvents::SelectQuery);

-        if (context->getSettingsRef().allow_experimental_analyzer)
+        if (context->getSettingsRef().enable_analyzer)
             interpreter_name = "InterpreterSelectQueryAnalyzer";
         else
             interpreter_name = "InterpreterSelectWithUnionQuery";
@@ -222,7 +222,7 @@ InterpreterFactory::InterpreterPtr InterpreterFactory::get(ASTPtr & query, Conte
     {
         const auto kind = query->as<ASTExplainQuery>()->getKind();
         if (kind == ASTExplainQuery::ParsedAST || kind == ASTExplainQuery::AnalyzedSyntax)
-            context->setSetting("allow_experimental_analyzer", false);
+            context->setSetting("enable_analyzer", false);

         interpreter_name = "InterpreterExplainQuery";
     }
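For the two AST-level EXPLAIN kinds the factory switches the analyzer off, since they operate before query analysis; a sketch of queries that take this path:

```sql
-- Both run through the old code path even if enable_analyzer = 1 in the session.
EXPLAIN AST SELECT 1;
EXPLAIN SYNTAX SELECT 1;
```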
@@ -95,7 +95,7 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query)
         Block header_block;
         auto select_query_options = SelectQueryOptions(QueryProcessingStage::Complete, 1);

-        if (current_context->getSettingsRef().allow_experimental_analyzer)
+        if (current_context->getSettingsRef().enable_analyzer)
         {
             InterpreterSelectQueryAnalyzer interpreter_select(query.select, current_context, select_query_options);
             header_block = interpreter_select.getSampleBlock();
@@ -189,7 +189,7 @@ bool isStorageTouchedByMutations(
     std::optional<InterpreterSelectQuery> interpreter_select_query;
     BlockIO io;

-    if (context->getSettingsRef().allow_experimental_analyzer)
+    if (context->getSettingsRef().enable_analyzer)
     {
         auto select_query_tree = prepareQueryAffectedQueryTree(commands, storage.shared_from_this(), context);
         InterpreterSelectQueryAnalyzer interpreter(select_query_tree, context, SelectQueryOptions().ignoreLimits());
@@ -415,9 +415,9 @@ MutationsInterpreter::MutationsInterpreter(
     , logger(getLogger("MutationsInterpreter(" + source.getStorage()->getStorageID().getFullTableName() + ")"))
 {
     auto new_context = Context::createCopy(context_);
-    if (new_context->getSettingsRef().allow_experimental_analyzer)
+    if (new_context->getSettingsRef().enable_analyzer)
     {
-        new_context->setSetting("allow_experimental_analyzer", false);
+        new_context->setSetting("enable_analyzer", false);
         LOG_DEBUG(logger, "Will use old analyzer to prepare mutation");
     }
     context = std::move(new_context);
@ -685,10 +685,10 @@ void validateAnalyzerSettings(ASTPtr ast, bool context_value)
|
||||
|
||||
if (auto * set_query = node->as<ASTSetQuery>())
|
||||
{
|
||||
if (auto * value = set_query->changes.tryGet("allow_experimental_analyzer"))
|
||||
if (auto * value = set_query->changes.tryGet("enable_analyzer"))
|
||||
{
|
||||
if (top_level != value->safeGet<bool>())
|
||||
throw Exception(ErrorCodes::INCORRECT_QUERY, "Setting 'allow_experimental_analyzer' is changed in the subquery. Top level value: {}", top_level);
|
||||
throw Exception(ErrorCodes::INCORRECT_QUERY, "Setting 'enable_analyzer' is changed in the subquery. Top level value: {}", top_level);
|
||||
}
|
||||
}
|
||||
|
||||
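A minimal SQL sketch of the validation above: with the analyzer enabled at the top level, flipping it inside a subquery should be rejected with INCORRECT_QUERY, while a consistent value passes.

SET enable_analyzer = 1;
SELECT * FROM (SELECT 1 SETTINGS enable_analyzer = 0); -- expected: INCORRECT_QUERY
SELECT * FROM (SELECT 1 SETTINGS enable_analyzer = 1); -- expected: returns 1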
@ -912,7 +912,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
/// Interpret SETTINGS clauses as early as possible (before invoking the corresponding interpreter),
/// to allow settings to take effect.
InterpreterSetQuery::applySettingsFromQuery(ast, context);
validateAnalyzerSettings(ast, context->getSettingsRef().allow_experimental_analyzer);
validateAnalyzerSettings(ast, context->getSettingsRef().enable_analyzer);

if (auto * insert_query = ast->as<ASTInsertQuery>())
insert_query->tail = istr;

@ -141,7 +141,7 @@ Block getHeaderForProcessingStage(

Block result;

if (context->getSettingsRef().allow_experimental_analyzer)
if (context->getSettingsRef().enable_analyzer)
{
auto storage = std::make_shared<StorageDummy>(storage_snapshot->storage.getStorageID(),
storage_snapshot->getAllColumnsDescription(),

@ -65,7 +65,7 @@ std::unique_ptr<QueryPlan> createLocalPlan(
.setShardInfo(static_cast<UInt32>(shard_num), static_cast<UInt32>(shard_count))
.ignoreASTOptimizations();

if (context->getSettingsRef().allow_experimental_analyzer)
if (context->getSettingsRef().enable_analyzer)
{
/// For Analyzer, identifier in GROUP BY/ORDER BY/LIMIT BY lists has been resolved to
/// ConstantNode in QueryTree if it is an alias of a constant, so we should not replace

@ -319,7 +319,7 @@ std::optional<Chain> generateViewChain(
Block header;

/// Get list of columns we get from select query.
if (select_context->getSettingsRef().allow_experimental_analyzer)
if (select_context->getSettingsRef().enable_analyzer)
header = InterpreterSelectQueryAnalyzer::getSampleBlock(query, select_context);
else
header = InterpreterSelectQuery(query, select_context, SelectQueryOptions()).getSampleBlock();

@ -613,7 +613,7 @@ static QueryPipeline process(Block block, ViewRuntimeData & view, const ViewsDat

QueryPipelineBuilder pipeline;

if (local_context->getSettingsRef().allow_experimental_analyzer)
if (local_context->getSettingsRef().enable_analyzer)
{
InterpreterSelectQueryAnalyzer interpreter(view.query, local_context, local_context->getViewSource(), SelectQueryOptions().ignoreAccessCheck());
pipeline = interpreter.buildQueryPipeline();

@ -1904,14 +1904,14 @@ void TCPHandler::receiveQuery()
/// Settings
///

/// FIXME: Remove when allow_experimental_analyzer will become obsolete.
/// FIXME: Remove when enable_analyzer will become obsolete.
/// Analyzer became Beta in 24.3 and started to be enabled by default.
/// We have to disable it for ourselves to make sure we don't have different settings on
/// different servers.
if (query_kind == ClientInfo::QueryKind::SECONDARY_QUERY
&& client_info.getVersionNumber() < VersionNumber(23, 3, 0)
&& !passed_settings.allow_experimental_analyzer.changed)
passed_settings.set("allow_experimental_analyzer", false);
&& !passed_settings.enable_analyzer.changed)
passed_settings.set("enable_analyzer", false);

auto settings_changes = passed_settings.changes();
query_kind = query_context->getClientInfo().query_kind;

@ -806,7 +806,7 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context)
metadata.select = SelectQueryDescription::getSelectQueryFromASTForMatView(select, metadata.refresh != nullptr, context);
Block as_select_sample;

if (context->getSettingsRef().allow_experimental_analyzer)
if (context->getSettingsRef().enable_analyzer)
{
as_select_sample = InterpreterSelectQueryAnalyzer::getSampleBlock(select->clone(), context);
}

@ -125,7 +125,7 @@ void IStorageCluster::read(
Block sample_block;
ASTPtr query_to_send = query_info.query;

if (context->getSettingsRef().allow_experimental_analyzer)
if (context->getSettingsRef().enable_analyzer)
{
sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(query_info.query, context, SelectQueryOptions(processed_stage));
}

@ -378,7 +378,7 @@ void StorageLiveView::writeBlock(StorageLiveView & live_view, Block && block, Ch

QueryPipelineBuilder builder;

if (local_context->getSettingsRef().allow_experimental_analyzer)
if (local_context->getSettingsRef().enable_analyzer)
{
auto select_description = buildSelectQueryTreeDescription(select_query_description.inner_query, local_context);
if (select_description.dependent_table_node)

@ -475,7 +475,7 @@ Block StorageLiveView::getHeader() const

if (!sample_block)
{
if (live_view_context->getSettingsRef().allow_experimental_analyzer)
if (live_view_context->getSettingsRef().enable_analyzer)
{
sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(select_query_description.select_query,
live_view_context,

@ -519,7 +519,7 @@ ASTPtr StorageLiveView::getInnerBlocksQuery()
auto & select_with_union_query = select_query_description.select_query->as<ASTSelectWithUnionQuery &>();
auto blocks_query = select_with_union_query.list_of_selects->children.at(0)->clone();

if (!live_view_context->getSettingsRef().allow_experimental_analyzer)
if (!live_view_context->getSettingsRef().enable_analyzer)
{
/// Rewrite inner query with right aliases for JOIN.
/// It cannot be done in constructor or startup() because InterpreterSelectQuery may access table,

@ -543,7 +543,7 @@ MergeableBlocksPtr StorageLiveView::collectMergeableBlocks(ContextPtr local_cont

QueryPipelineBuilder builder;

if (local_context->getSettingsRef().allow_experimental_analyzer)
if (local_context->getSettingsRef().enable_analyzer)
{
InterpreterSelectQueryAnalyzer interpreter(select_query_description.inner_query,
local_context,

@ -599,7 +599,7 @@ QueryPipelineBuilder StorageLiveView::completeQuery(Pipes pipes)

QueryPipelineBuilder builder;

if (block_context->getSettingsRef().allow_experimental_analyzer)
if (block_context->getSettingsRef().enable_analyzer)
{
auto select_description = buildSelectQueryTreeDescription(select_query_description.select_query, block_context);

@ -7097,7 +7097,7 @@ QueryProcessingStage::Enum MergeTreeData::getQueryProcessingStage(
SelectQueryInfo &) const
{
/// with new analyzer, Planner make decision regarding parallel replicas usage, and so about processing stage on reading
if (!query_context->getSettingsRef().allow_experimental_analyzer)
if (!query_context->getSettingsRef().enable_analyzer)
{
const auto & settings = query_context->getSettingsRef();
if (query_context->canUseParallelReplicasCustomKey())
@ -33,7 +33,7 @@ namespace ErrorCodes
namespace
{

void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & out, bool allow_experimental_analyzer, bool legacy = false)
void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & out, bool enable_analyzer, bool legacy = false)
{
switch (node.type)
{

@ -45,18 +45,18 @@ void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & o
/// If it was created from ASTLiteral, then result_name can be an alias.
/// We need to convert value back to string here.
const auto * column_const = typeid_cast<const ColumnConst *>(node.column.get());
if (column_const && !allow_experimental_analyzer)
if (column_const && !enable_analyzer)
writeString(applyVisitor(FieldVisitorToString(), column_const->getField()), out);
else
writeString(node.result_name, out);
break;
}
case ActionsDAG::ActionType::ALIAS:
appendColumnNameWithoutAlias(*node.children.front(), out, allow_experimental_analyzer, legacy);
appendColumnNameWithoutAlias(*node.children.front(), out, enable_analyzer, legacy);
break;
case ActionsDAG::ActionType::ARRAY_JOIN:
writeCString("arrayJoin(", out);
appendColumnNameWithoutAlias(*node.children.front(), out, allow_experimental_analyzer, legacy);
appendColumnNameWithoutAlias(*node.children.front(), out, enable_analyzer, legacy);
writeChar(')', out);
break;
case ActionsDAG::ActionType::FUNCTION:

@ -75,17 +75,17 @@ void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & o
writeCString(", ", out);
first = false;

appendColumnNameWithoutAlias(*arg, out, allow_experimental_analyzer, legacy);
appendColumnNameWithoutAlias(*arg, out, enable_analyzer, legacy);
}
writeChar(')', out);
}
}
}

String getColumnNameWithoutAlias(const ActionsDAG::Node & node, bool allow_experimental_analyzer, bool legacy = false)
String getColumnNameWithoutAlias(const ActionsDAG::Node & node, bool enable_analyzer, bool legacy = false)
{
WriteBufferFromOwnString out;
appendColumnNameWithoutAlias(node, out, allow_experimental_analyzer, legacy);
appendColumnNameWithoutAlias(node, out, enable_analyzer, legacy);

return std::move(out.str());
}

@ -131,7 +131,7 @@ std::string RPNBuilderTreeNode::getColumnName() const
if (ast_node)
return ast_node->getColumnNameWithoutAlias();
else
return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().allow_experimental_analyzer);
return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().enable_analyzer);
}

std::string RPNBuilderTreeNode::getColumnNameWithModuloLegacy() const

@ -144,7 +144,7 @@ std::string RPNBuilderTreeNode::getColumnNameWithModuloLegacy() const
}
else
{
return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().allow_experimental_analyzer, true /*legacy*/);
return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().enable_analyzer, true /*legacy*/);
}
}
@ -397,7 +397,7 @@ void StorageBuffer::read(
/// TODO: Find a way to support projections for StorageBuffer
if (processed_stage > QueryProcessingStage::FetchColumns)
{
if (local_context->getSettingsRef().allow_experimental_analyzer)
if (local_context->getSettingsRef().enable_analyzer)
{
auto storage = std::make_shared<StorageValues>(
getStorageID(),

@ -833,7 +833,7 @@ void StorageDistributed::read(

const auto & settings = local_context->getSettingsRef();

if (settings.allow_experimental_analyzer)
if (settings.enable_analyzer)
{
StorageID remote_storage_id = StorageID::createEmpty();
if (!remote_table_function_ptr)

@ -1057,7 +1057,7 @@ static std::optional<ActionsDAG> getFilterFromQuery(const ASTPtr & ast, ContextP
QueryPlan plan;
SelectQueryOptions options;
options.only_analyze = true;
if (context->getSettingsRef().allow_experimental_analyzer)
if (context->getSettingsRef().enable_analyzer)
{
InterpreterSelectQueryAnalyzer interpreter(ast, context, options);
plan = std::move(interpreter).extractQueryPlan();

@ -1611,7 +1611,7 @@ ClusterPtr StorageDistributed::skipUnusedShards(
const StorageSnapshotPtr & storage_snapshot,
ContextPtr local_context) const
{
if (local_context->getSettingsRef().allow_experimental_analyzer)
if (local_context->getSettingsRef().enable_analyzer)
return skipUnusedShardsWithAnalyzer(cluster, query_info, storage_snapshot, local_context);

const auto & select = query_info.query->as<ASTSelectQuery &>();

@ -150,7 +150,7 @@ void StorageExecutable::read(
for (auto & input_query : input_queries)
{
QueryPipelineBuilder builder;
if (context->getSettingsRef().allow_experimental_analyzer)
if (context->getSettingsRef().enable_analyzer)
builder = InterpreterSelectQueryAnalyzer(input_query, context, {}).buildQueryPipeline();
else
builder = InterpreterSelectWithUnionQuery(input_query, context, {}).buildQueryPipeline();
@ -590,7 +590,7 @@ std::vector<ReadFromMerge::ChildPlan> ReadFromMerge::createChildrenPlans(SelectQ
auto modified_query_info
= getModifiedQueryInfo(modified_context, table, nested_storage_snaphsot, real_column_names, column_names_as_aliases, aliases);

if (!context->getSettingsRef().allow_experimental_analyzer)
if (!context->getSettingsRef().enable_analyzer)
{
auto storage_columns = storage_metadata_snapshot->getColumns();
auto syntax_result = TreeRewriter(context).analyzeSelect(

@ -1047,13 +1047,13 @@ void ReadFromMerge::addVirtualColumns(
const StorageWithLockAndName & storage_with_lock) const
{
const auto & [database_name, _, storage, table_name] = storage_with_lock;
bool allow_experimental_analyzer = context->getSettingsRef().allow_experimental_analyzer;
bool enable_analyzer = context->getSettingsRef().enable_analyzer;

/// Add virtual columns if we don't already have them.

Block plan_header = child.plan.getCurrentDataStream().header;

if (allow_experimental_analyzer)
if (enable_analyzer)
{
String table_alias = modified_query_info.query_tree->as<QueryNode>()->getJoinTree()->as<TableNode>()->getAlias();

@ -1133,8 +1133,8 @@ QueryPipelineBuilderPtr ReadFromMerge::buildPipeline(
if (!builder->initialized())
return builder;

bool allow_experimental_analyzer = context->getSettingsRef().allow_experimental_analyzer;
if (processed_stage > child.stage || (allow_experimental_analyzer && processed_stage != QueryProcessingStage::FetchColumns))
bool enable_analyzer = context->getSettingsRef().enable_analyzer;
if (processed_stage > child.stage || (enable_analyzer && processed_stage != QueryProcessingStage::FetchColumns))
{
/** Materialization is needed, since from distributed storage the constants come materialized.
* If you do not do this, different types (Const and non-Const) columns will be produced in different threads,

@ -1168,7 +1168,7 @@ ReadFromMerge::ChildPlan ReadFromMerge::createPlanForTable(
modified_select.setFinal();
}

bool allow_experimental_analyzer = modified_context->getSettingsRef().allow_experimental_analyzer;
bool enable_analyzer = modified_context->getSettingsRef().enable_analyzer;

auto storage_stage = storage->getQueryProcessingStage(modified_context,
processed_stage,

@ -1201,13 +1201,13 @@ ReadFromMerge::ChildPlan ReadFromMerge::createPlanForTable(
row_policy_data_opt->addStorageFilter(source_step_with_filter);
}
}
else if (processed_stage > storage_stage || allow_experimental_analyzer)
else if (processed_stage > storage_stage || enable_analyzer)
{
/// Maximum permissible parallelism is streams_num
modified_context->setSetting("max_threads", streams_num);
modified_context->setSetting("max_streams_to_max_threads_ratio", 1);

if (allow_experimental_analyzer)
if (enable_analyzer)
{
/// Converting query to AST because types might be different in the source table.
/// Need to resolve types again.

@ -1479,7 +1479,7 @@ void ReadFromMerge::convertAndFilterSourceStream(
auto storage_sample_block = snapshot->metadata->getSampleBlock();
auto pipe_columns = before_block_header.getNamesAndTypesList();

if (local_context->getSettingsRef().allow_experimental_analyzer)
if (local_context->getSettingsRef().enable_analyzer)
{
for (const auto & alias : aliases)
{

@ -1522,7 +1522,7 @@ void ReadFromMerge::convertAndFilterSourceStream(

ActionsDAG::MatchColumnsMode convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Name;

if (local_context->getSettingsRef().allow_experimental_analyzer
if (local_context->getSettingsRef().enable_analyzer
&& (child.stage != QueryProcessingStage::FetchColumns || dynamic_cast<const StorageDistributed *>(&snapshot->storage) != nullptr))
convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Position;
@ -208,7 +208,7 @@ void StorageMergeTree::read(
const auto & settings = local_context->getSettingsRef();
/// reading step for parallel replicas with new analyzer is built in Planner, so don't do it here
if (local_context->canUseParallelReplicasOnInitiator() && settings.parallel_replicas_for_non_replicated_merge_tree
&& !settings.allow_experimental_analyzer)
&& !settings.enable_analyzer)
{
ClusterProxy::executeQueryWithParallelReplicas(
query_plan, getStorageID(), processed_stage, query_info.query, local_context, query_info.storage_limits);

@ -216,7 +216,7 @@ void StorageMergeTree::read(
}

if (local_context->canUseParallelReplicasCustomKey() && settings.parallel_replicas_for_non_replicated_merge_tree
&& !settings.allow_experimental_analyzer && local_context->getClientInfo().distributed_depth == 0)
&& !settings.enable_analyzer && local_context->getClientInfo().distributed_depth == 0)
{
if (auto cluster = local_context->getClusterForParallelReplicas();
local_context->canUseParallelReplicasCustomKeyForCluster(*cluster))

@ -244,7 +244,7 @@ void StorageMergeTree::read(

const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower()
&& local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree
&& (!local_context->getSettingsRef().allow_experimental_analyzer || query_info.current_table_chosen_for_reading_with_parallel_replicas);
&& (!local_context->getSettingsRef().enable_analyzer || query_info.current_table_chosen_for_reading_with_parallel_replicas);

if (auto plan = reader.read(
column_names,

@ -5480,13 +5480,13 @@ void StorageReplicatedMergeTree::read(
return;
}
/// reading step for parallel replicas with new analyzer is built in Planner, so don't do it here
if (local_context->canUseParallelReplicasOnInitiator() && !settings.allow_experimental_analyzer)
if (local_context->canUseParallelReplicasOnInitiator() && !settings.enable_analyzer)
{
readParallelReplicasImpl(query_plan, column_names, query_info, local_context, processed_stage);
return;
}

if (local_context->canUseParallelReplicasCustomKey() && !settings.allow_experimental_analyzer
if (local_context->canUseParallelReplicasCustomKey() && !settings.enable_analyzer
&& local_context->getClientInfo().distributed_depth == 0)
{
if (auto cluster = local_context->getClusterForParallelReplicas();

@ -5555,7 +5555,7 @@ void StorageReplicatedMergeTree::readLocalImpl(
const size_t num_streams)
{
const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower()
&& (!local_context->getSettingsRef().allow_experimental_analyzer
&& (!local_context->getSettingsRef().enable_analyzer
|| query_info.current_table_chosen_for_reading_with_parallel_replicas);

auto plan = reader.read(
@ -164,7 +164,7 @@ void StorageView::read(

auto options = SelectQueryOptions(QueryProcessingStage::Complete, 0, false, query_info.settings_limit_offset_done);

if (context->getSettingsRef().allow_experimental_analyzer)
if (context->getSettingsRef().enable_analyzer)
{
InterpreterSelectQueryAnalyzer interpreter(current_inner_query, getViewContext(context, storage_snapshot), options, column_names);
interpreter.addStorageLimits(*query_info.storage_limits);

@ -172,7 +172,7 @@ static ExpressionAndSets buildExpressionAndSets(ASTPtr & ast, const NamesAndType
/// with subqueries it's possible that new analyzer will be enabled in ::read method
/// of underlying storage when all other parts of infra are not ready for it
/// (built with old analyzer).
context_copy->setSetting("allow_experimental_analyzer", false);
context_copy->setSetting("enable_analyzer", false);
auto syntax_analyzer_result = TreeRewriter(context_copy).analyze(ast, columns);
ExpressionAnalyzer analyzer(ast, syntax_analyzer_result, context_copy);
auto dag = analyzer.getActionsDAG(false);

@ -1197,7 +1197,7 @@ StorageWindowView::StorageWindowView(
, fire_signal_timeout_s(context_->getSettingsRef().wait_for_window_view_fire_signal_timeout.totalSeconds())
, clean_interval_usec(context_->getSettingsRef().window_view_clean_interval.totalMicroseconds())
{
if (context_->getSettingsRef().allow_experimental_analyzer)
if (context_->getSettingsRef().enable_analyzer)
disabled_due_to_analyzer = true;

if (mode <= LoadingStrictnessLevel::CREATE)

@ -1753,9 +1753,9 @@ StoragePtr StorageWindowView::getTargetTable() const

void StorageWindowView::throwIfWindowViewIsDisabled(ContextPtr local_context) const
{
if (disabled_due_to_analyzer || (local_context && local_context->getSettingsRef().allow_experimental_analyzer))
if (disabled_due_to_analyzer || (local_context && local_context->getSettingsRef().enable_analyzer))
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Experimental WINDOW VIEW feature is not supported "
"in the current infrastructure for query analysis (the setting 'allow_experimental_analyzer')");
"in the current infrastructure for query analysis (the setting 'enable_analyzer')");
}

void registerStorageWindowView(StorageFactory & factory)
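A sketch of the guard above, with a hypothetical view name: once the new analyzer is enabled, touching a WINDOW VIEW should throw UNSUPPORTED_METHOD.

SET enable_analyzer = 1;
SELECT * FROM my_window_view; -- hypothetical WINDOW VIEW; expected: UNSUPPORTED_METHOD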
@ -50,7 +50,7 @@ ColumnsDescription TableFunctionView::getActualTableStructure(ContextPtr context

Block sample_block;

if (context->getSettingsRef().allow_experimental_analyzer)
if (context->getSettingsRef().enable_analyzer)
sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.children[0], context);
else
sample_block = InterpreterSelectWithUnionQuery::getSampleBlock(create.children[0], context);

@ -114,7 +114,7 @@ bool TableFunctionViewIfPermitted::isPermitted(const ContextPtr & context, const

try
{
if (context->getSettingsRef().allow_experimental_analyzer)
if (context->getSettingsRef().enable_analyzer)
{
sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.children[0], context);
}

@ -1,7 +1,7 @@
<clickhouse>
<profiles>
<default>
<allow_experimental_analyzer>0</allow_experimental_analyzer>
<enable_analyzer>0</enable_analyzer>
</default>
</profiles>
</clickhouse>

@ -1,7 +1,7 @@
<clickhouse>
<profiles>
<default>
<allow_experimental_analyzer>0</allow_experimental_analyzer>
<enable_analyzer>0</enable_analyzer>
</default>
</profiles>
</clickhouse>
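With a profile like the ones above applied, the effective value can be checked from SQL; a minimal sketch (getSetting is used the same way in the tests further down):

SELECT getSetting('enable_analyzer'); -- expected: 0 under this profile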
@ -4484,7 +4484,7 @@ class ClickHouseInstance:

use_old_analyzer = os.environ.get("CLICKHOUSE_USE_OLD_ANALYZER") is not None
# If specific version was used there can be no
# allow_experimental_analyzer setting, so do this only if it was
# enable_analyzer setting, so do this only if it was
# explicitly requested.
if self.tag:
use_old_analyzer = False

@ -51,7 +51,7 @@ def test_two_new_versions(start_cluster):
assert (
current.query(
"""
SELECT hostname() AS h, getSetting('allow_experimental_analyzer')
SELECT hostname() AS h, getSetting('enable_analyzer')
FROM clusterAllReplicas('test_cluster_mixed', system.one)
ORDER BY h;"""
)

@ -62,7 +62,7 @@ ORDER BY h;"""
analyzer_enabled = current.query(
f"""
SELECT
DISTINCT Settings['allow_experimental_analyzer']
DISTINCT Settings['enable_analyzer']
FROM clusterAllReplicas('test_cluster_mixed', system.query_log)
WHERE initial_query_id = '{query_id}';"""
)

@ -81,7 +81,7 @@ WHERE initial_query_id = '{query_id}';"""
assert (
backward.query(
"""
SELECT hostname() AS h, getSetting('allow_experimental_analyzer')
SELECT hostname() AS h, getSetting('enable_analyzer')
FROM clusterAllReplicas('test_cluster_mixed', system.one)
ORDER BY h;"""
)

@ -92,7 +92,7 @@ ORDER BY h;"""
analyzer_enabled = backward.query(
f"""
SELECT
DISTINCT Settings['allow_experimental_analyzer']
DISTINCT Settings['enable_analyzer']
FROM clusterAllReplicas('test_cluster_mixed', system.query_log)
WHERE initial_query_id = '{query_id}';"""
)

@ -89,7 +89,7 @@ def test_distributed_type_object(started_cluster):
assert (
TSV(
node1.query(
"SELECT id, data.k1, data.k2.k3, data.k2.k4, data.k5 FROM dist_table ORDER BY id SETTINGS allow_experimental_analyzer = 0"
"SELECT id, data.k1, data.k2.k3, data.k2.k4, data.k5 FROM dist_table ORDER BY id SETTINGS enable_analyzer = 0"
)
)
== expected
@ -1,7 +1,7 @@
<clickhouse>
<profiles>
<default>
<allow_experimental_analyzer>1</allow_experimental_analyzer>
<enable_analyzer>1</enable_analyzer>
<allow_experimental_parallel_reading_from_replicas>1</allow_experimental_parallel_reading_from_replicas>
<cluster_for_parallel_replicas>default</cluster_for_parallel_replicas>
<max_parallel_replicas>100</max_parallel_replicas>

@ -459,7 +459,7 @@ def test_show_profiles():

query_possible_response = [
"CREATE SETTINGS PROFILE `default`\n",
"CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n",
"CREATE SETTINGS PROFILE `default` SETTINGS enable_analyzer = true\n",
]
assert (
instance.query("SHOW CREATE SETTINGS PROFILE default")

@ -470,7 +470,7 @@ def test_show_profiles():
"CREATE SETTINGS PROFILE `default`\n"
"CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n"
"CREATE SETTINGS PROFILE `xyz`\n",
"CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n"
"CREATE SETTINGS PROFILE `default` SETTINGS enable_analyzer = true\n"
"CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n"
"CREATE SETTINGS PROFILE `xyz`\n",
]

@ -482,7 +482,7 @@ def test_show_profiles():
"CREATE SETTINGS PROFILE `xyz`\n"
)
expected_access_analyzer = (
"CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n"
"CREATE SETTINGS PROFILE `default` SETTINGS enable_analyzer = true\n"
"CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n"
"CREATE SETTINGS PROFILE `xyz`\n"
)

@ -15,5 +15,5 @@

<query>SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null;</query>
<query>SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null SETTINGS
allow_experimental_analyzer=1</query>
</test>
enable_analyzer=1</query>
</test>

@ -3,6 +3,6 @@
<query>select uniq(number) from (select number from numbers(1000000) group by number)</query>

<!--For new analyzer-->
<query>select uniq(number) from (select DISTINCT number from numbers(1000000)) SETTINGS allow_experimental_analyzer=1</query>
<query>select uniq(number) from (select number from numbers(1000000) group by number) SETTINGS allow_experimental_analyzer=1</query>
<query>select uniq(number) from (select DISTINCT number from numbers(1000000)) SETTINGS enable_analyzer=1</query>
<query>select uniq(number) from (select number from numbers(1000000) group by number) SETTINGS enable_analyzer=1</query>
</test>

@ -28,8 +28,8 @@ RENAME TABLE set2 TO set;
SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set;

create table tab (x String) engine = MergeTree order by x as select 'Hello';
SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings allow_experimental_analyzer=0;
SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings allow_experimental_analyzer=1;
SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings enable_analyzer=0;
SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings enable_analyzer=1;
DROP TABLE tab;

DROP TABLE set;

@ -3,5 +3,6 @@ SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN
SET join_algorithm = 'auto';
SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN (SELECT number AS y FROM system.numbers LIMIT 5) js2;

SET allow_experimental_analyzer = 1;
-- Just to test that we preserved the old setting name, we use `allow_experimental_analyzer` instead of `enable_analyzer` here.
SET enable_analyzer = 1;
SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN (SELECT number AS y FROM system.numbers LIMIT 5) js2;
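Assuming the old name is kept as an alias of enable_analyzer (which the comment above relies on), both spellings should toggle the same underlying setting; a minimal sketch:

SET allow_experimental_analyzer = 1;  -- old name, kept as an alias
SELECT getSetting('enable_analyzer'); -- expected: 1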
@ -1,6 +1,6 @@
-- Tags: shard

set allow_experimental_analyzer = 1;
set enable_analyzer = 1;
set enable_positional_arguments = 0;

select 40 as z from (select * from system.numbers limit 3) group by z;

@ -4,10 +4,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1 AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1234567890123 AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSON";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toFloat32(1.23) AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSONCompact";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1 AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1234567890123 AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSON";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toFloat32(1.23) AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSONCompact";

${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDate('2010-01-01') AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDateTime('2010-01-01 01:02:03', 'UTC') AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSON";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1.1 AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSONCompact";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDate('2010-01-01') AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDateTime('2010-01-01 01:02:03', 'UTC') AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSON";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1.1 AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSONCompact";

@ -1,8 +1,8 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

-- https://github.com/ClickHouse/ClickHouse/issues/45804

CREATE TABLE myRMT(
CREATE TABLE myRMT(
key Int64,
someCol String,
ver DateTime

@ -1,6 +1,6 @@
SET any_join_distinct_right_table_keys = 1;
SET joined_subquery_requires_alias = 0;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

select x, y from (select 1 as x, 2 as y, x, y);
select x, y from (select 1 as x, 1 as y, x, y);

@ -2,7 +2,7 @@

SET output_format_write_statistics = 0;
SET extremes = 1;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SET output_format_json_quote_64bit_integers = 1;
SELECT toInt64(0) as i0, toUInt64(0) as u0, toInt64(9223372036854775807) as ip, toInt64(-9223372036854775808) as in, toUInt64(18446744073709551615) as up, [toInt64(0)] as arr, (toUInt64(0), toUInt64(0)) as tuple GROUP BY i0, u0, ip, in, up, arr, tuple WITH TOTALS FORMAT JSON;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;
SET join_use_nulls = 0;
SET any_join_distinct_right_table_keys = 1;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

with pow(2,2) as four select pow(four, 2), 2 as two, pow(two, 2);
select `pow(four, 2)`, `pow(two, 2)` from (with pow(2,2) as four select pow(four, 2), 2 as two, pow(two, 2));
@ -390,7 +390,7 @@ ANY LEFT JOIN
) USING (id)
WHERE id = 1
2000-01-01 1 test string 1 1 2000-01-01 test string 1 1
------- allow_experimental_analyzer=1 -------
------- enable_analyzer=1 -------
1 2000-01-01 test string 1 1 2000-01-01 test string 1 1
SELECT
id,

@ -454,7 +454,7 @@ FROM
)
WHERE id = 1
2000-01-01 1 test string 1 1
------- allow_experimental_analyzer=1 -------
------- enable_analyzer=1 -------
1 2000-01-01 test string 1 1 2000-01-01 test string 1 1
SELECT
date,

@ -484,7 +484,7 @@ ANY LEFT JOIN
) AS b USING (id)
WHERE b.id = 1
2000-01-01 1 test string 1 1 2000-01-01 test string 1 1
------- allow_experimental_analyzer=1 -------
------- enable_analyzer=1 -------
1 2000-01-01 test string 1 1 2000-01-01 test string 1 1
SELECT
id,

@ -510,7 +510,7 @@ ANY LEFT JOIN
) AS b USING (date, id)
WHERE b.date = toDate(\'2000-01-01\')
1 2000-01-01 test string 1 1
------- allow_experimental_analyzer=1 -------
------- enable_analyzer=1 -------
2000-01-01 1 test string 1 1
SELECT
date,

@ -593,7 +593,7 @@ SEMI LEFT JOIN
) AS r USING (id)
WHERE r.id = 1
2000-01-01 1 test string 1 1 2000-01-01 test string 1 1
------- allow_experimental_analyzer=1 -------
------- enable_analyzer=1 -------
1 2000-01-01 test string 1 1 2000-01-01 test string 1 1
SELECT value + t1.value AS expr
FROM

@ -110,9 +110,9 @@ SELECT * FROM (SELECT * FROM test_00597 UNION ALL SELECT * FROM test_00597) WHER

-- Optimize predicate expression with join query
EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1;
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS allow_experimental_analyzer=0;
SELECT '------- allow_experimental_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS allow_experimental_analyzer=1;
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS enable_analyzer=0;
SELECT '------- enable_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS enable_analyzer=1;

EXPLAIN SYNTAX SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1;
SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1;

@ -123,30 +123,30 @@ SELECT b.value FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 AS b USING

-- Optimize predicate expression with join and nested subquery
EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1;
SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS allow_experimental_analyzer=0;
SELECT '------- allow_experimental_analyzer=1 -------';
SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS allow_experimental_analyzer=1;
SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS enable_analyzer=0;
SELECT '------- enable_analyzer=1 -------';
SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS enable_analyzer=1;

-- Optimize predicate expression with join query and qualified
EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1;
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS allow_experimental_analyzer=0;
SELECT '------- allow_experimental_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS allow_experimental_analyzer=1;
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS enable_analyzer=0;
SELECT '------- enable_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS enable_analyzer=1;

-- Compatibility test
EXPLAIN SYNTAX SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01');
SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS allow_experimental_analyzer=0;
SELECT '------- allow_experimental_analyzer=1 -------';
SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS allow_experimental_analyzer=1;
SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS enable_analyzer=0;
SELECT '------- enable_analyzer=1 -------';
SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS enable_analyzer=1;

EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1;
SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1;

-- Explain with join subquery
EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1;
SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS allow_experimental_analyzer=0;
SELECT '------- allow_experimental_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS allow_experimental_analyzer=1;
SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS enable_analyzer=0;
SELECT '------- enable_analyzer=1 -------';
SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS enable_analyzer=1;

-- issue 20497
EXPLAIN SYNTAX SELECT value + t1.value AS expr FROM (SELECT t0.value, t1.value FROM test_00597 AS t0 FULL JOIN test_00597 AS t1 USING date) WHERE expr < 3;
@ -17,7 +17,7 @@ QUERY id: 0
LIST id: 5, nodes: 2
COLUMN id: 6, column_name: g, result_type: String, source_id: 3
CONSTANT id: 7, constant_value: Tuple_(\'5\', \'6\'), constant_value_type: Tuple(String, String)
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
2
2
QUERY id: 0

@ -42,4 +42,4 @@ QUERY id: 0
LIST id: 11, nodes: 2
COLUMN id: 8, column_name: g, result_type: String, source_id: 3
CONSTANT id: 12, constant_value: \'6\', constant_value_type: String
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1

@ -12,13 +12,13 @@ SELECT count() FROM regression_for_in_operator_view WHERE g IN ('5','6');

SET optimize_min_equality_disjunction_chain_length = 1;
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6';
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1;
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1;
EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1;

SET optimize_min_equality_disjunction_chain_length = 3;
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6';
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1;
SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1;
EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1;

DROP TABLE regression_for_in_operator_view;
DROP TABLE regression_for_in_operator;

@ -1,5 +1,5 @@
SET joined_subquery_requires_alias = 0;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

drop table if exists tab1;
drop table if exists tab2;

@ -27,7 +27,7 @@ SELECT a > 0, b > 0, g > 0 FROM decimal ORDER BY a DESC;
SELECT a, g > toInt8(0), g > toInt16(0), g > toInt32(0), g > toInt64(0) FROM decimal ORDER BY a;
SELECT a, g > toUInt8(0), g > toUInt16(0), g > toUInt32(0), g > toUInt64(0) FROM decimal ORDER BY a;
SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42);
SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42) SETTINGS allow_experimental_analyzer = 1;
SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42) SETTINGS enable_analyzer = 1;
SELECT a, b, g FROM decimal WHERE a > 0 AND a <= 42 AND b <= 42 AND g <= 42;

SELECT d, e, f from decimal WHERE d > 0 AND d < 1 AND e > 0 AND e < 1 AND f > 0 AND f < 1;

@ -1,6 +1,6 @@
-- Tags: no-parallel

SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

DROP TABLE IF EXISTS one;
CREATE TABLE one(dummy UInt8) ENGINE = Memory;

@ -49,7 +49,7 @@ QUERY id: 0
LIST id: 12, nodes: 2
COLUMN id: 13, column_name: s, result_type: UInt64, source_id: 3
CONSTANT id: 14, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
1 21
1 22
1 23

@ -99,7 +99,7 @@ QUERY id: 0
LIST id: 14, nodes: 2
COLUMN id: 15, column_name: s, result_type: UInt64, source_id: 3
CONSTANT id: 16, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
1 1 21 1 1 1
1 1 22 0 1 1
1 1 23 0 0 1

@ -152,7 +152,7 @@ QUERY id: 0
CONSTANT id: 16, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.bug
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
21 1
22 1
23 1

@ -185,7 +185,7 @@ QUERY id: 0
CONSTANT id: 6, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.bug
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
1 21
1 22
1 23

@ -237,7 +237,7 @@ QUERY id: 0
LIST id: 12, nodes: 2
COLUMN id: 13, column_name: s, result_type: UInt64, source_id: 3
CONSTANT id: 14, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
1 21
1 22
1 23

@ -287,7 +287,7 @@ QUERY id: 0
LIST id: 14, nodes: 2
COLUMN id: 15, column_name: s, result_type: UInt64, source_id: 3
CONSTANT id: 16, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
1 1 21 1 1 1
1 1 22 0 1 1
1 1 23 0 0 1

@ -348,7 +348,7 @@ QUERY id: 0
CONSTANT id: 21, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.bug
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
21 1
22 1
23 1

@ -381,7 +381,7 @@ QUERY id: 0
CONSTANT id: 6, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.bug
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
21 1
22 1
23 1

@ -414,4 +414,4 @@ QUERY id: 0
CONSTANT id: 6, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8)
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.bug
SETTINGS allow_experimental_analyzer=1
SETTINGS enable_analyzer=1
@ -7,43 +7,43 @@ set optimize_min_equality_disjunction_chain_length = 2;
|
||||
select * from bug;
|
||||
|
||||
select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23);
|
||||
select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;
|
||||
explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
|
||||
select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;
|
||||
explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;
|
||||
|
||||
select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23);
|
||||
select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
|
||||
explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
|
||||
select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;
|
||||
explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;
|
||||
|
||||
select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug;
|
||||
select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
|
||||
explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
|
||||
select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
|
||||
explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
|
||||
|
||||
select s, (s=21 or s=22 or s=23) from bug;
|
||||
select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
|
||||
explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
|
||||
select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
|
||||
explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
|
||||
|
||||
set optimize_min_equality_disjunction_chain_length = 3;
|
||||
|
||||
select * from bug;
|
||||
|
||||
select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23);
|
||||
select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;
|
||||
explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
|
||||
select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;
|
||||
explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;
|
||||
|
||||
select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23);
|
||||
select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
|
||||
explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;;
|
||||
select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;
|
||||
explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;;
|
||||
|
||||
select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug;
|
||||
select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
|
||||
explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
|
||||
select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
|
||||
explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
|
||||
|
||||
select s, (s=21 or s=22 or s=23) from bug;
|
||||
select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
|
||||
explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;;
|
||||
select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
|
||||
explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;;
|
||||
|
||||
select s, (s=21 or 22=s or 23=s) from bug;
|
||||
select s, (s=21 or 22=s or 23=s) from bug SETTINGS allow_experimental_analyzer = 1;;
|
||||
explain query tree select s, (s=21 or 22=s or 23=s) from bug SETTINGS allow_experimental_analyzer = 1;;
|
||||
select s, (s=21 or 22=s or 23=s) from bug SETTINGS enable_analyzer = 1;;
|
||||
explain query tree select s, (s=21 or 22=s or 23=s) from bug SETTINGS enable_analyzer = 1;;
|
||||
|
||||
DROP TABLE bug;
|
||||
|
@ -1,3 +1,3 @@
|
||||
SET allow_experimental_analyzer=0;
|
||||
SET enable_analyzer=0;
|
||||
select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;
|
||||
select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;
|
||||
|
@ -1,3 +1,3 @@
|
||||
SET allow_experimental_analyzer=1;
|
||||
SET enable_analyzer=1;
|
||||
select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;
|
||||
select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;
|
||||
|
@ -1,4 +1,4 @@
|
||||
set allow_experimental_analyzer = 1;
|
||||
set enable_analyzer = 1;
|
||||
set joined_subquery_requires_alias = 0;
|
||||
|
||||
select * from (select dummy as val from system.one) any left join (select dummy as val from system.one) using val;
|
||||
|
@ -1,4 +1,4 @@
|
||||
SET allow_experimental_analyzer = 1;
|
||||
SET enable_analyzer = 1;
|
||||
|
||||
{% for join_algorithm in ['partial_merge', 'full_sorting_merge', 'grace_hash'] -%}

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

select s.a as a, s.a + 1 as b from (select 10 as a) s;
select s.a + 1 as a, s.a as b from (select 10 as a) s;

@ -1,5 +1,5 @@
SET output_format_pretty_color = 1;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

DROP TABLE IF EXISTS table1;
DROP TABLE IF EXISTS table2;

@ -1,6 +1,6 @@
SET any_join_distinct_right_table_keys = 1;
SET joined_subquery_requires_alias = 0;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT * FROM (SELECT 1 AS a, 'x' AS b) join (SELECT 1 as a, 'y' as b) using a;
SELECT * FROM (SELECT 1 AS a, 'x' AS b) left join (SELECT 1 as a, 'y' as b) using a;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

DROP TABLE IF EXISTS table1;
DROP TABLE IF EXISTS table2;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

DROP TABLE IF EXISTS table1;
DROP TABLE IF EXISTS table2;

@ -13,7 +13,7 @@ SELECT joinGet('kv_overwrite', 'v', toUInt32(1));
CREATE TABLE t2 (k UInt32, v UInt32) ENGINE = Memory;
INSERT INTO t2 VALUES (1, 2), (1, 3);

SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT v FROM (SELECT 1 as k) t1 ANY INNER JOIN t2 USING (k) SETTINGS join_any_take_last_row = 0;
SELECT v FROM (SELECT 1 as k) t1 ANY INNER JOIN t2 USING (k) SETTINGS join_any_take_last_row = 1;
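The two SETTINGS variants above exercise join_any_take_last_row against the t2 rows inserted in this hunk. Assuming the usual semantics of the setting (stated here for orientation, not asserted by the diff), ANY JOIN keeps the first matching right-side row at 0 and the last one at 1:

-- illustrative expectation, given INSERT INTO t2 VALUES (1, 2), (1, 3)
SELECT v FROM (SELECT 1 as k) t1 ANY INNER JOIN t2 USING (k) SETTINGS join_any_take_last_row = 0; -- expected 2
SELECT v FROM (SELECT 1 as k) t1 ANY INNER JOIN t2 USING (k) SETTINGS join_any_take_last_row = 1; -- expected 3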

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, toDate('2000-01-02') AS y, x > y ? x : y AS z;
SELECT materialize(toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul')) AS x, toDate('2000-01-02') AS y, x > y ? x : y AS z;
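Both queries rely on the conditional operator promoting its DateTime and Date branches to a common supertype. A quick way to inspect the promoted type (a sketch; it assumes the supertype here resolves to DateTime, the usual ClickHouse rule):

-- illustrative type check for the ternary above
SELECT toTypeName(x > y ? x : y)
FROM (SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, toDate('2000-01-02') AS y);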

@ -1,5 +1,5 @@
SET any_join_distinct_right_table_keys = 1;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

DROP TABLE IF EXISTS t1_00848;
DROP TABLE IF EXISTS t2_00848;

@ -12,7 +12,7 @@ CREATE TABLE t2 (a UInt32, b Nullable(Int32)) ENGINE = Memory;
CREATE TABLE t3 (a UInt32, b Nullable(Int32)) ENGINE = Memory;
CREATE TABLE t4 (a UInt32, b Nullable(Int32)) ENGINE = Memory;

SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;

--- EXPLAIN SYNTAX (old AST based optimization)
SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (

@ -62,56 +62,56 @@ SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explai

--- EXPLAIN QUERY TREE
SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.b = t2.b) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.b = t2.b) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t2.a = t1.a AND t2.a = t3.a AND t2.a = t4.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t2.a = t1.a AND t2.a = t3.a AND t2.a = t4.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t3.a = t1.a AND t3.a = t2.a AND t3.a = t4.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t3.a = t1.a AND t3.a = t2.a AND t3.a = t4.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t4.a = t1.a AND t4.a = t2.a AND t4.a = t3.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t4.a = t1.a AND t4.a = t2.a AND t4.a = t3.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1 CROSS JOIN t2 CROSS JOIN t3 CROSS JOIN t4) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1 CROSS JOIN t2 CROSS JOIN t3 CROSS JOIN t4) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 CROSS JOIN t3) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 CROSS JOIN t3) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 USING a CROSS JOIN t3) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 USING a CROSS JOIN t3) SETTINGS enable_analyzer = 1;

SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM (
EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 ON t1.a = t2.a CROSS JOIN t3) SETTINGS allow_experimental_analyzer = 1;
EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 ON t1.a = t2.a CROSS JOIN t3) SETTINGS enable_analyzer = 1;

INSERT INTO t1 values (1,1), (2,2), (3,3), (4,4);
INSERT INTO t2 values (1,1), (1, Null);
INSERT INTO t3 values (1,1), (1, Null);
INSERT INTO t4 values (1,1), (1, Null);

SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT 'SELECT * FROM t1, t2';
SELECT * FROM t1, t2

@ -1,5 +1,5 @@
SET joined_subquery_requires_alias = 0;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT ax, c FROM (SELECT [1,2] ax, 0 c) ARRAY JOIN ax JOIN (SELECT 0 c) USING (c);
SELECT ax, c FROM (SELECT [3,4] ax, 0 c) JOIN (SELECT 0 c) USING (c) ARRAY JOIN ax;

@ -1,4 +1,4 @@
set allow_experimental_analyzer = 1;
set enable_analyzer = 1;
set distributed_product_mode = 'local';

drop table if exists shard1;

@ -2,7 +2,7 @@ SELECT flatten(arrayJoin([[[1, 2, 3], [4, 5]], [[6], [7, 8]]]));
SELECT arrayFlatten(arrayJoin([[[[]], [[1], [], [2, 3]]], [[[4]]]]));
SELECT flatten(arrayMap(x -> arrayMap(y -> arrayMap(z -> range(x), range(x)), range(x)), range(number))) FROM numbers(6);
SELECT flatten(arrayMap(x -> arrayMap(y -> arrayMap(z -> range(z), range(y)), range(x)), range(number))) FROM numbers(6);
SELECT flatten(arrayMap(x -> arrayMap(x -> arrayMap(x -> range(x), range(x)), range(x)), range(number))) FROM numbers(6) SETTINGS allow_experimental_analyzer=1;
SELECT flatten(arrayMap(x -> arrayMap(x -> arrayMap(x -> range(x), range(x)), range(x)), range(number))) FROM numbers(6) SETTINGS enable_analyzer=1;
SELECT arrayFlatten([[[1, 2, 3], [4, 5]], [[6], [7, 8]]]);
SELECT flatten([[[]]]);
SELECT arrayFlatten([]);
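As a reminder of the semantics under test here, arrayFlatten (alias flatten) collapses all levels of array nesting. A small illustrative pair; the expected results are stated by this note, not taken from the diff:

SELECT arrayFlatten([[[1, 2, 3], [4, 5]], [[6], [7, 8]]]); -- expected [1,2,3,4,5,6,7,8]
SELECT flatten([[[]]]); -- expected [], since flattening empty nesting yields an empty array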

@ -1,4 +1,4 @@
SET optimize_read_in_order = 1, query_plan_read_in_order = 1, allow_experimental_analyzer = 0;
SET optimize_read_in_order = 1, query_plan_read_in_order = 1, enable_analyzer = 0;

drop table if exists tab;
drop table if exists tab2;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;
SET joined_subquery_requires_alias = 1;

SELECT * FROM (SELECT 1 as A, 2 as B) X

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

SELECT 11 AS n GROUP BY n WITH TOTALS;
SELECT 12 AS n GROUP BY n WITH ROLLUP;

@ -1,5 +1,5 @@
SET output_format_pretty_color=1;
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;

select * from system.one cross join system.one;
select * from system.one cross join system.one r;

@ -1,5 +1,5 @@
SET send_logs_level = 'fatal';
SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;
SET allow_experimental_window_view = 1;
DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier};
set allow_deprecated_database_ordinary=1;

@ -1,7 +1,7 @@
-- Tags: no-parallel

SET send_logs_level = 'fatal';
SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;
SET allow_experimental_window_view = 1;
DROP DATABASE IF EXISTS test_01048;
set allow_deprecated_database_ordinary=1;

@ -1,5 +1,5 @@
-- { echoOn }
SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;
{% for join_algorithm in ['default', 'partial_merge', 'parallel_hash'] -%}
SET join_algorithm = '{{ join_algorithm }}';
SET join_use_nulls = 0;
@ -19,17 +19,17 @@ str_r LowCardinality(String)
str_l LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l
--

@ -49,17 +49,17 @@ str_r LowCardinality(String)
str_l LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (x) ORDER BY x, r.lc, l.lc;
String String str str LowCardinality(String) LowCardinality(String) str str
String String str_r str_r LowCardinality(String) LowCardinality(String)
String String str_r str_r LowCardinality(String) LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (lc) ORDER BY x, r.lc, l.lc;
String String str str LowCardinality(String) LowCardinality(String) str str
String String str_r str_r LowCardinality(String) LowCardinality(String)
String String str_r str_r LowCardinality(String) LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (x) ORDER BY x, r.lc, l.lc;
String String str str LowCardinality(String) LowCardinality(String) str str
String String str_r str_r LowCardinality(String) LowCardinality(String)
String String str_r str_r LowCardinality(String) LowCardinality(String)
String String LowCardinality(String) LowCardinality(String) str_l str_l
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (lc) ORDER BY x, r.lc, l.lc;
String String str str LowCardinality(String) LowCardinality(String) str str
String String str_r str_r LowCardinality(String) LowCardinality(String)
String String str_r str_r LowCardinality(String) LowCardinality(String)
String String LowCardinality(String) LowCardinality(String) str_l str_l
--

@ -79,17 +79,17 @@ str_r String
str_l String
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str String String str str
LowCardinality(String) LowCardinality(String) str_r str_r String String
LowCardinality(String) LowCardinality(String) str_r str_r String String
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str String String str str
LowCardinality(String) LowCardinality(String) str_r str_r String String
LowCardinality(String) LowCardinality(String) str_r str_r String String
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str String String str str
LowCardinality(String) LowCardinality(String) str_r str_r String String
LowCardinality(String) LowCardinality(String) str_r str_r String String
LowCardinality(String) LowCardinality(String) String String str_l str_l
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str String String str str
LowCardinality(String) LowCardinality(String) str_r str_r String String
LowCardinality(String) LowCardinality(String) str_r str_r String String
LowCardinality(String) LowCardinality(String) String String str_l str_l
--

@ -109,17 +109,17 @@ str_r LowCardinality(String)
str_l LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc;
Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc;
Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc;
Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
Nullable(String) Nullable(String) \N \N LowCardinality(String) LowCardinality(String) str_l str_l
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc;
Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
Nullable(String) Nullable(String) \N \N LowCardinality(String) LowCardinality(String) str_l str_l
--

@ -313,7 +313,7 @@ SELECT r.lc, materialize(r.lc), toTypeName(r.lc), toTypeName(materialize(r.lc))
str str LowCardinality(Nullable(String)) LowCardinality(Nullable(String))
str_r str_r LowCardinality(Nullable(String)) LowCardinality(Nullable(String))
\N \N LowCardinality(Nullable(String)) LowCardinality(Nullable(String))
SET allow_experimental_analyzer = 1;
SET enable_analyzer = 1;
{% for join_algorithm in ['default', 'partial_merge', 'parallel_hash'] -%}
SET join_algorithm = '{{ join_algorithm }}';
SET join_use_nulls = 0;
@ -333,17 +333,17 @@ str_r LowCardinality(String)
str_l LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String)
LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l
--

@ -363,17 +363,17 @@ str_r String
str_l String
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (x) ORDER BY x, r.lc, l.lc;
String String str str LowCardinality(String) LowCardinality(String) str str
String String str_r str_r LowCardinality(String) LowCardinality(String)
String String str_r str_r LowCardinality(String) LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (lc) ORDER BY x, r.lc, l.lc;
String String str str String String str str
String String str_r str_r String String
String String str_r str_r String String
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (x) ORDER BY x, r.lc, l.lc;
String String str str LowCardinality(String) LowCardinality(String) str str
String String str_r str_r LowCardinality(String) LowCardinality(String)
String String str_r str_r LowCardinality(String) LowCardinality(String)
String String LowCardinality(String) LowCardinality(String) str_l str_l
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (lc) ORDER BY x, r.lc, l.lc;
String String str str String String str str
String String str_r str_r String String
String String str_r str_r String String
String String String String str_l str_l
--

@ -393,17 +393,17 @@ str_r String
str_l String
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str String String str str
LowCardinality(String) LowCardinality(String) str_r str_r String String
LowCardinality(String) LowCardinality(String) str_r str_r String String
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc;
String String str str String String str str
String String str_r str_r String String
String String str_r str_r String String
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc;
LowCardinality(String) LowCardinality(String) str str String String str str
LowCardinality(String) LowCardinality(String) str_r str_r String String
LowCardinality(String) LowCardinality(String) str_r str_r String String
LowCardinality(String) LowCardinality(String) String String str_l str_l
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc;
String String str str String String str str
String String str_r str_r String String
String String str_r str_r String String
String String String String str_l str_l
--

@ -423,13 +423,13 @@ str_r Nullable(String)
str_l Nullable(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc;
Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc;
Nullable(String) Nullable(String) str str Nullable(String) Nullable(String) str str
Nullable(String) Nullable(String) str_r str_r Nullable(String) Nullable(String) \N \N
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc;
Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String)
Nullable(String) Nullable(String) \N \N LowCardinality(String) LowCardinality(String) str_l str_l
SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc;
Nullable(String) Nullable(String) str str Nullable(String) Nullable(String) str str

@ -23,9 +23,9 @@ INSERT INTO nl VALUES (0, 'str'), (2, 'str_l');
INSERT INTO l_lc VALUES (0, 'str'), (2, 'str_l');

-- { echoOn }
{% for allow_experimental_analyzer in [0, 1] -%}
{% for enable_analyzer in [0, 1] -%}

SET allow_experimental_analyzer = {{ allow_experimental_analyzer }};
SET enable_analyzer = {{ enable_analyzer }};

{% for join_algorithm in ['default', 'partial_merge', 'parallel_hash'] -%}
SET join_algorithm = '{{ join_algorithm }}';
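These join tests are .sql.j2 templates that the test runner expands before execution, so the renamed loop variable turns into one concrete block per combination of analyzer mode and join algorithm. A sketch of a single expanded iteration (illustrative expansion, not part of the diff):

-- one iteration of the nested loops above
SET enable_analyzer = 1;
SET join_algorithm = 'partial_merge';
-- ...the templated join queries then run under this combination...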

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;
SET allow_experimental_window_view = 1;

DROP TABLE IF EXISTS mt;

@ -1,4 +1,4 @@
SET allow_experimental_analyzer = 0;
SET enable_analyzer = 0;
SET allow_experimental_window_view = 1;

DROP TABLE IF EXISTS mt;

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
    "--allow_experimental_analyzer=0"
    "--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
    "--allow_experimental_analyzer=0"
    "--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
    "--allow_experimental_analyzer=0"
    "--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
    "--allow_experimental_analyzer=0"
    "--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -20,7 +20,7 @@ with client(name="client1>", log=log) as client1, client(
    client1.expect(prompt)
    client2.expect(prompt)

    client1.send("SET allow_experimental_analyzer = 0")
    client1.send("SET enable_analyzer = 0")
    client1.expect(prompt)
    client1.send("SET allow_experimental_window_view = 1")
    client1.expect(prompt)

@ -28,7 +28,7 @@ with client(name="client1>", log=log) as client1, client(
    client1.expect(prompt)
    client2.send("SET allow_experimental_window_view = 1")
    client2.expect(prompt)
    client2.send("SET allow_experimental_analyzer = 0")
    client2.send("SET enable_analyzer = 0")
    client2.expect(prompt)

    client1.send("CREATE DATABASE IF NOT EXISTS 01056_window_view_proc_hop_watch")

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
    "--allow_experimental_analyzer=0"
    "--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

opts=(
    "--allow_experimental_analyzer=0"
    "--enable_analyzer=0"
)

$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF

Some files were not shown because too many files have changed in this diff