From 8b17616bff5c886c0ae8efee29950a7946538524 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Fri, 19 Jan 2024 05:12:25 +0100
Subject: [PATCH 01/13] Disable `optimize_trivial_insert_select` by default

---
 src/Core/Settings.h               | 10 +++++-----
 src/Core/SettingsChangesHistory.h |  3 ++-
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 292e945a29c..bfc3b3e4e43 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -341,7 +341,7 @@ class IColumn;
 M(UInt64, http_max_field_name_size, 128 * 1024, "Maximum length of field name in HTTP header", 0) \
 M(UInt64, http_max_field_value_size, 128 * 1024, "Maximum length of field value in HTTP header", 0) \
 M(UInt64, http_max_chunk_size, 100_GiB, "Maximum value of a chunk size in HTTP chunked transfer encoding", 0) \
- M(Bool, http_skip_not_found_url_for_globs, true, "Skip url's for globs with HTTP_NOT_FOUND error", 0) \
+ M(Bool, http_skip_not_found_url_for_globs, true, "Skip URLs for globs with HTTP_NOT_FOUND error", 0) \
 M(Bool, http_make_head_request, true, "Allows the execution of a `HEAD` request while reading data from HTTP to retrieve information about the file to be read, such as its size", 0) \
 M(Bool, optimize_throw_if_noop, false, "If setting is enabled and OPTIMIZE query didn't actually assign a merge then an explanatory exception is thrown", 0) \
 M(Bool, use_index_for_in_with_subqueries, true, "Try using an index if there is a subquery or a table expression on the right side of the IN operator.", 0) \
@@ -610,7 +610,7 @@ class IColumn;
 M(Bool, allow_experimental_database_materialized_postgresql, false, "Allow to create database with Engine=MaterializedPostgreSQL(...).", 0) \
 M(Bool, system_events_show_zero_values, false, "When querying system.events or system.metrics tables, include all metrics, even with zero values.", 0) \
 M(MySQLDataTypesSupport, mysql_datatypes_support_level, MySQLDataTypesSupportList{}, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal', 'datetime64', 'date2Date32' or 'date2String'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \
- M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \
+ M(Bool, optimize_trivial_insert_select, false, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \
 M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \
 M(Bool, enable_global_with_statement, true, "Propagate WITH statements to UNION queries and all subqueries", 0) \
 M(Bool, aggregate_functions_null_for_empty, false, "Rewrite all aggregate functions in a query, adding -OrNull suffix to them", 0) \
@@ -676,8 +676,8 @@ class IColumn;
 M(Bool, engine_file_truncate_on_insert, false, "Enables or disables truncate before insert in file engine tables", 0) \
 M(Bool, engine_file_allow_create_multiple_files, false, "Enables or disables creating a new file on each insert in file engine tables if format has suffix.", 0) \
 M(Bool, engine_file_skip_empty_files, false, "Allows to skip empty files in file table engine", 0) \
- M(Bool, engine_url_skip_empty_files, false, "Allows to skip empty files in url table engine", 0) \
- M(Bool, enable_url_encoding, true, " Allows to enable/disable decoding/encoding path in uri in URL table engine", 0) \
+ M(Bool, engine_url_skip_empty_files, false, "Allows to skip empty files in the URL table engine", 0) \
+ M(Bool, enable_url_encoding, true, " Allows to enable/disable decoding/encoding path in URI in the URL table engine", 0) \
 M(Bool, allow_experimental_database_replicated, false, "Allow to create databases with Replicated engine", 0) \
 M(UInt64, database_replicated_initial_query_timeout_sec, 300, "How long initial DDL query should wait for Replicated database to precess previous DDL queue entries", 0) \
 M(Bool, database_replicated_enforce_synchronous_settings, false, "Enforces synchronous waiting for some queries (see also database_atomic_wait_for_drop_and_detach_synchronously, mutation_sync, alter_sync). Not recommended to enable these settings.", 0) \
@@ -795,7 +795,7 @@ class IColumn;
 M(Bool, schema_inference_use_cache_for_azure, true, "Use cache in schema inference while using azure table function", 0) \
 M(Bool, schema_inference_use_cache_for_hdfs, true, "Use cache in schema inference while using hdfs table function", 0) \
 M(Bool, schema_inference_use_cache_for_url, true, "Use cache in schema inference while using url table function", 0) \
- M(Bool, schema_inference_cache_require_modification_time_for_url, true, "Use schema from cache for URL with last modification time validation (for urls with Last-Modified header)", 0) \
+ M(Bool, schema_inference_cache_require_modification_time_for_url, true, "Use schema from cache for URL with last modification time validation (for URLs with Last-Modified header)", 0) \
 \
 M(String, compatibility, "", "Changes other settings according to provided ClickHouse version. If we know that we changed some behaviour in ClickHouse by changing some settings in some version, this compatibility setting will control these settings", 0) \
 \
diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h
index 62ffd837a33..173333a6e2e 100644
--- a/src/Core/SettingsChangesHistory.h
+++ b/src/Core/SettingsChangesHistory.h
@@ -83,7 +83,8 @@ static std::map sett
 {
 {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."},
 {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"},
- {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}}},
+ {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"},
+ {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."}}},
 {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."},
 {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"},
 {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"},

From 99ff3de76563bca8a49d0b0c0046d67feb142937 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Fri, 19 Jan 2024 05:26:14 +0100
Subject: [PATCH 02/13] Do not check for large translation units with coverage

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 063cfc77302..aa0b7b4efde 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -546,7 +546,7 @@ if (ENABLE_RUST)
 endif()
 endif()
-if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
+if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
 set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON)
 else ()
 set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF)

From eb7cae5dc61e9d0b6fe27d5bda78d55718cd7a3f Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 28 Jan 2024 22:20:55 +0100
Subject: [PATCH 03/13] Fix tests

---
 .../0_stateless/01455_optimize_trivial_insert_select.sql     | 1 +
 .../0_stateless/01605_adaptive_granularity_block_borders.sql | 1 +
 tests/queries/0_stateless/02139_MV_with_scalar_subquery.sql  | 2 +-
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/queries/0_stateless/01455_optimize_trivial_insert_select.sql b/tests/queries/0_stateless/01455_optimize_trivial_insert_select.sql
index 5b59bc065dd..a3bb3ef65fe 100644
--- a/tests/queries/0_stateless/01455_optimize_trivial_insert_select.sql
+++ b/tests/queries/0_stateless/01455_optimize_trivial_insert_select.sql
@@ -2,6 +2,7 @@ SET max_insert_threads = 1, max_threads = 100, min_insert_block_size_rows = 1048
 DROP TABLE IF EXISTS t;
 CREATE TABLE t (x UInt64) ENGINE = StripeLog;
 -- For trivial INSERT SELECT, max_threads is lowered to max_insert_threads and max_block_size is changed to min_insert_block_size_rows.
+SET optimize_trivial_insert_select = 1;
 INSERT INTO t SELECT * FROM numbers_mt(1000000);
 SET max_threads = 1;
 -- If data was inserted by more threads, we will probably see data out of order.
diff --git a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql
index 694e961bc4a..187ff5c37e1 100644
--- a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql
+++ b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql
@@ -24,6 +24,7 @@ enable_vertical_merge_algorithm = 0;
 SET max_block_size=900;
 -- There are about 900 marks for our settings.
+SET optimize_trivial_insert_select = 1;
 INSERT INTO adaptive_table SELECT number, if(number > 700, randomPrintableASCII(102400), randomPrintableASCII(1)) FROM numbers(10000);
 OPTIMIZE TABLE adaptive_table FINAL;

diff --git a/tests/queries/0_stateless/02139_MV_with_scalar_subquery.sql b/tests/queries/0_stateless/02139_MV_with_scalar_subquery.sql
index f0285bbec3d..63c894cfb85 100644
--- a/tests/queries/0_stateless/02139_MV_with_scalar_subquery.sql
+++ b/tests/queries/0_stateless/02139_MV_with_scalar_subquery.sql
@@ -16,7 +16,7 @@ SELECT
 FROM source_null
 GROUP BY count_subquery, min_subquery, max_subquery;
-
+SET optimize_trivial_insert_select = 1;
 INSERT INTO source SELECT number FROM numbers(2000) SETTINGS min_insert_block_size_rows=1500, max_insert_block_size=1500;
 SELECT count() FROM source;

From a0b70abfe5eb3474c1865961547831f89d412bd0 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Wed, 31 Jan 2024 23:32:33 +0100
Subject: [PATCH 04/13] Fix tests

---
 .../queries/0_stateless/02235_add_part_offset_virtual_column.sql | 1 +
 tests/queries/0_stateless/02521_aggregation_by_partitions.sql    | 1 +
 tests/queries/0_stateless/02725_agg_projection_resprect_PK.sql   | 1 +
 3 files changed, 3 insertions(+)

diff --git a/tests/queries/0_stateless/02235_add_part_offset_virtual_column.sql b/tests/queries/0_stateless/02235_add_part_offset_virtual_column.sql
index dc8fceddc52..2e6be26ea56 100644
--- a/tests/queries/0_stateless/02235_add_part_offset_virtual_column.sql
+++ b/tests/queries/0_stateless/02235_add_part_offset_virtual_column.sql
@@ -21,6 +21,7 @@ CREATE TABLE t_random_1
 )
 ENGINE = GenerateRandom(1, 5, 3);
+SET optimize_trivial_insert_select = 1;
 INSERT INTO t_1 select rowNumberInAllBlocks(), *, '1984-01-01' from t_random_1 limit 1000000;
 OPTIMIZE TABLE t_1 FINAL;

diff --git a/tests/queries/0_stateless/02521_aggregation_by_partitions.sql b/tests/queries/0_stateless/02521_aggregation_by_partitions.sql
index 87317e5fba4..53eac1e2514 100644
--- a/tests/queries/0_stateless/02521_aggregation_by_partitions.sql
+++ b/tests/queries/0_stateless/02521_aggregation_by_partitions.sql
@@ -4,6 +4,7 @@ set max_threads = 16;
 set allow_aggregate_partitions_independently = 1;
 set force_aggregate_partitions_independently = 1;
 set optimize_use_projections = 0;
+set optimize_trivial_insert_select = 1;
 set allow_prefetched_read_pool_for_remote_filesystem = 0;
 set allow_prefetched_read_pool_for_local_filesystem = 0;

diff --git a/tests/queries/0_stateless/02725_agg_projection_resprect_PK.sql b/tests/queries/0_stateless/02725_agg_projection_resprect_PK.sql
index a2355f78f4c..459ebc1bc22 100644
--- a/tests/queries/0_stateless/02725_agg_projection_resprect_PK.sql
+++ b/tests/queries/0_stateless/02725_agg_projection_resprect_PK.sql
@@ -20,6 +20,7 @@ CREATE TABLE t0
 )
 ENGINE = MergeTree
 ORDER BY (c1, c2) settings min_bytes_for_wide_part = 10485760, min_rows_for_wide_part = 0;
+SET optimize_trivial_insert_select = 1;
 INSERT INTO t0 SELECT
 number,
 -number,

From a681fabc029d27dabab640cffe12d73929ef5826 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Wed, 31 Jan 2024 23:35:30 +0100
Subject: [PATCH 05/13] Fix tests

---
 tests/integration/test_merge_tree_s3/test.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py
index 9216b08f942..869938fba67 100644
--- a/tests/integration/test_merge_tree_s3/test.py
+++ b/tests/integration/test_merge_tree_s3/test.py
@@ -962,7 +962,7 @@ def test_s3_engine_heavy_write_check_mem(
 "INSERT INTO s3_test SELECT number, toString(number) FROM numbers(50000000)"
 f" SETTINGS "
 f" max_memory_usage={2*memory}"
- f", max_threads=1"  # ParallelFormattingOutputFormat consumption depends on it
+ ", max_threads=1, optimize_trivial_insert_select=1"  # ParallelFormattingOutputFormat consumption depends on it
 f", s3_max_inflight_parts_for_one_file={in_flight}",
 query_id=query_id,
 )
@@ -1010,9 +1010,10 @@ def test_s3_disk_heavy_write_check_mem(cluster, broken_s3, node_name):
 node.query(
 "INSERT INTO s3_test SELECT number, toString(number) FROM numbers(50000000)"
 f" SETTINGS max_memory_usage={2*memory}"
- f", max_insert_block_size=50000000"
- f", min_insert_block_size_rows=50000000"
- f", min_insert_block_size_bytes=1000000000000",
+ ", max_insert_block_size=50000000"
+ ", min_insert_block_size_rows=50000000"
+ ", min_insert_block_size_bytes=1000000000000"
+ ", optimize_trivial_insert_select=1",
 query_id=query_id,
 )

From 3c2c2c1404c09ceb63c1f44be632874deb62f9bd Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 16 Mar 2024 13:57:40 +0100
Subject: [PATCH 06/13] Fix tests

---
 tests/queries/0_stateless/00997_set_index_array.sql            | 2 ++
 .../queries/0_stateless/01200_mutations_memory_consumption.sql | 1 +
 .../queries/0_stateless/02231_buffer_aggregate_states_leak.sql | 1 +
 .../0_stateless/02499_monotonicity_toUnixTimestamp64.sh        | 3 +--
 4 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/tests/queries/0_stateless/00997_set_index_array.sql b/tests/queries/0_stateless/00997_set_index_array.sql
index 34d0f0b71ec..d6d27f5a6a0 100644
--- a/tests/queries/0_stateless/00997_set_index_array.sql
+++ b/tests/queries/0_stateless/00997_set_index_array.sql
@@ -17,6 +17,8 @@ select
 from system.numbers
 limit 10000000;
+OPTIMIZE TABLE set_array FINAL;
+
 SET max_rows_to_read = 8192;
 select count() from set_array where has(index_array, 333);

diff --git a/tests/queries/0_stateless/01200_mutations_memory_consumption.sql b/tests/queries/0_stateless/01200_mutations_memory_consumption.sql
index bca2286aa22..af7df9ddd79 100644
--- a/tests/queries/0_stateless/01200_mutations_memory_consumption.sql
+++ b/tests/queries/0_stateless/01200_mutations_memory_consumption.sql
@@ -1,4 +1,5 @@
 -- Tags: no-debug, no-parallel, long, no-s3-storage, no-random-merge-tree-settings
+SET optimize_trivial_insert_select = 1;
 DROP TABLE IF EXISTS table_with_single_pk;

diff --git a/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql b/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql
index dbe18953957..f1f7c876ba6 100644
--- a/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql
+++ b/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql
@@ -28,6 +28,7 @@ create materialized view mv_02231 to buffer_02231 as select
 from in_02231
 group by key;
+set optimize_trivial_insert_select = 1;
 insert into in_02231 select * from numbers(10e6) settings max_memory_usage='310Mi', max_threads=1;
 drop table buffer_02231;

diff --git a/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh b/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh
index 59b6e2abb06..7f0796c4395 100755
--- a/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh
+++ b/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh
@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -q "create table t(ts DateTime64) engine=MergeTree order by ts as select * from numbers_mt(1e6);"
+$CLICKHOUSE_CLIENT --optimize_trivial_insert_select 1 -q "create table t(ts DateTime64) engine=MergeTree order by ts as select * from numbers_mt(1e6);"
 max_block_size=8192
@@ -17,4 +17,3 @@ $CLICKHOUSE_CLIENT --query_id="$query_id" -q "select ts from t order by toUnixTi
 $CLICKHOUSE_CLIENT -q "system flush logs;"
 $CLICKHOUSE_CLIENT --param_query_id="$query_id" -q "select read_rows <= $max_block_size from system.query_log where event_date >= yesterday() and current_database = '$CLICKHOUSE_DATABASE' and query_id = {query_id:String} and type = 'QueryFinish';"
-

From 387bf7e4d66e8935d472b8f03dc90ffade13b110 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 24 Mar 2024 19:10:20 +0100
Subject: [PATCH 07/13] Merge with master

---
 src/Core/SettingsChangesHistory.h                             | 2 +-
 .../0_stateless/00564_versioned_collapsing_merge_tree.sql     | 1 +
 .../0_stateless/01042_check_query_and_last_granule_size.sql   | 2 ++
 .../queries/0_stateless/01045_order_by_pk_special_storages.sh | 4 ++--
 .../01045_zookeeper_system_mutations_with_parts_names.sh      | 4 ++--
 ...1312_comparison_with_constant_string_in_index_analysis.sql | 1 +
 tests/queries/0_stateless/01623_constraints_column_swap.sql   | 1 +
 tests/queries/0_stateless/01780_column_sparse_filter.sql      | 2 ++
 .../queries/0_stateless/02149_read_in_order_fixed_prefix.sql  | 1 +
 tests/queries/0_stateless/02404_memory_bound_merging.sql      | 1 +
 .../02461_prewhere_row_level_policy_lightweight_delete.sql.j2 | 2 ++
 11 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h
index 6f09fad4363..7500eb9508f 100644
--- a/src/Core/SettingsChangesHistory.h
+++ b/src/Core/SettingsChangesHistory.h
@@ -106,6 +106,7 @@ static std::map sett
 {"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"},
 {"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"},
 {"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."},
+ {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."},
 {"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."},
 {"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."},
 {"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."},
@@ -146,7 +147,6 @@ static std::map sett
 {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."},
 {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"},
 {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"},
- {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."},
 {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"},
 {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"},
 {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"},

diff --git a/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql b/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql
index 22f6da71247..494d8243534 100644
--- a/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql
+++ b/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql
@@ -2,6 +2,7 @@ set allow_deprecated_syntax_for_merge_tree=1;
 set optimize_on_insert = 0;
+set optimize_trivial_insert_select = 1;
 drop table if exists mult_tab;
 create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(date, (date), 8192, sign, version);

diff --git a/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql b/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql
index eccb2d25878..7b7d1706346 100644
--- a/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql
+++ b/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql
@@ -1,4 +1,6 @@
+SET optimize_trivial_insert_select = 1;
 SET check_query_single_value_result = 0;
+
 DROP TABLE IF EXISTS check_query_test;
 CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;

diff --git a/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh b/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh
index 12421a2b308..0714b4c91ed 100755
--- a/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh
+++ b/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh
@@ -16,8 +16,8 @@ $CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree O
 $CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0"
 $CLICKHOUSE_CLIENT -q "CREATE TABLE m (a UInt32, s String) engine = Merge('$CLICKHOUSE_DATABASE', 's[1,2]')"
-$CLICKHOUSE_CLIENT -q "INSERT INTO s1 select (number % 20) * 2 as n, toString(number * number) from numbers(100000)"
-$CLICKHOUSE_CLIENT -q "INSERT INTO s2 select (number % 20) * 2 + 1 as n, toString(number * number * number) from numbers(100000)"
+$CLICKHOUSE_CLIENT --optimize_trivial_insert_select 1 -q "INSERT INTO s1 select (number % 20) * 2 as n, toString(number * number) from numbers(100000)"
+$CLICKHOUSE_CLIENT --optimize_trivial_insert_select 1 -q "INSERT INTO s2 select (number % 20) * 2 + 1 as n, toString(number * number * number) from numbers(100000)"
 $CLICKHOUSE_CLIENT -q "SELECT '---StorageMerge---'"
 $CLICKHOUSE_CLIENT -q "SELECT a FROM m ORDER BY a LIMIT 5"

diff --git a/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh b/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh
index cd6501bbebf..1185498a5f7 100755
--- a/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh
+++ b/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh
@@ -25,7 +25,7 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE table_for_mutations(k UInt32, v1 UInt
 ${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES table_for_mutations"
-${CLICKHOUSE_CLIENT} --query="INSERT INTO table_for_mutations select number, number from numbers(100000)"
+${CLICKHOUSE_CLIENT} --optimize_trivial_insert_select 1 --query="INSERT INTO table_for_mutations select number, number from numbers(100000)"
 ${CLICKHOUSE_CLIENT} --query="SELECT sum(v1) FROM table_for_mutations"
@@ -53,7 +53,7 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE replicated_table_for_mutations(k UInt
 ${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES replicated_table_for_mutations"
 # test relays on part ids, which are non-deterministic with keeper fault injections, so disable it
-${CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --query="INSERT INTO replicated_table_for_mutations select number, number from numbers(100000)"
+${CLICKHOUSE_CLIENT} --optimize_trivial_insert_select 1 --insert_keeper_fault_injection_probability=0 --query="INSERT INTO replicated_table_for_mutations select number, number from numbers(100000)"
 ${CLICKHOUSE_CLIENT} --query="SELECT sum(v1) FROM replicated_table_for_mutations"

diff --git a/tests/queries/0_stateless/01312_comparison_with_constant_string_in_index_analysis.sql b/tests/queries/0_stateless/01312_comparison_with_constant_string_in_index_analysis.sql
index b7778dfd780..9fca9b09e1f 100644
--- a/tests/queries/0_stateless/01312_comparison_with_constant_string_in_index_analysis.sql
+++ b/tests/queries/0_stateless/01312_comparison_with_constant_string_in_index_analysis.sql
@@ -1,3 +1,4 @@
+SET optimize_trivial_insert_select = 1;
 SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;
 DROP TABLE IF EXISTS test;

diff --git a/tests/queries/0_stateless/01623_constraints_column_swap.sql b/tests/queries/0_stateless/01623_constraints_column_swap.sql
index 3219ee3cda7..242be87938d 100644
--- a/tests/queries/0_stateless/01623_constraints_column_swap.sql
+++ b/tests/queries/0_stateless/01623_constraints_column_swap.sql
@@ -5,6 +5,7 @@ SET optimize_using_constraints = 1;
 SET optimize_move_to_prewhere = 1;
 SET optimize_substitute_columns = 1;
 SET optimize_append_index = 1;
+SET optimize_trivial_insert_select = 1;
 DROP TABLE IF EXISTS column_swap_test_test;

diff --git a/tests/queries/0_stateless/01780_column_sparse_filter.sql b/tests/queries/0_stateless/01780_column_sparse_filter.sql
index f52beba50b0..245c7c121b7 100644
--- a/tests/queries/0_stateless/01780_column_sparse_filter.sql
+++ b/tests/queries/0_stateless/01780_column_sparse_filter.sql
@@ -1,3 +1,5 @@
+SET optimize_trivial_insert_select = 1;
+
 DROP TABLE IF EXISTS t_sparse;
 CREATE TABLE t_sparse (id UInt64, u UInt64, s String)

diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql
index 0834b76d4ec..ae8c39b49bc 100644
--- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql
+++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql
@@ -1,5 +1,6 @@
 SET max_threads=0;
 SET optimize_read_in_order=1;
+SET optimize_trivial_insert_select = 1;
 SET read_in_order_two_level_merge_threshold=100;
 DROP TABLE IF EXISTS t_read_in_order;

diff --git a/tests/queries/0_stateless/02404_memory_bound_merging.sql b/tests/queries/0_stateless/02404_memory_bound_merging.sql
index 5e017e79309..a2de19dff8a 100644
--- a/tests/queries/0_stateless/02404_memory_bound_merging.sql
+++ b/tests/queries/0_stateless/02404_memory_bound_merging.sql
@@ -7,6 +7,7 @@ drop table if exists t_different_dbs;
 drop table if exists dist_t;
 drop table if exists t;
+set optimize_trivial_insert_select = 1;
 create table t(a UInt64, b UInt64) engine=MergeTree order by a;
 system stop merges t;

diff --git a/tests/queries/0_stateless/02461_prewhere_row_level_policy_lightweight_delete.sql.j2 b/tests/queries/0_stateless/02461_prewhere_row_level_policy_lightweight_delete.sql.j2
index 0ec6b2ed144..d7cbf210506 100644
--- a/tests/queries/0_stateless/02461_prewhere_row_level_policy_lightweight_delete.sql.j2
+++ b/tests/queries/0_stateless/02461_prewhere_row_level_policy_lightweight_delete.sql.j2
@@ -1,5 +1,7 @@
 {% for index_granularity in [999, 1000, 1001, 9999, 10000, 10001] %}
+SET optimize_trivial_insert_select = 1;
+
 DROP TABLE IF EXISTS url_na_log;
 CREATE TABLE url_na_log(SiteId UInt32, DateVisit Date, PRIMARY KEY (SiteId))

From 885e5f78c880806642af79aceefafa088db66c89 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 24 Mar 2024 23:21:28 +0100
Subject: [PATCH 08/13] Fix test

---
 tests/queries/0_stateless/01200_mutations_memory_consumption.sql | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/queries/0_stateless/01200_mutations_memory_consumption.sql b/tests/queries/0_stateless/01200_mutations_memory_consumption.sql
index ba91d77f22e..5019abc38ab 100644
--- a/tests/queries/0_stateless/01200_mutations_memory_consumption.sql
+++ b/tests/queries/0_stateless/01200_mutations_memory_consumption.sql
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 -- Tags: no-debug, no-parallel, long, no-s3-storage, no-random-settings, no-random-merge-tree-settings
 SET optimize_trivial_insert_select = 1;

From c18e498d80ea52df4183c9e21e212131f447f7b8 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 25 Mar 2024 03:31:24 +0100
Subject: [PATCH 09/13] Fix tests

---
 .../0_stateless/01551_mergetree_read_in_order_spread.sql       | 1 +
 tests/queries/0_stateless/01780_column_sparse_distinct.sql     | 2 ++
 tests/queries/0_stateless/01825_type_json_sparse.sql           | 1 +
 tests/queries/0_stateless/02993_lazy_index_loading.sql         | 1 +
 tests/queries/0_stateless/03000_virtual_columns_in_prewhere.sql | 2 ++
 5 files changed, 7 insertions(+)

diff --git a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.sql b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.sql
index c202ad349d6..95b46c69e83 100644
--- a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.sql
+++ b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.sql
@@ -10,6 +10,7 @@ PARTITION BY key%2
 ORDER BY (key, key/2)
 SETTINGS index_granularity=10, index_granularity_bytes='10Mi';
+SET optimize_trivial_insert_select = 1;
 INSERT INTO data_01551 SELECT number FROM numbers(100000);
 SET max_threads=3;
 SET merge_tree_min_rows_for_concurrent_read=10000;

diff --git a/tests/queries/0_stateless/01780_column_sparse_distinct.sql b/tests/queries/0_stateless/01780_column_sparse_distinct.sql
index e98bada1aac..a0735e38f18 100644
--- a/tests/queries/0_stateless/01780_column_sparse_distinct.sql
+++ b/tests/queries/0_stateless/01780_column_sparse_distinct.sql
@@ -1,3 +1,5 @@
+SET optimize_trivial_insert_select = 1;
+
 DROP TABLE IF EXISTS t_sparse_distinct;
 CREATE TABLE t_sparse_distinct (id UInt32, v UInt64)

diff --git a/tests/queries/0_stateless/01825_type_json_sparse.sql b/tests/queries/0_stateless/01825_type_json_sparse.sql
index cc7c66382a3..69ca1ff8406 100644
--- a/tests/queries/0_stateless/01825_type_json_sparse.sql
+++ b/tests/queries/0_stateless/01825_type_json_sparse.sql
@@ -3,6 +3,7 @@
 DROP TABLE IF EXISTS t_json_sparse;
 SET allow_experimental_object_type = 1;
+SET optimize_trivial_insert_select = 1;
 CREATE TABLE t_json_sparse (data JSON)
 ENGINE = MergeTree ORDER BY tuple()

diff --git a/tests/queries/0_stateless/02993_lazy_index_loading.sql b/tests/queries/0_stateless/02993_lazy_index_loading.sql
index 7de4af9ef0e..ffb4b7547bf 100644
--- a/tests/queries/0_stateless/02993_lazy_index_loading.sql
+++ b/tests/queries/0_stateless/02993_lazy_index_loading.sql
@@ -1,6 +1,7 @@
 DROP TABLE IF EXISTS test;
 CREATE TABLE test (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1;
+SET optimize_trivial_insert_select = 1;
 INSERT INTO test SELECT randomString(1000) FROM numbers(100000);
 SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table = 'test';

diff --git a/tests/queries/0_stateless/03000_virtual_columns_in_prewhere.sql b/tests/queries/0_stateless/03000_virtual_columns_in_prewhere.sql
index d57db9151b9..c1e6eba6b6f 100644
--- a/tests/queries/0_stateless/03000_virtual_columns_in_prewhere.sql
+++ b/tests/queries/0_stateless/03000_virtual_columns_in_prewhere.sql
@@ -1,3 +1,5 @@
+SET optimize_trivial_insert_select = 1;
+
 drop table if exists x;
 create table x (i int, j int, k int) engine MergeTree order by tuple() settings index_granularity=8192, index_granularity_bytes = '10Mi', min_bytes_for_wide_part=0, min_rows_for_wide_part=0, ratio_of_defaults_for_sparse_serialization=1;

From 614988686bfdd3c9aa36f410387ed66d41f96281 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Wed, 26 Jun 2024 00:04:13 +0200
Subject: [PATCH 10/13] Update tests

---
 .../0_stateless/02381_compress_marks_and_primary_key.sql     | 2 ++
 .../02870_move_partition_to_volume_io_throttling.sql         | 2 ++
 tests/queries/0_stateless/03033_set_index_in.sql             | 4 +++-
 .../0_stateless/03038_move_partition_to_oneself_deadlock.sql | 2 ++
 4 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/tests/queries/0_stateless/02381_compress_marks_and_primary_key.sql b/tests/queries/0_stateless/02381_compress_marks_and_primary_key.sql
index 0c228c13f19..1a584b8b5b2 100644
--- a/tests/queries/0_stateless/02381_compress_marks_and_primary_key.sql
+++ b/tests/queries/0_stateless/02381_compress_marks_and_primary_key.sql
@@ -1,5 +1,7 @@
 -- Tags: no-random-merge-tree-settings
+SET optimize_trivial_insert_select = 1;
+
 drop table if exists test_02381;
 create table test_02381(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b) SETTINGS compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1;
 insert into test_02381 select number, number * 10 from system.numbers limit 1000000;

diff --git a/tests/queries/0_stateless/02870_move_partition_to_volume_io_throttling.sql b/tests/queries/0_stateless/02870_move_partition_to_volume_io_throttling.sql
index b03d9849a80..2d76ab0d8e3 100644
--- a/tests/queries/0_stateless/02870_move_partition_to_volume_io_throttling.sql
+++ b/tests/queries/0_stateless/02870_move_partition_to_volume_io_throttling.sql
@@ -2,6 +2,8 @@ -- Tag: no-fasttest -- requires S3
 -- Tag: no-replicated-database -- ALTER MOVE PARTITION TO should not be replicated (will be fixed separatelly)
+SET optimize_trivial_insert_select = 1;
+
 CREATE TABLE test_move_partition_throttling (key UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple() SETTINGS storage_policy='local_remote';
 INSERT INTO test_move_partition_throttling SELECT number FROM numbers(1e6);
 SELECT disk_name, partition, rows FROM system.parts WHERE database = currentDatabase() AND table = 'test_move_partition_throttling' and active;

diff --git a/tests/queries/0_stateless/03033_set_index_in.sql b/tests/queries/0_stateless/03033_set_index_in.sql
index ad42a576444..bc0676fc5ef 100644
--- a/tests/queries/0_stateless/03033_set_index_in.sql
+++ b/tests/queries/0_stateless/03033_set_index_in.sql
@@ -1,3 +1,5 @@
+SET optimize_trivial_insert_select = 1;
+
 create table a (k UInt64, v UInt64, index i (v) type set(100) granularity 2) engine MergeTree order by k settings index_granularity=8192, index_granularity_bytes=1000000000, min_index_granularity_bytes=0;
 insert into a select number, intDiv(number, 4096) from numbers(1000000);
 select sum(1+ignore(*)) from a where indexHint(v in (20, 40));
@@ -6,4 +8,4 @@ select sum(1+ignore(*)) from a where indexHint(v in (select 20 union all select
 SELECT 1 FROM a PREWHERE v IN (SELECT 1) WHERE v IN (SELECT 2);
 select 1 from a where indexHint(indexHint(materialize(0)));
-select sum(1+ignore(*)) from a where indexHint(indexHint(v in (20, 40)));
\ No newline at end of file
+select sum(1+ignore(*)) from a where indexHint(indexHint(v in (20, 40)));

diff --git a/tests/queries/0_stateless/03038_move_partition_to_oneself_deadlock.sql b/tests/queries/0_stateless/03038_move_partition_to_oneself_deadlock.sql
index 6eefa5270c5..f3072fb3539 100644
--- a/tests/queries/0_stateless/03038_move_partition_to_oneself_deadlock.sql
+++ b/tests/queries/0_stateless/03038_move_partition_to_oneself_deadlock.sql
@@ -1,3 +1,5 @@
+SET optimize_trivial_insert_select = 1;
+
 DROP TABLE IF EXISTS move_partition_to_oneself;
 CREATE TABLE move_partition_to_oneself (key UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple();
 INSERT INTO move_partition_to_oneself SELECT number FROM numbers(1e6);

From cf1f0d3cf6da8be1ebe6b16101201922dd8b3be5 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 30 Jun 2024 01:28:50 +0200
Subject: [PATCH 11/13] Fix test

---
 tests/queries/0_stateless/01825_type_json_from_map.sql | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/queries/0_stateless/01825_type_json_from_map.sql b/tests/queries/0_stateless/01825_type_json_from_map.sql
index 7cad50b363b..50cefefc0ea 100644
--- a/tests/queries/0_stateless/01825_type_json_from_map.sql
+++ b/tests/queries/0_stateless/01825_type_json_from_map.sql
@@ -5,6 +5,7 @@ DROP TABLE IF EXISTS t_json;
 DROP TABLE IF EXISTS t_map;
 SET allow_experimental_object_type = 1;
+SET optimize_trivial_insert_select = 1;
 CREATE TABLE t_json(id UInt64, obj JSON) ENGINE = MergeTree ORDER BY id;
 CREATE TABLE t_map(id UInt64, m Map(String, UInt64)) ENGINE = MergeTree ORDER BY id;

From 60853e36605760251e4fd2cffcc3184da452f2c4 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 30 Jun 2024 04:09:59 +0200
Subject: [PATCH 12/13] Fix test

---
 .../0_stateless/03030_system_flush_distributed_settings.sql | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql b/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql
index da2a387e07c..ac64135b593 100644
--- a/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql
+++ b/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql
@@ -12,6 +12,7 @@ system stop distributed sends dist_in;
 create table dist_out as data engine=Distributed(test_shard_localhost, currentDatabase(), data);
 set prefer_localhost_replica=0;
+SET optimize_trivial_insert_select = 1;
 insert into dist_in select number/100, number from system.numbers limit 1e6 settings max_memory_usage='20Mi';
 system flush distributed dist_in; -- { serverError MEMORY_LIMIT_EXCEEDED }

From 1eac2abf8f8dc278e9f206a7be3385675088d97c Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 30 Jun 2024 20:58:00 +0200
Subject: [PATCH 13/13] Fix test

---
 tests/queries/0_stateless/02956_rocksdb_bulk_sink.sh | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/tests/queries/0_stateless/02956_rocksdb_bulk_sink.sh b/tests/queries/0_stateless/02956_rocksdb_bulk_sink.sh
index b1d1c483396..f7111d0afe2 100755
--- a/tests/queries/0_stateless/02956_rocksdb_bulk_sink.sh
+++ b/tests/queries/0_stateless/02956_rocksdb_bulk_sink.sh
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # Normal importing, as we only insert 1000 rows, so it should be in memtable
 ${CLICKHOUSE_CLIENT} --query "CREATE TABLE IF NOT EXISTS rocksdb_worm (key UInt64, value UInt64) ENGINE = EmbeddedRocksDB() PRIMARY KEY key SETTINGS optimize_for_bulk_insert = 0;"
-${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers(1000);"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers(1000) SETTINGS optimize_trivial_insert_select = 1;"
 ${CLICKHOUSE_CLIENT} --query "SELECT sum(value) FROM system.rocksdb WHERE database = currentDatabase() AND table = 'rocksdb_worm' AND name = 'no.file.opens';" # should be 0 because all data is still in memtable
 ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"
@@ -16,32 +16,32 @@ ${CLICKHOUSE_CLIENT} --query "ALTER TABLE rocksdb_worm MODIFY SETTING optimize_f
 # Testing that key serialization is identical w. and w/o bulk sink
 ${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE rocksdb_worm;"
-${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+2 FROM numbers(1000);" # should override previous keys
+${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+2 FROM numbers(1000) SETTINGS optimize_trivial_insert_select = 1;" # should override previous keys
 ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm WHERE value = key + 2;"
 # With bulk insertion, there is no memtable, so a small insert should create a new file
 ${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE rocksdb_worm;"
-${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers(1000);"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers(1000) SETTINGS optimize_trivial_insert_select = 1;"
 ${CLICKHOUSE_CLIENT} --query "SELECT sum(value) FROM system.rocksdb WHERE database = currentDatabase() AND table = 'rocksdb_worm' AND name = 'no.file.opens';" # should be 1
 ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"
 # Testing insert with multiple sinks and fixed block size
 ${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE rocksdb_worm;"
 # Must set both max_threads and max_insert_threads to 2 to make sure there is only two sinks
-${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers_mt(1000000) SETTINGS max_threads = 2, max_insert_threads = 2, max_block_size = 10000, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, insert_deduplication_token = '';"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers_mt(1000000) SETTINGS max_threads = 2, max_insert_threads = 2, max_block_size = 10000, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, insert_deduplication_token = '', optimize_trivial_insert_select = 1;"
 ${CLICKHOUSE_CLIENT} --query "SELECT sum(value) FROM system.rocksdb WHERE database = currentDatabase() AND table = 'rocksdb_worm' AND name = 'no.file.opens';" # should be 2 because default bulk sink size is ~1M rows / SST file
 ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"
 # Testing insert with duplicated keys
 ${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE rocksdb_worm;"
-${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number % 1000, number+1 FROM numbers_mt(1000000) SETTINGS max_block_size = 100000, max_insert_threads = 1;"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number % 1000, number+1 FROM numbers_mt(1000000) SETTINGS max_block_size = 100000, max_insert_threads = 1, optimize_trivial_insert_select = 1;"
 ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"
 ${CLICKHOUSE_CLIENT} --query "SELECT * FROM rocksdb_worm WHERE key = 0;" # should be the latest value - 999001
 # Testing insert with multiple threads
 ${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE rocksdb_worm;"
-${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers_mt(1000000)" &
-${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers_mt(1000000)" &
+${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers_mt(1000000) SETTINGS optimize_trivial_insert_select = 1" &
+${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers_mt(1000000) SETTINGS optimize_trivial_insert_select = 1" &
 wait
 ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"
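
A minimal sketch of the behavior change, for illustration only: it is not part of the patch series, assumes a server built with these patches, and reuses the StripeLog table `t` from the 01455 test above. With the new default of 0, a trivial INSERT ... SELECT no longer lowers max_threads to max_insert_threads or changes max_block_size to min_insert_block_size_rows, so the updated tests opt back in per session or per query; the shell tests achieve the same with `$CLICKHOUSE_CLIENT --optimize_trivial_insert_select 1`.

-- Re-enable the optimization for the whole session, as most of the updated .sql tests do:
SET optimize_trivial_insert_select = 1;
INSERT INTO t SELECT * FROM numbers_mt(1000000);

-- Or re-enable it for a single query, as the updated shell and integration tests do:
INSERT INTO t SELECT * FROM numbers_mt(1000000) SETTINGS optimize_trivial_insert_select = 1;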