commit fd67c15ff8
max-vostrikov, 2024-08-27 17:29:14 -07:00, committed by GitHub
34 changed files with 7 additions and 45 deletions

View File

@@ -913,6 +913,7 @@ class SettingsRandomizer:
"min_external_table_block_size_bytes": lambda: random.choice([0, 1, 100000000]),
"max_parsing_threads": lambda: random.choice([0, 1, 10]),
"optimize_functions_to_subcolumns": lambda: random.randint(0, 1),
"optimize_trivial_insert_select": lambda: random.randint(0, 1),
}
@staticmethod

View File

@@ -1,7 +1,6 @@
DROP TABLE IF EXISTS numbers_squashed;
CREATE TABLE numbers_squashed AS system.numbers ENGINE = StripeLog;
-SET optimize_trivial_insert_select = 'false';
SET max_block_size = 10000;
SET min_insert_block_size_rows = 1000000;

View File

@@ -2,7 +2,6 @@
set allow_deprecated_syntax_for_merge_tree=1;
set optimize_on_insert = 0;
-set optimize_trivial_insert_select = 1;
drop table if exists mult_tab;
create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(date, (date), 8192, sign, version);

View File

@@ -1,4 +1,3 @@
-SET optimize_trivial_insert_select = 1;
SET check_query_single_value_result = 0;
DROP TABLE IF EXISTS check_query_test;

View File

@@ -16,8 +16,8 @@ $CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree O
$CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0"
$CLICKHOUSE_CLIENT -q "CREATE TABLE m (a UInt32, s String) engine = Merge('$CLICKHOUSE_DATABASE', 's[1,2]')"
-$CLICKHOUSE_CLIENT --optimize_trivial_insert_select 1 -q "INSERT INTO s1 select (number % 20) * 2 as n, toString(number * number) from numbers(100000)"
-$CLICKHOUSE_CLIENT --optimize_trivial_insert_select 1 -q "INSERT INTO s2 select (number % 20) * 2 + 1 as n, toString(number * number * number) from numbers(100000)"
+$CLICKHOUSE_CLIENT -q "INSERT INTO s1 select (number % 20) * 2 as n, toString(number * number) from numbers(100000)"
+$CLICKHOUSE_CLIENT -q "INSERT INTO s2 select (number % 20) * 2 + 1 as n, toString(number * number * number) from numbers(100000)"
$CLICKHOUSE_CLIENT -q "SELECT '---StorageMerge---'"
$CLICKHOUSE_CLIENT -q "SELECT a FROM m ORDER BY a LIMIT 5"

View File

@@ -25,7 +25,7 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE table_for_mutations(k UInt32, v1 UInt
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES table_for_mutations"
-${CLICKHOUSE_CLIENT} --optimize_trivial_insert_select 1 --query="INSERT INTO table_for_mutations select number, number from numbers(100000)"
+${CLICKHOUSE_CLIENT} --query="INSERT INTO table_for_mutations select number, number from numbers(100000)"
${CLICKHOUSE_CLIENT} --query="SELECT sum(v1) FROM table_for_mutations"
@@ -53,7 +53,7 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE replicated_table_for_mutations(k UInt
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES replicated_table_for_mutations"
# test relies on part ids, which are non-deterministic with keeper fault injections, so disable it
-${CLICKHOUSE_CLIENT} --optimize_trivial_insert_select 1 --insert_keeper_fault_injection_probability=0 --query="INSERT INTO replicated_table_for_mutations select number, number from numbers(100000)"
+${CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --query="INSERT INTO replicated_table_for_mutations select number, number from numbers(100000)"
${CLICKHOUSE_CLIENT} --query="SELECT sum(v1) FROM replicated_table_for_mutations"

View File

@@ -1,6 +1,4 @@
-- Tags: no-debug, no-parallel, long, no-object-storage, no-random-settings, no-random-merge-tree-settings
-SET optimize_trivial_insert_select = 1;
DROP TABLE IF EXISTS table_with_single_pk;
CREATE TABLE table_with_single_pk

View File

@@ -65,7 +65,7 @@ echo "create table out_01278 as data_01278 Engine=Merge('$CLICKHOUSE_DATABASE',
#
function execute_insert()
{
-${CLICKHOUSE_CLIENT} --max_memory_usage=$TEST_01278_MEMORY --optimize_trivial_insert_select='false' "$@" -q "
+${CLICKHOUSE_CLIENT} --max_memory_usage=$TEST_01278_MEMORY "$@" -q "
insert into data_01278 select
number,
reinterpretAsString(number), // s1

View File

@@ -1,4 +1,3 @@
-SET optimize_trivial_insert_select = 1;
SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;
DROP TABLE IF EXISTS test;

View File

@@ -3,7 +3,6 @@ SET allow_deprecated_error_prone_window_functions = 1;
DROP TABLE IF EXISTS t;
CREATE TABLE t (x UInt64) ENGINE = StripeLog;
-- For trivial INSERT SELECT, max_threads is lowered to max_insert_threads and max_block_size is changed to min_insert_block_size_rows.
-SET optimize_trivial_insert_select = 1;
INSERT INTO t SELECT * FROM numbers_mt(1000000);
SET max_threads = 1;
-- If data was inserted by more threads, we will probably see data out of order.
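The two comments above explain the ordering guarantee this test relies on: on the trivial INSERT SELECT path, max_threads is lowered to max_insert_threads, so rows flow through a single stream in insertion order. With the session-level SET removed (the harness now randomizes it), a hedged sketch of pinning the behavior for a single statement; the SETTINGS clause below is illustrative, not part of this commit:

-- Illustrative: force the trivial path for one INSERT so row order stays deterministic
INSERT INTO t SELECT * FROM numbers_mt(1000000)
SETTINGS optimize_trivial_insert_select = 1, max_insert_threads = 1;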

View File

@@ -10,7 +10,6 @@ PARTITION BY key%2
ORDER BY (key, key/2)
SETTINGS index_granularity=10, index_granularity_bytes='10Mi';
-SET optimize_trivial_insert_select = 1;
INSERT INTO data_01551 SELECT number FROM numbers(100000);
SET max_threads=3;
SET merge_tree_min_rows_for_concurrent_read=10000;

View File

@@ -24,7 +24,6 @@ enable_vertical_merge_algorithm = 0;
SET max_block_size=900;
-- There are about 900 marks for our settings.
-SET optimize_trivial_insert_select = 1;
INSERT INTO adaptive_table SELECT number, if(number > 700, randomPrintableASCII(102400), randomPrintableASCII(1)) FROM numbers(10000);
OPTIMIZE TABLE adaptive_table FINAL;

View File

@@ -5,7 +5,6 @@ SET optimize_using_constraints = 1;
SET optimize_move_to_prewhere = 1;
SET optimize_substitute_columns = 1;
SET optimize_append_index = 1;
-SET optimize_trivial_insert_select = 1;
DROP TABLE IF EXISTS column_swap_test_test;

View File

@@ -1,5 +1,3 @@
-SET optimize_trivial_insert_select = 1;
DROP TABLE IF EXISTS t_sparse_distinct;
CREATE TABLE t_sparse_distinct (id UInt32, v UInt64)

View File

@@ -1,5 +1,3 @@
-SET optimize_trivial_insert_select = 1;
DROP TABLE IF EXISTS t_sparse;
CREATE TABLE t_sparse (id UInt64, u UInt64, s String)

View File

@@ -5,7 +5,6 @@ DROP TABLE IF EXISTS t_json;
DROP TABLE IF EXISTS t_map;
SET allow_experimental_object_type = 1;
-SET optimize_trivial_insert_select = 1;
CREATE TABLE t_json(id UInt64, obj Object('json')) ENGINE = MergeTree ORDER BY id;
CREATE TABLE t_map(id UInt64, m Map(String, UInt64)) ENGINE = MergeTree ORDER BY id;

View File

@@ -3,7 +3,6 @@
DROP TABLE IF EXISTS t_json_sparse;
SET allow_experimental_object_type = 1;
-SET optimize_trivial_insert_select = 1;
CREATE TABLE t_json_sparse (data Object('json'))
ENGINE = MergeTree ORDER BY tuple()

View File

@@ -16,7 +16,6 @@ SELECT
FROM source_null
GROUP BY count_subquery, min_subquery, max_subquery;
-SET optimize_trivial_insert_select = 1;
INSERT INTO source SELECT number FROM numbers(2000) SETTINGS min_insert_block_size_rows=1500, max_insert_block_size=1500;
SELECT count() FROM source;

View File

@@ -1,6 +1,5 @@
SET max_threads=0;
SET optimize_read_in_order=1;
-SET optimize_trivial_insert_select = 1;
SET read_in_order_two_level_merge_threshold=100;
DROP TABLE IF EXISTS t_read_in_order;

View File

@@ -28,7 +28,6 @@ create materialized view mv_02231 to buffer_02231 as select
from in_02231
group by key;
-set optimize_trivial_insert_select = 1;
insert into in_02231 select * from numbers(5e6) settings max_memory_usage='400Mi', max_threads=1;
drop table buffer_02231;

View File

@@ -21,7 +21,6 @@ CREATE TABLE t_random_1
)
ENGINE = GenerateRandom(1, 5, 3);
-SET optimize_trivial_insert_select = 1;
INSERT INTO t_1 select rowNumberInAllBlocks(), *, '1984-01-01' from t_random_1 limit 1000000;
OPTIMIZE TABLE t_1 FINAL;

View File

@@ -1,7 +1,5 @@
-- Tags: no-random-merge-tree-settings
-SET optimize_trivial_insert_select = 1;
drop table if exists test_02381;
create table test_02381(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b) SETTINGS compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1;
insert into test_02381 select number, number * 10 from system.numbers limit 1000000;

View File

@@ -7,8 +7,6 @@ drop table if exists t_different_dbs;
drop table if exists dist_t;
drop table if exists t;
-set optimize_trivial_insert_select = 1;
create table t(a UInt64, b UInt64) engine=MergeTree order by a;
system stop merges t;
insert into t select number, number from numbers_mt(1e6);

View File

@@ -1,7 +1,5 @@
{% for index_granularity in [999, 1000, 1001, 9999, 10000, 10001] %}
-SET optimize_trivial_insert_select = 1;
DROP TABLE IF EXISTS url_na_log;
CREATE TABLE url_na_log(SiteId UInt32, DateVisit Date, PRIMARY KEY (SiteId))

View File

@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT --optimize_trivial_insert_select 1 -q "create table t(ts DateTime64) engine=MergeTree order by ts as select * from numbers_mt(1e6);"
+$CLICKHOUSE_CLIENT -q "create table t(ts DateTime64) engine=MergeTree order by ts as select * from numbers_mt(1e6);"
max_block_size=8192

View File

@@ -6,7 +6,6 @@ set max_threads = 16;
set allow_aggregate_partitions_independently = 1;
set force_aggregate_partitions_independently = 1;
set optimize_use_projections = 0;
-set optimize_trivial_insert_select = 1;
set allow_prefetched_read_pool_for_remote_filesystem = 0;
set allow_prefetched_read_pool_for_local_filesystem = 0;

View File

@@ -20,7 +20,6 @@ CREATE TABLE t0
)
ENGINE = MergeTree ORDER BY (c1, c2) settings min_bytes_for_wide_part = 10485760, min_rows_for_wide_part = 0;
-SET optimize_trivial_insert_select = 1;
INSERT INTO t0 SELECT
number,
-number,

View File

@@ -1,4 +1,3 @@
-- Tags: no-fasttest, no-tsan, no-asan, no-msan, no-ubsan
-- This test depends on internet access, but it does not matter, because it only has to check that there is no abort due to a bug in the Apache Arrow library.
-SET optimize_trivial_insert_select=1;
INSERT INTO TABLE FUNCTION url('https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/athena_partitioned/hits_9.parquet') SELECT * FROM url('https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/athena_partitioned/hits_9.parquet'); -- { serverError CANNOT_WRITE_TO_OSTREAM, RECEIVED_ERROR_FROM_REMOTE_IO_SERVER, POCO_EXCEPTION }

View File

@@ -2,8 +2,6 @@
-- Tag: no-fasttest -- requires S3
-- Tag: no-replicated-database -- ALTER MOVE PARTITION TO should not be replicated (will be fixed separately)
-SET optimize_trivial_insert_select = 1;
CREATE TABLE test_move_partition_throttling (key UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple() SETTINGS storage_policy='local_remote';
INSERT INTO test_move_partition_throttling SELECT number FROM numbers(1e6);
SELECT disk_name, partition, rows FROM system.parts WHERE database = currentDatabase() AND table = 'test_move_partition_throttling' and active;

View File

@@ -1,7 +1,6 @@
DROP TABLE IF EXISTS test;
CREATE TABLE test (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1;
-SET optimize_trivial_insert_select = 1;
INSERT INTO test SELECT randomString(1000) FROM numbers(100000);
SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table = 'test';

View File

@@ -1,5 +1,3 @@
-SET optimize_trivial_insert_select = 1;
drop table if exists x;
create table x (i int, j int, k int) engine MergeTree order by tuple() settings index_granularity=8192, index_granularity_bytes = '10Mi', min_bytes_for_wide_part=0, min_rows_for_wide_part=0, ratio_of_defaults_for_sparse_serialization=1;

View File

@@ -12,7 +12,6 @@ system stop distributed sends dist_in;
create table dist_out as data engine=Distributed(test_shard_localhost, currentDatabase(), data);
set prefer_localhost_replica=0;
-SET optimize_trivial_insert_select = 1;
-- due to pushing to MV with aggregation the query needs ~300MiB
-- but it will be done in background via "system flush distributed"
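The comment above carries the reasoning: the INSERT itself stays cheap because the push through the aggregating materialized view happens later, when the queued blocks are flushed. A hedged sketch of that flush step (dist_in comes from this hunk; the memory bound is illustrative, sized above the ~300MiB the comment mentions):

-- Illustrative: trigger the background push explicitly under an explicit memory bound
SET max_memory_usage = '400Mi';
SYSTEM FLUSH DISTRIBUTED dist_in;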

View File

@@ -1,5 +1,3 @@
-SET optimize_trivial_insert_select = 1;
create table a (k UInt64, v UInt64, index i (v) type set(100) granularity 2) engine MergeTree order by k settings index_granularity=8192, index_granularity_bytes=1000000000, min_index_granularity_bytes=0;
insert into a select number, intDiv(number, 4096) from numbers(1000000);
select sum(1+ignore(*)) from a where indexHint(v in (20, 40));

View File

@@ -1,5 +1,3 @@
-SET optimize_trivial_insert_select = 1;
DROP TABLE IF EXISTS move_partition_to_oneself;
CREATE TABLE move_partition_to_oneself (key UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO move_partition_to_oneself SELECT number FROM numbers(1e6);