fix some tests

Anton Popov 2022-10-28 23:26:06 +00:00
parent 6a9f195390
commit c7f6a410ac
30 changed files with 91 additions and 43 deletions

View File

@@ -226,6 +226,7 @@ function run_tests
 --hung-check
 --fast-tests-only
 --no-random-settings
+--no-random-merge-tree-settings
 --no-long
 --testname
 --shard

View File

@@ -486,7 +486,6 @@ class MergeTreeSettingsRandomizer:
         "prefer_fetch_merged_part_size_threshold": lambda: random.randint(
             1, 10 * 1024 * 1024 * 1024
         ),
-        "always_fetch_merged_part": lambda: random.randint(0, 1),
         "vertical_merge_algorithm_min_rows_to_activate": lambda: random.randint(
             1, 32 * 8192
         ),
@@ -618,7 +617,7 @@ class TestCase:
         return " ".join([f"--{setting}" for setting in settings_list])

     def has_show_create_table_in_test(self):
-        return not subprocess.call(["grep", "-iq", "show create table", self.case_file])
+        return not subprocess.call(["grep", "-iq", "show create", self.case_file])

     def add_random_settings(self, client_options):
         new_options = ""
@@ -684,12 +683,17 @@ class TestCase:
             args.no_random_settings or has_no_random_settings_tag
         )

+        has_no_random_merge_tree_settings_tag = (
+            self.tags and "no-random-merge-tree-settings" in self.tags
+        )
+
         # If test contains SHOW CREATE TABLE do not
         # randomize merge tree settings, because
-        # they are added to table definition and test will fail
+        # they will be added to table definition and test will fail
         self.randomize_merge_tree_settings = not (
             args.no_random_merge_tree_settings
             or has_no_random_settings_tag
+            or has_no_random_merge_tree_settings_tag
             or self.has_show_create_table_in_test()
         )
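For context on the check above: the harness appends the randomized MergeTree settings to CREATE TABLE queries, and SHOW CREATE TABLE echoes everything in the SETTINGS clause, so a fixed .reference file would stop matching. A minimal SQL sketch of the effect; the table name and the setting value are illustrative, not taken from this commit:

-- Pretend this setting was injected by the randomizer rather than written by hand.
CREATE TABLE show_create_demo (x UInt64)
ENGINE = MergeTree ORDER BY x
SETTINGS min_bytes_for_wide_part = 123456;

-- The injected setting appears verbatim in the output, which is why tests that
-- call SHOW CREATE are excluded from merge tree settings randomization.
SHOW CREATE TABLE show_create_demo;

DROP TABLE show_create_demo;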

View File

@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh

 $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS preferred_block_size_bytes"
-$CLICKHOUSE_CLIENT -q "CREATE TABLE preferred_block_size_bytes (p Date, s String) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=1, index_granularity_bytes=0, min_bytes_for_wide_part = 0"
+$CLICKHOUSE_CLIENT -q "CREATE TABLE preferred_block_size_bytes (p Date, s String) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=1, index_granularity_bytes=0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0"
 $CLICKHOUSE_CLIENT -q "INSERT INTO preferred_block_size_bytes (s) SELECT '16_bytes_-_-_-_' AS s FROM system.numbers LIMIT 10, 90"
 $CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE preferred_block_size_bytes"
 $CLICKHOUSE_CLIENT --preferred_block_size_bytes=26 -q "SELECT DISTINCT blockSize(), ignore(p, s) FROM preferred_block_size_bytes"
@@ -18,7 +18,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS preferred_block_size_bytes"
 # PREWHERE using empty column
 $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS pbs"
-$CLICKHOUSE_CLIENT -q "CREATE TABLE pbs (p Date, i UInt64, sa Array(String)) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=100, index_granularity_bytes=0, min_bytes_for_wide_part = 0"
+$CLICKHOUSE_CLIENT -q "CREATE TABLE pbs (p Date, i UInt64, sa Array(String)) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=100, index_granularity_bytes=0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0"
 $CLICKHOUSE_CLIENT -q "INSERT INTO pbs (p, i, sa) SELECT toDate(i % 30) AS p, number AS i, ['a'] AS sa FROM system.numbers LIMIT 1000"
 $CLICKHOUSE_CLIENT -q "ALTER TABLE pbs ADD COLUMN s UInt8 DEFAULT 0"
 $CLICKHOUSE_CLIENT --preferred_block_size_bytes=100000 -q "SELECT count() FROM pbs PREWHERE s = 0"
@@ -29,7 +29,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE pbs"
 # Nullable PREWHERE
 $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS nullable_prewhere"
-$CLICKHOUSE_CLIENT -q "CREATE TABLE nullable_prewhere (p Date, f Nullable(UInt64), d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=8, index_granularity_bytes=0, min_bytes_for_wide_part = 0"
+$CLICKHOUSE_CLIENT -q "CREATE TABLE nullable_prewhere (p Date, f Nullable(UInt64), d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=8, index_granularity_bytes=0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0"
 $CLICKHOUSE_CLIENT -q "INSERT INTO nullable_prewhere SELECT toDate(0) AS p, if(number % 2 = 0, CAST(number AS Nullable(UInt64)), CAST(NULL AS Nullable(UInt64))) AS f, number as d FROM system.numbers LIMIT 1001"
 $CLICKHOUSE_CLIENT -q "SELECT sum(d), sum(f), max(d) FROM nullable_prewhere PREWHERE NOT isNull(f)"
 $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS nullable_prewhere"

View File

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: no-random-merge-tree-settings
 #--------------------------------------------
 # Description of test result:

View File

@@ -10,6 +10,7 @@ CREATE TABLE zero_rows_per_granule (
 ) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k
 SETTINGS index_granularity_bytes=20, min_index_granularity_bytes=10, write_final_mark = 0,
 min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0,
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
 vertical_merge_algorithm_min_columns_to_activate=0;
@@ -42,6 +43,7 @@ CREATE TABLE four_rows_per_granule (
 ) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k
 SETTINGS index_granularity_bytes=110, min_index_granularity_bytes=100, write_final_mark = 0,
 min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0,
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
 vertical_merge_algorithm_min_columns_to_activate=0;

View File

@@ -9,7 +9,7 @@ CREATE TABLE zero_rows_per_granule (
 k UInt64,
 v1 UInt64,
 v2 Int64
-) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0;
+) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -36,7 +36,7 @@ CREATE TABLE two_rows_per_granule (
 k UInt64,
 v1 UInt64,
 v2 Int64
-) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 40, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0;
+) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 40, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -63,7 +63,7 @@ CREATE TABLE four_rows_per_granule (
 k UInt64,
 v1 UInt64,
 v2 Int64
-) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0;
+) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -97,7 +97,7 @@ CREATE TABLE huge_granularity_small_blocks (
 k UInt64,
 v1 UInt64,
 v2 Int64
-) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 1000000, write_final_mark = 0, min_bytes_for_wide_part = 0;
+) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 1000000, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -128,7 +128,7 @@ CREATE TABLE adaptive_granularity_alter (
 k UInt64,
 v1 UInt64,
 v2 Int64
-) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0, min_bytes_for_wide_part = 0;
+) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -188,7 +188,8 @@ CREATE TABLE zero_rows_per_granule (
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
 vertical_merge_algorithm_min_columns_to_activate=0,
-min_bytes_for_wide_part = 0;
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -223,7 +224,8 @@ CREATE TABLE two_rows_per_granule (
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
 vertical_merge_algorithm_min_columns_to_activate=0,
-min_bytes_for_wide_part = 0;
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -257,7 +259,8 @@ CREATE TABLE four_rows_per_granule (
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
 vertical_merge_algorithm_min_columns_to_activate=0,
-min_bytes_for_wide_part = 0;
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -296,7 +299,8 @@ CREATE TABLE huge_granularity_small_blocks (
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
 vertical_merge_algorithm_min_columns_to_activate=0,
-min_bytes_for_wide_part = 0;
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -334,7 +338,8 @@ CREATE TABLE adaptive_granularity_alter (
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
 vertical_merge_algorithm_min_columns_to_activate=0,
-min_bytes_for_wide_part = 0;
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

View File

@@ -77,7 +77,7 @@ CREATE TABLE large_alter_table_00926 (
 somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)),
 id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC),
 data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4)
-) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS min_index_granularity_bytes=30, write_final_mark = 0, min_bytes_for_wide_part = '10M';
+) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS min_index_granularity_bytes=30, write_final_mark = 0, min_bytes_for_wide_part = '10M', min_rows_for_wide_part = 0;
 INSERT INTO large_alter_table_00926 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000;

View File

@@ -14,7 +14,9 @@ CREATE TABLE zero_rows_per_granule (
 write_final_mark = 0,
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
-vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;
+vertical_merge_algorithm_min_columns_to_activate=0,
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -47,7 +49,9 @@ CREATE TABLE two_rows_per_granule (
 write_final_mark = 0,
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
-vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;
+vertical_merge_algorithm_min_columns_to_activate=0,
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -80,7 +84,9 @@ CREATE TABLE four_rows_per_granule (
 write_final_mark = 0,
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
-vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;
+vertical_merge_algorithm_min_columns_to_activate=0,
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -126,7 +132,9 @@ CREATE TABLE huge_granularity_small_blocks (
 SETTINGS index_granularity_bytes=1000000, write_final_mark = 0,
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
-vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;
+vertical_merge_algorithm_min_columns_to_activate=0,
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -164,7 +172,9 @@ CREATE TABLE adaptive_granularity_alter (
 write_final_mark = 0,
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
-vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;
+vertical_merge_algorithm_min_columns_to_activate=0,
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

View File

@@ -14,7 +14,9 @@ CREATE TABLE zero_rows_per_granule (
 write_final_mark = 0,
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
-vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;
+vertical_merge_algorithm_min_columns_to_activate=0,
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO zero_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1);
@@ -48,7 +50,9 @@ CREATE TABLE four_rows_per_granule (
 write_final_mark = 0,
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
-vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;
+vertical_merge_algorithm_min_columns_to_activate=0,
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1);
@@ -99,7 +103,9 @@ CREATE TABLE six_rows_per_granule (
 write_final_mark = 0,
 enable_vertical_merge_algorithm=1,
 vertical_merge_algorithm_min_rows_to_activate=0,
-vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;
+vertical_merge_algorithm_min_columns_to_activate=0,
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 INSERT INTO six_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 1, 1000, 2000, -1, 2);

View File

@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# Tags: no-parallel
+# Tags: no-parallel, no-random-merge-tree-settings
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS small_table"
-$CLICKHOUSE_CLIENT --query="CREATE TABLE small_table (a UInt64 default 0, n UInt64) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY (a) SETTINGS min_bytes_for_wide_part = 0;"
+$CLICKHOUSE_CLIENT --query="CREATE TABLE small_table (a UInt64 default 0, n UInt64) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY (a) SETTINGS min_bytes_for_wide_part = 0"
 $CLICKHOUSE_CLIENT --query="INSERT INTO small_table (n) SELECT * from system.numbers limit 100000;"
 $CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE small_table FINAL;"

View File

@@ -1,7 +1,8 @@
 DROP TABLE IF EXISTS test_00961;
 CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32)
-ENGINE = MergeTree PARTITION BY d ORDER BY (a, b) SETTINGS index_granularity = 111, min_bytes_for_wide_part = 0;
+ENGINE = MergeTree PARTITION BY d ORDER BY (a, b)
+SETTINGS index_granularity = 111, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 INSERT INTO test_00961 VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789);

View File

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: no-random-merge-tree-settings
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

View File

@@ -1,3 +1,5 @@
+-- Tags: no-random-merge-tree-settings
 DROP TABLE IF EXISTS set_array;
 CREATE TABLE set_array

View File

@@ -1,5 +1,5 @@
 DROP TABLE IF EXISTS tab;
-create table tab (A Int64) Engine=MergeTree order by tuple() SETTINGS min_bytes_for_wide_part = 0;
+create table tab (A Int64) Engine=MergeTree order by tuple() SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 insert into tab select cityHash64(number) from numbers(1000);
 select sum(sleep(0.1)) from tab settings max_block_size = 1, max_execution_time=1; -- { serverError 159 }
 DROP TABLE IF EXISTS tab;

View File

@@ -1,7 +1,7 @@
 SET check_query_single_value_result = 0;
 DROP TABLE IF EXISTS check_query_test;
-CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS min_bytes_for_wide_part = 0;
+CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 -- Number of rows in last granule should be equals to granularity.
 -- Rows in this table are short, so granularity will be 8192.
@@ -17,7 +17,7 @@ DROP TABLE IF EXISTS check_query_test;
 DROP TABLE IF EXISTS check_query_test_non_adaptive;
-CREATE TABLE check_query_test_non_adaptive (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS index_granularity_bytes = 0, min_bytes_for_wide_part = 0;
+CREATE TABLE check_query_test_non_adaptive (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS index_granularity_bytes = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 INSERT INTO check_query_test_non_adaptive SELECT number, toString(number) FROM system.numbers LIMIT 81920;

View File

@@ -12,8 +12,8 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS m"
 $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS buf"
 $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS mv"
-$CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0"
-$CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0"
+$CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0"
+$CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0"
 $CLICKHOUSE_CLIENT -q "CREATE TABLE m (a UInt32, s String) engine = Merge('$CLICKHOUSE_DATABASE', 's[1,2]')"
 $CLICKHOUSE_CLIENT -q "INSERT INTO s1 select (number % 20) * 2 as n, toString(number * number) from numbers(100000)"
@@ -46,7 +46,7 @@ else
 fi
 $CLICKHOUSE_CLIENT -q "SELECT '---MaterializedView---'"
-$CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW mv (a UInt32, s String) engine = MergeTree ORDER BY s SETTINGS min_bytes_for_wide_part = 0 POPULATE AS SELECT a, s FROM s1 WHERE a % 7 = 0"
+$CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW mv (a UInt32, s String) engine = MergeTree ORDER BY s SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0 POPULATE AS SELECT a, s FROM s1 WHERE a % 7 = 0"
 $CLICKHOUSE_CLIENT -q "SELECT a, s FROM mv ORDER BY s LIMIT 10"
 rows_read=$($CLICKHOUSE_CLIENT -q "SELECT a, s FROM mv ORDER BY s LIMIT 10 FORMAT JSON" --max_threads=1 --max_block_size=20 --optimize_read_in_order=1 | grep "rows_read" | sed 's/[^0-9]*//g')

View File

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: no-random-merge-tree-settings
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

View File

@@ -1,4 +1,4 @@
--- Tags: zookeeper
+-- Tags: zookeeper, no-random-merge-tree-settings
 DROP TABLE IF EXISTS versioned_collapsing_table;

View File

@@ -1,4 +1,4 @@
--- Tags: no-s3-storage
+-- Tags: no-s3-storage, no-random-merge-tree-settings
 -- no-s3 because read FileOpen metric
 DROP TABLE IF EXISTS nested;

View File

@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# Tags: no-parallel
+# Tags: no-parallel, no-random-merge-tree-settings
 set -ue

View File

@@ -11,7 +11,10 @@ CREATE TABLE adaptive_table(
 value String
 ) ENGINE MergeTree()
 ORDER BY key
-SETTINGS index_granularity_bytes=1048576, min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0;
+SETTINGS index_granularity_bytes=1048576,
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0,
+enable_vertical_merge_algorithm = 0;
 SET max_block_size=900;

View File

@@ -4,7 +4,8 @@ CREATE TABLE wide_to_comp (a Int, b Int, c Int)
 ENGINE = MergeTree ORDER BY a
 settings vertical_merge_algorithm_min_rows_to_activate = 1,
 vertical_merge_algorithm_min_columns_to_activate = 1,
-min_bytes_for_wide_part = 0;
+min_bytes_for_wide_part = 0,
+min_rows_for_wide_part = 0;
 SYSTEM STOP merges wide_to_comp;

View File

@@ -1,3 +1,5 @@
+-- Tags: no-random-merge-tree-settings
 SET convert_query_to_cnf = 1;
 SET optimize_using_constraints = 1;
 SET optimize_move_to_prewhere = 1;

View File

@@ -4,5 +4,5 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT --compress 0 --max_memory_usage 1G --query "SELECT range(65535) FROM system.one ARRAY JOIN range(65536) AS number" 2>&1 | grep -oF 'Code: 241' | head -n1
-$CLICKHOUSE_CLIENT --compress 1 --max_memory_usage 1G --query "SELECT range(65535) FROM system.one ARRAY JOIN range(65536) AS number" 2>&1 | grep -oF 'Code: 241' | head -n1
+$CLICKHOUSE_CLIENT --compression 0 --max_memory_usage 1G --query "SELECT range(65535) FROM system.one ARRAY JOIN range(65536) AS number" 2>&1 | grep -oF 'Code: 241' | head -n1
+$CLICKHOUSE_CLIENT --compression 1 --max_memory_usage 1G --query "SELECT range(65535) FROM system.one ARRAY JOIN range(65536) AS number" 2>&1 | grep -oF 'Code: 241' | head -n1

View File

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: no-random-merge-tree-settings
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

View File

@@ -1,3 +1,5 @@
+-- Tags: no-random-merge-tree-settings
 SET optimize_move_to_prewhere = 1;
 SET convert_query_to_cnf = 0;

View File

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: no-random-merge-tree-settings
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

View File

@@ -1,3 +1,7 @@
+-- Tags: no-random-merge-tree-settings
+-- Tag no-random-merge-tree-settings: query is rewritten in parser
+-- while adding merge tree settings
 select 'disable AUTO_INCREMENT compatibility mode';
 set compatibility_ignore_auto_increment_in_create_table=false;

View File

@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# Tags: no-s3-storage
+# Tags: no-s3-storage, no-random-merge-tree-settings
 # Tag no-s3-storage: s3 does not have fsync
 CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

View File

@@ -1,4 +1,4 @@
--- Tags: no-backward-compatibility-check
+-- Tags: no-backward-compatibility-check, no-random-merge-tree-settings
 drop table if exists test_02381;
 create table test_02381(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b);