From 91e78672fa3fb3b74c233516ae85a7ecd89b7c14 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Wed, 24 Jun 2020 15:19:29 +0300
Subject: [PATCH 01/11] Enable compact parts by default for small parts

---
 src/Interpreters/SystemLog.cpp             | 3 +--
 src/Storages/MergeTree/MergeTreeSettings.h | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp
index b432cd8803b..c2e82646de9 100644
--- a/src/Interpreters/SystemLog.cpp
+++ b/src/Interpreters/SystemLog.cpp
@@ -56,8 +56,7 @@ std::shared_ptr<TSystemLog> createSystemLog(
     else
     {
         String partition_by = config.getString(config_prefix + ".partition_by", "toYYYYMM(event_date)");
-        engine = "ENGINE = MergeTree PARTITION BY (" + partition_by + ") ORDER BY (event_date, event_time)"
-            "SETTINGS min_bytes_for_wide_part = '10M'"; /// Use polymorphic parts for log tables by default
+        engine = "ENGINE = MergeTree PARTITION BY (" + partition_by + ") ORDER BY (event_date, event_time)";
     }

     size_t flush_interval_milliseconds = config.getUInt64(config_prefix + ".flush_interval_milliseconds", DEFAULT_SYSTEM_LOG_FLUSH_INTERVAL_MILLISECONDS);

diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h
index f2d2a7cc3d4..d88fe26454c 100644
--- a/src/Storages/MergeTree/MergeTreeSettings.h
+++ b/src/Storages/MergeTree/MergeTreeSettings.h
@@ -29,7 +29,7 @@ struct MergeTreeSettings : public SettingsCollection<MergeTreeSettings>
     M(SettingUInt64, index_granularity, 8192, "How many rows correspond to one primary key value.", 0) \
     \
     /** Data storing format settings. */ \
-    M(SettingUInt64, min_bytes_for_wide_part, 0, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \
+    M(SettingUInt64, min_bytes_for_wide_part, 10485760, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \
     M(SettingUInt64, min_rows_for_wide_part, 0, "Minimal number of rows to create part in wide format instead of compact", 0) \
     \
     /** Merge settings. */ \
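In practice, patch 01 means that a part whose uncompressed size is under 10485760 bytes (10 MiB) is now written in the compact format, where all columns share a single data file, while larger parts keep the classic wide layout with a separate file pair per column. The SystemLog.cpp hunk drops the explicit min_bytes_for_wide_part = '10M' for log tables because the very same value is now the global default. A quick sketch of how to observe the new behaviour (the table name is invented for illustration; system.parts has a part_type column on builds that support compact parts):

    CREATE TABLE part_type_demo (x UInt64) ENGINE = MergeTree ORDER BY x;
    INSERT INTO part_type_demo SELECT number FROM numbers(1000);
    -- Well under the 10 MiB threshold, so the new part should be Compact:
    SELECT name, part_type FROM system.parts WHERE table = 'part_type_demo' AND active;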
From c5f46b37e6a044c3a690678c7542f749a265a546 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 28 Jun 2020 00:18:27 +0300
Subject: [PATCH 02/11] Update some tests but not all

---
 ...system_columns_and_system_tables.reference |  2 +-
 .../00804_test_delta_codec_compression.sql    | 12 ++++-----
 ...ndex_granularity_collapsing_merge_tree.sql | 18 +++++++------
 ..._adaptive_index_granularity_merge_tree.sql | 25 +++++++++++--------
 ...index_granularity_replacing_merge_tree.sql | 10 ++++----
 ...larity_versioned_collapsing_merge_tree.sql |  6 ++---
 ...test_fix_extra_seek_on_compressed_cache.sh |  2 +-
 .../queries/0_stateless/00933_ttl_simple.sql  | 10 +-------
 ...hecksums_in_system_parts_columns_table.sql | 12 ++++-----
 .../0_stateless/01039_mergetree_exec_time.sql |  2 +-
 ...1042_check_query_and_last_granule_size.sql |  4 +--
 .../01045_order_by_pk_special_storages.sh     | 12 ++++-----
 .../0_stateless/01055_compact_parts.sql       |  3 ++-
 .../00152_insert_different_granularity.sql    |  4 +--
 14 files changed, 61 insertions(+), 61 deletions(-)

diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference b/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference
index 1d1177ba447..ff02b0ba702 100644
--- a/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference
+++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference
@@ -6,7 +6,7 @@
 │ name2 │ 1 │ 0 │ 0 │ 0 │
 │ name3 │ 0 │ 0 │ 0 │ 0 │
 └───────┴─────────────────────┴───────────────────┴───────────────────┴────────────────────┘
-231 1
+147 1
 ┌─name────────────────┬─partition_key─┬─sorting_key───┬─primary_key─┬─sampling_key─┐
 │ check_system_tables │ date │ date, version │ date │ │
 └─────────────────────┴───────────────┴───────────────┴─────────────┴──────────────┘

diff --git a/tests/queries/0_stateless/00804_test_delta_codec_compression.sql b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql
index ad104eff92c..91bc45df63d 100644
--- a/tests/queries/0_stateless/00804_test_delta_codec_compression.sql
+++ b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql
@@ -7,12 +7,12 @@ DROP TABLE IF EXISTS default_codec_synthetic;
 CREATE TABLE delta_codec_synthetic
 (
     id UInt64 Codec(Delta, ZSTD(3))
-) ENGINE MergeTree() ORDER BY tuple();
+) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

 CREATE TABLE default_codec_synthetic
 (
     id UInt64 Codec(ZSTD(3))
-) ENGINE MergeTree() ORDER BY tuple();
+) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

 INSERT INTO delta_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000;
 INSERT INTO default_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000;
@@ -45,12 +45,12 @@ DROP TABLE IF EXISTS default_codec_float;
 CREATE TABLE delta_codec_float
 (
     id Float64 Codec(Delta, LZ4HC)
-) ENGINE MergeTree() ORDER BY tuple();
+) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

 CREATE TABLE default_codec_float
 (
     id Float64 Codec(LZ4HC)
-) ENGINE MergeTree() ORDER BY tuple();
+) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

 INSERT INTO delta_codec_float SELECT number FROM numbers(1547510400, 500000) WHERE number % 3 == 0 OR number % 5 == 0 OR number % 7 == 0 OR number % 11 == 0;
 INSERT INTO default_codec_float SELECT * from delta_codec_float;
@@ -83,12 +83,12 @@ DROP TABLE IF EXISTS default_codec_string;
 CREATE TABLE delta_codec_string
 (
     id Float64 Codec(Delta, LZ4)
-) ENGINE
MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; CREATE TABLE default_codec_string ( id Float64 Codec(LZ4) -) ENGINE MergeTree() ORDER BY tuple(); +) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; INSERT INTO delta_codec_string SELECT concat(toString(number), toString(number % 100)) FROM numbers(1547510400, 500000); INSERT INTO default_codec_string SELECT * from delta_codec_string; diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql index b610d605e23..5603b722513 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql @@ -8,10 +8,11 @@ CREATE TABLE zero_rows_per_granule ( v2 Int64, Sign Int8 ) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k - SETTINGS index_granularity_bytes=20, write_final_mark = 0, - enable_vertical_merge_algorithm=1, - vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + SETTINGS index_granularity_bytes=20, write_final_mark = 0, + min_bytes_for_wide_part = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0; INSERT INTO zero_rows_per_granule (p, k, v1, v2, Sign) VALUES ('2018-05-15', 1, 1000, 2000, 1), ('2018-05-16', 2, 3000, 4000, 1), ('2018-05-17', 3, 5000, 6000, 1), ('2018-05-18', 4, 7000, 8000, 1); @@ -39,10 +40,11 @@ CREATE TABLE four_rows_per_granule ( v2 Int64, Sign Int8 ) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k - SETTINGS index_granularity_bytes=110, write_final_mark = 0, - enable_vertical_merge_algorithm=1, - vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + SETTINGS index_granularity_bytes=110, write_final_mark = 0, + min_bytes_for_wide_part = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0; INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign) VALUES ('2018-05-15', 1, 1000, 2000, 1), ('2018-05-16', 2, 3000, 4000, 1), ('2018-05-17', 3, 5000, 6000, 1), ('2018-05-18', 4, 7000, 8000, 1); diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql index 18ebebb316c..48b6fae19fe 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql @@ -7,7 +7,7 @@ CREATE TABLE zero_rows_per_granule ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, write_final_mark = 0; +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, write_final_mark = 0, min_bytes_for_wide_part = 0; INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -34,7 +34,7 @@ CREATE TABLE two_rows_per_granule ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 40, write_final_mark = 0; +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k 
SETTINGS index_granularity_bytes = 40, write_final_mark = 0, min_bytes_for_wide_part = 0; INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -61,7 +61,7 @@ CREATE TABLE four_rows_per_granule ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, write_final_mark = 0; +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, write_final_mark = 0, min_bytes_for_wide_part = 0; INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -95,7 +95,7 @@ CREATE TABLE huge_granularity_small_blocks ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 1000000, write_final_mark = 0; +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 1000000, write_final_mark = 0, min_bytes_for_wide_part = 0; INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -126,7 +126,7 @@ CREATE TABLE adaptive_granularity_alter ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, write_final_mark = 0; +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, write_final_mark = 0, min_bytes_for_wide_part = 0; INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -183,7 +183,8 @@ CREATE TABLE zero_rows_per_granule ( SETTINGS index_granularity_bytes=20, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0; INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -215,7 +216,8 @@ CREATE TABLE two_rows_per_granule ( SETTINGS index_granularity_bytes=40, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0; INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -246,7 +248,8 @@ CREATE TABLE four_rows_per_granule ( SETTINGS index_granularity_bytes = 110, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0; INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -284,7 +287,8 @@ CREATE TABLE huge_granularity_small_blocks ( SETTINGS 
index_granularity_bytes=1000000, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0; INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -319,7 +323,8 @@ CREATE TABLE adaptive_granularity_alter ( SETTINGS index_granularity_bytes=110, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0; INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql index f72d5f0f9cb..53a546f9d0f 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql @@ -10,7 +10,7 @@ CREATE TABLE zero_rows_per_granule ( SETTINGS index_granularity_bytes=20, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -41,7 +41,7 @@ CREATE TABLE two_rows_per_granule ( SETTINGS index_granularity_bytes=40, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -72,7 +72,7 @@ CREATE TABLE four_rows_per_granule ( SETTINGS index_granularity_bytes = 110, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -118,7 +118,7 @@ CREATE TABLE huge_granularity_small_blocks ( SETTINGS index_granularity_bytes=1000000, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -154,7 +154,7 @@ CREATE TABLE adaptive_granularity_alter ( SETTINGS 
index_granularity_bytes=110, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql index c5b65839b2a..05f4dc835e5 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql @@ -12,7 +12,7 @@ CREATE TABLE zero_rows_per_granule ( SETTINGS index_granularity_bytes=20, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; INSERT INTO zero_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1); @@ -44,7 +44,7 @@ CREATE TABLE four_rows_per_granule ( SETTINGS index_granularity_bytes=120, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1); @@ -89,7 +89,7 @@ CREATE TABLE six_rows_per_granule ( SETTINGS index_granularity_bytes=170, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0; + vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; INSERT INTO six_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 1, 1000, 2000, -1, 2); diff --git a/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh index 1f7571a2404..e0225f0d31d 100755 --- a/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh +++ b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS small_table" -$CLICKHOUSE_CLIENT --query="CREATE TABLE small_table (a UInt64 default 0, n UInt64) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY (a);" +$CLICKHOUSE_CLIENT --query="CREATE TABLE small_table (a UInt64 default 0, n UInt64) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY (a) SETTINGS min_bytes_for_wide_part = 0;" $CLICKHOUSE_CLIENT --query="INSERT INTO small_table(n) SELECT * from system.numbers limit 100000;" diff --git a/tests/queries/0_stateless/00933_ttl_simple.sql b/tests/queries/0_stateless/00933_ttl_simple.sql index b924faad3f5..83d9962043d 100644 
--- a/tests/queries/0_stateless/00933_ttl_simple.sql +++ b/tests/queries/0_stateless/00933_ttl_simple.sql @@ -13,9 +13,8 @@ create table ttl_00933_1 (d DateTime, a Int, b Int) engine = MergeTree order by insert into ttl_00933_1 values (now(), 1, 2); insert into ttl_00933_1 values (now(), 3, 4); insert into ttl_00933_1 values (now() + 1000, 5, 6); +select sleep(1.1) format Null; optimize table ttl_00933_1 final; -- check ttl merge for part with both expired and unexpired values -select sleep(1.1) format Null; -- wait if very fast merge happen -optimize table ttl_00933_1 final; select a, b from ttl_00933_1; drop table if exists ttl_00933_1; @@ -24,7 +23,6 @@ create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 DAY) engine = Mer insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 1); insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 2); insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 3); -select sleep(0.7) format Null; -- wait if very fast merge happen optimize table ttl_00933_1 final; select * from ttl_00933_1 order by d; @@ -34,7 +32,6 @@ create table ttl_00933_1 (d DateTime, a Int) engine = MergeTree order by tuple() insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 1); insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 2); insert into ttl_00933_1 values (toDateTime('2100-10-10 00:00:00'), 3); -select sleep(0.7) format Null; -- wait if very fast merge happen optimize table ttl_00933_1 final; select * from ttl_00933_1 order by d; @@ -43,7 +40,6 @@ drop table if exists ttl_00933_1; create table ttl_00933_1 (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d) ttl d + interval 1 day; insert into ttl_00933_1 values (toDate('2000-10-10'), 1); insert into ttl_00933_1 values (toDate('2100-10-10'), 2); -select sleep(0.7) format Null; -- wait if very fast merge happen optimize table ttl_00933_1 final; select * from ttl_00933_1 order by d; @@ -52,7 +48,6 @@ drop table if exists ttl_00933_1; create table ttl_00933_1 (b Int, a Int ttl now()-1000) engine = MergeTree order by tuple() partition by tuple(); show create table ttl_00933_1; insert into ttl_00933_1 values (1, 1); -select sleep(0.7) format Null; -- wait if very fast merge happen optimize table ttl_00933_1 final; select * from ttl_00933_1; @@ -61,7 +56,6 @@ drop table if exists ttl_00933_1; create table ttl_00933_1 (b Int, a Int ttl now()+1000) engine = MergeTree order by tuple() partition by tuple(); show create table ttl_00933_1; insert into ttl_00933_1 values (1, 1); -select sleep(0.7) format Null; -- wait if very fast merge happen optimize table ttl_00933_1 final; select * from ttl_00933_1; @@ -70,7 +64,6 @@ drop table if exists ttl_00933_1; create table ttl_00933_1 (b Int, a Int ttl today()-1) engine = MergeTree order by tuple() partition by tuple(); show create table ttl_00933_1; insert into ttl_00933_1 values (1, 1); -select sleep(0.7) format Null; -- wait if very fast merge happen optimize table ttl_00933_1 final; select * from ttl_00933_1; @@ -79,7 +72,6 @@ drop table if exists ttl_00933_1; create table ttl_00933_1 (b Int, a Int ttl today()+1) engine = MergeTree order by tuple() partition by tuple(); show create table ttl_00933_1; insert into ttl_00933_1 values (1, 1); -select sleep(0.7) format Null; -- wait if very fast merge happen optimize table ttl_00933_1 final; select * from ttl_00933_1; diff --git a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql 
b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql index b9eed1e8183..e865ed609be 100644 --- a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql +++ b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS test_00961; -CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = MergeTree(d, (a, b), 111); +CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = MergeTree(d, (a, b), 111) SETTINGS min_bytes_for_wide_part = 0; INSERT INTO test_00961 VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); -SELECT - name, - table, - hash_of_all_files, - hash_of_uncompressed_files, +SELECT + name, + table, + hash_of_all_files, + hash_of_uncompressed_files, uncompressed_hash_of_compressed_files FROM system.parts WHERE table = 'test_00961' and database = currentDatabase(); diff --git a/tests/queries/0_stateless/01039_mergetree_exec_time.sql b/tests/queries/0_stateless/01039_mergetree_exec_time.sql index 4cefb2e9305..d3aade41cea 100644 --- a/tests/queries/0_stateless/01039_mergetree_exec_time.sql +++ b/tests/queries/0_stateless/01039_mergetree_exec_time.sql @@ -1,5 +1,5 @@ DROP TABLE IF EXISTS tab; -create table tab (A Int64) Engine=MergeTree order by tuple(); +create table tab (A Int64) Engine=MergeTree order by tuple() SETTINGS min_bytes_for_wide_part = 0; insert into tab select cityHash64(number) from numbers(1000); select sum(sleep(0.1)) from tab settings max_block_size = 1, max_execution_time=1; -- { serverError 159 } DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql b/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql index 9777ea1dc45..c62fe25a041 100644 --- a/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql +++ b/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql @@ -1,7 +1,7 @@ SET check_query_single_value_result = 0; DROP TABLE IF EXISTS check_query_test; -CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey; +CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS min_bytes_for_wide_part = 0; -- Number of rows in last granule should be equals to granularity. -- Rows in this table are short, so granularity will be 8192. 
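A pattern running through patch 02 is worth spelling out: these tests are not being relaxed, they are pinned to the wide format because they assert wide-layout specifics (per-column file checksums, mark counts, rows per granule, seek behaviour) that compact parts store differently; where pinning was not appropriate, patch 03 below updates the expected output instead, as with the 00961 checksums reference. The two thresholds also appear to combine conjunctively: judging by these changes, a part is written wide only once it reaches both min_rows_for_wide_part and min_bytes_for_wide_part, so under the new 10 MiB byte default even a many-row part stays compact unless the byte threshold is lowered too. A sketch of that interplay (table name invented for illustration):

    CREATE TABLE wide_vs_compact (x UInt64) ENGINE = MergeTree ORDER BY x
    SETTINGS min_rows_for_wide_part = 10;  -- byte threshold keeps its new 10 MiB default
    INSERT INTO wide_vs_compact SELECT number FROM numbers(1000);
    -- 1000 rows clear the row threshold, but a few KiB do not clear the byte one,
    -- so the part should still be Compact; adding min_bytes_for_wide_part = 0,
    -- as the tests above do, makes the same insert produce a Wide part.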
@@ -17,7 +17,7 @@ DROP TABLE IF EXISTS check_query_test; DROP TABLE IF EXISTS check_query_test_non_adaptive; -CREATE TABLE check_query_test_non_adaptive (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS index_granularity_bytes = 0; +CREATE TABLE check_query_test_non_adaptive (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS index_granularity_bytes = 0, min_bytes_for_wide_part = 0; INSERT INTO check_query_test_non_adaptive SELECT number, toString(number) FROM system.numbers LIMIT 81920; diff --git a/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh b/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh index 0898fec802c..b0d63b75dd5 100755 --- a/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh +++ b/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh @@ -10,11 +10,11 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS s2" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS m" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS buf" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS mv" - -$CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3" -$CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3" -$CLICKHOUSE_CLIENT -q "CREATE TABLE m (a UInt32, s String) engine = Merge(currentDatabase(), 's[1,2]')" +$CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0" + +$CLICKHOUSE_CLIENT -q "CREATE TABLE m (a UInt32, s String) engine = Merge(currentDatabase(), 's[1,2]') SETTINGS min_bytes_for_wide_part = 0" $CLICKHOUSE_CLIENT -q "INSERT INTO s1 select (number % 20) * 2 as n, toString(number * number) from numbers(100000)" $CLICKHOUSE_CLIENT -q "INSERT INTO s2 select (number % 20) * 2 + 1 as n, toString(number * number * number) from numbers(100000)" @@ -45,7 +45,7 @@ else fi $CLICKHOUSE_CLIENT -q "SELECT '---MaterializedView---'" -$CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW mv (a UInt32, s String) engine = MergeTree ORDER BY s POPULATE AS SELECT a, s FROM s1 WHERE a % 7 = 0" +$CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW mv (a UInt32, s String) engine = MergeTree ORDER BY s SETTINGS min_bytes_for_wide_part = 0 POPULATE AS SELECT a, s FROM s1 WHERE a % 7 = 0" $CLICKHOUSE_CLIENT -q "SELECT a, s FROM mv ORDER BY s LIMIT 10" rows_read=`$CLICKHOUSE_CLIENT -q "SELECT a, s FROM mv ORDER BY s LIMIT 10 FORMAT JSON" --max_threads=1 --max_block_size=20 | grep "rows_read" | sed 's/[^0-9]*//g'` @@ -59,4 +59,4 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS s1" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS s2" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS m" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS buf" -$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS mv" \ No newline at end of file +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS mv" diff --git a/tests/queries/0_stateless/01055_compact_parts.sql b/tests/queries/0_stateless/01055_compact_parts.sql index 05b0657ba7e..e99af76439c 100755 --- a/tests/queries/0_stateless/01055_compact_parts.sql +++ b/tests/queries/0_stateless/01055_compact_parts.sql @@ -3,9 +3,10 @@ set mutations_sync = 2; drop table if exists mt_compact; create table mt_compact(a UInt64, b UInt64 DEFAULT a * a, s String, n Nested(x UInt32, y String), lc LowCardinality(String)) 
-engine = MergeTree +engine = MergeTree order by a partition by a % 10 settings index_granularity = 8, +min_bytes_for_wide_part = 0, min_rows_for_wide_part = 10; insert into mt_compact (a, s, n.y, lc) select number, toString((number * 2132214234 + 5434543) % 2133443), ['a', 'b', 'c'], number % 2 ? 'bar' : 'baz' from numbers(90); diff --git a/tests/queries/1_stateful/00152_insert_different_granularity.sql b/tests/queries/1_stateful/00152_insert_different_granularity.sql index 5ca34bbe48e..7e04aedf2dd 100644 --- a/tests/queries/1_stateful/00152_insert_different_granularity.sql +++ b/tests/queries/1_stateful/00152_insert_different_granularity.sql @@ -1,6 +1,6 @@ DROP TABLE IF EXISTS fixed_granularity_table; -CREATE TABLE fixed_granularity_table (`WatchID` UInt64, `JavaEnable` UInt8, `Title` String, `GoodEvent` Int16, `EventTime` DateTime, `EventDate` Date, `CounterID` UInt32, `ClientIP` UInt32, `ClientIP6` FixedString(16), `RegionID` UInt32, `UserID` UInt64, `CounterClass` Int8, `OS` UInt8, `UserAgent` UInt8, `URL` String, `Referer` String, `URLDomain` String, `RefererDomain` String, `Refresh` UInt8, `IsRobot` UInt8, `RefererCategories` Array(UInt16), `URLCategories` Array(UInt16), `URLRegions` Array(UInt32), `RefererRegions` Array(UInt32), `ResolutionWidth` UInt16, `ResolutionHeight` UInt16, `ResolutionDepth` UInt8, `FlashMajor` UInt8, `FlashMinor` UInt8, `FlashMinor2` String, `NetMajor` UInt8, `NetMinor` UInt8, `UserAgentMajor` UInt16, `UserAgentMinor` FixedString(2), `CookieEnable` UInt8, `JavascriptEnable` UInt8, `IsMobile` UInt8, `MobilePhone` UInt8, `MobilePhoneModel` String, `Params` String, `IPNetworkID` UInt32, `TraficSourceID` Int8, `SearchEngineID` UInt16, `SearchPhrase` String, `AdvEngineID` UInt8, `IsArtifical` UInt8, `WindowClientWidth` UInt16, `WindowClientHeight` UInt16, `ClientTimeZone` Int16, `ClientEventTime` DateTime, `SilverlightVersion1` UInt8, `SilverlightVersion2` UInt8, `SilverlightVersion3` UInt32, `SilverlightVersion4` UInt16, `PageCharset` String, `CodeVersion` UInt32, `IsLink` UInt8, `IsDownload` UInt8, `IsNotBounce` UInt8, `FUniqID` UInt64, `HID` UInt32, `IsOldCounter` UInt8, `IsEvent` UInt8, `IsParameter` UInt8, `DontCountHits` UInt8, `WithHash` UInt8, `HitColor` FixedString(1), `UTCEventTime` DateTime, `Age` UInt8, `Sex` UInt8, `Income` UInt8, `Interests` UInt16, `Robotness` UInt8, `GeneralInterests` Array(UInt16), `RemoteIP` UInt32, `RemoteIP6` FixedString(16), `WindowName` Int32, `OpenerName` Int32, `HistoryLength` Int16, `BrowserLanguage` FixedString(2), `BrowserCountry` FixedString(2), `SocialNetwork` String, `SocialAction` String, `HTTPError` UInt16, `SendTiming` Int32, `DNSTiming` Int32, `ConnectTiming` Int32, `ResponseStartTiming` Int32, `ResponseEndTiming` Int32, `FetchTiming` Int32, `RedirectTiming` Int32, `DOMInteractiveTiming` Int32, `DOMContentLoadedTiming` Int32, `DOMCompleteTiming` Int32, `LoadEventStartTiming` Int32, `LoadEventEndTiming` Int32, `NSToDOMContentLoadedTiming` Int32, `FirstPaintTiming` Int32, `RedirectCount` Int8, `SocialSourceNetworkID` UInt8, `SocialSourcePage` String, `ParamPrice` Int64, `ParamOrderID` String, `ParamCurrency` FixedString(3), `ParamCurrencyID` UInt16, `GoalsReached` Array(UInt32), `OpenstatServiceName` String, `OpenstatCampaignID` String, `OpenstatAdID` String, `OpenstatSourceID` String, `UTMSource` String, `UTMMedium` String, `UTMCampaign` String, `UTMContent` String, `UTMTerm` String, `FromTag` String, `HasGCLID` UInt8, `RefererHash` UInt64, `URLHash` UInt64, `CLID` UInt32, `YCLID` UInt64, `ShareService` String, 
`ShareURL` String, `ShareTitle` String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `IslandID` FixedString(16), `RequestNum` UInt32, `RequestTry` UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192, index_granularity_bytes=0; -- looks like default table before update +CREATE TABLE fixed_granularity_table (`WatchID` UInt64, `JavaEnable` UInt8, `Title` String, `GoodEvent` Int16, `EventTime` DateTime, `EventDate` Date, `CounterID` UInt32, `ClientIP` UInt32, `ClientIP6` FixedString(16), `RegionID` UInt32, `UserID` UInt64, `CounterClass` Int8, `OS` UInt8, `UserAgent` UInt8, `URL` String, `Referer` String, `URLDomain` String, `RefererDomain` String, `Refresh` UInt8, `IsRobot` UInt8, `RefererCategories` Array(UInt16), `URLCategories` Array(UInt16), `URLRegions` Array(UInt32), `RefererRegions` Array(UInt32), `ResolutionWidth` UInt16, `ResolutionHeight` UInt16, `ResolutionDepth` UInt8, `FlashMajor` UInt8, `FlashMinor` UInt8, `FlashMinor2` String, `NetMajor` UInt8, `NetMinor` UInt8, `UserAgentMajor` UInt16, `UserAgentMinor` FixedString(2), `CookieEnable` UInt8, `JavascriptEnable` UInt8, `IsMobile` UInt8, `MobilePhone` UInt8, `MobilePhoneModel` String, `Params` String, `IPNetworkID` UInt32, `TraficSourceID` Int8, `SearchEngineID` UInt16, `SearchPhrase` String, `AdvEngineID` UInt8, `IsArtifical` UInt8, `WindowClientWidth` UInt16, `WindowClientHeight` UInt16, `ClientTimeZone` Int16, `ClientEventTime` DateTime, `SilverlightVersion1` UInt8, `SilverlightVersion2` UInt8, `SilverlightVersion3` UInt32, `SilverlightVersion4` UInt16, `PageCharset` String, `CodeVersion` UInt32, `IsLink` UInt8, `IsDownload` UInt8, `IsNotBounce` UInt8, `FUniqID` UInt64, `HID` UInt32, `IsOldCounter` UInt8, `IsEvent` UInt8, `IsParameter` UInt8, `DontCountHits` UInt8, `WithHash` UInt8, `HitColor` FixedString(1), `UTCEventTime` DateTime, `Age` UInt8, `Sex` UInt8, `Income` UInt8, `Interests` UInt16, `Robotness` UInt8, `GeneralInterests` Array(UInt16), `RemoteIP` UInt32, `RemoteIP6` FixedString(16), `WindowName` Int32, `OpenerName` Int32, `HistoryLength` Int16, `BrowserLanguage` FixedString(2), `BrowserCountry` FixedString(2), `SocialNetwork` String, `SocialAction` String, `HTTPError` UInt16, `SendTiming` Int32, `DNSTiming` Int32, `ConnectTiming` Int32, `ResponseStartTiming` Int32, `ResponseEndTiming` Int32, `FetchTiming` Int32, `RedirectTiming` Int32, `DOMInteractiveTiming` Int32, `DOMContentLoadedTiming` Int32, `DOMCompleteTiming` Int32, `LoadEventStartTiming` Int32, `LoadEventEndTiming` Int32, `NSToDOMContentLoadedTiming` Int32, `FirstPaintTiming` Int32, `RedirectCount` Int8, `SocialSourceNetworkID` UInt8, `SocialSourcePage` String, `ParamPrice` Int64, `ParamOrderID` String, `ParamCurrency` FixedString(3), `ParamCurrencyID` UInt16, `GoalsReached` Array(UInt32), `OpenstatServiceName` String, `OpenstatCampaignID` String, `OpenstatAdID` String, `OpenstatSourceID` String, `UTMSource` String, `UTMMedium` String, `UTMCampaign` String, `UTMContent` String, `UTMTerm` String, `FromTag` String, `HasGCLID` UInt8, `RefererHash` UInt64, `URLHash` UInt64, `CLID` UInt32, `YCLID` UInt64, `ShareService` String, `ShareURL` String, `ShareTitle` String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), 
`ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `IslandID` FixedString(16), `RequestNum` UInt32, `RequestTry` UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192, index_granularity_bytes=0, min_bytes_for_wide_part = 0; -- looks like default table before update ALTER TABLE fixed_granularity_table REPLACE PARTITION 201403 FROM test.hits; @@ -29,7 +29,7 @@ ALTER TABLE test.hits ATTACH PARTITION 201403; DROP TABLE IF EXISTS hits_copy; -CREATE TABLE hits_copy (`WatchID` UInt64, `JavaEnable` UInt8, `Title` String, `GoodEvent` Int16, `EventTime` DateTime, `EventDate` Date, `CounterID` UInt32, `ClientIP` UInt32, `ClientIP6` FixedString(16), `RegionID` UInt32, `UserID` UInt64, `CounterClass` Int8, `OS` UInt8, `UserAgent` UInt8, `URL` String, `Referer` String, `URLDomain` String, `RefererDomain` String, `Refresh` UInt8, `IsRobot` UInt8, `RefererCategories` Array(UInt16), `URLCategories` Array(UInt16), `URLRegions` Array(UInt32), `RefererRegions` Array(UInt32), `ResolutionWidth` UInt16, `ResolutionHeight` UInt16, `ResolutionDepth` UInt8, `FlashMajor` UInt8, `FlashMinor` UInt8, `FlashMinor2` String, `NetMajor` UInt8, `NetMinor` UInt8, `UserAgentMajor` UInt16, `UserAgentMinor` FixedString(2), `CookieEnable` UInt8, `JavascriptEnable` UInt8, `IsMobile` UInt8, `MobilePhone` UInt8, `MobilePhoneModel` String, `Params` String, `IPNetworkID` UInt32, `TraficSourceID` Int8, `SearchEngineID` UInt16, `SearchPhrase` String, `AdvEngineID` UInt8, `IsArtifical` UInt8, `WindowClientWidth` UInt16, `WindowClientHeight` UInt16, `ClientTimeZone` Int16, `ClientEventTime` DateTime, `SilverlightVersion1` UInt8, `SilverlightVersion2` UInt8, `SilverlightVersion3` UInt32, `SilverlightVersion4` UInt16, `PageCharset` String, `CodeVersion` UInt32, `IsLink` UInt8, `IsDownload` UInt8, `IsNotBounce` UInt8, `FUniqID` UInt64, `HID` UInt32, `IsOldCounter` UInt8, `IsEvent` UInt8, `IsParameter` UInt8, `DontCountHits` UInt8, `WithHash` UInt8, `HitColor` FixedString(1), `UTCEventTime` DateTime, `Age` UInt8, `Sex` UInt8, `Income` UInt8, `Interests` UInt16, `Robotness` UInt8, `GeneralInterests` Array(UInt16), `RemoteIP` UInt32, `RemoteIP6` FixedString(16), `WindowName` Int32, `OpenerName` Int32, `HistoryLength` Int16, `BrowserLanguage` FixedString(2), `BrowserCountry` FixedString(2), `SocialNetwork` String, `SocialAction` String, `HTTPError` UInt16, `SendTiming` Int32, `DNSTiming` Int32, `ConnectTiming` Int32, `ResponseStartTiming` Int32, `ResponseEndTiming` Int32, `FetchTiming` Int32, `RedirectTiming` Int32, `DOMInteractiveTiming` Int32, `DOMContentLoadedTiming` Int32, `DOMCompleteTiming` Int32, `LoadEventStartTiming` Int32, `LoadEventEndTiming` Int32, `NSToDOMContentLoadedTiming` Int32, `FirstPaintTiming` Int32, `RedirectCount` Int8, `SocialSourceNetworkID` UInt8, `SocialSourcePage` String, `ParamPrice` Int64, `ParamOrderID` String, `ParamCurrency` FixedString(3), `ParamCurrencyID` UInt16, `GoalsReached` Array(UInt32), `OpenstatServiceName` String, `OpenstatCampaignID` String, `OpenstatAdID` String, `OpenstatSourceID` String, `UTMSource` String, `UTMMedium` String, `UTMCampaign` String, `UTMContent` String, `UTMTerm` String, `FromTag` String, `HasGCLID` UInt8, `RefererHash` UInt64, `URLHash` UInt64, `CLID` UInt32, `YCLID` UInt64, `ShareService` String, `ShareURL` String, `ShareTitle` String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` 
Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `IslandID` FixedString(16), `RequestNum` UInt32, `RequestTry` UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192, index_granularity_bytes=0; +CREATE TABLE hits_copy (`WatchID` UInt64, `JavaEnable` UInt8, `Title` String, `GoodEvent` Int16, `EventTime` DateTime, `EventDate` Date, `CounterID` UInt32, `ClientIP` UInt32, `ClientIP6` FixedString(16), `RegionID` UInt32, `UserID` UInt64, `CounterClass` Int8, `OS` UInt8, `UserAgent` UInt8, `URL` String, `Referer` String, `URLDomain` String, `RefererDomain` String, `Refresh` UInt8, `IsRobot` UInt8, `RefererCategories` Array(UInt16), `URLCategories` Array(UInt16), `URLRegions` Array(UInt32), `RefererRegions` Array(UInt32), `ResolutionWidth` UInt16, `ResolutionHeight` UInt16, `ResolutionDepth` UInt8, `FlashMajor` UInt8, `FlashMinor` UInt8, `FlashMinor2` String, `NetMajor` UInt8, `NetMinor` UInt8, `UserAgentMajor` UInt16, `UserAgentMinor` FixedString(2), `CookieEnable` UInt8, `JavascriptEnable` UInt8, `IsMobile` UInt8, `MobilePhone` UInt8, `MobilePhoneModel` String, `Params` String, `IPNetworkID` UInt32, `TraficSourceID` Int8, `SearchEngineID` UInt16, `SearchPhrase` String, `AdvEngineID` UInt8, `IsArtifical` UInt8, `WindowClientWidth` UInt16, `WindowClientHeight` UInt16, `ClientTimeZone` Int16, `ClientEventTime` DateTime, `SilverlightVersion1` UInt8, `SilverlightVersion2` UInt8, `SilverlightVersion3` UInt32, `SilverlightVersion4` UInt16, `PageCharset` String, `CodeVersion` UInt32, `IsLink` UInt8, `IsDownload` UInt8, `IsNotBounce` UInt8, `FUniqID` UInt64, `HID` UInt32, `IsOldCounter` UInt8, `IsEvent` UInt8, `IsParameter` UInt8, `DontCountHits` UInt8, `WithHash` UInt8, `HitColor` FixedString(1), `UTCEventTime` DateTime, `Age` UInt8, `Sex` UInt8, `Income` UInt8, `Interests` UInt16, `Robotness` UInt8, `GeneralInterests` Array(UInt16), `RemoteIP` UInt32, `RemoteIP6` FixedString(16), `WindowName` Int32, `OpenerName` Int32, `HistoryLength` Int16, `BrowserLanguage` FixedString(2), `BrowserCountry` FixedString(2), `SocialNetwork` String, `SocialAction` String, `HTTPError` UInt16, `SendTiming` Int32, `DNSTiming` Int32, `ConnectTiming` Int32, `ResponseStartTiming` Int32, `ResponseEndTiming` Int32, `FetchTiming` Int32, `RedirectTiming` Int32, `DOMInteractiveTiming` Int32, `DOMContentLoadedTiming` Int32, `DOMCompleteTiming` Int32, `LoadEventStartTiming` Int32, `LoadEventEndTiming` Int32, `NSToDOMContentLoadedTiming` Int32, `FirstPaintTiming` Int32, `RedirectCount` Int8, `SocialSourceNetworkID` UInt8, `SocialSourcePage` String, `ParamPrice` Int64, `ParamOrderID` String, `ParamCurrency` FixedString(3), `ParamCurrencyID` UInt16, `GoalsReached` Array(UInt32), `OpenstatServiceName` String, `OpenstatCampaignID` String, `OpenstatAdID` String, `OpenstatSourceID` String, `UTMSource` String, `UTMMedium` String, `UTMCampaign` String, `UTMContent` String, `UTMTerm` String, `FromTag` String, `HasGCLID` UInt8, `RefererHash` UInt64, `URLHash` UInt64, `CLID` UInt32, `YCLID` UInt64, `ShareService` String, `ShareURL` String, `ShareTitle` String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `IslandID` FixedString(16), `RequestNum` 
UInt32, `RequestTry` UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192, index_granularity_bytes=0, min_bytes_for_wide_part = 0; ALTER TABLE hits_copy REPLACE PARTITION 201403 FROM test.hits; From 2d43519e038ded3bef0962a3d5ff7a5da7248914 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 8 Jul 2020 02:27:20 +0300 Subject: [PATCH 03/11] Update some tests --- .../0_stateless/00160_merge_and_index_in_in.sql | 4 ++-- .../0_stateless/00443_preferred_block_size_bytes.sh | 6 +++--- ...0484_preferred_max_column_in_block_size_bytes.sql | 8 ++++---- .../00653_verification_monotonic_data_load.sh | 12 ++++++------ ...checksums_in_system_parts_columns_table.reference | 2 +- ...00961_checksums_in_system_parts_columns_table.sql | 2 +- .../01045_order_by_pk_special_storages.sh | 2 +- .../0_stateless/01343_min_bytes_to_use_mmap_io.sql | 2 +- .../01344_min_bytes_to_use_mmap_io_index.sql | 2 +- 9 files changed, 20 insertions(+), 20 deletions(-) diff --git a/tests/queries/0_stateless/00160_merge_and_index_in_in.sql b/tests/queries/0_stateless/00160_merge_and_index_in_in.sql index 6e2838afe88..bdab3f7640d 100644 --- a/tests/queries/0_stateless/00160_merge_and_index_in_in.sql +++ b/tests/queries/0_stateless/00160_merge_and_index_in_in.sql @@ -1,7 +1,7 @@ DROP TABLE IF EXISTS mt_00160; DROP TABLE IF EXISTS merge_00160; -CREATE TABLE mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64) ENGINE = MergeTree(d, x, 1); +CREATE TABLE mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64) ENGINE = MergeTree PARTITION BY d ORDER BY x SETTINGS index_granularity = 1, min_bytes_for_wide_part = 0; CREATE TABLE merge_00160 (d Date, x UInt64) ENGINE = Merge(currentDatabase(), '^mt_00160$'); SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; @@ -14,7 +14,7 @@ SELECT *, b FROM merge_00160 WHERE x IN (12345, 67890) AND NOT ignore(blockSize( DROP TABLE merge_00160; DROP TABLE mt_00160; -CREATE TABLE mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64, y UInt64, z UInt64) ENGINE = MergeTree(d, (x, z), 1); +CREATE TABLE mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64, y UInt64, z UInt64) ENGINE = MergeTree PARTITION BY d ORDER BY (x, z) SETTINGS index_granularity = 1, min_bytes_for_wide_part = 0; INSERT INTO mt_00160 (x, y, z) SELECT number AS x, number + 10 AS y, number / 2 AS z FROM system.numbers LIMIT 100000; diff --git a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh index 4bf104a2d03..c05611783bb 100755 --- a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh +++ b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
$CURDIR/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS preferred_block_size_bytes" -$CLICKHOUSE_CLIENT -q "CREATE TABLE preferred_block_size_bytes (p Date, s String) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=1, index_granularity_bytes=0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE preferred_block_size_bytes (p Date, s String) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=1, index_granularity_bytes=0, min_bytes_for_wide_part = 0" $CLICKHOUSE_CLIENT -q "INSERT INTO preferred_block_size_bytes (s) SELECT '16_bytes_-_-_-_' AS s FROM system.numbers LIMIT 10, 90" $CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE preferred_block_size_bytes" $CLICKHOUSE_CLIENT --preferred_block_size_bytes=26 -q "SELECT DISTINCT blockSize(), ignore(p, s) FROM preferred_block_size_bytes" @@ -17,7 +17,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS preferred_block_size_bytes" # PREWHERE using empty column $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS pbs" -$CLICKHOUSE_CLIENT -q "CREATE TABLE pbs (p Date, i UInt64, sa Array(String)) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=100, index_granularity_bytes=0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE pbs (p Date, i UInt64, sa Array(String)) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=100, index_granularity_bytes=0, min_bytes_for_wide_part = 0" $CLICKHOUSE_CLIENT -q "INSERT INTO pbs (p, i, sa) SELECT toDate(i % 30) AS p, number AS i, ['a'] AS sa FROM system.numbers LIMIT 1000" $CLICKHOUSE_CLIENT -q "ALTER TABLE pbs ADD COLUMN s UInt8 DEFAULT 0" $CLICKHOUSE_CLIENT --preferred_block_size_bytes=100000 -q "SELECT count() FROM pbs PREWHERE s = 0" @@ -28,7 +28,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE pbs" # Nullable PREWHERE $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS nullable_prewhere" -$CLICKHOUSE_CLIENT -q "CREATE TABLE nullable_prewhere (p Date, f Nullable(UInt64), d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=8, index_granularity_bytes=0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE nullable_prewhere (p Date, f Nullable(UInt64), d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=8, index_granularity_bytes=0, min_bytes_for_wide_part = 0" $CLICKHOUSE_CLIENT -q "INSERT INTO nullable_prewhere SELECT toDate(0) AS p, if(number % 2 = 0, CAST(number AS Nullable(UInt64)), CAST(NULL AS Nullable(UInt64))) AS f, number as d FROM system.numbers LIMIT 1001" $CLICKHOUSE_CLIENT -q "SELECT sum(d), sum(f), max(d) FROM nullable_prewhere PREWHERE NOT isNull(f)" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS nullable_prewhere" diff --git a/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql b/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql index e3ab4180d4e..dc021ad52db 100644 --- a/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql +++ b/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql @@ -1,5 +1,5 @@ drop table if exists tab_00484; -create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 8192); +create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0; insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 8192; set preferred_block_size_bytes = 2000000; @@ -15,19 +15,19 @@ set preferred_max_column_in_block_size_bytes = 4194304; select 
max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484; drop table if exists tab_00484; -create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 32); +create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0; insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 47; set preferred_max_column_in_block_size_bytes = 1152; select blockSize(), * from tab_00484 where x = 1 or x > 36 format Null; drop table if exists tab_00484; -create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 8192); +create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0; insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 10; set preferred_max_column_in_block_size_bytes = 128; select s from tab_00484 where s == '' format Null; drop table if exists tab_00484; -create table tab_00484 (date Date, x UInt64, s String) engine = MergeTree(date, (date, x), 8192); +create table tab_00484 (date Date, x UInt64, s String) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0; insert into tab_00484 select today(), number, 'abc' from system.numbers limit 81920; set preferred_block_size_bytes = 0; select count(*) from tab_00484 prewhere s != 'abc' format Null; diff --git a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh index e52610f03ba..3a8c2445e24 100755 --- a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh +++ b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh @@ -20,12 +20,12 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS unsigned_integer_test_table;" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS enum_test_table;" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS date_test_table;" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE string_test_table (val String) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE fixed_string_test_table (val FixedString(1)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE signed_integer_test_table (val Int32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE unsigned_integer_test_table (val UInt32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE enum_test_table (val Enum16('hello' = 1, 'world' = 2, 'yandex' = 256, 'clickhouse' = 257)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE date_test_table (val Date) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE string_test_table (val String) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE fixed_string_test_table (val FixedString(1)) ENGINE = MergeTree ORDER BY val 
SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE signed_integer_test_table (val Int32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE unsigned_integer_test_table (val UInt32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE enum_test_table (val Enum16('hello' = 1, 'world' = 2, 'yandex' = 256, 'clickhouse' = 257)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE date_test_table (val Date) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" ${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES;" diff --git a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference index 282b0ddca7b..3bcfc00eded 100644 --- a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference +++ b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference @@ -1 +1 @@ -20000101_20000101_1_1_0 test_00961 1c63ae7a38eb76e2a71c28aaf0b3ae4d 0053df9b467cc5483e752ec62e91cfd4 da96ff1e527a8a1f908ddf2b1d0af239 +20000101_20000101_1_1_0 test_00961 b78f351b7498ecc9d4732ad29c3952de 1d4b7fbf05d0fc5c2f4559ca75aa32f7 38f047b57fd1bb81cf77e273deb34218 diff --git a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql index e865ed609be..f3a729dd4fd 100644 --- a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql +++ b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql @@ -1,6 +1,6 @@ DROP TABLE IF EXISTS test_00961; -CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = MergeTree(d, (a, b), 111) SETTINGS min_bytes_for_wide_part = 0; +CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = MergeTree(d, (a, b), 111); INSERT INTO test_00961 VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); diff --git a/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh b/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh index b0d63b75dd5..3c549fa64ff 100755 --- a/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh +++ b/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh @@ -14,7 +14,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS mv" $CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0" $CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0" -$CLICKHOUSE_CLIENT -q "CREATE TABLE m (a UInt32, s String) engine = Merge(currentDatabase(), 's[1,2]') SETTINGS min_bytes_for_wide_part = 0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE m (a UInt32, s String) engine = Merge(currentDatabase(), 's[1,2]')" $CLICKHOUSE_CLIENT -q "INSERT INTO s1 select (number % 20) * 2 as n, toString(number * number) from numbers(100000)" $CLICKHOUSE_CLIENT -q "INSERT INTO s2 
select (number % 20) * 2 + 1 as n, toString(number * number * number) from numbers(100000)" diff --git a/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql b/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql index 9ff16ca60a7..62c5d20d714 100644 --- a/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql +++ b/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql @@ -1,5 +1,5 @@ DROP TABLE IF EXISTS test_01343; -CREATE TABLE test_01343 (x String) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE test_01343 (x String) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; INSERT INTO test_01343 VALUES ('Hello, world'); SET min_bytes_to_use_mmap_io = 1; diff --git a/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql b/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql index 67baef7136d..544c0af7925 100644 --- a/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql +++ b/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql @@ -1,5 +1,5 @@ DROP TABLE IF EXISTS test_01344; -CREATE TABLE test_01344 (x String, INDEX idx (x) TYPE set(10) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE test_01344 (x String, INDEX idx (x) TYPE set(10) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; INSERT INTO test_01344 VALUES ('Hello, world'); SET min_bytes_to_use_mmap_io = 1; From b9bf67b6ac31741246dce7790ec3890fd599ff7d Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 10 Sep 2020 04:27:36 +0300 Subject: [PATCH 04/11] allow to randomize part type in MergeTree --- src/Storages/MergeTree/MergeTreeSettings.h | 5 ++- .../MergeTree/registerStorageMergeTree.cpp | 34 +++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 49847617d30..9adbc1a7b3d 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -20,7 +20,7 @@ struct Settings; M(UInt64, index_granularity, 8192, "How many rows correspond to one primary key value.", 0) \ \ /** Data storing format settings. */ \ - M(UInt64, min_bytes_for_wide_part, 0, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \ + M(UInt64, min_bytes_for_wide_part, 10485760, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \ M(UInt64, min_rows_for_wide_part, 0, "Minimal number of rows to create part in wide format instead of compact", 0) \ M(UInt64, min_bytes_for_compact_part, 0, "Experimental. Minimal uncompressed size in bytes to create part in compact format instead of saving it in RAM", 0) \ M(UInt64, min_rows_for_compact_part, 0, "Experimental. Minimal number of rows to create part in compact format instead of saving it in RAM", 0) \ @@ -97,6 +97,9 @@ struct Settings; M(String, storage_policy, "default", "Name of storage disk policy", 0) \ M(Bool, allow_nullable_key, false, "Allow Nullable types as primary keys.", 0) \ \ + /** Settings for testing purposes */ \ + M(Bool, randomize_part_type, false, "For testing purposes only. Randomizes part type between wide and compact", 0) \ + \ /** Obsolete settings. Kept for backward compatibility only. 
*/ \ M(UInt64, min_relative_delay_to_yield_leadership, 120, "Obsolete setting, does nothing.", 0) \ M(UInt64, check_delay_period, 60, "Obsolete setting, does nothing.", 0) \ diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index b0c422bd79f..5609c130aba 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -233,6 +234,25 @@ If you use the Replicated version of engines, see https://clickhouse.tech/docs/e } +static void randomizePartTypeSettings(const std::unique_ptr & storage_settings) +{ + static constexpr auto MAX_THRESHOLD_FOR_ROWS = 100000; + static constexpr auto MAX_THRESHOLD_FOR_BYTES = 1024 * 1024 * 10; + + /// Create all parts in wide format with probability 1/3. + if (thread_local_rng() % 3 == 0) + { + storage_settings->min_rows_for_wide_part = 0; + storage_settings->min_bytes_for_wide_part = 0; + } + else + { + storage_settings->min_rows_for_wide_part = std::uniform_int_distribution{0, MAX_THRESHOLD_FOR_ROWS}(thread_local_rng); + storage_settings->min_bytes_for_wide_part = std::uniform_int_distribution{0, MAX_THRESHOLD_FOR_BYTES}(thread_local_rng); + } +} + + static StoragePtr create(const StorageFactory::Arguments & args) { /** [Replicated][|Summing|Collapsing|Aggregating|Replacing|Graphite]MergeTree (2 * 7 combinations) engines @@ -652,6 +672,20 @@ static StoragePtr create(const StorageFactory::Arguments & args) ++arg_num; } + /// Allow to randomize part type for tests to cover more cases. + /// But if settings were set explicitly restrict it. + if (storage_settings->randomize_part_type + && !storage_settings->min_rows_for_wide_part.changed + && !storage_settings->min_bytes_for_wide_part.changed) + { + randomizePartTypeSettings(storage_settings); + LOG_INFO(&Poco::Logger::get(args.table_id.getNameForLogs() + " (registerStorageMergeTree)"), + "Applied setting 'randomize_part_type'. " + "Setting 'min_rows_for_wide_part' changed to {}. 
" + "Setting 'min_bytes_for_wide_part' changed to {}.", + storage_settings->min_rows_for_wide_part, storage_settings->min_bytes_for_wide_part); + } + if (arg_num != arg_cnt) throw Exception("Wrong number of engine arguments.", ErrorCodes::BAD_ARGUMENTS); From 6031e6bae95f8207ab554a422244ecf160aa90d8 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 10 Sep 2020 20:09:19 +0300 Subject: [PATCH 05/11] fix tests --- .../configs/wide_parts_only.xml | 6 +++++ .../test_adaptive_granularity/test.py | 24 ++++++++++++++----- .../configs/compact_parts.xml | 1 + .../test_polymorphic_parts/test.py | 1 + ...46_clear_column_in_partition_zookeeper.sql | 6 ++--- ...system_columns_and_system_tables.reference | 2 +- ...00753_system_columns_and_system_tables.sql | 3 ++- ...ndex_granularity_collapsing_merge_tree.sql | 2 +- ..._adaptive_index_granularity_merge_tree.sql | 12 ++-------- .../queries/0_stateless/00933_ttl_simple.sql | 1 - ...ms_in_system_parts_columns_table.reference | 2 +- ...hecksums_in_system_parts_columns_table.sql | 10 ++------ 12 files changed, 38 insertions(+), 32 deletions(-) create mode 100644 tests/integration/test_adaptive_granularity/configs/wide_parts_only.xml diff --git a/tests/integration/test_adaptive_granularity/configs/wide_parts_only.xml b/tests/integration/test_adaptive_granularity/configs/wide_parts_only.xml new file mode 100644 index 00000000000..42e2173f718 --- /dev/null +++ b/tests/integration/test_adaptive_granularity/configs/wide_parts_only.xml @@ -0,0 +1,6 @@ + + + 0 + 0 + + diff --git a/tests/integration/test_adaptive_granularity/test.py b/tests/integration/test_adaptive_granularity/test.py index 21d65588de4..7efafb4ddd1 100644 --- a/tests/integration/test_adaptive_granularity/test.py +++ b/tests/integration/test_adaptive_granularity/test.py @@ -13,10 +13,10 @@ node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml' node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True) node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', with_installed_binary=True) -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True) +node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True) node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', with_installed_binary=True) -node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True) +node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True) node7 = cluster.add_instance('node7', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True, with_installed_binary=True) node8 = cluster.add_instance('node8', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True) @@ -270,8 +270,14 @@ def test_mixed_granularity_single_node(start_dynamic_cluster, node): 
node.query("INSERT INTO table_with_default_granularity VALUES (toDate('2018-09-01'), 1, 333), (toDate('2018-09-02'), 2, 444)") def callback(n): - n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", "1") - n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", "1") + new_config = """ + + 1 + 0 +""" + + n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", new_config) + n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", new_config) node.restart_with_latest_version(callback_onstop=callback) node.query("SYSTEM RELOAD CONFIG") @@ -304,8 +310,14 @@ def test_version_update_two_nodes(start_dynamic_cluster): node12.query("SYSTEM SYNC REPLICA table_with_default_granularity", timeout=20) assert node12.query("SELECT COUNT() FROM table_with_default_granularity") == '2\n' def callback(n): - n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", "0") - n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", "0") + new_config = """ + + 0 + 0 +""" + + n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", new_config) + n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", new_config) node12.restart_with_latest_version(callback_onstop=callback) diff --git a/tests/integration/test_polymorphic_parts/configs/compact_parts.xml b/tests/integration/test_polymorphic_parts/configs/compact_parts.xml index e14c3f0ceae..5b3afe65d92 100644 --- a/tests/integration/test_polymorphic_parts/configs/compact_parts.xml +++ b/tests/integration/test_polymorphic_parts/configs/compact_parts.xml @@ -1,5 +1,6 @@ 512 + 0 diff --git a/tests/integration/test_polymorphic_parts/test.py b/tests/integration/test_polymorphic_parts/test.py index ed89f768d4c..cf2268bc831 100644 --- a/tests/integration/test_polymorphic_parts/test.py +++ b/tests/integration/test_polymorphic_parts/test.py @@ -42,6 +42,7 @@ def create_tables(name, nodes, node_settings, shard): ORDER BY id SETTINGS index_granularity = 64, index_granularity_bytes = {index_granularity_bytes}, min_rows_for_wide_part = {min_rows_for_wide_part}, min_rows_for_compact_part = {min_rows_for_compact_part}, + min_bytes_for_wide_part = 0, min_bytes_for_compact_part = 0, in_memory_parts_enable_wal = 1 '''.format(name=name, shard=shard, repl=i, **settings)) diff --git a/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql index 56f3a654682..e6de5a91ce3 100644 --- a/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql +++ b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql @@ -3,7 +3,7 @@ SELECT '===Ordinary case==='; SET replication_alter_partitions_sync = 2; DROP TABLE IF EXISTS clear_column; -CREATE TABLE clear_column (d Date, num Int64, str String) ENGINE = MergeTree(d, d, 8192); +CREATE TABLE clear_column (d Date, num Int64, str String) ENGINE = MergeTree ORDER BY d PARTITION by d SETTINGS min_bytes_for_wide_part = 0; INSERT INTO clear_column VALUES ('2016-12-12', 1, 'a'), ('2016-11-12', 2, 'b'); @@ -24,8 +24,8 @@ SELECT '===Replicated case==='; DROP TABLE IF EXISTS clear_column1; DROP TABLE IF EXISTS clear_column2; SELECT sleep(1) FORMAT Null; -CREATE TABLE clear_column1 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '1', d, d, 8192); -CREATE TABLE clear_column2 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '2', d, d, 8192); 
+CREATE TABLE clear_column1 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '1') ORDER BY d PARTITION by d SETTINGS min_bytes_for_wide_part = 0; +CREATE TABLE clear_column2 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '2') ORDER BY d PARTITION by d SETTINGS min_bytes_for_wide_part = 0; INSERT INTO clear_column1 (d) VALUES ('2000-01-01'), ('2000-02-01'); SYSTEM SYNC REPLICA clear_column2; diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference b/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference index 47c15da4b22..4d1fab83cc1 100644 --- a/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference +++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference @@ -6,7 +6,7 @@ │ name2 │ 1 │ 0 │ 0 │ 0 │ │ name3 │ 0 │ 0 │ 0 │ 0 │ └───────┴─────────────────────┴───────────────────┴───────────────────┴────────────────────┘ -147 1 +231 1 ┌─name────────────────┬─partition_key─┬─sorting_key───┬─primary_key─┬─sampling_key─┐ │ check_system_tables │ date │ date, version │ date │ │ └─────────────────────┴───────────────┴───────────────┴─────────────┴──────────────┘ diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql b/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql index 1d7faa32952..9b9fa04e6b0 100644 --- a/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql +++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql @@ -9,7 +9,8 @@ CREATE TABLE check_system_tables ) ENGINE = MergeTree() ORDER BY name1 PARTITION BY name2 - SAMPLE BY name1; + SAMPLE BY name1 + SETTINGS min_bytes_for_wide_part = 0; SELECT name, partition_key, sorting_key, primary_key, sampling_key, storage_policy, total_rows FROM system.tables diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql index 74159738bce..d4c19cbe8f2 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql @@ -41,7 +41,7 @@ CREATE TABLE four_rows_per_granule ( Sign Int8 ) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes=110, min_index_granularity_bytes=100, write_final_mark = 0, - + min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0; diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql index f2e9e9749e3..249c6eebfcf 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql @@ -61,11 +61,7 @@ CREATE TABLE four_rows_per_granule ( k UInt64, v1 UInt64, v2 Int64 -<<<<<<< HEAD -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, write_final_mark = 0, min_bytes_for_wide_part = 0; -======= -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 10, write_final_mark = 0; ->>>>>>> upstream/master +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k 
SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0; INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -130,11 +126,7 @@ CREATE TABLE adaptive_granularity_alter ( k UInt64, v1 UInt64, v2 Int64 -<<<<<<< HEAD -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, write_final_mark = 0, min_bytes_for_wide_part = 0; -======= -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0; ->>>>>>> upstream/master +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0, min_bytes_for_wide_part = 0; INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); diff --git a/tests/queries/0_stateless/00933_ttl_simple.sql b/tests/queries/0_stateless/00933_ttl_simple.sql index 83d9962043d..c0adcd21e62 100644 --- a/tests/queries/0_stateless/00933_ttl_simple.sql +++ b/tests/queries/0_stateless/00933_ttl_simple.sql @@ -23,7 +23,6 @@ create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 DAY) engine = Mer insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 1); insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 2); insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 3); -optimize table ttl_00933_1 final; select * from ttl_00933_1 order by d; drop table if exists ttl_00933_1; diff --git a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference index 3bcfc00eded..099fe566817 100644 --- a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference +++ b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference @@ -1 +1 @@ -20000101_20000101_1_1_0 test_00961 b78f351b7498ecc9d4732ad29c3952de 1d4b7fbf05d0fc5c2f4559ca75aa32f7 38f047b57fd1bb81cf77e273deb34218 +20000101_1_1_0 test_00961 5f2e2d4bbc14336f44037e3ac667f247 ed226557cd4e18ecf3ae06c6d5e6725c da96ff1e527a8a1f908ddf2b1d0af239 diff --git a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql index 46daa0bf711..792bf62f9b1 100644 --- a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql +++ b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql @@ -1,21 +1,15 @@ DROP TABLE IF EXISTS test_00961; -CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = MergeTree(d, (a, b), 111); +CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32) + ENGINE = MergeTree PARTITION BY d ORDER BY (a, b) SETTINGS index_granularity = 111, min_bytes_for_wide_part = 0; INSERT INTO test_00961 VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); SELECT -<<<<<<< HEAD name, table, hash_of_all_files, hash_of_uncompressed_files, -======= - name, - table, - hash_of_all_files, - hash_of_uncompressed_files, ->>>>>>> upstream/master uncompressed_hash_of_compressed_files FROM system.parts WHERE table = 'test_00961' 
and database = currentDatabase();

From fa6d88b3b29f9a0e852e009651c32652e0201fad Mon Sep 17 00:00:00 2001
From: Anton Popov
Date: Thu, 10 Sep 2020 21:43:02 +0300
Subject: [PATCH 06/11] fix more tests

---
 .../test_filesystem_layout/test.py            |  2 +-
 .../configs/config.d/storage_conf.xml         |  4 ++++
 tests/integration/test_partition/test.py      |  2 +-
 .../configs/config.d/storage_conf.xml         |  4 ++++
 .../test_replicated_merge_tree_s3/test.py     | 22 ++++++++++++++-----
 tests/integration/test_ttl_replicated/test.py | 10 +++++----
 ...46_clear_column_in_partition_zookeeper.sql |  6 ++---
 .../0_stateless/00933_ttl_simple.reference    |  8 +++----
 .../queries/0_stateless/00933_ttl_simple.sql  | 15 ++++++++-----
 9 files changed, 48 insertions(+), 25 deletions(-)

diff --git a/tests/integration/test_filesystem_layout/test.py b/tests/integration/test_filesystem_layout/test.py
index 83389b3d9bd..777e5ab7b9a 100644
--- a/tests/integration/test_filesystem_layout/test.py
+++ b/tests/integration/test_filesystem_layout/test.py
@@ -19,7 +19,7 @@ def test_file_path_escaping(started_cluster):
     node.query('CREATE DATABASE IF NOT EXISTS test ENGINE = Ordinary')
     node.query('''
         CREATE TABLE test.`T.a_b,l-e!` (`~Id` UInt32)
-        ENGINE = MergeTree() PARTITION BY `~Id` ORDER BY `~Id`;
+        ENGINE = MergeTree() PARTITION BY `~Id` ORDER BY `~Id` SETTINGS min_bytes_for_wide_part = 0;
     ''')
     node.query('''INSERT INTO test.`T.a_b,l-e!` VALUES (1);''')
     node.query('''ALTER TABLE test.`T.a_b,l-e!` FREEZE;''')
diff --git a/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml
index d097675ca63..343f248c5fb 100644
--- a/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml
+++ b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml
@@ -25,4 +25,8 @@
+
+    <merge_tree>
+        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
+    </merge_tree>
diff --git a/tests/integration/test_partition/test.py b/tests/integration/test_partition/test.py
index 80fbe947316..2a2bbe205b5 100644
--- a/tests/integration/test_partition/test.py
+++ b/tests/integration/test_partition/test.py
@@ -178,7 +178,7 @@ def test_attach_check_all_parts(attach_check_all_parts_table):
     exec_bash('cp -pr {} {}'.format(path_to_detached + '0_3_3_0', path_to_detached + 'deleting_0_7_7_0'))
     error = instance.client.query_and_get_error("ALTER TABLE test.attach_partition ATTACH PARTITION 0")
-    assert 0 <= error.find('No columns in part 0_5_5_0')
+    assert 0 <= error.find('No columns in part 0_5_5_0') or 0 <= error.find('No columns.txt in part 0_5_5_0')
     parts = q("SElECT name FROM system.parts WHERE table='attach_partition' AND database='test' ORDER BY name")
     assert TSV(parts) == TSV('1_2_2_0\n1_4_4_0')
diff --git a/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml
index b32770095fc..f3b7f959ce9 100644
--- a/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml
+++ b/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml
@@ -18,4 +18,8 @@
+
+    <merge_tree>
+        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
+    </merge_tree>
diff --git a/tests/integration/test_replicated_merge_tree_s3/test.py b/tests/integration/test_replicated_merge_tree_s3/test.py
index a77a69b842b..612b50becf7 100644
--- a/tests/integration/test_replicated_merge_tree_s3/test.py
+++ b/tests/integration/test_replicated_merge_tree_s3/test.py
@@ -30,7 +30,8 @@ def cluster():
 FILES_OVERHEAD = 1
 FILES_OVERHEAD_PER_COLUMN = 2 # Data and mark files
-FILES_OVERHEAD_PER_PART = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6
+ 1 +FILES_OVERHEAD_PER_PART_WIDE = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6 + 1 +FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1 def random_string(length): @@ -44,7 +45,7 @@ def generate_values(date_str, count, sign=1): return ",".join(["('{}',{},'{}')".format(x, y, z) for x, y, z in data]) -def create_table(cluster): +def create_table(cluster, additional_settings=None): create_table_statement = """ CREATE TABLE s3_test ( dt Date, @@ -56,6 +57,9 @@ def create_table(cluster): ORDER BY (dt, id) SETTINGS storage_policy='s3' """ + if additional_settings: + create_table_statement += "," + create_table_statement += additional_settings for node in cluster.instances.values(): node.query(create_table_statement) @@ -72,9 +76,15 @@ def drop_table(cluster): for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')): minio.remove_object(cluster.minio_bucket, obj.object_name) - -def test_insert_select_replicated(cluster): - create_table(cluster) +@pytest.mark.parametrize( + "min_rows_for_wide_part,files_per_part", + [ + (0, FILES_OVERHEAD_PER_PART_WIDE), + (8192, FILES_OVERHEAD_PER_PART_COMPACT) + ] +) +def test_insert_select_replicated(cluster, min_rows_for_wide_part, files_per_part): + create_table(cluster, additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part)) all_values = "" for node_idx in range(1, 4): @@ -90,4 +100,4 @@ def test_insert_select_replicated(cluster): assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values", settings={"select_sequential_consistency": 1}) == all_values minio = cluster.minio_client - assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 3 * (FILES_OVERHEAD + FILES_OVERHEAD_PER_PART * 3) + assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 3 * (FILES_OVERHEAD + files_per_part * 3) diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index 0f201f569b3..39d595662d0 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -30,7 +30,7 @@ def drop_table(nodes, table_name): node.query("DROP TABLE IF EXISTS {} NO DELAY".format(table_name)) time.sleep(1) - +# Column TTL works only with wide parts, because it's very expensive to apply it for compact parts def test_ttl_columns(started_cluster): drop_table([node1, node2], "test_ttl") for node in [node1, node2]: @@ -38,7 +38,7 @@ def test_ttl_columns(started_cluster): ''' CREATE TABLE test_ttl(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}') - ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0; + ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0; '''.format(replica=node.name)) node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-10 00:00:00'), 1, 1, 3)") @@ -59,7 +59,8 @@ def test_merge_with_ttl_timeout(started_cluster): ''' CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') - ORDER BY id PARTITION BY toDayOfMonth(date); + ORDER BY id PARTITION BY toDayOfMonth(date) + SETTINGS min_bytes_for_wide_part=0; '''.format(replica=node.name, table=table)) node1.query("SYSTEM STOP TTL MERGES {table}".format(table=table)) @@ -198,7 +199,7 @@ def test_ttl_double_delete_rule_returns_error(started_cluster): 
CREATE TABLE test_ttl(date DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) - TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0; + TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0 '''.format(replica=node1.name)) assert False except client.QueryRuntimeException: @@ -246,6 +247,7 @@ limitations under the License.""" ) ENGINE = {engine} ORDER BY tuple() TTL d1 + INTERVAL 1 DAY DELETE + SETTINGS min_bytes_for_wide_part=0 """.format(name=name, engine=engine)) node1.query("""ALTER TABLE {name} MODIFY COLUMN s1 String TTL d1 + INTERVAL 1 SECOND""".format(name=name)) diff --git a/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql index e6de5a91ce3..bd6c12ffce4 100644 --- a/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql +++ b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql @@ -3,7 +3,7 @@ SELECT '===Ordinary case==='; SET replication_alter_partitions_sync = 2; DROP TABLE IF EXISTS clear_column; -CREATE TABLE clear_column (d Date, num Int64, str String) ENGINE = MergeTree ORDER BY d PARTITION by d SETTINGS min_bytes_for_wide_part = 0; +CREATE TABLE clear_column (d Date, num Int64, str String) ENGINE = MergeTree ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0; INSERT INTO clear_column VALUES ('2016-12-12', 1, 'a'), ('2016-11-12', 2, 'b'); @@ -24,8 +24,8 @@ SELECT '===Replicated case==='; DROP TABLE IF EXISTS clear_column1; DROP TABLE IF EXISTS clear_column2; SELECT sleep(1) FORMAT Null; -CREATE TABLE clear_column1 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '1') ORDER BY d PARTITION by d SETTINGS min_bytes_for_wide_part = 0; -CREATE TABLE clear_column2 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '2') ORDER BY d PARTITION by d SETTINGS min_bytes_for_wide_part = 0; +CREATE TABLE clear_column1 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '1') ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0; +CREATE TABLE clear_column2 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '2') ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0; INSERT INTO clear_column1 (d) VALUES ('2000-01-01'), ('2000-02-01'); SYSTEM SYNC REPLICA clear_column2; diff --git a/tests/queries/0_stateless/00933_ttl_simple.reference b/tests/queries/0_stateless/00933_ttl_simple.reference index a4ef8033328..e3982814eab 100644 --- a/tests/queries/0_stateless/00933_ttl_simple.reference +++ b/tests/queries/0_stateless/00933_ttl_simple.reference @@ -6,11 +6,11 @@ 2000-10-10 00:00:00 0 2100-10-10 00:00:00 3 2100-10-10 2 -CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL now() - 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL now() - 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 1 0 -CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL now() + 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE 
default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL now() + 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 1 1 -CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL today() - 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL today() - 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 1 0 -CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL today() + 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL today() + 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 1 1 diff --git a/tests/queries/0_stateless/00933_ttl_simple.sql b/tests/queries/0_stateless/00933_ttl_simple.sql index c0adcd21e62..aa8b33b2999 100644 --- a/tests/queries/0_stateless/00933_ttl_simple.sql +++ b/tests/queries/0_stateless/00933_ttl_simple.sql @@ -1,6 +1,8 @@ drop table if exists ttl_00933_1; -create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 second, b Int ttl d + interval 1 second) engine = MergeTree order by tuple() partition by toMinute(d); +-- Column TTL works only with wide parts, because it's very expensive to apply it for compact parts + +create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 second, b Int ttl d + interval 1 second) engine = MergeTree order by tuple() partition by toMinute(d) settings min_bytes_for_wide_part = 0; insert into ttl_00933_1 values (now(), 1, 2); insert into ttl_00933_1 values (now(), 3, 4); select sleep(1.1) format Null; @@ -19,10 +21,11 @@ select a, b from ttl_00933_1; drop table if exists ttl_00933_1; -create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d); +create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d) settings min_bytes_for_wide_part = 0; insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 1); insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 2); insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 3); +optimize table ttl_00933_1 final; select * from ttl_00933_1 order by d; drop table if exists ttl_00933_1; @@ -44,7 +47,7 @@ select * from ttl_00933_1 order by d; -- const DateTime TTL positive drop table if exists ttl_00933_1; -create table ttl_00933_1 (b Int, a Int ttl now()-1000) engine = MergeTree order by tuple() partition by tuple(); +create table ttl_00933_1 (b Int, a Int ttl now()-1000) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0; show create table ttl_00933_1; insert into ttl_00933_1 values (1, 1); optimize table ttl_00933_1 final; @@ -52,7 +55,7 @@ select * from ttl_00933_1; -- const DateTime TTL negative drop table if exists ttl_00933_1; -create table ttl_00933_1 (b Int, a Int ttl now()+1000) engine = MergeTree order by tuple() partition by tuple(); +create table ttl_00933_1 (b Int, a Int ttl now()+1000) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0; show create table ttl_00933_1; insert into ttl_00933_1 values (1, 1); optimize table ttl_00933_1 final; @@ -60,7 
+63,7 @@ select * from ttl_00933_1; -- const Date TTL positive drop table if exists ttl_00933_1; -create table ttl_00933_1 (b Int, a Int ttl today()-1) engine = MergeTree order by tuple() partition by tuple(); +create table ttl_00933_1 (b Int, a Int ttl today()-1) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0; show create table ttl_00933_1; insert into ttl_00933_1 values (1, 1); optimize table ttl_00933_1 final; @@ -68,7 +71,7 @@ select * from ttl_00933_1; -- const Date TTL negative drop table if exists ttl_00933_1; -create table ttl_00933_1 (b Int, a Int ttl today()+1) engine = MergeTree order by tuple() partition by tuple(); +create table ttl_00933_1 (b Int, a Int ttl today()+1) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0; show create table ttl_00933_1; insert into ttl_00933_1 values (1, 1); optimize table ttl_00933_1 final; From 8a201a28c04e06f1a2ebb03d51c0e1d8e983680a Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 11 Sep 2020 03:14:35 +0300 Subject: [PATCH 07/11] remove skip list for tests with polymorphic parts --- tests/clickhouse-test | 9 --------- tests/queries/skip_list.json | 27 --------------------------- 2 files changed, 36 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 6bfad37d8ad..a3bed189d55 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -506,15 +506,6 @@ def collect_build_flags(client): else: raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr)) - clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE) - (stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.merge_tree_settings WHERE name = 'min_bytes_for_wide_part'") - - if clickhouse_proc.returncode == 0: - if '10485760' in stdout: - result.append(BuildFlags.POLYMORPHIC_PARTS) - else: - raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr)) - return result diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index adfc5f0e582..535f2757e43 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -127,32 +127,5 @@ "01130_in_memory_parts_partitons", "01225_show_create_table_from_dictionary", "01224_no_superfluous_dict_reload" - ], - "polymorphic-parts": [ - /// These tests fail with compact parts, because they - /// check some implementation defined things - /// like checksums, computed granularity, ProfileEvents, etc. 
- "avx", - "01045_order_by_pk_special_storages", - "01042_check_query_and_last_granule_size", - "00961_checksums_in_system_parts_columns_table", - "00933_test_fix_extra_seek_on_compressed_cache", - "00926_adaptive_index_granularity_collapsing_merge_tree", - "00926_adaptive_index_granularity_merge_tree", - "00926_adaptive_index_granularity_replacing_merge_tree", - "00926_adaptive_index_granularity_versioned_collapsing_merge_tree", - "00804_test_delta_codec_compression", - "00731_long_merge_tree_select_opened_files", - "00653_verification_monotonic_data_load", - "00484_preferred_max_column_in_block_size_bytes", - "00446_clear_column_in_partition_zookeeper", - "00443_preferred_block_size_bytes", - "00160_merge_and_index_in_in", - "01055_compact_parts", - "01039_mergetree_exec_time", - "00933_ttl_simple", /// Maybe it's worth to fix it - "00753_system_columns_and_system_tables", - "01343_min_bytes_to_use_mmap_io", - "01344_min_bytes_to_use_mmap_io_index" ] } From ac9ba23bdfa67bd0188ec00ccbff9816bc981bd5 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 14 Sep 2020 15:49:04 +0300 Subject: [PATCH 08/11] fix more tests --- tests/integration/test_check_table/test.py | 3 ++- .../configs/wide_parts_only.xml | 6 ++++++ .../test_default_compression_codec/test.py | 6 +++--- .../configs/config.d/storage_conf.xml | 4 ++++ .../test_merge_tree_s3_with_cache/test.py | 21 ++++++++++--------- .../configs/wide_parts_only.xml | 6 ++++++ .../test_mutations_hardlinks/test.py | 2 +- 7 files changed, 33 insertions(+), 15 deletions(-) create mode 100644 tests/integration/test_default_compression_codec/configs/wide_parts_only.xml create mode 100644 tests/integration/test_mutations_hardlinks/configs/wide_parts_only.xml diff --git a/tests/integration/test_check_table/test.py b/tests/integration/test_check_table/test.py index 83df59b44a0..f972e7a92ba 100644 --- a/tests/integration/test_check_table/test.py +++ b/tests/integration/test_check_table/test.py @@ -24,7 +24,8 @@ def started_cluster(): node1.query(''' CREATE TABLE non_replicated_mt(date Date, id UInt32, value Int32) - ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id; + ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id + SETTINGS min_bytes_for_wide_part=0; ''') yield cluster diff --git a/tests/integration/test_default_compression_codec/configs/wide_parts_only.xml b/tests/integration/test_default_compression_codec/configs/wide_parts_only.xml new file mode 100644 index 00000000000..42e2173f718 --- /dev/null +++ b/tests/integration/test_default_compression_codec/configs/wide_parts_only.xml @@ -0,0 +1,6 @@ + + + 0 + 0 + + diff --git a/tests/integration/test_default_compression_codec/test.py b/tests/integration/test_default_compression_codec/test.py index d312a93ba01..0cfbb0b67cf 100644 --- a/tests/integration/test_default_compression_codec/test.py +++ b/tests/integration/test_default_compression_codec/test.py @@ -6,9 +6,9 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/default_compression.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/default_compression.xml'], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=['configs/default_compression.xml'], image='yandex/clickhouse-server', tag='20.3.16', stay_alive=True, with_installed_binary=True) +node1 = cluster.add_instance('node1', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True) 
+node2 = cluster.add_instance('node2', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True)
+node3 = cluster.add_instance('node3', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], image='yandex/clickhouse-server', tag='20.3.16', stay_alive=True, with_installed_binary=True)

 @pytest.fixture(scope="module")
 def start_cluster():
diff --git a/tests/integration/test_merge_tree_s3_with_cache/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3_with_cache/configs/config.d/storage_conf.xml
index b32770095fc..f3b7f959ce9 100644
--- a/tests/integration/test_merge_tree_s3_with_cache/configs/config.d/storage_conf.xml
+++ b/tests/integration/test_merge_tree_s3_with_cache/configs/config.d/storage_conf.xml
@@ -18,4 +18,8 @@
+
+    <merge_tree>
+        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
+    </merge_tree>
diff --git a/tests/integration/test_merge_tree_s3_with_cache/test.py b/tests/integration/test_merge_tree_s3_with_cache/test.py
index 25c08777ae5..d5d6db2fb77 100644
--- a/tests/integration/test_merge_tree_s3_with_cache/test.py
+++ b/tests/integration/test_merge_tree_s3_with_cache/test.py
@@ -40,7 +40,8 @@ def get_query_stat(instance, hint):
     return result

-def test_write_is_cached(cluster):
+@pytest.mark.parametrize("min_rows_for_wide_part,read_requests", [(0, 2), (8192, 1)])
+def test_write_is_cached(cluster, min_rows_for_wide_part, read_requests):
     node = cluster.instances["node"]

     node.query(
@@ -50,8 +51,8 @@
             data String
         ) ENGINE=MergeTree()
         ORDER BY id
-        SETTINGS storage_policy='s3'
-        """
+        SETTINGS storage_policy='s3', min_rows_for_wide_part={}
+        """.format(min_rows_for_wide_part)
     )

     node.query("SYSTEM FLUSH LOGS")
@@ -63,12 +64,12 @@
     assert node.query(select_query) == "(0,'data'),(1,'data')"

     stat = get_query_stat(node, select_query)
-    assert stat["S3ReadRequestsCount"] == 2 # Only .bin files should be accessed from S3.
+    assert stat["S3ReadRequestsCount"] == read_requests # Only .bin files should be accessed from S3.

     node.query("DROP TABLE IF EXISTS s3_test NO DELAY")

-def test_read_after_cache_is_wiped(cluster):
+@pytest.mark.parametrize("min_rows_for_wide_part,all_files,bin_files", [(0, 4, 2), (8192, 2, 1)])
+def test_read_after_cache_is_wiped(cluster, min_rows_for_wide_part, all_files, bin_files):
     node = cluster.instances["node"]

     node.query(
@@ -78,8 +79,8 @@
             data String
         ) ENGINE=MergeTree()
         ORDER BY id
-        SETTINGS storage_policy='s3'
-        """
+        SETTINGS storage_policy='s3', min_rows_for_wide_part={}
+        """.format(min_rows_for_wide_part)
     )

     node.query("SYSTEM FLUSH LOGS")
@@ -93,12 +94,12 @@
     select_query = "SELECT * FROM s3_test"
     node.query(select_query)
     stat = get_query_stat(node, select_query)
-    assert stat["S3ReadRequestsCount"] == 4 # .mrk and .bin files should be accessed from S3.
+    assert stat["S3ReadRequestsCount"] == all_files # .mrk and .bin files should be accessed from S3.

     # After cache is populated again, only .bin files should be accessed from S3.
select_query = "SELECT * FROM s3_test order by id FORMAT Values" assert node.query(select_query) == "(0,'data'),(1,'data')" stat = get_query_stat(node, select_query) - assert stat["S3ReadRequestsCount"] == 2 + assert stat["S3ReadRequestsCount"] == bin_files node.query("DROP TABLE IF EXISTS s3_test NO DELAY") diff --git a/tests/integration/test_mutations_hardlinks/configs/wide_parts_only.xml b/tests/integration/test_mutations_hardlinks/configs/wide_parts_only.xml new file mode 100644 index 00000000000..42e2173f718 --- /dev/null +++ b/tests/integration/test_mutations_hardlinks/configs/wide_parts_only.xml @@ -0,0 +1,6 @@ + + + 0 + 0 + + diff --git a/tests/integration/test_mutations_hardlinks/test.py b/tests/integration/test_mutations_hardlinks/test.py index 56852f572ff..4e70e76bc63 100644 --- a/tests/integration/test_mutations_hardlinks/test.py +++ b/tests/integration/test_mutations_hardlinks/test.py @@ -9,7 +9,7 @@ from multiprocessing.dummy import Pool cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1') +node1 = cluster.add_instance('node1', main_configs=['configs/wide_parts_only.xml']) @pytest.fixture(scope="module") def started_cluster(): From f725f8deee7fb8d695e1e3282a8b830a95ccf6ed Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 15 Sep 2020 02:14:14 +0300 Subject: [PATCH 09/11] fix more tests --- .../test_backward_compatibility/configs/wide_parts_only.xml | 5 +++++ tests/integration/test_backward_compatibility/test.py | 4 ++-- .../0_stateless/00804_test_alter_compression_codecs.sql | 2 +- .../0_stateless/00926_adaptive_index_granularity_pk.sql | 2 +- 4 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 tests/integration/test_backward_compatibility/configs/wide_parts_only.xml diff --git a/tests/integration/test_backward_compatibility/configs/wide_parts_only.xml b/tests/integration/test_backward_compatibility/configs/wide_parts_only.xml new file mode 100644 index 00000000000..b240c0fcb2a --- /dev/null +++ b/tests/integration/test_backward_compatibility/configs/wide_parts_only.xml @@ -0,0 +1,5 @@ + + + 0 + + diff --git a/tests/integration/test_backward_compatibility/test.py b/tests/integration/test_backward_compatibility/test.py index 5b51823d361..cef70add3d0 100644 --- a/tests/integration/test_backward_compatibility/test.py +++ b/tests/integration/test_backward_compatibility/test.py @@ -5,7 +5,7 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='19.17.8.54', stay_alive=True, with_installed_binary=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) +node2 = cluster.add_instance('node2', main_configs=['configs/wide_parts_only.xml'], with_zookeeper=True) @pytest.fixture(scope="module") def start_cluster(): @@ -24,7 +24,7 @@ def start_cluster(): cluster.shutdown() -def test_backward_compatability(start_cluster): +def test_backward_compatability1(start_cluster): node2.query("INSERT INTO t VALUES (today(), 1)") node1.query("SYSTEM SYNC REPLICA t", timeout=10) diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql index a9e6c12735c..4710694baf5 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql @@ -63,7 +63,7 @@ CREATE TABLE large_alter_table_00804 ( somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), id UInt64 
CODEC(LZ4, ZSTD, NONE, LZ4HC), data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4) -) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2; +) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2 SETTINGS min_bytes_for_wide_part = 0; INSERT INTO large_alter_table_00804 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000; diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql index b71c0640bd5..fe434845c29 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql @@ -62,7 +62,7 @@ CREATE TABLE large_alter_table_00926 ( somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4) -) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity_bytes=40, min_index_granularity_bytes=30, write_final_mark = 0; +) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS min_index_granularity_bytes=30, write_final_mark = 0, min_bytes_for_wide_part = '10M'; INSERT INTO large_alter_table_00926 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000; From 63db2ca68d9797d5c6f5242350d55fdff6c88f05 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 15 Sep 2020 15:30:07 +0300 Subject: [PATCH 10/11] fix test --- .../queries/0_stateless/00804_test_alter_compression_codecs.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql index 4710694baf5..2a1b9e55b9a 100644 --- a/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql +++ b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql @@ -63,7 +63,7 @@ CREATE TABLE large_alter_table_00804 ( somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4) -) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2 SETTINGS min_bytes_for_wide_part = 0; +) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2, min_bytes_for_wide_part = 0; INSERT INTO large_alter_table_00804 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000; From d1e90e0e4bbb3d273ae06554991a36613e3844cd Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 18 Sep 2020 03:05:51 +0300 Subject: [PATCH 11/11] fix tests --- .../test_adaptive_granularity/test.py | 14 ----------- tests/integration/test_ttl_replicated/test.py | 25 +++---------------- 2 files changed, 3 insertions(+), 36 deletions(-) diff --git a/tests/integration/test_adaptive_granularity/test.py b/tests/integration/test_adaptive_granularity/test.py index 524c8215081..ec3169bb995 100644 --- a/tests/integration/test_adaptive_granularity/test.py +++ b/tests/integration/test_adaptive_granularity/test.py @@ -274,7 +274,6 @@ def test_mixed_granularity_single_node(start_dynamic_cluster, node): "INSERT INTO table_with_default_granularity VALUES (toDate('2018-09-01'), 1, 333), (toDate('2018-09-02'), 2, 444)") def callback(n): -<<<<<<< HEAD new_config = """ 1 @@ -283,12 +282,6 @@ def test_mixed_granularity_single_node(start_dynamic_cluster, node): 
n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", new_config) n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", new_config) -======= - n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", - "1") - n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", - "1") ->>>>>>> upstream/master node.restart_with_latest_version(callback_onstop=callback) node.query("SYSTEM RELOAD CONFIG") @@ -331,7 +324,6 @@ def test_version_update_two_nodes(start_dynamic_cluster): assert node12.query("SELECT COUNT() FROM table_with_default_granularity") == '2\n' def callback(n): -<<<<<<< HEAD new_config = """ 0 @@ -340,12 +332,6 @@ def test_version_update_two_nodes(start_dynamic_cluster): n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", new_config) n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", new_config) -======= - n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", - "0") - n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", - "0") ->>>>>>> upstream/master node12.restart_with_latest_version(callback_onstop=callback) diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index b8832126b0b..878db2da11f 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -55,20 +55,12 @@ def test_merge_with_ttl_timeout(started_cluster): drop_table([node1, node2], table) for node in [node1, node2]: node.query( -<<<<<<< HEAD - ''' - CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') - ORDER BY id PARTITION BY toDayOfMonth(date) - SETTINGS min_bytes_for_wide_part=0; - '''.format(replica=node.name, table=table)) -======= ''' CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') - ORDER BY id PARTITION BY toDayOfMonth(date); + ORDER BY id PARTITION BY toDayOfMonth(date) + SETTINGS min_bytes_for_wide_part=0; '''.format(replica=node.name, table=table)) ->>>>>>> upstream/master node1.query("SYSTEM STOP TTL MERGES {table}".format(table=table)) node2.query("SYSTEM STOP TTL MERGES {table}".format(table=table)) @@ -256,17 +248,6 @@ limitations under the License.""" time.sleep(0.5) node1.query( -<<<<<<< HEAD - """ - CREATE TABLE {name} ( - s1 String, - d1 DateTime - ) ENGINE = {engine} - ORDER BY tuple() - TTL d1 + INTERVAL 1 DAY DELETE - SETTINGS min_bytes_for_wide_part=0 - """.format(name=name, engine=engine)) -======= """ CREATE TABLE {name} ( s1 String, @@ -274,8 +255,8 @@ limitations under the License.""" ) ENGINE = {engine} ORDER BY tuple() TTL d1 + INTERVAL 1 DAY DELETE + SETTINGS min_bytes_for_wide_part=0 """.format(name=name, engine=engine)) ->>>>>>> upstream/master node1.query("""ALTER TABLE {name} MODIFY COLUMN s1 String TTL d1 + INTERVAL 1 SECOND""".format(name=name)) node1.query("""ALTER TABLE {name} ADD COLUMN b1 Int32""".format(name=name))