From ba05bb1b398f9b407e148182850a35621d8d8a29 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Wed, 15 Jul 2020 14:47:51 +0300
Subject: [PATCH] Better tests for parallel run

---
 docker/test/fasttest/run.sh                                  |  2 +-
 .../0_stateless/00626_replace_partition_from_table.sql       |  2 +-
 .../00653_verification_monotonic_data_load.sh                | 10 +++++++---
 .../00738_nested_merge_multidimensional_array.sql            |  4 ++--
 .../0_stateless/00988_parallel_parts_removal.sql             |  4 ++--
 .../0_stateless/00989_parallel_parts_loading.sql             |  4 ++--
 ..._incremental_streaming_from_2_src_with_feedback.sql       |  6 +++---
 .../0_stateless/01213_alter_rename_column_zookeeper.sh       |  4 ++--
 .../0_stateless/01282_system_parts_ttl_info.sql              |  4 ++--
 .../01373_summing_merge_tree_exclude_partition_key.sql       |  4 ++--
 10 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh
index eb4b6ad3ff0..fa82c071aaf 100755
--- a/docker/test/fasttest/run.sh
+++ b/docker/test/fasttest/run.sh
@@ -90,7 +90,7 @@ do
     sleep 0.1
 done
 
-TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter"
+TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent"
 
 clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip $TESTS_TO_SKIP 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt
diff --git a/tests/queries/0_stateless/00626_replace_partition_from_table.sql b/tests/queries/0_stateless/00626_replace_partition_from_table.sql
index c6479a94060..7224224334e 100644
--- a/tests/queries/0_stateless/00626_replace_partition_from_table.sql
+++ b/tests/queries/0_stateless/00626_replace_partition_from_table.sql
@@ -62,7 +62,7 @@ SELECT count(), sum(d) FROM dst;
 
 SELECT 'OPTIMIZE';
 SELECT count(), sum(d), uniqExact(_part) FROM dst;
-SYSTEM START MERGES;
+SYSTEM START MERGES dst;
 SET optimize_throw_if_noop=1;
 OPTIMIZE TABLE dst;
 SELECT count(), sum(d), uniqExact(_part) FROM dst;
diff --git a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh
index e52610f03ba..bb248b5f4e1 100755
--- a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh
+++ b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh
@@ -25,9 +25,15 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE fixed_string_test_table (val FixedStr
 ${CLICKHOUSE_CLIENT} --query="CREATE TABLE signed_integer_test_table (val Int32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
 ${CLICKHOUSE_CLIENT} --query="CREATE TABLE unsigned_integer_test_table (val UInt32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
 ${CLICKHOUSE_CLIENT} --query="CREATE TABLE enum_test_table (val Enum16('hello' = 1, 'world' = 2, 'yandex' = 256, 'clickhouse' = 257)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
+
 ${CLICKHOUSE_CLIENT} --query="CREATE TABLE date_test_table (val Date) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
 
-${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES;"
+${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES string_test_table;"
+${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES fixed_string_test_table;"
+${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES signed_integer_test_table;"
+${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES unsigned_integer_test_table;"
+${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES enum_test_table;"
+${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES date_test_table;"
 
 ${CLICKHOUSE_CLIENT} --query="INSERT INTO string_test_table VALUES ('0'), ('2'), ('2');"
 ${CLICKHOUSE_CLIENT} --query="INSERT INTO fixed_string_test_table VALUES ('0'), ('2'), ('2');"
@@ -80,5 +86,3 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS signed_integer_test_table;"
 ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS unsigned_integer_test_table;"
 ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS enum_test_table;"
 ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS date_test_table;"
-
-${CLICKHOUSE_CLIENT} --query="SYSTEM START MERGES;"
diff --git a/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql b/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql
index f9ccd9623e1..6efeb2e6ef5 100644
--- a/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql
+++ b/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql
@@ -1,13 +1,13 @@
 DROP TABLE IF EXISTS sites;
 CREATE TABLE sites (Domain UInt8, `Users.UserID` Array(UInt64), `Users.Dates` Array(Array(Date))) ENGINE = MergeTree ORDER BY Domain SETTINGS vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0;
 
-SYSTEM STOP MERGES;
+SYSTEM STOP MERGES sites;
 
 INSERT INTO sites VALUES (1,[1],[[]]);
 INSERT INTO sites VALUES (2,[1],[['2018-06-22']]);
 
 SELECT count(), countArray(Users.Dates), countArrayArray(Users.Dates) FROM sites;
 
-SYSTEM START MERGES;
+SYSTEM START MERGES sites;
 OPTIMIZE TABLE sites FINAL;
 SELECT count(), countArray(Users.Dates), countArrayArray(Users.Dates) FROM sites;
diff --git a/tests/queries/0_stateless/00988_parallel_parts_removal.sql b/tests/queries/0_stateless/00988_parallel_parts_removal.sql
index 0dccd3df048..bff9bbe6d8d 100644
--- a/tests/queries/0_stateless/00988_parallel_parts_removal.sql
+++ b/tests/queries/0_stateless/00988_parallel_parts_removal.sql
@@ -2,7 +2,7 @@ DROP TABLE IF EXISTS mt;
 
 CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS max_part_removal_threads = 16, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, old_parts_lifetime = 1, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000;
 
-SYSTEM STOP MERGES;
+SYSTEM STOP MERGES mt;
 
 SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
 INSERT INTO mt SELECT * FROM numbers(1000);
@@ -10,7 +10,7 @@ SET max_block_size = 65536;
 
 SELECT count(), sum(x) FROM mt;
 
-SYSTEM START MERGES;
+SYSTEM START MERGES mt;
 OPTIMIZE TABLE mt FINAL;
 
 SELECT count(), sum(x) FROM mt;
diff --git a/tests/queries/0_stateless/00989_parallel_parts_loading.sql b/tests/queries/0_stateless/00989_parallel_parts_loading.sql
index 5e0011483b3..0b4c0501669 100644
--- a/tests/queries/0_stateless/00989_parallel_parts_loading.sql
+++ b/tests/queries/0_stateless/00989_parallel_parts_loading.sql
@@ -2,7 +2,7 @@ DROP TABLE IF EXISTS mt;
 
 CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS max_part_loading_threads = 16, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000;
 
-SYSTEM STOP MERGES;
+SYSTEM STOP MERGES mt;
 
 SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
 INSERT INTO mt SELECT * FROM numbers(1000);
@@ -15,5 +15,5 @@ ATTACH TABLE mt;
 
 SELECT count(), sum(x) FROM mt;
 
-SYSTEM START MERGES;
+SYSTEM START MERGES mt;
 DROP TABLE mt;
diff --git a/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql
index c2d0333bf46..cdefdd9de8a 100644
--- a/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql
+++ b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql
@@ -76,7 +76,9 @@ AS
 GROUP BY id;
 
 -- This query has effect only for existing tables, so it must be located after CREATE.
-SYSTEM STOP MERGES;
+SYSTEM STOP MERGES target_table;
+SYSTEM STOP MERGES checkouts;
+SYSTEM STOP MERGES logins;
 
 -- feed with some initial values
 INSERT INTO logins SELECT number as id, '2000-01-01 08:00:00' from numbers(50000);
@@ -126,5 +128,3 @@ DROP TABLE IF EXISTS mv_logins2target;
 DROP TABLE IF EXISTS checkouts;
 DROP TABLE IF EXISTS mv_checkouts2target;
 DROP TABLE target_table;
-
-SYSTEM START MERGES;
diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh
index d7b8ea3262d..009b400ee7b 100755
--- a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh
+++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh
@@ -24,7 +24,7 @@ $CLICKHOUSE_CLIENT --query "INSERT INTO table_for_rename_replicated SELECT toDat
 
 $CLICKHOUSE_CLIENT --query "SELECT value1 FROM table_for_rename_replicated WHERE key = 1;"
 
-$CLICKHOUSE_CLIENT --query "SYSTEM STOP MERGES;"
+$CLICKHOUSE_CLIENT --query "SYSTEM STOP MERGES table_for_rename_replicated;"
 
 $CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE table_for_rename_replicated;"
 
@@ -49,7 +49,7 @@ $CLICKHOUSE_CLIENT --query "SELECT renamed_value1 FROM table_for_rename_replicat
 
 $CLICKHOUSE_CLIENT --query "SELECT * FROM table_for_rename_replicated WHERE key = 1 FORMAT TSVWithNames;"
 
-$CLICKHOUSE_CLIENT --query "SYSTEM START MERGES;"
+$CLICKHOUSE_CLIENT --query "SYSTEM START MERGES table_for_rename_replicated;"
 
 $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA table_for_rename_replicated;"
diff --git a/tests/queries/0_stateless/01282_system_parts_ttl_info.sql b/tests/queries/0_stateless/01282_system_parts_ttl_info.sql
index 3a1b1cc79ce..0caf64bac8d 100644
--- a/tests/queries/0_stateless/01282_system_parts_ttl_info.sql
+++ b/tests/queries/0_stateless/01282_system_parts_ttl_info.sql
@@ -1,9 +1,9 @@
 DROP TABLE IF EXISTS ttl;
 CREATE TABLE ttl (d DateTime) ENGINE = MergeTree ORDER BY tuple() TTL d + INTERVAL 10 DAY;
-SYSTEM STOP MERGES;
+SYSTEM STOP MERGES ttl;
 INSERT INTO ttl VALUES ('2000-01-01 01:02:03'), ('2000-02-03 04:05:06');
 SELECT rows, delete_ttl_info_min, delete_ttl_info_max, move_ttl_info.expression, move_ttl_info.min, move_ttl_info.max FROM system.parts WHERE database = currentDatabase() AND table = 'ttl';
-SYSTEM START MERGES;
+SYSTEM START MERGES ttl;
 OPTIMIZE TABLE ttl FINAL;
 SELECT rows, delete_ttl_info_min, delete_ttl_info_max, move_ttl_info.expression, move_ttl_info.min, move_ttl_info.max FROM system.parts WHERE database = currentDatabase() AND table = 'ttl' AND active;
 DROP TABLE ttl;
diff --git a/tests/queries/0_stateless/01373_summing_merge_tree_exclude_partition_key.sql b/tests/queries/0_stateless/01373_summing_merge_tree_exclude_partition_key.sql
index 60c988a2e2f..790fbca6b73 100644
--- a/tests/queries/0_stateless/01373_summing_merge_tree_exclude_partition_key.sql
+++ b/tests/queries/0_stateless/01373_summing_merge_tree_exclude_partition_key.sql
@@ -4,7 +4,7 @@ CREATE TABLE tt_01373 (a Int64, d Int64, val Int64)
 ENGINE = SummingMergeTree PARTITION BY (a) ORDER BY (d);
 
-SYSTEM STOP MERGES;
+SYSTEM STOP MERGES tt_01373;
 
 INSERT INTO tt_01373 SELECT number%13, number%17, 1 from numbers(1000000);
@@ -17,7 +17,7 @@ SELECT count(*) FROM tt_01373 FINAL;
 SELECT '---';
 SELECT a, count() FROM tt_01373 FINAL GROUP BY a ORDER BY a;
 
-SYSTEM START MERGES;
+SYSTEM START MERGES tt_01373;
 OPTIMIZE TABLE tt_01373 FINAL;
 
 SELECT '---';
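
Every test change above applies the same pattern: SYSTEM STOP MERGES and SYSTEM START MERGES are scoped to the table under test instead of being issued server-wide, so tests running in parallel no longer pause each other's merges. A minimal sketch of that pattern, using a hypothetical table name t that does not appear in the patch:

    DROP TABLE IF EXISTS t;
    CREATE TABLE t (x UInt64) ENGINE = MergeTree ORDER BY x;

    SYSTEM STOP MERGES t;      -- pauses background merges only for table t
    INSERT INTO t VALUES (1), (2);

    SYSTEM START MERGES t;     -- re-enables merges for table t only
    OPTIMIZE TABLE t FINAL;    -- force a merge before checking the result
    SELECT count() FROM t;

    DROP TABLE t;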