diff --git a/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql b/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql
index 3b30a2b6c2c..b679a0c24e8 100644
--- a/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql
+++ b/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql
@@ -10,11 +10,11 @@ drop table if exists shard_1.from_1;
 drop table if exists shard_0.to;
 drop table if exists shard_1.to;
 
-create table shard_0.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1;
-create table shard_1.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1;
+create table shard_0.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1, min_bytes_for_wide_part=0, min_bytes_for_full_part_storage=0, ratio_of_defaults_for_sparse_serialization=0, vertical_merge_algorithm_min_rows_to_activate=612278, vertical_merge_algorithm_min_columns_to_activate=1, allow_vertical_merges_from_compact_to_wide_parts=1;
+create table shard_1.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1, min_bytes_for_wide_part=0, min_bytes_for_full_part_storage=0, ratio_of_defaults_for_sparse_serialization=0, vertical_merge_algorithm_min_rows_to_activate=612278, vertical_merge_algorithm_min_columns_to_activate=1, allow_vertical_merges_from_compact_to_wide_parts=1;
 
-create table shard_0.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1;
-create table shard_1.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1;
+create table shard_0.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1, min_bytes_for_wide_part=0, min_bytes_for_full_part_storage=0, ratio_of_defaults_for_sparse_serialization=0, vertical_merge_algorithm_min_rows_to_activate=612278, vertical_merge_algorithm_min_columns_to_activate=1, allow_vertical_merges_from_compact_to_wide_parts=1;
+create table shard_1.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1, min_bytes_for_wide_part=0, min_bytes_for_full_part_storage=0, ratio_of_defaults_for_sparse_serialization=0, vertical_merge_algorithm_min_rows_to_activate=612278, vertical_merge_algorithm_min_columns_to_activate=1, allow_vertical_merges_from_compact_to_wide_parts=1;
 
 insert into shard_0.from_0 select number from numbers(10);
 insert into shard_0.from_0 select number + 10 from numbers(10);
@@ -40,10 +40,17 @@ OPTIMIZE TABLE shard_0.from_0;
 OPTIMIZE TABLE shard_1.from_0;
 OPTIMIZE TABLE shard_0.from_1;
 OPTIMIZE TABLE shard_1.from_1;
+
 OPTIMIZE TABLE shard_0.to;
 
+-- If moved parts are not merged by OPTIMIZE or a background merge, restart
+-- can log a Warning about the metadata version on disk. That is a normal
+-- situation, and the test shouldn't occasionally fail because of it.
+set send_logs_level = 'error';
+
 system restart replica shard_0.to;
 
+-- Doesn't lead to test flakiness, because we don't check anything after it
 select sleep(2);
 
 attach table shard_1.to;
@@ -54,4 +61,3 @@ drop table if exists shard_0.from_1;
 drop table if exists shard_1.from_1;
 drop table if exists shard_0.to;
 drop table if exists shard_1.to;
-
diff --git a/tests/queries/0_stateless/02916_move_partition_inactive_replica.reference b/tests/queries/0_stateless/02916_another_move_partition_inactive_replica.reference
similarity index 100%
rename from tests/queries/0_stateless/02916_move_partition_inactive_replica.reference
rename to tests/queries/0_stateless/02916_another_move_partition_inactive_replica.reference
diff --git a/tests/queries/0_stateless/02916_move_partition_inactive_replica.sql b/tests/queries/0_stateless/02916_another_move_partition_inactive_replica.sql
similarity index 87%
rename from tests/queries/0_stateless/02916_move_partition_inactive_replica.sql
rename to tests/queries/0_stateless/02916_another_move_partition_inactive_replica.sql
index b0699539ac1..1b23d9f2815 100644
--- a/tests/queries/0_stateless/02916_move_partition_inactive_replica.sql
+++ b/tests/queries/0_stateless/02916_another_move_partition_inactive_replica.sql
@@ -35,8 +35,15 @@ OPTIMIZE TABLE shard_0.to;
 OPTIMIZE TABLE shard_0.to;
 
 select name, active from system.parts where database='shard_0' and table='to' and active order by name;
 
+-- If moved parts are not merged by OPTIMIZE or a background merge, restart
+-- can log a Warning about the metadata version on disk. That is a normal
+-- situation, and the test shouldn't occasionally fail because of it.
+set send_logs_level = 'error';
+
 system restart replica shard_0.to;
 
+-- Doesn't lead to test flakiness, because the table content we check
+-- doesn't depend on any background operation
 select sleep(3);
 attach table shard_1.to;