From 02b7c2fe9095d63a966671b9e5bedb38e792f02f Mon Sep 17 00:00:00 2001
From: Sema Checherinda
Date: Tue, 25 Apr 2023 22:22:47 +0200
Subject: [PATCH 1/3] clearing s3 between tests in a robust way

---
 tests/integration/test_merge_tree_s3/test.py | 120 ++++++-------------
 1 file changed, 39 insertions(+), 81 deletions(-)

diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py
index 76430a42e27..5e0445636a1 100644
--- a/tests/integration/test_merge_tree_s3/test.py
+++ b/tests/integration/test_merge_tree_s3/test.py
@@ -101,44 +101,34 @@ def run_s3_mocks(cluster):
     )
 
 
-def list_objects(cluster, path="data/"):
+def list_objects(cluster, path="data/", hint="list_objects"):
     minio = cluster.minio_client
     objects = list(minio.list_objects(cluster.minio_bucket, path, recursive=True))
-    logging.info(f"list_objects ({len(objects)}): {[x.object_name for x in objects]}")
+    logging.info(f"{hint} ({len(objects)}): {[x.object_name for x in objects]}")
     return objects
 
 
 def wait_for_delete_s3_objects(cluster, expected, timeout=30):
-    minio = cluster.minio_client
     while timeout > 0:
-        if (
-            len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
-            == expected
-        ):
+        if len(list_objects(cluster, "data/")) == expected:
             return
         timeout -= 1
         time.sleep(1)
-    assert (
-        len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
-        == expected
-    )
+    assert len(list_objects(cluster, "data/")) == expected
 
 
-@pytest.fixture(autouse=True)
-@pytest.mark.parametrize("node_name", ["node"])
-def drop_table(cluster, node_name):
+@pytest.fixture(autouse=True, scope="function")
+def clear_minio(cluster):
+    # ClickHouse writes some objects to S3 at startup, e.g. the file data/clickhouse_access_check_{server_uuid}.
+    # The 10 second timeout here resolves the race with that file still existing.
+ wait_for_delete_s3_objects(cluster, 0, timeout=10) + yield - node = cluster.instances[node_name] + + # Remove extra objects to prevent tests cascade failing minio = cluster.minio_client - - node.query("DROP TABLE IF EXISTS s3_test NO DELAY") - - try: - wait_for_delete_s3_objects(cluster, 0) - finally: - # Remove extra objects to prevent tests cascade failing - for obj in list_objects(cluster, "data/"): - minio.remove_object(cluster.minio_bucket, obj.object_name) + for obj in list_objects(cluster, "data/"): + minio.remove_object(cluster.minio_bucket, obj.object_name) @pytest.mark.parametrize( @@ -158,10 +148,7 @@ def test_simple_insert_select( values1 = generate_values("2020-01-03", 4096) node.query("INSERT INTO s3_test VALUES {}".format(values1)) assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values") == values1 - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD + files_per_part - ) + assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD + files_per_part values2 = generate_values("2020-01-04", 4096) node.query("INSERT INTO s3_test VALUES {}".format(values2)) @@ -169,10 +156,7 @@ def test_simple_insert_select( node.query("SELECT * FROM s3_test ORDER BY dt, id FORMAT Values") == values1 + "," + values2 ) - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD + files_per_part * 2 - ) + assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD + files_per_part * 2 assert ( node.query("SELECT count(*) FROM s3_test where id = 1 FORMAT Values") == "(2)" @@ -214,7 +198,7 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name): node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)" ) assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD_PER_PART_WIDE * 6 + FILES_OVERHEAD ) @@ -292,7 +276,6 @@ def test_alter_table_columns(cluster, node_name): def test_attach_detach_partition(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) @@ -360,7 +343,6 @@ def test_attach_detach_partition(cluster, node_name): def test_move_partition_to_another_disk(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) @@ -370,21 +352,21 @@ def test_move_partition_to_another_disk(cluster, node_name): ) assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 ) node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 'hdd'") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE ) node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 's3'") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == 
FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 ) @@ -393,7 +375,6 @@ def test_move_partition_to_another_disk(cluster, node_name): def test_table_manipulations(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) @@ -405,9 +386,10 @@ def test_table_manipulations(cluster, node_name): node.query("RENAME TABLE s3_test TO s3_renamed") assert node.query("SELECT count(*) FROM s3_renamed FORMAT Values") == "(8192)" assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 ) + node.query("RENAME TABLE s3_renamed TO s3_test") assert node.query("CHECK TABLE s3_test FORMAT Values") == "(1)" @@ -416,7 +398,7 @@ def test_table_manipulations(cluster, node_name): node.query("ATTACH TABLE s3_test") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 ) @@ -424,17 +406,13 @@ def test_table_manipulations(cluster, node_name): wait_for_delete_empty_parts(node, "s3_test") wait_for_delete_inactive_parts(node, "s3_test") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)" - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD - ) + assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD @pytest.mark.parametrize("node_name", ["node"]) def test_move_replace_partition_to_another_table(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) @@ -451,12 +429,10 @@ def test_move_replace_partition_to_another_table(cluster, node_name): assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)" - s3_objects = list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)) - for obj in s3_objects: - print("Object at start", obj.object_name) - - assert len(s3_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 - + assert ( + len(list_objects(cluster, "data/", "Objects at start")) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 + ) create_table(node, "s3_clone") node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-03' TO TABLE s3_clone") @@ -465,10 +441,8 @@ def test_move_replace_partition_to_another_table(cluster, node_name): assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)" - s3_objects = list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)) - for obj in s3_objects: - print("Object after move partition", obj.object_name) + list_objects(cluster, "data/", "Object after move partition") # Number of objects in S3 should be unchanged. 
wait_for_delete_s3_objects( cluster, @@ -486,10 +460,8 @@ def test_move_replace_partition_to_another_table(cluster, node_name): ) assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)" - s3_objects = list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)) - for obj in s3_objects: - print("Object after insert", obj.object_name) + list_objects(cluster, "data/", "Object after insert") wait_for_delete_s3_objects( cluster, FILES_OVERHEAD * 2 @@ -515,12 +487,8 @@ def test_move_replace_partition_to_another_table(cluster, node_name): node.query("DROP TABLE s3_clone NO DELAY") assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)" - s3_objects = list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)) - for obj in s3_objects: - print("Object after drop", obj.object_name) - - # Data should remain in S3 + list_objects(cluster, "data/", "Object after drop") wait_for_delete_s3_objects( cluster, FILES_OVERHEAD @@ -530,10 +498,7 @@ def test_move_replace_partition_to_another_table(cluster, node_name): node.query("ALTER TABLE s3_test FREEZE") # Number S3 objects should be unchanged. - s3_objects = list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)) - for obj in s3_objects: - print("Object after freeze", obj.object_name) - + list_objects(cluster, "data/", "Object after freeze") wait_for_delete_s3_objects( cluster, FILES_OVERHEAD @@ -548,7 +513,8 @@ def test_move_replace_partition_to_another_table(cluster, node_name): cluster, FILES_OVERHEAD_PER_PART_WIDE * 4 - FILES_OVERHEAD_METADATA_VERSION * 4 ) - for obj in list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)): + minio = cluster.minio_client + for obj in list_objects(cluster, "data/"): minio.remove_object(cluster.minio_bucket, obj.object_name) @@ -556,7 +522,6 @@ def test_move_replace_partition_to_another_table(cluster, node_name): def test_freeze_unfreeze(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) @@ -571,7 +536,7 @@ def test_freeze_unfreeze(cluster, node_name): wait_for_delete_empty_parts(node, "s3_test") wait_for_delete_inactive_parts(node, "s3_test") assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD + (FILES_OVERHEAD_PER_PART_WIDE - FILES_OVERHEAD_METADATA_VERSION) * 2 ) @@ -586,10 +551,7 @@ def test_freeze_unfreeze(cluster, node_name): wait_for_delete_s3_objects(cluster, FILES_OVERHEAD) # Data should be removed from S3. 
-    assert (
-        len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
-        == FILES_OVERHEAD
-    )
+    assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD
 
 
 @pytest.mark.parametrize("node_name", ["node"])
@@ -597,7 +559,6 @@ def test_freeze_system_unfreeze(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test")
     create_table(node, "s3_test_removed")
-    minio = cluster.minio_client
 
     node.query(
         "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096))
@@ -613,7 +574,7 @@
     wait_for_delete_inactive_parts(node, "s3_test")
     node.query("DROP TABLE s3_test_removed NO DELAY")
     assert (
-        len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
+        len(list_objects(cluster, "data/"))
         == FILES_OVERHEAD
         + (FILES_OVERHEAD_PER_PART_WIDE - FILES_OVERHEAD_METADATA_VERSION) * 2
     )
@@ -624,10 +585,7 @@
     wait_for_delete_s3_objects(cluster, FILES_OVERHEAD)
 
     # Data should be removed from S3.
-    assert (
-        len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
-        == FILES_OVERHEAD
-    )
+    assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD
 
 
 @pytest.mark.parametrize("node_name", ["node"])
@@ -710,7 +668,7 @@ def test_lazy_seek_optimization_for_async_read(cluster, node_name):
     node.query("SELECT * FROM s3_test WHERE value LIKE '%abc%' ORDER BY value LIMIT 10")
     node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
     minio = cluster.minio_client
-    for obj in list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)):
+    for obj in list_objects(cluster, "data/"):
         minio.remove_object(cluster.minio_bucket, obj.object_name)
 

From e3647571f237d29c9f47c2db08a74068cb109d17 Mon Sep 17 00:00:00 2001
From: Sema Checherinda
Date: Wed, 26 Apr 2023 14:24:35 +0200
Subject: [PATCH 2/3] explicit drop table for tests

---
 tests/integration/test_merge_tree_s3/test.py | 69 +++++++++++++-------
 1 file changed, 47 insertions(+), 22 deletions(-)

diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py
index 5e0445636a1..cb1848a88fb 100644
--- a/tests/integration/test_merge_tree_s3/test.py
+++ b/tests/integration/test_merge_tree_s3/test.py
@@ -117,20 +117,31 @@ def wait_for_delete_s3_objects(cluster, expected, timeout=30):
     assert len(list_objects(cluster, "data/")) == expected
 
 
-@pytest.fixture(autouse=True, scope="function")
-def clear_minio(cluster):
-    # ClickHouse writes some objects to S3 at startup, e.g. the file data/clickhouse_access_check_{server_uuid}.
-    # The 10 second timeout here resolves the race with that file still existing.
-    wait_for_delete_s3_objects(cluster, 0, timeout=10)
-
-    yield
-
-    # Remove extra objects to prevent tests cascade failing
+def remove_all_s3_objects(cluster):
     minio = cluster.minio_client
     for obj in list_objects(cluster, "data/"):
         minio.remove_object(cluster.minio_bucket, obj.object_name)
 
 
+@pytest.fixture(autouse=True, scope="function")
+def clear_minio(cluster):
+    try:
+        # ClickHouse writes some objects to S3 at startup, e.g. the file data/clickhouse_access_check_{server_uuid}.
+        # The 10 second timeout here resolves the race with that file still existing.
+ wait_for_delete_s3_objects(cluster, 0, timeout=10) + except: + # Remove extra objects to prevent tests cascade failing + remove_all_s3_objects(cluster) + + yield + + +def check_no_objects_after_drop(cluster, table_name="s3_test", node_name="node"): + node = cluster.instances[node_name] + node.query(f"DROP TABLE IF EXISTS {table_name} NO DELAY") + wait_for_delete_s3_objects(cluster, 0, timeout=0) + + @pytest.mark.parametrize( "min_rows_for_wide_part,files_per_part,node_name", [ @@ -162,6 +173,8 @@ def test_simple_insert_select( node.query("SELECT count(*) FROM s3_test where id = 1 FORMAT Values") == "(2)" ) + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("merge_vertical,node_name", [(True, "node"), (False, "node")]) def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name): @@ -172,7 +185,6 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name): node = cluster.instances[node_name] create_table(node, "s3_test", **settings) - minio = cluster.minio_client node.query("SYSTEM STOP MERGES s3_test") node.query( @@ -226,6 +238,8 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name): cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD, timeout=45 ) + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_alter_table_columns(cluster, node_name): @@ -271,6 +285,8 @@ def test_alter_table_columns(cluster, node_name): cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2 ) + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_attach_detach_partition(cluster, node_name): @@ -338,6 +354,8 @@ def test_attach_detach_partition(cluster, node_name): == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 0 ) + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_move_partition_to_another_disk(cluster, node_name): @@ -370,6 +388,8 @@ def test_move_partition_to_another_disk(cluster, node_name): == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 ) + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_table_manipulations(cluster, node_name): @@ -408,6 +428,8 @@ def test_table_manipulations(cluster, node_name): assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)" assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_move_replace_partition_to_another_table(cluster, node_name): @@ -513,9 +535,7 @@ def test_move_replace_partition_to_another_table(cluster, node_name): cluster, FILES_OVERHEAD_PER_PART_WIDE * 4 - FILES_OVERHEAD_METADATA_VERSION * 4 ) - minio = cluster.minio_client - for obj in list_objects(cluster, "data/"): - minio.remove_object(cluster.minio_bucket, obj.object_name) + remove_all_s3_objects(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -548,10 +568,10 @@ def test_freeze_unfreeze(cluster, node_name): # Unfreeze all partitions from backup2. node.query("ALTER TABLE s3_test UNFREEZE WITH NAME 'backup2'") + # Data should be removed from S3. wait_for_delete_s3_objects(cluster, FILES_OVERHEAD) - # Data should be removed from S3. - assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -582,10 +602,10 @@ def test_freeze_system_unfreeze(cluster, node_name): # Unfreeze all data from backup3. 
node.query("SYSTEM UNFREEZE WITH NAME 'backup3'") + # Data should be removed from S3. wait_for_delete_s3_objects(cluster, FILES_OVERHEAD) - # Data should be removed from S3. - assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -631,6 +651,8 @@ def test_s3_disk_apply_new_settings(cluster, node_name): # There should be 3 times more S3 requests because multi-part upload mode uses 3 requests to upload object. assert get_s3_requests() - s3_requests_before == s3_requests_to_write_partition * 3 + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_s3_no_delete_objects(cluster, node_name): @@ -639,6 +661,7 @@ def test_s3_no_delete_objects(cluster, node_name): node, "s3_test_no_delete_objects", storage_policy="no_delete_objects_s3" ) node.query("DROP TABLE s3_test_no_delete_objects SYNC") + remove_all_s3_objects(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -653,6 +676,7 @@ def test_s3_disk_reads_on_unstable_connection(cluster, node_name): assert node.query("SELECT sum(id) FROM s3_test").splitlines() == [ "40499995500000" ] + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -666,10 +690,8 @@ def test_lazy_seek_optimization_for_async_read(cluster, node_name): "INSERT INTO s3_test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000000" ) node.query("SELECT * FROM s3_test WHERE value LIKE '%abc%' ORDER BY value LIMIT 10") - node.query("DROP TABLE IF EXISTS s3_test NO DELAY") - minio = cluster.minio_client - for obj in list_objects(cluster, "data/"): - minio.remove_object(cluster.minio_bucket, obj.object_name) + + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node_with_limited_disk"]) @@ -697,7 +719,7 @@ def test_cache_with_full_disk_space(cluster, node_name): assert node.contains_in_log( "Insert into cache is skipped due to insufficient disk space" ) - node.query("DROP TABLE IF EXISTS s3_test NO DELAY") + check_no_objects_after_drop(cluster, node_name=node_name) @pytest.mark.parametrize("node_name", ["node"]) @@ -722,6 +744,7 @@ def test_store_cleanup_disk_s3(cluster, node_name): "CREATE TABLE s3_test UUID '00000000-1000-4000-8000-000000000001' (n UInt64) Engine=MergeTree() ORDER BY n SETTINGS storage_policy='s3';" ) node.query("INSERT INTO s3_test SELECT 1") + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -798,3 +821,5 @@ def test_cache_setting_compatibility(cluster, node_name): node.query("SELECT * FROM s3_test FORMAT Null") assert not node.contains_in_log("No such file or directory: Cache info:") + + check_no_objects_after_drop(cluster) From ff648b7b36aa2d45f52c0e2cd28c90810a502c69 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Mon, 1 May 2023 13:13:57 +0200 Subject: [PATCH 3/3] mute the bug, will open new pr with fix --- tests/integration/test_merge_tree_s3/test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py index cb1848a88fb..9e9903c36c7 100644 --- a/tests/integration/test_merge_tree_s3/test.py +++ b/tests/integration/test_merge_tree_s3/test.py @@ -452,7 +452,7 @@ def test_move_replace_partition_to_another_table(cluster, node_name): assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)" assert ( - len(list_objects(cluster, "data/", "Objects at start")) + 
+        len(list_objects(cluster, "data/", "Objects at start"))
         == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
     )
     create_table(node, "s3_clone")
@@ -686,6 +686,7 @@ def test_lazy_seek_optimization_for_async_read(cluster, node_name):
     node.query(
         "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3';"
     )
+    node.query("SYSTEM STOP MERGES s3_test")
     node.query(
         "INSERT INTO s3_test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000000"
     )
@@ -701,6 +702,7 @@ def test_cache_with_full_disk_space(cluster, node_name):
     node.query(
         "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY value SETTINGS storage_policy='s3_with_cache_and_jbod';"
    )
+    node.query("SYSTEM STOP MERGES s3_test")
     node.query(
         "INSERT INTO s3_test SELECT number, toString(number) FROM numbers(100000000)"
     )
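
Taken together, the three patches leave the suite with a single cleanup protocol. For reference, a condensed sketch of the resulting helpers and fixture as they stand after patch 3 (the `cluster` fixture comes from ClickHouse's integration-test harness and is assumed here to expose `minio_client`, `minio_bucket`, and `instances`, as the hunks above rely on):

    import logging
    import time

    import pytest


    def list_objects(cluster, path="data/", hint="list_objects"):
        # Log and return every object under the prefix so counts are comparable.
        minio = cluster.minio_client
        objects = list(minio.list_objects(cluster.minio_bucket, path, recursive=True))
        logging.info(f"{hint} ({len(objects)}): {[x.object_name for x in objects]}")
        return objects


    def wait_for_delete_s3_objects(cluster, expected, timeout=30):
        # Poll until the bucket converges to the expected object count, then assert.
        while timeout > 0:
            if len(list_objects(cluster, "data/")) == expected:
                return
            timeout -= 1
            time.sleep(1)
        assert len(list_objects(cluster, "data/")) == expected


    def remove_all_s3_objects(cluster):
        minio = cluster.minio_client
        for obj in list_objects(cluster, "data/"):
            minio.remove_object(cluster.minio_bucket, obj.object_name)


    @pytest.fixture(autouse=True, scope="function")
    def clear_minio(cluster):
        try:
            # Startup writes (e.g. the access-check object) should drain within 10 s.
            wait_for_delete_s3_objects(cluster, 0, timeout=10)
        except:
            # If the bucket did not drain, wipe it so one leaked object
            # cannot cascade into failures of every subsequent test.
            remove_all_s3_objects(cluster)

        yield


    def check_no_objects_after_drop(cluster, table_name="s3_test", node_name="node"):
        # Explicit epilogue for each test: drop the table and assert the bucket
        # is empty right away (timeout=0 skips the polling loop entirely).
        node = cluster.instances[node_name]
        node.query(f"DROP TABLE IF EXISTS {table_name} NO DELAY")
        wait_for_delete_s3_objects(cluster, 0, timeout=0)

The split of responsibility is worth noting: `clear_minio` only guarantees a clean bucket before a test runs, while each test finishes with `check_no_objects_after_drop`, whose `timeout=0` asserts immediately; since `DROP TABLE ... NO DELAY` removes data synchronously, any object still present at that point is a genuine leak attributable to that specific test.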