Automatic style fix

robot-clickhouse 2023-04-30 17:27:15 +00:00
parent b22a54d6cc
commit ab97dbcf1e
2 changed files with 35 additions and 14 deletions
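The hunks below are mechanical re-wraps of expressions that exceeded the formatter's line length: a long argument list is exploded one argument per line with a trailing comma, and long binary expressions are split with the operator leading each continuation line. This is consistent with the output of the black formatter (an assumption; the exact tool and invocation used by the style-check bot are not recorded in this commit). A minimal sketch of reproducing the first hunk's rewrite through black's format_str API:

import black

# A function body mirroring the pre-fix code from the first hunk below.
SRC = '''\
def test():
    wait_for_delete_hdfs_objects(
        cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 - FILES_OVERHEAD_METADATA_VERSION
    )
'''

# format_str only parses and reprints, so the undefined names are fine.
# With the default 88-column limit the argument line is too long, and it
# gets split the same way as the "+" lines in the hunks below.
print(black.format_str(SRC, mode=black.Mode()))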

File 1 of 2 (HDFS integration test)

@@ -224,14 +224,22 @@ def test_attach_detach_partition(cluster):
     wait_for_delete_empty_parts(node, "hdfs_test")
     wait_for_delete_inactive_parts(node, "hdfs_test")
     wait_for_delete_hdfs_objects(
-        cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 - FILES_OVERHEAD_METADATA_VERSION
+        cluster,
+        FILES_OVERHEAD
+        + FILES_OVERHEAD_PER_PART_WIDE * 2
+        - FILES_OVERHEAD_METADATA_VERSION,
     )

     node.query("ALTER TABLE hdfs_test ATTACH PARTITION '2020-01-03'")
     assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)"

     hdfs_objects = fs.listdir("/clickhouse")
-    assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 - FILES_OVERHEAD_METADATA_VERSION
+    assert (
+        len(hdfs_objects)
+        == FILES_OVERHEAD
+        + FILES_OVERHEAD_PER_PART_WIDE * 2
+        - FILES_OVERHEAD_METADATA_VERSION
+    )

     node.query("ALTER TABLE hdfs_test DROP PARTITION '2020-01-03'")
     assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(4096)"
@@ -396,7 +404,10 @@ def test_move_replace_partition_to_another_table(cluster):
     # Wait for outdated partitions deletion.
     wait_for_delete_hdfs_objects(
-        cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 - FILES_OVERHEAD_METADATA_VERSION * 2
+        cluster,
+        FILES_OVERHEAD * 2
+        + FILES_OVERHEAD_PER_PART_WIDE * 4
+        - FILES_OVERHEAD_METADATA_VERSION * 2,
     )

     node.query("DROP TABLE hdfs_clone NO DELAY")

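All of the reformatted assertions share one bookkeeping formula: a fixed per-table overhead, plus a per-part file count, minus the metadata_version file where a part does not carry one. A worked sketch of that arithmetic, with hypothetical placeholder values (the real constants are defined at the top of each test module and are not part of this diff):

# Placeholder values for illustration only; NOT the constants from the
# real test modules, which are defined outside this diff.
FILES_OVERHEAD = 1                   # hypothetical table-level objects
FILES_OVERHEAD_PER_PART_WIDE = 7     # hypothetical objects per wide part
FILES_OVERHEAD_METADATA_VERSION = 1  # hypothetical metadata_version file


def expected_objects(n_parts, n_missing_metadata_version):
    # The shape every assertion in both files reduces to.
    return (
        FILES_OVERHEAD
        + FILES_OVERHEAD_PER_PART_WIDE * n_parts
        - FILES_OVERHEAD_METADATA_VERSION * n_missing_metadata_version
    )


# Mirrors "FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
# - FILES_OVERHEAD_METADATA_VERSION" from the first HDFS hunk.
assert expected_objects(2, 1) == 1 + 7 * 2 - 1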
File 2 of 2 (S3 integration test)

@@ -312,14 +312,18 @@ def test_attach_detach_partition(cluster, node_name):
     assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)"
     assert (
         len(list_objects(cluster, "data/"))
-        == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 - FILES_OVERHEAD_METADATA_VERSION
+        == FILES_OVERHEAD
+        + FILES_OVERHEAD_PER_PART_WIDE * 2
+        - FILES_OVERHEAD_METADATA_VERSION
     )

     node.query("ALTER TABLE s3_test ATTACH PARTITION '2020-01-03'")
     assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
     assert (
         len(list_objects(cluster, "data/"))
-        == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 - FILES_OVERHEAD_METADATA_VERSION
+        == FILES_OVERHEAD
+        + FILES_OVERHEAD_PER_PART_WIDE * 2
+        - FILES_OVERHEAD_METADATA_VERSION
     )

     node.query("ALTER TABLE s3_test DROP PARTITION '2020-01-03'")
@@ -337,7 +341,9 @@ def test_attach_detach_partition(cluster, node_name):
     assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)"
     assert (
         len(list_objects(cluster, "data/"))
-        == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 1 - FILES_OVERHEAD_METADATA_VERSION
+        == FILES_OVERHEAD
+        + FILES_OVERHEAD_PER_PART_WIDE * 1
+        - FILES_OVERHEAD_METADATA_VERSION
     )
     node.query(
         "ALTER TABLE s3_test DROP DETACHED PARTITION '2020-01-04'",
@@ -449,10 +455,7 @@ def test_move_replace_partition_to_another_table(cluster, node_name):
     for obj in s3_objects:
         print("Object at start", obj.object_name)
-    assert (
-        len(s3_objects)
-        == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
-    )
+    assert len(s3_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4

     create_table(node, "s3_clone")
@@ -503,7 +506,10 @@ def test_move_replace_partition_to_another_table(cluster, node_name):
     # Wait for outdated partitions deletion.
     wait_for_delete_s3_objects(
-        cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 - FILES_OVERHEAD_METADATA_VERSION * 2
+        cluster,
+        FILES_OVERHEAD * 2
+        + FILES_OVERHEAD_PER_PART_WIDE * 4
+        - FILES_OVERHEAD_METADATA_VERSION * 2,
     )

     node.query("DROP TABLE s3_clone NO DELAY")
@@ -537,7 +543,9 @@ def test_move_replace_partition_to_another_table(cluster, node_name):
     node.query("DROP TABLE s3_test NO DELAY")
     # Backup data should remain in S3.
-    wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE * 4 - FILES_OVERHEAD_METADATA_VERSION * 4)
+    wait_for_delete_s3_objects(
+        cluster, FILES_OVERHEAD_PER_PART_WIDE * 4 - FILES_OVERHEAD_METADATA_VERSION * 4
+    )

     for obj in list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)):
         minio.remove_object(cluster.minio_bucket, obj.object_name)
@@ -563,7 +571,8 @@ def test_freeze_unfreeze(cluster, node_name):
     wait_for_delete_inactive_parts(node, "s3_test")
     assert (
         len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
-        == FILES_OVERHEAD + (FILES_OVERHEAD_PER_PART_WIDE - FILES_OVERHEAD_METADATA_VERSION) * 2
+        == FILES_OVERHEAD
+        + (FILES_OVERHEAD_PER_PART_WIDE - FILES_OVERHEAD_METADATA_VERSION) * 2
     )

     # Unfreeze single partition from backup1.
@@ -604,7 +613,8 @@ def test_freeze_system_unfreeze(cluster, node_name):
     node.query("DROP TABLE s3_test_removed NO DELAY")
     assert (
         len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
-        == FILES_OVERHEAD + (FILES_OVERHEAD_PER_PART_WIDE - FILES_OVERHEAD_METADATA_VERSION) * 2
+        == FILES_OVERHEAD
+        + (FILES_OVERHEAD_PER_PART_WIDE - FILES_OVERHEAD_METADATA_VERSION) * 2
     )

     # Unfreeze all data from backup3.
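wait_for_delete_s3_objects and wait_for_delete_hdfs_objects are helpers defined in the test modules, outside this diff; all the hunks imply is that they poll the object store until the listing shrinks to the expected count. A minimal, hypothetical stand-in under that assumption (the real helpers and their timeouts may differ):

import time


# Hypothetical stand-in for the wait_for_delete_*_objects helpers used
# above; the real implementations live in the test modules.
def wait_for_object_count(list_objects, expected, timeout_s=30.0, poll_s=1.0):
    # Poll until the store reports exactly `expected` objects, or give up.
    n = None
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        n = len(list(list_objects()))
        if n == expected:
            return
        time.sleep(poll_s)
    raise TimeoutError(f"object count is {n}, expected {expected}")


# Usage mirroring the calls above (cluster and minio come from the test
# fixtures and are not defined in this sketch):
# wait_for_object_count(
#     lambda: minio.list_objects(cluster.minio_bucket, "data/", recursive=True),
#     expected_objects(4, 4),
# )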