Automatic style fix

robot-clickhouse 2023-11-09 16:15:14 +00:00
parent f9895ab37b
commit 188a88fa33


@@ -55,33 +55,50 @@ def start_cluster():
     finally:
         cluster.shutdown()
 
 backup_id_counter = 0
 
 def new_backup_name(base_name):
     global backup_id_counter
     backup_id_counter += 1
     return f"Disk('backups', '{base_name}{backup_id_counter}')"
 
 def test_on_cluster():
     node1.query_with_retry("CREATE DATABASE keeper_backup ON CLUSTER cluster")
-    node1.query_with_retry("CREATE TABLE keeper_backup.keeper1 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/test_on_cluster1') PRIMARY KEY key")
-    node1.query_with_retry("CREATE TABLE keeper_backup.keeper2 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/test_on_cluster1') PRIMARY KEY key")
-    node1.query_with_retry("CREATE TABLE keeper_backup.keeper3 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/test_on_cluster2') PRIMARY KEY key")
-    node1.query_with_retry("INSERT INTO keeper_backup.keeper2 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5")
-    node1.query_with_retry("INSERT INTO keeper_backup.keeper3 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5")
+    node1.query_with_retry(
+        "CREATE TABLE keeper_backup.keeper1 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/test_on_cluster1') PRIMARY KEY key"
+    )
+    node1.query_with_retry(
+        "CREATE TABLE keeper_backup.keeper2 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/test_on_cluster1') PRIMARY KEY key"
+    )
+    node1.query_with_retry(
+        "CREATE TABLE keeper_backup.keeper3 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/test_on_cluster2') PRIMARY KEY key"
+    )
+    node1.query_with_retry(
+        "INSERT INTO keeper_backup.keeper2 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5"
+    )
+    node1.query_with_retry(
+        "INSERT INTO keeper_backup.keeper3 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5"
+    )
 
-    expected_result = ''.join(f'{i}\ttest{i}\n' for i in range(5))
+    expected_result = "".join(f"{i}\ttest{i}\n" for i in range(5))
 
     def verify_data():
         for node in [node1, node2, node3]:
             for i in range(1, 4):
-                result = node.query_with_retry(f'SELECT key, value FROM keeper_backup.keeper{i} ORDER BY key FORMAT TSV')
+                result = node.query_with_retry(
+                    f"SELECT key, value FROM keeper_backup.keeper{i} ORDER BY key FORMAT TSV"
+                )
                 assert result == expected_result
 
     verify_data()
 
-    backup_name = new_backup_name('test_on_cluster')
-    node1.query(f"BACKUP DATABASE keeper_backup ON CLUSTER cluster TO {backup_name} SETTINGS async = false;")
+    backup_name = new_backup_name("test_on_cluster")
+    node1.query(
+        f"BACKUP DATABASE keeper_backup ON CLUSTER cluster TO {backup_name} SETTINGS async = false;"
+    )
 
     node1.query("DROP DATABASE keeper_backup ON CLUSTER cluster SYNC;")
@@ -91,21 +108,27 @@ def test_on_cluster():
     def change_keeper_map_prefix(node):
         node.replace_config(
-            "/etc/clickhouse-server/config.d/keeper_map_path_prefix.xml", """
+            "/etc/clickhouse-server/config.d/keeper_map_path_prefix.xml",
+            """
 <clickhouse>
     <keeper_map_path_prefix>/different_path/keeper_map</keeper_map_path_prefix>
 </clickhouse>
-""")
+""",
+        )
 
     apply_for_all_nodes(lambda node: node.stop_clickhouse())
     apply_for_all_nodes(change_keeper_map_prefix)
     apply_for_all_nodes(lambda node: node.start_clickhouse())
 
-    node1.query(f"RESTORE DATABASE keeper_backup ON CLUSTER cluster FROM {backup_name} SETTINGS async = false;")
+    node1.query(
+        f"RESTORE DATABASE keeper_backup ON CLUSTER cluster FROM {backup_name} SETTINGS async = false;"
+    )
     verify_data()
 
     node1.query("DROP TABLE keeper_backup.keeper3 ON CLUSTER cluster SYNC;")
-    node1.query(f"RESTORE TABLE keeper_backup.keeper3 ON CLUSTER cluster FROM {backup_name} SETTINGS async = false;")
+    node1.query(
+        f"RESTORE TABLE keeper_backup.keeper3 ON CLUSTER cluster FROM {backup_name} SETTINGS async = false;"
+    )
     verify_data()