mirror of https://github.com/ClickHouse/ClickHouse.git
Reformat test file according to Check black
parent 6a6505c224
commit 191767d300
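
For context: "Check black" refers to the CI style check that runs the black code formatter over ClickHouse's Python test files and fails when a file is not black-formatted. A reformat like the one in this commit can typically be reproduced locally with black itself; the file path below is a placeholder, since the commit page does not show the full path:

    python -m black --check --diff path/to/test.py   # report what the CI check would flag
    python -m black path/to/test.py                  # rewrite the file in place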
@@ -16,26 +16,46 @@ def cluster():
     try:
         cluster = ClickHouseCluster(__file__)

-        cluster.add_instance("node1z",
-                             main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],
-                             macros={"cluster": "node_zero_copy", "replica": "0"},
-                             with_minio=True, with_zookeeper=True, stay_alive=True)
-        cluster.add_instance("node2z",
-                             main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],
-                             macros={"cluster": "node_zero_copy", "replica": "1"},
-                             with_zookeeper=True, stay_alive=True)
-        cluster.add_instance("node1n",
-                             main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf_without_zero_copy.xml"],
-                             macros={"cluster": "node_no_zero_copy", "replica": "2"},
-                             with_minio=True, with_zookeeper=True, stay_alive=True)
-        cluster.add_instance("node2n",
-                             main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf_without_zero_copy.xml"],
-                             macros={"cluster": "node_no_zero_copy", "replica": "3"},
-                             with_zookeeper=True, stay_alive=True)
-        cluster.add_instance("node_another_bucket",
-                             main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf_another_bucket.xml"],
-                             macros={"cluster": "node_another_bucket", "replica": "0"},
-                             with_zookeeper=True, stay_alive=True)
+        cluster.add_instance(
+            "node1z",
+            main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],
+            macros={"cluster": "node_zero_copy", "replica": "0"},
+            with_minio=True,
+            with_zookeeper=True,
+            stay_alive=True,
+        )
+        cluster.add_instance(
+            "node2z",
+            main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],
+            macros={"cluster": "node_zero_copy", "replica": "1"},
+            with_zookeeper=True,
+            stay_alive=True,
+        )
+        cluster.add_instance(
+            "node1n",
+            main_configs=COMMON_CONFIGS
+            + ["configs/config.d/storage_conf_without_zero_copy.xml"],
+            macros={"cluster": "node_no_zero_copy", "replica": "2"},
+            with_minio=True,
+            with_zookeeper=True,
+            stay_alive=True,
+        )
+        cluster.add_instance(
+            "node2n",
+            main_configs=COMMON_CONFIGS
+            + ["configs/config.d/storage_conf_without_zero_copy.xml"],
+            macros={"cluster": "node_no_zero_copy", "replica": "3"},
+            with_zookeeper=True,
+            stay_alive=True,
+        )
+        cluster.add_instance(
+            "node_another_bucket",
+            main_configs=COMMON_CONFIGS
+            + ["configs/config.d/storage_conf_another_bucket.xml"],
+            macros={"cluster": "node_another_bucket", "replica": "0"},
+            with_zookeeper=True,
+            stay_alive=True,
+        )

         logging.info("Starting cluster...")
         cluster.start()
@@ -48,12 +68,16 @@ def cluster():

 def random_string(length):
     letters = string.ascii_letters
-    return ''.join(random.choice(letters) for i in range(length))
+    return "".join(random.choice(letters) for i in range(length))


 def create_table(node, table_name, schema, attach=False, db_atomic=False, uuid=""):
-    node.query("CREATE DATABASE IF NOT EXISTS s3 {on_cluster} ENGINE = {engine}".format(engine="Atomic" if db_atomic else "Ordinary",
-                                                                                        on_cluster="ON CLUSTER '{cluster}'"))
+    node.query(
+        "CREATE DATABASE IF NOT EXISTS s3 {on_cluster} ENGINE = {engine}".format(
+            engine="Atomic" if db_atomic else "Ordinary",
+            on_cluster="ON CLUSTER '{cluster}'",
+        )
+    )

     create_table_statement = """
     {create} TABLE s3.{table_name} {uuid} {on_cluster} (
@@ -66,12 +90,14 @@ def create_table(node, table_name, schema, attach=False, db_atomic=False, uuid="
         storage_policy='s3',
         old_parts_lifetime=600,
         index_granularity=512
-    """.format(create="ATTACH" if attach else "CREATE",
-               table_name=table_name,
-               uuid="UUID '{uuid}'".format(uuid=uuid) if db_atomic and uuid else "",
-               on_cluster="ON CLUSTER '{cluster}'",
-               schema=schema,
-               engine="ReplicatedMergeTree('/clickhouse/tables/{cluster}/test', '{replica}')")
+    """.format(
+        create="ATTACH" if attach else "CREATE",
+        table_name=table_name,
+        uuid="UUID '{uuid}'".format(uuid=uuid) if db_atomic and uuid else "",
+        on_cluster="ON CLUSTER '{cluster}'",
+        schema=schema,
+        engine="ReplicatedMergeTree('/clickhouse/tables/{cluster}/test', '{replica}')",
+    )

     node.query(create_table_statement)

@@ -85,37 +111,68 @@ def purge_s3(cluster, bucket):


 def drop_s3_metadata(node):
-    node.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/disks/s3/*'], user='root')
+    node.exec_in_container(
+        ["bash", "-c", "rm -rf /var/lib/clickhouse/disks/s3/*"], user="root"
+    )


 def drop_shadow_information(node):
-    node.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/shadow/*'], user='root')
+    node.exec_in_container(
+        ["bash", "-c", "rm -rf /var/lib/clickhouse/shadow/*"], user="root"
+    )


 def create_restore_file(node, revision=None, bucket=None, path=None, detached=None):
-    node.exec_in_container(['bash', '-c', 'mkdir -p /var/lib/clickhouse/disks/s3/'], user='root')
-    node.exec_in_container(['bash', '-c', 'touch /var/lib/clickhouse/disks/s3/restore'], user='root')
+    node.exec_in_container(
+        ["bash", "-c", "mkdir -p /var/lib/clickhouse/disks/s3/"], user="root"
+    )
+    node.exec_in_container(
+        ["bash", "-c", "touch /var/lib/clickhouse/disks/s3/restore"], user="root"
+    )

     add_restore_option = 'echo -en "{}={}\n" >> /var/lib/clickhouse/disks/s3/restore'
     if revision:
-        node.exec_in_container(['bash', '-c', add_restore_option.format('revision', revision)], user='root')
+        node.exec_in_container(
+            ["bash", "-c", add_restore_option.format("revision", revision)], user="root"
+        )
     if bucket:
-        node.exec_in_container(['bash', '-c', add_restore_option.format('source_bucket', bucket)], user='root')
+        node.exec_in_container(
+            ["bash", "-c", add_restore_option.format("source_bucket", bucket)],
+            user="root",
+        )
     if path:
-        node.exec_in_container(['bash', '-c', add_restore_option.format('source_path', path)], user='root')
+        node.exec_in_container(
+            ["bash", "-c", add_restore_option.format("source_path", path)], user="root"
+        )
     if detached:
-        node.exec_in_container(['bash', '-c', add_restore_option.format('detached', 'true')], user='root')
+        node.exec_in_container(
+            ["bash", "-c", add_restore_option.format("detached", "true")], user="root"
+        )


 def get_revision_counter(node, backup_number):
-    return int(node.exec_in_container(
-        ['bash', '-c', 'cat /var/lib/clickhouse/disks/s3/shadow/{}/revision.txt'.format(backup_number)], user='root'))
+    return int(
+        node.exec_in_container(
+            [
+                "bash",
+                "-c",
+                "cat /var/lib/clickhouse/disks/s3/shadow/{}/revision.txt".format(
+                    backup_number
+                ),
+            ],
+            user="root",
+        )
+    )


 def get_table_uuid(node, db_atomic, table):
     uuid = ""
     if db_atomic:
-        uuid = node.query("SELECT uuid FROM system.tables WHERE database='s3' AND table='{}' FORMAT TabSeparated".format(table)).strip()
+        uuid = node.query(
+            "SELECT uuid FROM system.tables WHERE database='s3' AND table='{}' FORMAT TabSeparated".format(
+                table
+            )
+        ).strip()
     return uuid


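(Aside, not part of the diff: the create_restore_file helper reformatted above simply appends key=value lines to /var/lib/clickhouse/disks/s3/restore, the marker file the S3 disk consults when the test later runs SYSTEM RESTART DISK s3. With every option set, the generated file would look roughly like this, with illustrative values:

    revision=1
    source_bucket=root
    source_path=data/
    detached=true
)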
@@ -138,12 +195,8 @@ def drop_table(cluster):
     purge_s3(cluster, bucket)


-@pytest.mark.parametrize(
-    "db_atomic", [False, True]
-)
-@pytest.mark.parametrize(
-    "zero_copy", [False, True]
-)
+@pytest.mark.parametrize("db_atomic", [False, True])
+@pytest.mark.parametrize("zero_copy", [False, True])
 def test_restore_another_bucket_path(cluster, db_atomic, zero_copy):
     suffix = "z" if zero_copy else "n"
     nodes = [cluster.instances[f"node1{suffix}"], cluster.instances[f"node2{suffix}"]]
@@ -160,18 +213,21 @@ def test_restore_another_bucket_path(cluster, db_atomic, zero_copy):
     create_table(nodes[0], "test", schema, db_atomic=db_atomic)
     uuid = get_table_uuid(nodes[0], db_atomic, "test")

-
     dropped_keys = 0

     for key in range(0, keys):
         node = nodes[key % 2]
-        node.query("INSERT INTO s3.test SELECT {key}, * FROM generateRandom('{schema}') LIMIT {size}".format(key=key, schema=schema, size=size))
+        node.query(
+            "INSERT INTO s3.test SELECT {key}, * FROM generateRandom('{schema}') LIMIT {size}".format(
+                key=key, schema=schema, size=size
+            )
+        )
         if not (key % 3):
             dropped_keys += 1
             node.query("ALTER TABLE s3.test DROP PARTITION '{key}'".format(key=key))

     for key in range(0, keys):
-        if not ((key+1) % 3):
+        if not ((key + 1) % 3):
             dropped_keys += 1
             node.query("ALTER TABLE s3.test DROP PARTITION '{key}'".format(key=key))

@@ -182,14 +238,21 @@ def test_restore_another_bucket_path(cluster, db_atomic, zero_copy):
     nodes[0].query("OPTIMIZE TABLE s3.test")
     nodes[1].query("OPTIMIZE TABLE s3.test")

-    assert nodes[0].query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(size * (keys - dropped_keys))
-    assert nodes[1].query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(size * (keys - dropped_keys))
+    assert nodes[0].query(
+        "SELECT count(*) FROM s3.test FORMAT Values"
+    ) == "({})".format(size * (keys - dropped_keys))
+    assert nodes[1].query(
+        "SELECT count(*) FROM s3.test FORMAT Values"
+    ) == "({})".format(size * (keys - dropped_keys))

     node_another_bucket = cluster.instances["node_another_bucket"]

     create_restore_file(node_another_bucket, bucket="root")
     node_another_bucket.query("SYSTEM RESTART DISK s3")
-    create_table(node_another_bucket, "test", schema, attach=True, db_atomic=db_atomic, uuid=uuid)
+    create_table(
+        node_another_bucket, "test", schema, attach=True, db_atomic=db_atomic, uuid=uuid
+    )

-    assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(size * (keys - dropped_keys))
+    assert node_another_bucket.query(
+        "SELECT count(*) FROM s3.test FORMAT Values"
+    ) == "({})".format(size * (keys - dropped_keys))
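
Taken together, the hunks are purely mechanical and show black's main rules in action: string quotes normalized to double quotes, long call sites exploded to one argument per line, and a trailing comma added after the last argument (the "magic trailing comma") so the call stays in exploded form on later runs. A minimal before/after sketch of the same transformation, with made-up names and black's default 88-character line limit assumed:

    # before black: over the line limit, single quotes, arguments packed together
    result = client.run('some long command here', option_one=1, option_two=2, option_three=3)

    # after black: double quotes, one argument per line, magic trailing comma
    result = client.run(
        "some long command here",
        option_one=1,
        option_two=2,
        option_three=3,
    )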