mirror of https://github.com/ClickHouse/ClickHouse.git

Reformat test file according to Check black

parent 6a6505c224
commit 191767d300
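
For context: "Check black" is evidently the CI style job that runs the black
formatter in check mode over the integration tests, and the hunks below are the
mechanical result of letting black rewrite this test file. A minimal sketch of
reproducing the first hunk with black's Python API (illustrative only, not part
of this commit; assumes the black package is installed):

    import black

    # The pre-commit spelling of the first add_instance call.
    src = (
        'cluster.add_instance("node1z",\n'
        '                     main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],\n'
        '                     macros={"cluster": "node_zero_copy", "replica": "0"},\n'
        '                     with_minio=True, with_zookeeper=True, stay_alive=True)\n'
    )

    # format_str applies the same rules as the black CLI: the long call is
    # exploded to one argument per line with a trailing comma, as in the diff.
    print(black.format_str(src, mode=black.FileMode()))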
@@ -16,26 +16,46 @@ def cluster():
     try:
         cluster = ClickHouseCluster(__file__)
 
-        cluster.add_instance("node1z",
-                             main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],
-                             macros={"cluster": "node_zero_copy", "replica": "0"},
-                             with_minio=True, with_zookeeper=True, stay_alive=True)
-        cluster.add_instance("node2z",
-                             main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],
-                             macros={"cluster": "node_zero_copy", "replica": "1"},
-                             with_zookeeper=True, stay_alive=True)
-        cluster.add_instance("node1n",
-                             main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf_without_zero_copy.xml"],
-                             macros={"cluster": "node_no_zero_copy", "replica": "2"},
-                             with_minio=True, with_zookeeper=True, stay_alive=True)
-        cluster.add_instance("node2n",
-                             main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf_without_zero_copy.xml"],
-                             macros={"cluster": "node_no_zero_copy", "replica": "3"},
-                             with_zookeeper=True, stay_alive=True)
-        cluster.add_instance("node_another_bucket",
-                             main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf_another_bucket.xml"],
-                             macros={"cluster": "node_another_bucket", "replica": "0"},
-                             with_zookeeper=True, stay_alive=True)
+        cluster.add_instance(
+            "node1z",
+            main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],
+            macros={"cluster": "node_zero_copy", "replica": "0"},
+            with_minio=True,
+            with_zookeeper=True,
+            stay_alive=True,
+        )
+        cluster.add_instance(
+            "node2z",
+            main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],
+            macros={"cluster": "node_zero_copy", "replica": "1"},
+            with_zookeeper=True,
+            stay_alive=True,
+        )
+        cluster.add_instance(
+            "node1n",
+            main_configs=COMMON_CONFIGS
+            + ["configs/config.d/storage_conf_without_zero_copy.xml"],
+            macros={"cluster": "node_no_zero_copy", "replica": "2"},
+            with_minio=True,
+            with_zookeeper=True,
+            stay_alive=True,
+        )
+        cluster.add_instance(
+            "node2n",
+            main_configs=COMMON_CONFIGS
+            + ["configs/config.d/storage_conf_without_zero_copy.xml"],
+            macros={"cluster": "node_no_zero_copy", "replica": "3"},
+            with_zookeeper=True,
+            stay_alive=True,
+        )
+        cluster.add_instance(
+            "node_another_bucket",
+            main_configs=COMMON_CONFIGS
+            + ["configs/config.d/storage_conf_another_bucket.xml"],
+            macros={"cluster": "node_another_bucket", "replica": "0"},
+            with_zookeeper=True,
+            stay_alive=True,
+        )
 
         logging.info("Starting cluster...")
         cluster.start()
@@ -48,12 +68,16 @@ def cluster():
 
 def random_string(length):
     letters = string.ascii_letters
-    return ''.join(random.choice(letters) for i in range(length))
+    return "".join(random.choice(letters) for i in range(length))
 
 
 def create_table(node, table_name, schema, attach=False, db_atomic=False, uuid=""):
-    node.query("CREATE DATABASE IF NOT EXISTS s3 {on_cluster} ENGINE = {engine}".format(engine="Atomic" if db_atomic else "Ordinary",
-                                                                                        on_cluster="ON CLUSTER '{cluster}'"))
+    node.query(
+        "CREATE DATABASE IF NOT EXISTS s3 {on_cluster} ENGINE = {engine}".format(
+            engine="Atomic" if db_atomic else "Ordinary",
+            on_cluster="ON CLUSTER '{cluster}'",
+        )
+    )
 
     create_table_statement = """
         {create} TABLE s3.{table_name} {uuid} {on_cluster} (
@@ -66,12 +90,14 @@ def create_table(node, table_name, schema, attach=False, db_atomic=False, uuid="
             storage_policy='s3',
             old_parts_lifetime=600,
             index_granularity=512
-        """.format(create="ATTACH" if attach else "CREATE",
-                   table_name=table_name,
-                   uuid="UUID '{uuid}'".format(uuid=uuid) if db_atomic and uuid else "",
-                   on_cluster="ON CLUSTER '{cluster}'",
-                   schema=schema,
-                   engine="ReplicatedMergeTree('/clickhouse/tables/{cluster}/test', '{replica}')")
+        """.format(
+        create="ATTACH" if attach else "CREATE",
+        table_name=table_name,
+        uuid="UUID '{uuid}'".format(uuid=uuid) if db_atomic and uuid else "",
+        on_cluster="ON CLUSTER '{cluster}'",
+        schema=schema,
+        engine="ReplicatedMergeTree('/clickhouse/tables/{cluster}/test', '{replica}')",
+    )
 
     node.query(create_table_statement)
 
@@ -85,37 +111,68 @@ def purge_s3(cluster, bucket):
 
 
 def drop_s3_metadata(node):
-    node.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/disks/s3/*'], user='root')
+    node.exec_in_container(
+        ["bash", "-c", "rm -rf /var/lib/clickhouse/disks/s3/*"], user="root"
+    )
 
 
 def drop_shadow_information(node):
-    node.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/shadow/*'], user='root')
+    node.exec_in_container(
+        ["bash", "-c", "rm -rf /var/lib/clickhouse/shadow/*"], user="root"
+    )
 
 
 def create_restore_file(node, revision=None, bucket=None, path=None, detached=None):
-    node.exec_in_container(['bash', '-c', 'mkdir -p /var/lib/clickhouse/disks/s3/'], user='root')
-    node.exec_in_container(['bash', '-c', 'touch /var/lib/clickhouse/disks/s3/restore'], user='root')
+    node.exec_in_container(
+        ["bash", "-c", "mkdir -p /var/lib/clickhouse/disks/s3/"], user="root"
+    )
+    node.exec_in_container(
+        ["bash", "-c", "touch /var/lib/clickhouse/disks/s3/restore"], user="root"
+    )
 
     add_restore_option = 'echo -en "{}={}\n" >> /var/lib/clickhouse/disks/s3/restore'
     if revision:
-        node.exec_in_container(['bash', '-c', add_restore_option.format('revision', revision)], user='root')
+        node.exec_in_container(
+            ["bash", "-c", add_restore_option.format("revision", revision)], user="root"
+        )
     if bucket:
-        node.exec_in_container(['bash', '-c', add_restore_option.format('source_bucket', bucket)], user='root')
+        node.exec_in_container(
+            ["bash", "-c", add_restore_option.format("source_bucket", bucket)],
+            user="root",
+        )
     if path:
-        node.exec_in_container(['bash', '-c', add_restore_option.format('source_path', path)], user='root')
+        node.exec_in_container(
+            ["bash", "-c", add_restore_option.format("source_path", path)], user="root"
+        )
    if detached:
-        node.exec_in_container(['bash', '-c', add_restore_option.format('detached', 'true')], user='root')
+        node.exec_in_container(
+            ["bash", "-c", add_restore_option.format("detached", "true")], user="root"
+        )
 
 
 def get_revision_counter(node, backup_number):
-    return int(node.exec_in_container(
-        ['bash', '-c', 'cat /var/lib/clickhouse/disks/s3/shadow/{}/revision.txt'.format(backup_number)], user='root'))
+    return int(
+        node.exec_in_container(
+            [
+                "bash",
+                "-c",
+                "cat /var/lib/clickhouse/disks/s3/shadow/{}/revision.txt".format(
+                    backup_number
+                ),
+            ],
+            user="root",
+        )
+    )
 
 
 def get_table_uuid(node, db_atomic, table):
     uuid = ""
     if db_atomic:
-        uuid = node.query("SELECT uuid FROM system.tables WHERE database='s3' AND table='{}' FORMAT TabSeparated".format(table)).strip()
+        uuid = node.query(
+            "SELECT uuid FROM system.tables WHERE database='s3' AND table='{}' FORMAT TabSeparated".format(
+                table
+            )
+        ).strip()
     return uuid
 
 
@@ -138,12 +195,8 @@ def drop_table(cluster):
     purge_s3(cluster, bucket)
 
 
-@pytest.mark.parametrize(
-    "db_atomic", [False, True]
-)
-@pytest.mark.parametrize(
-    "zero_copy", [False, True]
-)
+@pytest.mark.parametrize("db_atomic", [False, True])
+@pytest.mark.parametrize("zero_copy", [False, True])
 def test_restore_another_bucket_path(cluster, db_atomic, zero_copy):
     suffix = "z" if zero_copy else "n"
     nodes = [cluster.instances[f"node1{suffix}"], cluster.instances[f"node2{suffix}"]]
@@ -160,18 +213,21 @@ def test_restore_another_bucket_path(cluster, db_atomic, zero_copy):
     create_table(nodes[0], "test", schema, db_atomic=db_atomic)
     uuid = get_table_uuid(nodes[0], db_atomic, "test")
 
-
     dropped_keys = 0
 
     for key in range(0, keys):
         node = nodes[key % 2]
-        node.query("INSERT INTO s3.test SELECT {key}, * FROM generateRandom('{schema}') LIMIT {size}".format(key=key, schema=schema, size=size))
+        node.query(
+            "INSERT INTO s3.test SELECT {key}, * FROM generateRandom('{schema}') LIMIT {size}".format(
+                key=key, schema=schema, size=size
+            )
+        )
         if not (key % 3):
             dropped_keys += 1
             node.query("ALTER TABLE s3.test DROP PARTITION '{key}'".format(key=key))
 
     for key in range(0, keys):
-        if not ((key+1) % 3):
+        if not ((key + 1) % 3):
             dropped_keys += 1
             node.query("ALTER TABLE s3.test DROP PARTITION '{key}'".format(key=key))
 
@@ -182,14 +238,21 @@ def test_restore_another_bucket_path(cluster, db_atomic, zero_copy):
     nodes[0].query("OPTIMIZE TABLE s3.test")
     nodes[1].query("OPTIMIZE TABLE s3.test")
 
-    assert nodes[0].query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(size * (keys - dropped_keys))
-    assert nodes[1].query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(size * (keys - dropped_keys))
+    assert nodes[0].query(
+        "SELECT count(*) FROM s3.test FORMAT Values"
+    ) == "({})".format(size * (keys - dropped_keys))
+    assert nodes[1].query(
+        "SELECT count(*) FROM s3.test FORMAT Values"
+    ) == "({})".format(size * (keys - dropped_keys))
 
     node_another_bucket = cluster.instances["node_another_bucket"]
 
     create_restore_file(node_another_bucket, bucket="root")
     node_another_bucket.query("SYSTEM RESTART DISK s3")
-    create_table(node_another_bucket, "test", schema, attach=True, db_atomic=db_atomic, uuid=uuid)
+    create_table(
+        node_another_bucket, "test", schema, attach=True, db_atomic=db_atomic, uuid=uuid
+    )
 
-
-    assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(size * (keys - dropped_keys))
+    assert node_another_bucket.query(
+        "SELECT count(*) FROM s3.test FORMAT Values"
+    ) == "({})".format(size * (keys - dropped_keys))
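
A quick way to convince yourself that a pure-formatting commit like this is
behavior-preserving (again an illustrative sketch, not part of the commit): the
old and new spellings parse to identical ASTs, so only layout changed. The
other recurring rewrites above are the same story: double-quoted strings,
spaces around operators as in key + 1, and consecutive blank lines inside
functions reduced to one.

    import ast

    old = """cluster.add_instance("node1z",
                         main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],
                         macros={"cluster": "node_zero_copy", "replica": "0"},
                         with_minio=True, with_zookeeper=True, stay_alive=True)"""

    new = """cluster.add_instance(
        "node1z",
        main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"],
        macros={"cluster": "node_zero_copy", "replica": "0"},
        with_minio=True,
        with_zookeeper=True,
        stay_alive=True,
    )"""

    # ast.dump omits line/column positions by default, so two sources that
    # differ only in formatting produce identical dumps.
    assert ast.dump(ast.parse(old)) == ast.dump(ast.parse(new))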