Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-10 09:32:06 +00:00.
Format with black

commit f03d4bb7d5 (parent 91b7001df6)
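The formatting hunks below are black's mechanical output at its default 88-column limit: a wrapped call that fits on one line gets joined, and an over-long line gets split with one bracket per level. A minimal sketch of that behavior, using black's format_str library API (the API exists but is documented as unstable, so treat the exact signature as an assumption; the snippets are taken from the diff itself):

import black

mode = black.Mode(line_length=88)  # black's default line length

# Joined exactly at the limit: once collapsed, this call is 88 columns,
# so black rewrites the wrapped form onto a single line (see the
# generate_random_files hunk below).
print(black.format_str(
    "def t():\n"
    "    _ = generate_random_files(\n"
    "        started_cluster, files_path, files_to_generate, row_num=1\n"
    "    )\n",
    mode=mode,
))

# Split just past the limit: with its indent this assert is 92 columns,
# so black breaks it across brackets instead (see the assert hunks below).
print(black.format_str(
    "def t():\n"
    '    assert files_to_generate == int(node.query(f"SELECT uniq(_path) from {dst_table_name}"))\n',
    mode=mode,
))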
@@ -823,7 +823,7 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode):
 def test_max_set_age(started_cluster):
     node = started_cluster.instances["instance"]
-    table_name = f"max_set_age_{uuid4().hex}"
+    table_name = "max_set_age"
     dst_table_name = f"{table_name}_dst"
     keeper_path = f"/clickhouse/test_{table_name}"
     files_path = f"{table_name}_data"
@@ -848,9 +848,7 @@ def test_max_set_age(started_cluster):
     )
     create_mv(node, table_name, dst_table_name)
 
-    _ = generate_random_files(
-        started_cluster, files_path, files_to_generate, row_num=1
-    )
+    _ = generate_random_files(started_cluster, files_path, files_to_generate, row_num=1)
 
     expected_rows = files_to_generate
 
@@ -869,13 +867,17 @@ def test_max_set_age(started_cluster):
         assert False
 
     wait_for_condition(lambda: get_count() == expected_rows)
-    assert files_to_generate == int(node.query(f"SELECT uniq(_path) from {dst_table_name}"))
+    assert files_to_generate == int(
+        node.query(f"SELECT uniq(_path) from {dst_table_name}")
+    )
 
     time.sleep(max_age + 5)
 
     expected_rows *= 2
     wait_for_condition(lambda: get_count() == expected_rows)
-    assert files_to_generate == int(node.query(f"SELECT uniq(_path) from {dst_table_name}"))
+    assert files_to_generate == int(
+        node.query(f"SELECT uniq(_path) from {dst_table_name}")
+    )
 
     paths_count = [
         int(x)
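wait_for_condition above comes from the suite's test helpers and is not part of this diff. A minimal sketch of the poll-until-true semantics it presumably implements (the name is from the diff; the signature, timeout, and poll interval here are assumptions):

import time

def wait_for_condition(check, timeout=60, interval=0.5):
    # Poll the predicate until it returns truthy or the deadline passes.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if check():
            return
        time.sleep(interval)
    raise TimeoutError(f"condition not met within {timeout}s")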
@@ -888,9 +890,11 @@ def test_max_set_age(started_cluster):
     assert 2 == path_count
 
     def get_object_storage_failures():
-        return int(node.query(
-            "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1"
-        ))
+        return int(
+            node.query(
+                "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1"
+            )
+        )
 
     failed_count = get_object_storage_failures()
 
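A detail worth noting in get_object_storage_failures: system.events normally omits counters that are still zero, so without SETTINGS system_events_show_zero_values=1 the query would return an empty result until the first failure occurs, and the int(...) conversion would raise.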
@@ -900,6 +904,8 @@ def test_max_set_age(started_cluster):
     values_csv = (
         "\n".join((",".join(map(str, row)) for row in values)) + "\n"
     ).encode()
 
+    # use a different filename for each test to allow running a bunch of them sequentially with --count
     file_with_error = f"fff_{uuid4().hex}.csv"
     put_s3_file_content(started_cluster, f"{files_path}/{file_with_error}", values_csv)
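The comment added above refers to rerunning one test many times in a single session (presumably pytest's --count option from the pytest-repeat plugin): with a fixed filename, each iteration would collide with the S3 object and queue metadata left by the previous one. A sketch of the isolation pattern, with unique_name as a hypothetical helper that is not in the test file:

from uuid import uuid4

def unique_name(prefix: str) -> str:
    # A fresh 32-character hex suffix per call keeps repeated runs from
    # sharing S3 keys, table names, or Keeper paths.
    return f"{prefix}_{uuid4().hex}"

file_with_error = unique_name("fff") + ".csv"  # e.g. "fff_<32 hex chars>.csv"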