#!/usr/bin/env python3
import os.path
import re
import time
from multiprocessing.dummy import Pool

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry

# Single-node cluster with MinIO attached so the `s3` storage policy
# (configured in configs/storage_conf.xml) is available to the test table.
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    "node",
    main_configs=["configs/storage_conf.xml"],
    with_minio=True,
)
@pytest.fixture(scope="module")
def start_cluster():
    """Start the cluster once for the whole module; always shut it down."""
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
@pytest.mark.skip(reason="broken test")
def test_concurrent_backups(start_cluster):
    """Launch 40 concurrent async backups of one S3-backed table.

    Creates a wide (1000-column) MergeTree table on the `s3` storage
    policy, fires 40 `BACKUP ... ASYNC` statements in parallel (each to
    its own path on the `hdd` disk), waits until every backup reaches a
    terminal state, then verifies the table is still fully readable.
    """
    node.query("DROP TABLE IF EXISTS s3_test SYNC")

    # A wide table makes each backup touch many column files, increasing
    # the chance of contention between the concurrent backups.
    columns = [f"column_{i} UInt64" for i in range(1000)]
    columns_str = ", ".join(columns)
    node.query(
        f"CREATE TABLE s3_test ({columns_str}) Engine=MergeTree() ORDER BY tuple() SETTINGS storage_policy='s3';"
    )
    node.query(
        f"INSERT INTO s3_test SELECT * FROM generateRandom('{columns_str}') LIMIT 10000"
    )

    def create_backup(i):
        # Each backup gets a distinct destination; ASYNC returns immediately,
        # so all 40 backups run concurrently on the server.
        backup_name = f"Disk('hdd', '/backups/{i}')"
        node.query(f"BACKUP TABLE s3_test TO {backup_name} ASYNC")

    # FIX: the original leaked the thread pool (no close()/join()); the
    # context manager guarantees the workers are torn down.
    with Pool(40) as p:
        p.map(create_backup, range(40))

    # Wait until no backup is left in a non-terminal state.
    assert_eq_with_retry(
        node,
        "SELECT count() FROM system.backups WHERE status != 'BACKUP_CREATED' and status != 'BACKUP_FAILED'",
        "0",
        sleep_time=5,
        retry_count=40,  # 200 seconds must be enough
    )

    assert node.query("SELECT count() FROM s3_test where not ignore(*)") == "10000\n"