Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 15:42:02 +00:00)
Add concurrent backups test
commit 1bc943264c
parent f0d4a5c93a
35
tests/integration/test_concurrent_backups_s3/configs/storage_conf.xml
Normal file
@@ -0,0 +1,35 @@
<clickhouse>
    <storage_configuration>
        <disks>
            <s3>
                <type>s3</type>
                <endpoint>http://minio1:9001/root/data/</endpoint>
                <access_key_id>minio</access_key_id>
                <secret_access_key>minio123</secret_access_key>
                <s3_max_single_part_upload_size>33554432</s3_max_single_part_upload_size>
            </s3>
            <hdd>
                <type>local</type>
                <path>/</path>
            </hdd>
        </disks>
        <policies>
            <s3>
                <volumes>
                    <main>
                        <disk>s3</disk>
                    </main>
                </volumes>
            </s3>
        </policies>
    </storage_configuration>

    <merge_tree>
        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
    </merge_tree>

    <backups>
        <allowed_disk>hdd</allowed_disk>
        <allowed_path>/backups/</allowed_path>
    </backups>
</clickhouse>
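The <backups> section above allowlists only the local hdd disk and the /backups/ path as backup destinations. A minimal sketch (not part of the commit) of how that allowlist is exercised; 'example' is a hypothetical backup name, and `node` is the cluster instance defined in test.py below:

    # Sketch, not part of the commit: only destinations on disk 'hdd' under
    # '/backups/' pass the <allowed_disk>/<allowed_path> allowlist above.
    # 'example' is a hypothetical backup name; 's3_test' is the table
    # created in test.py.
    node.query("BACKUP TABLE s3_test TO Disk('hdd', '/backups/example')")   # allowed
    # node.query("BACKUP TABLE s3_test TO Disk('s3', '/x')")                # rejected by the allowlist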
48
tests/integration/test_concurrent_backups_s3/test.py
Normal file
@@ -0,0 +1,48 @@
#!/usr/bin/env python3
import pytest
import re
import os.path
from multiprocessing.dummy import Pool
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry, TSV
import time

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    "node",
    main_configs=["configs/storage_conf.xml"],
    with_minio=True,
)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_concurrent_backups(start_cluster):
    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
    # A wide table (1000 columns) on the S3 policy, so each backup copies many files.
    columns = [f"column_{i} UInt64" for i in range(1000)]
    columns_str = ", ".join(columns)
    node.query(
        f"CREATE TABLE s3_test ({columns_str}) Engine=MergeTree() ORDER BY tuple() SETTINGS storage_policy='s3';"
    )
    node.query(
        f"INSERT INTO s3_test SELECT * FROM generateRandom('{columns_str}') LIMIT 10000"
    )

    def create_backup(i):
        backup_name = f"Disk('hdd', '/backups/{i}')"
        node.query(f"BACKUP TABLE s3_test TO {backup_name} ASYNC")

    # Launch 40 asynchronous backups from a pool of 20 threads to stress concurrency.
    p = Pool(20)
    p.map(create_backup, range(40))

    # Observe backup progress for ~2 seconds.
    for _ in range(20):
        print(node.query("SELECT * FROM system.backups FORMAT Vertical"))
        time.sleep(0.1)

    # The source table must remain intact after the concurrent backups.
    assert node.query("SELECT count() FROM s3_test where not ignore(*)") == "10000\n"
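The print/sleep loop above only observes progress; it does not guarantee that all 40 backups have finished before the final assert runs. A possible tightening (a sketch, not part of the commit) using the already-imported assert_eq_with_retry helper; the 'MAKING_BACKUP' status string is an assumption about this server version and should be checked against system.backups on the target build:

    # Sketch, not part of the commit: block until no backup is still in flight.
    # 'MAKING_BACKUP' is an assumed in-flight status value for this version;
    # verify with: SELECT DISTINCT status FROM system.backups
    assert_eq_with_retry(
        node,
        "SELECT count() FROM system.backups WHERE status == 'MAKING_BACKUP'",
        "0",
        retry_count=100,
        sleep_time=0.5,
    )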