From 1bc943264c24f9ec92fa95ff8c787d6377362003 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Thu, 14 Jul 2022 21:38:17 +0200
Subject: [PATCH] Add concurrent backups test

---
 .../configs/storage_conf.xml                     | 35 ++++++++++++++
 .../test_concurrent_backups_s3/test.py           | 48 +++++++++++++++++++
 2 files changed, 83 insertions(+)
 create mode 100644 tests/integration/test_concurrent_backups_s3/configs/storage_conf.xml
 create mode 100644 tests/integration/test_concurrent_backups_s3/test.py

diff --git a/tests/integration/test_concurrent_backups_s3/configs/storage_conf.xml b/tests/integration/test_concurrent_backups_s3/configs/storage_conf.xml
new file mode 100644
index 00000000000..ef55f3a62d7
--- /dev/null
+++ b/tests/integration/test_concurrent_backups_s3/configs/storage_conf.xml
@@ -0,0 +1,35 @@
+<clickhouse>
+    <storage_configuration>
+        <disks>
+            <s3>
+                <type>s3</type>
+                <endpoint>http://minio1:9001/root/data/</endpoint>
+                <access_key_id>minio</access_key_id>
+                <secret_access_key>minio123</secret_access_key>
+                <s3_max_single_part_upload_size>33554432</s3_max_single_part_upload_size>
+            </s3>
+            <hdd>
+                <type>local</type>
+                <path>/</path>
+            </hdd>
+        </disks>
+        <policies>
+            <s3>
+                <volumes>
+                    <main>
+                        <disk>s3</disk>
+                    </main>
+                </volumes>
+            </s3>
+        </policies>
+    </storage_configuration>
+
+    <merge_tree>
+        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
+    </merge_tree>
+
+    <backups>
+        <allowed_disk>hdd</allowed_disk>
+        <allowed_path>/backups/</allowed_path>
+    </backups>
+</clickhouse>
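Note (reviewer aside, not part of the patch): the <backups> section above is what authorizes the Disk('hdd', ...) destinations the test uses; BACKUP only accepts disks named in <allowed_disk> and paths under <allowed_path>. A minimal sketch of the effect, assuming the `node` instance defined in test.py below with this storage_conf.xml loaded:

    # Sketch only: `node` is the ClickHouse instance from test.py.
    node.query("BACKUP TABLE s3_test TO Disk('hdd', '/backups/1')")  # permitted
    # A destination outside the whitelist, e.g. Disk('s3', ...) or a path
    # outside /backups/, is rejected by the server with an error.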
diff --git a/tests/integration/test_concurrent_backups_s3/test.py b/tests/integration/test_concurrent_backups_s3/test.py
new file mode 100644
index 00000000000..591715aff17
--- /dev/null
+++ b/tests/integration/test_concurrent_backups_s3/test.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+import pytest
+import re
+import os.path
+from multiprocessing.dummy import Pool
+from helpers.cluster import ClickHouseCluster
+from helpers.test_tools import assert_eq_with_retry, TSV
+import time
+
+cluster = ClickHouseCluster(__file__)
+node = cluster.add_instance(
+    "node",
+    main_configs=["configs/storage_conf.xml"],
+    with_minio=True,
+)
+
+@pytest.fixture(scope="module")
+def start_cluster():
+    try:
+        cluster.start()
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+def test_concurrent_backups(start_cluster):
+    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    columns = [f"column_{i} UInt64" for i in range(1000)]
+    columns_str = ', '.join(columns)
+    node.query(
+        f"CREATE TABLE s3_test ({columns_str}) Engine=MergeTree() ORDER BY tuple() SETTINGS storage_policy='s3';"
+    )
+    node.query(
+        f"INSERT INTO s3_test SELECT * FROM generateRandom('{columns_str}') LIMIT 10000"
+    )
+
+    def create_backup(i):
+        backup_name = f"Disk('hdd', '/backups/{i}')"
+        node.query(f"BACKUP TABLE s3_test TO {backup_name} ASYNC")
+
+    p = Pool(20)
+
+    p.map(create_backup, range(40))
+
+    for _ in range(20):
+        print(node.query("SELECT * FROM system.backups FORMAT Vertical"))
+        time.sleep(0.1)
+
+    assert node.query("SELECT count() FROM s3_test where not ignore(*)") == "10000\n"
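Note (reviewer aside, not part of the patch): the loop at the end only prints system.backups for about two seconds, and the final assertion checks the source table, not backup success, so some of the 40 ASYNC backups can still be in flight when the test returns. If a stricter wait were wanted, the assert_eq_with_retry helper that test.py already imports could poll until nothing is in progress. A sketch, where the status column name and the 'MAKING_BACKUP' value are assumptions about the system.backups schema of the server version under test:

    # Sketch only: wait until system.backups reports no backup in flight.
    # The column/status names below are assumptions; verify them against
    # `DESCRIBE system.backups` for the server version under test.
    from helpers.test_tools import assert_eq_with_retry

    def wait_for_all_backups(node):
        assert_eq_with_retry(
            node,
            "SELECT count() FROM system.backups WHERE status = 'MAKING_BACKUP'",
            "0",
            retry_count=60,
            sleep_time=0.5,
        )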