"""Integration test for BACKUP/RESTORE of KeeperMap tables ON CLUSTER.

Original location: ClickHouse/tests/integration/test_backup_restore_keeper_map/test.py
"""
from time import sleep
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)

# Configs shared by every instance: cluster topology, the backup disk,
# and the Keeper path prefix under which KeeperMap tables store data.
main_configs = [
    "configs/remote_servers.xml",
    "configs/backups_disk.xml",
    "configs/keeper_map_path_prefix.xml",
]
user_configs = [
    "configs/zookeeper_retries.xml",
]

# Three replicas across two shards. stay_alive=True lets the test stop and
# restart servers (needed to swap the keeper_map_path_prefix config);
# external_dirs exposes the backup destination on the host.
node1 = cluster.add_instance(
    "node1",
    main_configs=main_configs,
    user_configs=user_configs,
    external_dirs=["/backups/"],
    macros={"replica": "node1", "shard": "shard1"},
    with_zookeeper=True,
    stay_alive=True,
)

node2 = cluster.add_instance(
    "node2",
    main_configs=main_configs,
    user_configs=user_configs,
    external_dirs=["/backups/"],
    macros={"replica": "node2", "shard": "shard1"},
    with_zookeeper=True,
    stay_alive=True,
)

node3 = cluster.add_instance(
    "node3",
    main_configs=main_configs,
    user_configs=user_configs,
    external_dirs=["/backups/"],
    macros={"replica": "node3", "shard": "shard2"},
    with_zookeeper=True,
    stay_alive=True,
)
@pytest.fixture(scope="module", autouse=True)
def start_cluster():
    """Start the shared cluster once for the whole module.

    The finally-clause guarantees the cluster is torn down even if
    startup (or any test) fails partway through.
    """
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
backup_id_counter = 0


def new_backup_name(base_name):
    """Return a unique ``Disk(...)`` backup destination for *base_name*.

    A module-level counter is appended to the name so that successive
    backups within one test session never collide on the backup disk.
    """
    global backup_id_counter
    backup_id_counter += 1
    return f"Disk('backups', '{base_name}{backup_id_counter}')"
@pytest.mark.parametrize("deduplicate_files", [0, 1])
def test_on_cluster(deduplicate_files):
    """Back up and restore a database of KeeperMap tables ON CLUSTER.

    Flow: create three KeeperMap tables, populate them, take an ON CLUSTER
    backup (with and without file deduplication), drop the database, move
    the server-side keeper_map_path_prefix to a different Keeper path, and
    verify that RESTORE recreates the data under the new prefix — first for
    the whole database, then for a single table.
    """
    database_name = f"keeper_backup{deduplicate_files}"
    node1.query_with_retry(f"CREATE DATABASE {database_name} ON CLUSTER cluster")

    # keeper1 and keeper2 deliberately share the same Keeper path
    # ('.../test_on_cluster1'), so rows inserted through keeper2 are also
    # visible through keeper1; keeper3 uses its own path.
    node1.query_with_retry(
        f"CREATE TABLE {database_name}.keeper1 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/{database_name}/test_on_cluster1') PRIMARY KEY key"
    )
    node1.query_with_retry(
        f"CREATE TABLE {database_name}.keeper2 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/{database_name}/test_on_cluster1') PRIMARY KEY key"
    )
    node1.query_with_retry(
        f"CREATE TABLE {database_name}.keeper3 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/{database_name}/test_on_cluster2') PRIMARY KEY key"
    )
    node1.query_with_retry(
        f"INSERT INTO {database_name}.keeper2 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5"
    )
    node1.query_with_retry(
        f"INSERT INTO {database_name}.keeper3 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5"
    )

    expected_result = "".join(f"{i}\ttest{i}\n" for i in range(5))

    def verify_data():
        # Every replica must see the same 5 rows through every table.
        for node in [node1, node2, node3]:
            for i in range(1, 4):
                result = node.query_with_retry(
                    f"SELECT key, value FROM {database_name}.keeper{i} ORDER BY key FORMAT TSV"
                )
                assert result == expected_result

    verify_data()

    backup_name = new_backup_name("test_on_cluster")
    node1.query(
        f"BACKUP DATABASE {database_name} ON CLUSTER cluster TO {backup_name} SETTINGS async = false, deduplicate_files = {deduplicate_files};"
    )

    node1.query(f"DROP DATABASE {database_name} ON CLUSTER cluster SYNC;")

    def apply_for_all_nodes(f):
        for node in [node1, node2, node3]:
            f(node)

    def change_keeper_map_prefix(node):
        # Point KeeperMap storage at a different Keeper path, so the restore
        # must actually rewrite the data rather than find it in place.
        node.replace_config(
            "/etc/clickhouse-server/config.d/keeper_map_path_prefix.xml",
            """
<clickhouse>
    <keeper_map_path_prefix>/different_path/keeper_map</keeper_map_path_prefix>
</clickhouse>
""",
        )

    # Config change requires a full server restart on every node.
    apply_for_all_nodes(lambda node: node.stop_clickhouse())
    apply_for_all_nodes(change_keeper_map_prefix)
    apply_for_all_nodes(lambda node: node.start_clickhouse())

    # Restore the whole database under the new prefix and re-verify.
    node1.query(
        f"RESTORE DATABASE {database_name} ON CLUSTER cluster FROM {backup_name} SETTINGS async = false;"
    )
    verify_data()

    # Also verify restoring a single table from the same backup.
    node1.query(f"DROP TABLE {database_name}.keeper3 ON CLUSTER cluster SYNC;")
    node1.query(
        f"RESTORE TABLE {database_name}.keeper3 ON CLUSTER cluster FROM {backup_name} SETTINGS async = false;"
    )
    verify_data()