diff --git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py
index 56166046253..51daf6d37e8 100644
--- a/tests/integration/test_backup_restore_new/test.py
+++ b/tests/integration/test_backup_restore_new/test.py
@@ -45,9 +45,9 @@ def new_backup_name():
     return f"Disk('backups', '{backup_id_counter}/')"
 
 
-def get_backup_dir(backup_name):
-    counter = int(backup_name.split(",")[1].strip("')/ "))
-    return os.path.join(instance.path, f"backups/{counter}")
+def get_path_to_backup(backup_name):
+    name = backup_name.split(",")[1].strip("')/ ")
+    return os.path.join(instance.cluster.instances_dir, "backups", name)
 
 
 @pytest.mark.parametrize(
@@ -158,14 +158,18 @@ def test_incremental_backup_after_renaming_table():
 
     # Files in a base backup can be searched by checksum, so an incremental backup with a renamed table actually
     # contains only its changed metadata.
-    assert os.path.isdir(os.path.join(get_backup_dir(backup_name), "metadata")) == True
-    assert os.path.isdir(os.path.join(get_backup_dir(backup_name), "data")) == True
     assert (
-        os.path.isdir(os.path.join(get_backup_dir(incremental_backup_name), "metadata"))
+        os.path.isdir(os.path.join(get_path_to_backup(backup_name), "metadata")) == True
+    )
+    assert os.path.isdir(os.path.join(get_path_to_backup(backup_name), "data")) == True
+    assert (
+        os.path.isdir(
+            os.path.join(get_path_to_backup(incremental_backup_name), "metadata")
+        )
         == True
     )
     assert (
-        os.path.isdir(os.path.join(get_backup_dir(incremental_backup_name), "data"))
+        os.path.isdir(os.path.join(get_path_to_backup(incremental_backup_name), "data"))
         == False
     )
 
@@ -222,14 +226,12 @@ def test_database():
 
 
 def test_zip_archive():
-    backup_name = f"File('/backups/archive.zip')"
+    backup_name = f"Disk('backups', 'archive.zip')"
     create_and_fill_table()
 
     assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n"
     instance.query(f"BACKUP TABLE test.table TO {backup_name}")
-    assert os.path.isfile(
-        os.path.join(os.path.join(instance.path, "backups/archive.zip"))
-    )
+    assert os.path.isfile(get_path_to_backup(backup_name))
 
     instance.query("DROP TABLE test.table")
     assert instance.query("EXISTS test.table") == "0\n"
@@ -239,7 +241,7 @@
 
 
 def test_zip_archive_with_settings():
-    backup_name = f"File('/backups/archive_with_settings.zip')"
+    backup_name = f"Disk('backups', 'archive_with_settings.zip')"
     create_and_fill_table()
 
     assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n"
diff --git a/tests/integration/test_backup_restore_on_cluster/test.py b/tests/integration/test_backup_restore_on_cluster/test.py
index 7ddbe035558..36ec02bb770 100644
--- a/tests/integration/test_backup_restore_on_cluster/test.py
+++ b/tests/integration/test_backup_restore_on_cluster/test.py
@@ -50,15 +50,12 @@ backup_id_counter = 0
 def new_backup_name():
     global backup_id_counter
     backup_id_counter += 1
-    return f"Disk('backups', '{backup_id_counter}.zip')"
+    return f"Disk('backups', '{backup_id_counter}')"
 
 
-def get_path_to_backup(instance, backup_name):
-    return os.path.join(
-        instance.path,
-        "backups",
-        backup_name.removeprefix("Disk('backups', '").removesuffix("')"),
-    )
+def get_path_to_backup(backup_name):
+    name = backup_name.split(",")[1].strip("')/ ")
+    return os.path.join(instance.cluster.instances_dir, "backups", name)
 
 
 def test_replicated_table():
@@ -78,7 +75,7 @@ def test_replicated_table():
 
     # Make backup on node 1.
     node1.query(
-        f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name} SETTINGS replica=1"
+        f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name} SETTINGS replica_num=1"
     )
 
     # Drop table on both nodes.
@@ -114,7 +111,7 @@ def test_replicated_database():
     # Make backup.
     backup_name = new_backup_name()
     node1.query(
-        f"BACKUP DATABASE mydb ON CLUSTER 'cluster' TO {backup_name} SETTINGS replica=2"
+        f"BACKUP DATABASE mydb ON CLUSTER 'cluster' TO {backup_name} SETTINGS replica_num=2"
     )
 
     # Drop table on both nodes.