Cleanup on every iteration.

This commit is contained in:
Nikolai Kochetov 2023-07-31 19:47:23 +00:00
parent 584e23da0c
commit 453463ba22

View File

@ -125,9 +125,6 @@ void localBackup(
size_t try_no = 0;
const size_t max_tries = 10;
CleanupOnFail cleanup(disk_transaction ? std::function<void()>([]{}) :
[disk, destination_path]() { disk->removeRecursive(destination_path); });
/** Files in the directory can be permanently added and deleted.
* If some file is deleted during an attempt to make a backup, then try again,
* because it's important to take into account any new files that might appear.
@ -136,10 +133,30 @@ void localBackup(
{
try
{
if (copy_instead_of_hardlinks && !disk_transaction)
disk->copyDirectoryContent(source_path, disk, destination_path);
else
if (disk_transaction)
{
localBackupImpl(disk, disk_transaction.get(), source_path, destination_path, make_source_readonly, 0, max_level, copy_instead_of_hardlinks, files_to_copy_intead_of_hardlinks);
}
else if (copy_instead_of_hardlinks)
{
CleanupOnFail cleanup([disk, destination_path]() { disk->removeRecursive(destination_path); });
disk->copyDirectoryContent(source_path, disk, destination_path);
cleanup.success();
}
else
{
std::function<void()> cleaner;
if (disk->supportZeroCopyReplication())
/// Note: this code will create garbage on s3. We should always remove `copy_instead_of_hardlinks` files.
/// The third argument should be a list of exceptions, but (looks like) it is ignored for keep_all_shared_data = true.
cleaner = [disk, destination_path]() { disk->removeSharedRecursive(destination_path, /*keep_all_shared_data*/ true, {}); };
else
cleaner = [disk, destination_path]() { disk->removeRecursive(destination_path); };
CleanupOnFail cleanup(std::move(cleaner));
localBackupImpl(disk, disk_transaction.get(), source_path, destination_path, make_source_readonly, 0, max_level, false, files_to_copy_intead_of_hardlinks);
cleanup.success();
}
}
catch (const DB::ErrnoException & e)
{
@ -166,8 +183,6 @@ void localBackup(
break;
}
cleanup.success();
}
}