Mirror of https://github.com/ClickHouse/ClickHouse.git
Fix Zero Copy after merge master
commit 8ed4a5de62
parent 39a30b77fe
@@ -1227,7 +1227,7 @@ void IMergeTreeDataPart::projectionRemove(const String & parent_to, bool keep_s3
             "Cannot quickly remove directory {} by removing files; fallback to recursive removal. Reason: checksums.txt is missing",
             fullPath(disk, to));
         /// If the part is not completely written, we cannot use fast path by listing files.
-        disk->removeRecursive(to + "/", keep_s3);
+        disk->removeSharedRecursive(to + "/", keep_s3);
     }
     else
     {
@@ -1240,17 +1240,17 @@ void IMergeTreeDataPart::projectionRemove(const String & parent_to, bool keep_s3
 #    pragma GCC diagnostic ignored "-Wunused-variable"
 #endif
             for (const auto & [file, _] : checksums.files)
-                disk->removeFile(to + "/" + file, keep_s3);
+                disk->removeSharedFile(to + "/" + file, keep_s3);
 #if !defined(__clang__)
 #    pragma GCC diagnostic pop
 #endif

             for (const auto & file : {"checksums.txt", "columns.txt"})
-                disk->removeFile(to + "/" + file);
-            disk->removeFileIfExists(to + "/" + DEFAULT_COMPRESSION_CODEC_FILE_NAME, keep_s3);
-            disk->removeFileIfExists(to + "/" + DELETE_ON_DESTROY_MARKER_FILE_NAME, keep_s3);
+                disk->removeSharedFile(to + "/" + file, keep_s3);
+            disk->removeSharedFileIfExists(to + "/" + DEFAULT_COMPRESSION_CODEC_FILE_NAME, keep_s3);
+            disk->removeSharedFileIfExists(to + "/" + DELETE_ON_DESTROY_MARKER_FILE_NAME, keep_s3);

-            disk->removeDirectory(to);
+            disk->removeSharedRecursive(to, keep_s3);
         }
         catch (...)
         {
@@ -1258,7 +1258,7 @@ void IMergeTreeDataPart::projectionRemove(const String & parent_to, bool keep_s3

             LOG_ERROR(storage.log, "Cannot quickly remove directory {} by removing files; fallback to recursive removal. Reason: {}", fullPath(disk, to), getCurrentExceptionMessage(false));

-            disk->removeRecursive(to + "/", keep_s3);
+            disk->removeSharedRecursive(to + "/", keep_s3);
         }
     }
 }
@@ -136,7 +136,7 @@ public:

     void remove(bool keep_s3 = false) const;

-    void projectionRemove(const String & parent_to) const;
+    void projectionRemove(const String & parent_to, bool keep_s3 = false) const;

     /// Initialize columns (from columns.txt if exists, or create from column files if not).
     /// Load checksums from checksums.txt if exists. Load index if required.
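A note on the reasoning behind the C++ hunks above: with zero-copy replication over S3, several replicas can reference the same remote blobs, so removing a projection directory must go through the "shared" removal calls that take a keep_s3 flag and spare the remote data when another replica still needs it. The header change threads that flag into projectionRemove(). Below is a minimal sketch of the pattern using a hypothetical toy disk class, not ClickHouse's actual IDisk.

#include <iostream>
#include <string>

/// Hypothetical illustration only; not ClickHouse's real IDisk interface.
struct ToyDisk
{
    /// Plain removal: drops the local entry and the remote (S3) blob.
    void removeFile(const std::string & path)
    {
        removeLocalEntry(path);
        removeRemoteBlob(path);
    }

    /// "Shared" removal: when keep_s3 is true only the local entry goes away;
    /// the remote blob survives because another replica may still reference it.
    void removeSharedFile(const std::string & path, bool keep_s3)
    {
        removeLocalEntry(path);
        if (!keep_s3)
            removeRemoteBlob(path);
    }

private:
    void removeLocalEntry(const std::string & path) { std::cout << "unlink local " << path << '\n'; }
    void removeRemoteBlob(const std::string & path) { std::cout << "delete remote blob for " << path << '\n'; }
};

int main()
{
    ToyDisk disk;
    /// keep_s3 = true: this replica drops the part, the shared S3 data stays.
    disk.removeSharedFile("store/all_1_1_0/checksums.txt", /*keep_s3=*/ true);
}

In the same spirit, the diff replaces every removeFile/removeRecursive/removeDirectory call inside projectionRemove() with its removeShared* counterpart and passes keep_s3 down from the new parameter.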
@@ -27,7 +27,7 @@ def cluster():
         cluster.shutdown()


-def get_large_objects_count(cluster, folder='data', size=100):
+def get_large_objects_count(cluster, size=100, folder='data'):
     minio = cluster.minio_client
     counter = 0
     for obj in minio.list_objects(cluster.minio_bucket, '{}/'.format(folder)):
@@ -38,11 +38,11 @@ def get_large_objects_count(cluster, folder='data', size=100):

 def wait_for_large_objects_count(cluster, expected, size=100, timeout=30):
     while timeout > 0:
-        if get_large_objects_count(cluster, size) == expected:
+        if get_large_objects_count(cluster, size=size) == expected:
             return
         timeout -= 1
         time.sleep(1)
-    assert get_large_objects_count(cluster, size) == expected
+    assert get_large_objects_count(cluster, size=size) == expected


 @pytest.mark.parametrize(
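For context on the test tweak: with the old signature get_large_objects_count(cluster, folder='data', size=100), the positional call get_large_objects_count(cluster, size) bound the size value to the folder parameter, so the helper counted objects under the wrong prefix with the default size threshold. Reordering the parameters and passing size=size at the call sites removes the ambiguity. A small standalone sketch of the same pitfall, with stubbed helpers and hypothetical names:

# Standalone illustration of the argument-binding pitfall fixed above; the MinIO
# listing is stubbed out, only the parameter binding matters here.
def get_large_objects_count_old(cluster, folder='data', size=100):
    return "folder={!r}, size={}".format(folder, size)

def get_large_objects_count_new(cluster, size=100, folder='data'):
    return "folder={!r}, size={}".format(folder, size)

size = 0
# Old signature + positional call: the size value lands in 'folder'.
print(get_large_objects_count_old(None, size))       # folder=0, size=100
# Fixed call site: the keyword makes the intent explicit with either signature.
print(get_large_objects_count_new(None, size=size))  # folder='data', size=0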