Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-12-17 20:02:05 +00:00)
Merge pull request #9410 from excitoon-favorites/attachpart
Fixed `DeleteOnDestroy` logic in `ATTACH PART` and added a few tests
Commit e10cf89896
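Editorial summary (inferred from the hunks below): ClickHouse drops a `delete-on-destroy.txt` marker file into a part directory when that directory should be removed as soon as its in-memory part object is destroyed. The operations that clone a part directory byte for byte (detaching a part, moving a part to the detached folder on another disk, `ATTACH PARTITION FROM` / `REPLACE PARTITION FROM`, and `FREEZE` backups) copied the marker along with the data, so a freshly attached or frozen part could silently delete itself. The fix adds a `disk->removeIfExists(... + DELETE_ON_DESTROY_MARKER_PATH)` call after every such copy, shares the marker path constant across translation units, and downgrades the cross-policy error to a user error. The sketch below is a minimal illustration of the failure mode, not ClickHouse code: `PartHandle`, `clonePart`, and the use of std::filesystem in place of ClickHouse's IDisk interface are all hypothetical.

// Editorial sketch, not ClickHouse code: PartHandle and clonePart are
// hypothetical names, and std::filesystem stands in for the IDisk API.
#include <filesystem>
#include <fstream>
#include <iostream>

namespace fs = std::filesystem;

static const char * DELETE_ON_DESTROY_MARKER_PATH = "delete-on-destroy.txt";

// Stand-in for an in-memory part object: when the marker file is present,
// the part directory is removed as the object is destroyed.
struct PartHandle
{
    fs::path dir;

    ~PartHandle()
    {
        if (fs::exists(dir / DELETE_ON_DESTROY_MARKER_PATH))
            fs::remove_all(dir);
    }
};

// A byte-for-byte clone copies the marker too; stripping it afterwards
// (the removeIfExists() calls added throughout this PR) keeps the clone alive.
void clonePart(const fs::path & src, const fs::path & dst)
{
    fs::copy(src, dst, fs::copy_options::recursive);
    fs::remove(dst / DELETE_ON_DESTROY_MARKER_PATH); // no-op if the file is absent
}

int main()
{
    fs::remove_all("part_src");
    fs::remove_all("part_clone");
    fs::create_directories("part_src");
    std::ofstream(fs::path("part_src") / DELETE_ON_DESTROY_MARKER_PATH).put('\n');

    clonePart("part_src", "part_clone");
    { PartHandle clone{"part_clone"}; } // without the marker removal, deleted here

    std::cout << "clone survives: " << std::boolalpha << fs::exists("part_clone") << '\n';
}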
@@ -31,6 +31,9 @@ namespace ErrorCodes
 }
 
 
+extern const char * DELETE_ON_DESTROY_MARKER_PATH;
+
+
 static std::unique_ptr<ReadBufferFromFileBase> openForReading(const DiskPtr & disk, const String & path)
 {
     return disk->readFile(path, std::min(size_t(DBMS_DEFAULT_BUFFER_SIZE), disk->getFileSize(path)));
@@ -744,6 +747,7 @@ void IMergeTreeDataPart::remove() const
 
         for (const auto & file : {"checksums.txt", "columns.txt"})
             disk->remove(to + "/" + file);
+        disk->removeIfExists(to + "/" + DELETE_ON_DESTROY_MARKER_PATH);
 
         disk->remove(to);
     }
@@ -795,8 +799,11 @@ void IMergeTreeDataPart::makeCloneInDetached(const String & prefix) const
     assertOnDisk();
     LOG_INFO(storage.log, "Detaching " << relative_path);
 
+    String destination_path = storage.relative_data_path + getRelativePathForDetachedPart(prefix);
+
     /// Backup is not recursive (max_level is 0), so do not copy inner directories
-    localBackup(disk, getFullRelativePath(), storage.relative_data_path + getRelativePathForDetachedPart(prefix), 0);
+    localBackup(disk, getFullRelativePath(), destination_path, 0);
+    disk->removeIfExists(destination_path + "/" + DELETE_ON_DESTROY_MARKER_PATH);
 }
 
 void IMergeTreeDataPart::makeCloneOnDiskDetached(const ReservationPtr & reservation) const
@@ -813,6 +820,7 @@ void IMergeTreeDataPart::makeCloneOnDiskDetached(const ReservationPtr & reservation) const
     reserved_disk->createDirectory(path_to_clone);
 
     disk->copy(getFullRelativePath(), reserved_disk, path_to_clone);
+    disk->removeIfExists(path_to_clone + "/" + DELETE_ON_DESTROY_MARKER_PATH);
 }
 
 void IMergeTreeDataPart::checkConsistencyBase() const
@@ -116,10 +116,7 @@ namespace ErrorCodes
 }
 
 
-namespace
-{
-    const char * DELETE_ON_DESTROY_MARKER_PATH = "delete-on-destroy.txt";
-}
+const char * DELETE_ON_DESTROY_MARKER_PATH = "delete-on-destroy.txt";
 
 
 MergeTreeData::MergeTreeData(
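Editorial note on this hunk: the definition moves out of the unnamed namespace because names in an unnamed namespace have internal linkage, so the `extern const char * DELETE_ON_DESTROY_MARKER_PATH;` declaration added in the IMergeTreeDataPart hunks above could never bind to it. At namespace scope the pointer (which is itself non-const) has external linkage, and both translation units share one definition. A minimal two-file sketch, with hypothetical file names:

// a.cpp: definition with external linkage. Wrapping it in
// `namespace { ... }` would give it internal linkage and make the
// reference in b.cpp an undefined symbol at link time.
const char * DELETE_ON_DESTROY_MARKER_PATH = "delete-on-destroy.txt";

// b.cpp: any other translation unit can now name the same object.
#include <cstdio>

extern const char * DELETE_ON_DESTROY_MARKER_PATH;

int main()
{
    std::puts(DELETE_ON_DESTROY_MARKER_PATH);
}

// Build (assumed toolchain): g++ a.cpp b.cpp -o marker_demo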
@@ -3185,7 +3182,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::cloneAndLoadDataPartOnSameDisk(
     }
     if (!does_storage_policy_allow_same_disk)
         throw Exception(
-            "Could not clone and load part " + quoteString(src_part->getFullPath()) + " because disk does not belong to storage policy", ErrorCodes::LOGICAL_ERROR);
+            "Could not clone and load part " + quoteString(src_part->getFullPath()) + " because disk does not belong to storage policy", ErrorCodes::BAD_ARGUMENTS);
 
     String dst_part_name = src_part->getNewName(dst_part_info);
     String tmp_dst_part_name = tmp_part_prefix + dst_part_name;
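Editorial note: the error code changes because this condition is reachable from ordinary user input (attaching or replacing a partition from a table whose storage policy does not include the source part's disk), so it is a `BAD_ARGUMENTS` caller mistake rather than a `LOGICAL_ERROR` broken internal invariant. The integration test below matches on this exact message.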
@@ -3200,6 +3197,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::cloneAndLoadDataPartOnSameDisk(
 
     LOG_DEBUG(log, "Cloning part " << fullPath(disk, src_part_path) << " to " << fullPath(disk, dst_part_path));
     localBackup(disk, src_part_path, dst_part_path);
+    disk->removeIfExists(dst_part_path + "/" + DELETE_ON_DESTROY_MARKER_PATH);
 
     auto dst_data_part = createPart(dst_part_name, dst_part_info, reservation->getDisk(), tmp_dst_part_name);
 
@@ -3285,6 +3283,8 @@ void MergeTreeData::freezePartitionsByMatcher(MatcherFn matcher, const String &
 
         String backup_part_path = backup_path + relative_data_path + part->relative_path;
         localBackup(part->disk, part->getFullRelativePath(), backup_part_path);
+        part->disk->removeIfExists(backup_part_path + "/" + DELETE_ON_DESTROY_MARKER_PATH);
+
         part->is_frozen.store(true, std::memory_order_relaxed);
         ++parts_processed;
     }
@@ -1229,3 +1229,42 @@ def test_move_while_merge(start_cluster):
 
     finally:
         node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
+
+
+def test_move_across_policies_does_not_work(start_cluster):
+    try:
+        name = "test_move_across_policies_does_not_work"
+
+        node1.query("""
+            CREATE TABLE {name} (
+                n Int64
+            ) ENGINE = MergeTree
+            ORDER BY tuple()
+            SETTINGS storage_policy='jbods_with_external'
+        """.format(name=name))
+
+        node1.query("""
+            CREATE TABLE {name}2 (
+                n Int64
+            ) ENGINE = MergeTree
+            ORDER BY tuple()
+            SETTINGS storage_policy='small_jbod_with_external'
+        """.format(name=name))
+
+        node1.query("""INSERT INTO {name} VALUES (1)""".format(name=name))
+        node1.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO DISK 'jbod2'""".format(name=name))
+
+        with pytest.raises(QueryRuntimeException, match='.*because disk does not belong to storage policy.*'):
+            node1.query("""ALTER TABLE {name}2 ATTACH PARTITION tuple() FROM {name}""".format(name=name))
+
+        with pytest.raises(QueryRuntimeException, match='.*because disk does not belong to storage policy.*'):
+            node1.query("""ALTER TABLE {name}2 REPLACE PARTITION tuple() FROM {name}""".format(name=name))
+
+        with pytest.raises(QueryRuntimeException, match='.*should have the same storage policy of source table.*'):
+            node1.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO TABLE {name}2""".format(name=name))
+
+        assert node1.query("""SELECT * FROM {name}""".format(name=name)).splitlines() == ["1"]
+
+    finally:
+        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
+        node1.query("DROP TABLE IF EXISTS {name}2".format(name=name))
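Editorial note: the new test drives all three cross-policy paths (`ATTACH PARTITION ... FROM`, `REPLACE PARTITION ... FROM`, and `MOVE PARTITION ... TO TABLE`), checks that each fails with the expected message, and finally asserts that the source table still returns its row, i.e. that the rejected operations did not destroy the source part.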