diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
index fa8cd018881..6bcbfd8d3a2 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
@@ -51,11 +51,12 @@
 #include
 #include
+#include <unordered_set>
 #include
 #include
 #include
 #include
-#include
+#include

namespace ProfileEvents
@@ -791,6 +792,27 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)

     auto disks = storage_policy->getDisks();

+    if (getStoragePolicy()->getName() != "default")
+    {
+        /// Check extra parts at different disks, in order to not allow to miss data parts at undefined disks.
+        std::unordered_set<String> defined_disk_names;
+        for (const auto & disk_ptr : disks)
+            defined_disk_names.insert(disk_ptr->getName());
+
+        for (auto & [disk_name, disk_ptr] : global_context.getDiskSelector().getDisksMap())
+        {
+            if (defined_disk_names.count(disk_name) == 0 && Poco::File(getFullPathOnDisk(disk_ptr)).exists())
+            {
+                for (Poco::DirectoryIterator it(getFullPathOnDisk(disk_ptr)); it != end; ++it)
+                {
+                    MergeTreePartInfo part_info;
+                    if (MergeTreePartInfo::tryParsePartName(it.name(), &part_info, format_version))
+                        throw Exception("Part " + backQuote(it.name()) + " was found on disk " + backQuote(disk_name) + " which is not defined in the storage policy", ErrorCodes::UNKNOWN_DISK);
+                }
+            }
+        }
+    }
+
     /// Reversed order to load part from low priority disks firstly.
     /// Used for keep part on low priority disk if duplication found
     for (auto disk_it = disks.rbegin(); disk_it != disks.rend(); ++disk_it)
diff --git a/dbms/tests/integration/test_multiple_disks/test.py b/dbms/tests/integration/test_multiple_disks/test.py
index 91ecf5a8b8d..35d3e9abe74 100644
--- a/dbms/tests/integration/test_multiple_disks/test.py
+++ b/dbms/tests/integration/test_multiple_disks/test.py
@@ -1063,6 +1063,7 @@ def test_freeze(start_cluster):

     finally:
         node1.query("DROP TABLE IF EXISTS default.freezing_table")
+        node1.exec_in_container(["rm", "-rf", "/jbod1/shadow", "/external/shadow"])


def test_kill_while_insert(start_cluster):