mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-25 17:12:03 +00:00
Merge pull request #8118 from excitoon-favorites/scan_all_disks
Check for extra parts of `MergeTree` on different disks, in order not to silently miss data parts located on disks undefined in the storage policy
This commit is contained in:
commit
1adc3cfb88
@ -51,11 +51,12 @@
|
|||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <iomanip>
|
#include <iomanip>
|
||||||
|
#include <optional>
|
||||||
#include <set>
|
#include <set>
|
||||||
#include <thread>
|
#include <thread>
|
||||||
#include <typeinfo>
|
#include <typeinfo>
|
||||||
#include <typeindex>
|
#include <typeindex>
|
||||||
#include <optional>
|
#include <unordered_set>
|
||||||
|
|
||||||
|
|
||||||
namespace ProfileEvents
|
namespace ProfileEvents
|
||||||
@ -791,6 +792,27 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
|||||||
|
|
||||||
auto disks = storage_policy->getDisks();
|
auto disks = storage_policy->getDisks();
|
||||||
|
|
||||||
|
if (getStoragePolicy()->getName() != "default")
|
||||||
|
{
|
||||||
|
/// Check extra parts at different disks, in order to not allow to miss data parts at undefined disks.
|
||||||
|
std::unordered_set<String> defined_disk_names;
|
||||||
|
for (const auto & disk_ptr : disks)
|
||||||
|
defined_disk_names.insert(disk_ptr->getName());
|
||||||
|
|
||||||
|
for (auto & [disk_name, disk_ptr] : global_context.getDiskSelector().getDisksMap())
|
||||||
|
{
|
||||||
|
if (defined_disk_names.count(disk_name) == 0 && Poco::File(getFullPathOnDisk(disk_ptr)).exists())
|
||||||
|
{
|
||||||
|
for (Poco::DirectoryIterator it(getFullPathOnDisk(disk_ptr)); it != end; ++it)
|
||||||
|
{
|
||||||
|
MergeTreePartInfo part_info;
|
||||||
|
if (MergeTreePartInfo::tryParsePartName(it.name(), &part_info, format_version))
|
||||||
|
throw Exception("Part " + backQuote(it.name()) + " was found on disk " + backQuote(disk_name) + " which is not defined in the storage policy", ErrorCodes::UNKNOWN_DISK);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Reversed order to load part from low priority disks firstly.
|
/// Reversed order to load part from low priority disks firstly.
|
||||||
/// Used for keep part on low priority disk if duplication found
|
/// Used for keep part on low priority disk if duplication found
|
||||||
for (auto disk_it = disks.rbegin(); disk_it != disks.rend(); ++disk_it)
|
for (auto disk_it = disks.rbegin(); disk_it != disks.rend(); ++disk_it)
|
||||||
|
@ -1063,6 +1063,7 @@ def test_freeze(start_cluster):
|
|||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS default.freezing_table")
|
node1.query("DROP TABLE IF EXISTS default.freezing_table")
|
||||||
|
node1.exec_in_container(["rm", "-rf", "/jbod1/shadow", "/external/shadow"])
|
||||||
|
|
||||||
|
|
||||||
def test_kill_while_insert(start_cluster):
|
def test_kill_while_insert(start_cluster):
|
||||||
|
Loading…
Reference in New Issue
Block a user