Merge pull request #7833 from excitoon-favorites/detachedparts
Added disk info to `system.detached_parts`
Commit 47e499d71b
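The change threads disk information end to end: `MergeTreeData::getDetachedParts()` now iterates each data path together with the disk it belongs to, `DetachedPartInfo` records the disk name, and `system.detached_parts` exposes it as a new `disk` column between `name` and `reason`. An integration test and an updated reference output cover the new behaviour.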
@@ -2933,7 +2933,7 @@ MergeTreeData::getDetachedParts() const
 {
     std::vector<DetachedPartInfo> res;
 
-    for (const String & path : getDataPaths())
+    for (const auto & [path, disk] : getDataPathsWithDisks())
     {
         for (Poco::DirectoryIterator it(path + "detached");
             it != Poco::DirectoryIterator(); ++it)
@@ -2944,6 +2944,7 @@ MergeTreeData::getDetachedParts() const
             auto & part = res.back();
 
             DetachedPartInfo::tryParseDetachedPartName(dir_name, part, format_version);
+            part.disk = disk->getName();
         }
     }
     return res;
@@ -3332,6 +3333,15 @@ Strings MergeTreeData::getDataPaths() const
     return res;
 }
 
+MergeTreeData::PathsWithDisks MergeTreeData::getDataPathsWithDisks() const
+{
+    PathsWithDisks res;
+    auto disks = storage_policy->getDisks();
+    for (const auto & disk : disks)
+        res.emplace_back(getFullPathOnDisk(disk), disk);
+    return res;
+}
+
 void MergeTreeData::freezePartitionsByMatcher(MatcherFn matcher, const String & with_name, const Context & context)
 {
     String clickhouse_path = Poco::Path(context.getPath()).makeAbsolute().toString();
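The rewritten loop in `getDetachedParts()` consumes these (path, disk) pairs with C++17 structured bindings. A minimal self-contained sketch of the same pattern, with a hypothetical FakeDisk standing in for DiskSpace::Disk (illustration only, not ClickHouse code):

#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Stand-in for DiskSpace::Disk; only the getName() accessor is needed here.
struct FakeDisk
{
    std::string name;
    const std::string & getName() const { return name; }
};
using FakeDiskPtr = std::shared_ptr<FakeDisk>;

// Mirrors MergeTreeData::PathsWithDisks: each data path paired with its disk.
using PathsWithDisks = std::vector<std::pair<std::string, FakeDiskPtr>>;

int main()
{
    PathsWithDisks paths_with_disks = {
        {"/jbod1/data/", std::make_shared<FakeDisk>(FakeDisk{"jbod1"})},
        {"/external/data/", std::make_shared<FakeDisk>(FakeDisk{"external"})},
    };

    // Structured bindings unpack each pair, so the loop body can tag
    // whatever it finds under `path` with the name of the disk it lives on.
    for (const auto & [path, disk] : paths_with_disks)
        std::cout << path << " -> " << disk->getName() << '\n';
}

Pairing each path with its disk up front is what lets the directory scan stamp every detached part with disk->getName() directly, with no reverse lookup from path to disk.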
@@ -669,6 +669,10 @@ public:
 
     Strings getDataPaths() const override;
 
+    using PathWithDisk = std::pair<String, DiskSpace::DiskPtr>;
+    using PathsWithDisks = std::vector<PathWithDisk>;
+    PathsWithDisks getDataPathsWithDisks() const;
+
     /// Reserves space at least 1MB
     DiskSpace::ReservationPtr reserveSpace(UInt64 expected_size);
 
@@ -95,6 +95,8 @@ struct DetachedPartInfo : public MergeTreePartInfo
     String dir_name;
     String prefix;
 
+    String disk;
+
     /// If false, MergeTreePartInfo is in invalid state (directory name was not successfully parsed).
     bool valid_name;
 
@@ -35,6 +35,7 @@ protected:
         {"table", std::make_shared<DataTypeString>()},
         {"partition_id", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeString>())},
         {"name", std::make_shared<DataTypeString>()},
+        {"disk", std::make_shared<DataTypeString>()},
         {"reason", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeString>())},
         {"min_block_number", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeInt64>())},
         {"max_block_number", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeInt64>())},
@@ -66,6 +67,7 @@ protected:
             new_columns[i++]->insert(info.table);
             new_columns[i++]->insert(p.valid_name ? p.partition_id : Field());
             new_columns[i++]->insert(p.dir_name);
+            new_columns[i++]->insert(p.disk);
             new_columns[i++]->insert(p.valid_name ? p.prefix : Field());
             new_columns[i++]->insert(p.valid_name ? p.min_block : Field());
             new_columns[i++]->insert(p.valid_name ? p.max_block : Field());
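These two hunks have to change in lockstep: the column header list and the new_columns[i++]->insert(...) sequence are matched purely by position, so the disk entry goes in at the same index in both (right after name). A toy sketch of that positional coupling, using plain std::string values instead of the real column types (hypothetical, simplified):

#include <cassert>
#include <string>
#include <vector>

int main()
{
    // Header names in declaration order, as in the NamesAndTypes list.
    const std::vector<std::string> names = {"table", "partition_id", "name", "disk", "reason"};

    // One row, appended strictly in the same order, like new_columns[i++]->insert(...).
    std::vector<std::string> row;
    row.push_back("detach_attach_mt"); // table
    row.push_back("all");              // partition_id
    row.push_back("all_1_2_1");        // name
    row.push_back("jbod1");            // disk, the new entry, same position as in `names`
    row.push_back("");                 // reason

    // Reordering one list without the other would silently shift every value
    // under the wrong header; the sizes at least must always agree.
    assert(row.size() == names.size());
}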
@@ -768,12 +768,42 @@ def test_concurrent_alter_move_and_drop(start_cluster, name, engine):
         node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
 
 
+@pytest.mark.parametrize("name,engine", [
+    ("detach_attach_mt","MergeTree()"),
+    ("replicated_detach_attach_mt","ReplicatedMergeTree('/clickhouse/replicated_detach_attach_mt', '1')",),
+])
+def test_detach_attach(start_cluster, name, engine):
+    try:
+        node1.query("""
+            CREATE TABLE {name} (
+                s1 String
+            ) ENGINE = {engine}
+            ORDER BY tuple()
+            SETTINGS storage_policy='moving_jbod_with_external'
+        """.format(name=name, engine=engine))
+
+        data = []  # 5MB in total
+        for i in range(5):
+            data.append(get_random_string(1024 * 1024))  # 1MB row
+        node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
+
+        node1.query("ALTER TABLE {} DETACH PARTITION tuple()".format(name))
+        assert node1.query("SELECT count() FROM {}".format(name)).strip() == "0"
+
+        assert node1.query("SELECT disk FROM system.detached_parts WHERE table = '{}'".format(name)).strip() == "jbod1"
+
+        node1.query("ALTER TABLE {} ATTACH PARTITION tuple()".format(name))
+        assert node1.query("SELECT count() FROM {}".format(name)).strip() == "5"
+
+    finally:
+        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
+
+
 @pytest.mark.parametrize("name,engine", [
     ("mutating_mt","MergeTree()"),
     ("replicated_mutating_mt","ReplicatedMergeTree('/clickhouse/replicated_mutating_mt', '1')",),
 ])
 def test_mutate_to_another_disk(start_cluster, name, engine):
 
     try:
         node1.query("""
             CREATE TABLE {name} (
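The new test stores about 5 MB under the `moving_jbod_with_external` storage policy, detaches the only partition, and asserts that `system.detached_parts` reports the disk the parts were on (`jbod1`) before re-attaching the partition and checking that all five rows come back.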
@@ -9,7 +9,7 @@ Sum before DETACH PARTITION:
 Sum after DETACH PARTITION:
 0
 system.detached_parts after DETACH PARTITION:
-default not_partitioned all all_1_2_1 1 2 1
+default not_partitioned all all_1_2_1 default 1 2 1
 *** Partitioned by week ***
 Parts before OPTIMIZE:
 1999-12-27 19991227_1_1_0
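In the updated reference line, the extra `default` between the part name `all_1_2_1` and the block numbers is the new `disk` column; the table in this test resides on the `default` disk.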