Add type column in system.disks

This commit is contained in:
Anton Ivashkin 2020-07-03 15:45:01 +03:00
parent 8513e1ec74
commit 23b44ca6fe
9 changed files with 114 additions and 0 deletions

View File

@ -99,6 +99,8 @@ public:
void createHardLink(const String & src_path, const String & dst_path) override;
const String getType() const override { return "local"; }
private:
bool tryReserve(UInt64 bytes);

View File

@ -90,6 +90,8 @@ public:
void createHardLink(const String & src_path, const String & dst_path) override;
const String getType() const override { return "memory"; }
private:
void createDirectoriesImpl(const String & path);
void replaceFileImpl(const String & from_path, const String & to_path);

View File

@ -171,6 +171,9 @@ public:
/// Create hardlink from `src_path` to `dst_path`.
virtual void createHardLink(const String & src_path, const String & dst_path) = 0;
/// Return disk type - "local", "s3", etc.
virtual const String getType() const = 0;
};
using DiskPtr = std::shared_ptr<IDisk>;

View File

@ -96,6 +96,8 @@ public:
void setReadOnly(const String & path) override;
const String getType() const override { return "s3"; }
private:
bool tryReserve(UInt64 bytes);

View File

@ -22,6 +22,7 @@ StorageSystemDisks::StorageSystemDisks(const std::string & name_)
{"free_space", std::make_shared<DataTypeUInt64>()},
{"total_space", std::make_shared<DataTypeUInt64>()},
{"keep_free_space", std::make_shared<DataTypeUInt64>()},
{"type", std::make_shared<DataTypeString>()},
}));
setInMemoryMetadata(storage_metadata);
}
@ -42,6 +43,7 @@ Pipes StorageSystemDisks::read(
MutableColumnPtr col_free = ColumnUInt64::create();
MutableColumnPtr col_total = ColumnUInt64::create();
MutableColumnPtr col_keep = ColumnUInt64::create();
MutableColumnPtr col_type = ColumnString::create();
for (const auto & [disk_name, disk_ptr] : context.getDisksMap())
{
@ -50,6 +52,7 @@ Pipes StorageSystemDisks::read(
col_free->insert(disk_ptr->getAvailableSpace());
col_total->insert(disk_ptr->getTotalSpace());
col_keep->insert(disk_ptr->getKeepingFreeSpace());
col_type->insert(disk_ptr->getType());
}
Columns res_columns;
@ -58,6 +61,7 @@ Pipes StorageSystemDisks::read(
res_columns.emplace_back(std::move(col_free));
res_columns.emplace_back(std::move(col_total));
res_columns.emplace_back(std::move(col_keep));
res_columns.emplace_back(std::move(col_type));
UInt64 num_rows = res_columns.at(0)->size();
Chunk chunk(std::move(res_columns), num_rows);

View File

@ -0,0 +1,42 @@
<?xml version="1.0"?>
<yandex>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
</logger>
<storage_configuration>
<disks>
<disk_s3>
<type>s3</type>
<endpoint>http://minio1:9001/root/data/</endpoint>
<access_key_id>minio</access_key_id>
<secret_access_key>minio123</secret_access_key>
</disk_s3>
<disk_memory>
<type>memory</type>
</disk_memory>
</disks>
</storage_configuration>
<tcp_port>9000</tcp_port>
<listen_host>127.0.0.1</listen_host>
<openSSL>
<client>
<cacheSessions>true</cacheSessions>
<verificationMode>none</verificationMode>
<invalidCertificateHandler>
<name>AcceptCertificateHandler</name>
</invalidCertificateHandler>
</client>
</openSSL>
<max_concurrent_queries>500</max_concurrent_queries>
<mark_cache_size>5368709120</mark_cache_size>
<path>./clickhouse/</path>
<users_config>users.xml</users_config>
</yandex>

View File

@ -0,0 +1,23 @@
<?xml version="1.0"?>
<yandex>
<profiles>
<default>
</default>
</profiles>
<users>
<default>
<password></password>
<networks incl="networks" replace="replace">
<ip>::/0</ip>
</networks>
<profile>default</profile>
<quota>default</quota>
</default>
</users>
<quotas>
<default>
</default>
</quotas>
</yandex>

View File

@ -0,0 +1,36 @@
import pytest
from helpers.cluster import ClickHouseCluster
# Expected mapping of disk name -> value of the new `type` column in system.disks.
# "disk_s3" and "disk_memory" are declared in configs/config.xml; "default" is
# presumably the implicit local-path disk — confirm against server defaults.
disk_types = {
    "default" : "local",
    "disk_s3" : "s3",
    "disk_memory" : "memory",
}
@pytest.fixture(scope="module")
def cluster():
    """Start a single-node ClickHouse cluster (with MinIO for the s3 disk)
    once per test module, and always shut it down afterwards."""
    try:
        ch_cluster = ClickHouseCluster(__file__)
        ch_cluster.add_instance("node", config_dir="configs", with_minio=True)
        ch_cluster.start()
        yield ch_cluster
    finally:
        # Runs on teardown as well as on a failed start.
        ch_cluster.shutdown()
def test_different_types(cluster):
    """Every row of system.disks must report the disk type configured for it."""
    node = cluster.instances["node"]
    response = node.query("SELECT * FROM system.disks")
    for row in response.split("\n"):
        # The trailing newline yields one empty chunk after split — skip it.
        if not row:
            continue
        fields = row.split("\t")
        # `type` is expected to be the sixth column of system.disks.
        assert len(fields) >= 6
        assert disk_types.get(fields[0], "UNKNOWN") == fields[5]
def test_select_by_type(cluster):
    """Filtering system.disks by `type` must return exactly the matching disk name."""
    node = cluster.instances["node"]
    for name, disk_type in disk_types.items():
        query = "SELECT name FROM system.disks WHERE type='{}'".format(disk_type)
        assert node.query(query) == name + "\n"