mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 15:42:02 +00:00
add tests for merge_workload and mutation_workload settings
This commit is contained in:
parent
ba3343f567
commit
77d2e6d3d8
@ -81,7 +81,10 @@ void StorageSystemServerSettings::fillData(MutableColumns & res_columns, Context
|
||||
{"uncompressed_cache_size", {std::to_string(context->getUncompressedCache()->maxSizeInBytes()), ChangeableWithoutRestart::Yes}},
|
||||
{"index_mark_cache_size", {std::to_string(context->getIndexMarkCache()->maxSizeInBytes()), ChangeableWithoutRestart::Yes}},
|
||||
{"index_uncompressed_cache_size", {std::to_string(context->getIndexUncompressedCache()->maxSizeInBytes()), ChangeableWithoutRestart::Yes}},
|
||||
{"mmap_cache_size", {std::to_string(context->getMMappedFileCache()->maxSizeInBytes()), ChangeableWithoutRestart::Yes}}
|
||||
{"mmap_cache_size", {std::to_string(context->getMMappedFileCache()->maxSizeInBytes()), ChangeableWithoutRestart::Yes}},
|
||||
|
||||
{"merge_workload", {context->getMergeWorkload(), ChangeableWithoutRestart::Yes}},
|
||||
{"mutation_workload", {context->getMutationWorkload(), ChangeableWithoutRestart::Yes}}
|
||||
};
|
||||
|
||||
if (context->areBackgroundExecutorsInitialized())
|
||||
|
3
tests/integration/test_scheduler/configs/resources.xml
Normal file
3
tests/integration/test_scheduler/configs/resources.xml
Normal file
@ -0,0 +1,3 @@
|
||||
<clickhouse>
|
||||
<!-- Will be overwritten by the test -->
|
||||
</clickhouse>
|
@ -0,0 +1,76 @@
|
||||
<clickhouse>
|
||||
<resources>
|
||||
<network_read>
|
||||
<node path="/"> <type>inflight_limit</type><max_cost>1000000</max_cost></node>
|
||||
<node path="/prio"> <type>priority</type></node>
|
||||
<node path="/prio/admin"> <type>fifo</type><priority>0</priority></node>
|
||||
<node path="/prio/fair"> <type>fair</type><priority>1</priority></node>
|
||||
<node path="/prio/fair/prod"> <type>fifo</type><weight>9</weight></node>
|
||||
<node path="/prio/fair/dev"> <type>fifo</type><weight>1</weight></node>
|
||||
<node path="/prio/fair/sys"> <type>fair</type><weight>90</weight></node>
|
||||
<node path="/prio/fair/sys/merges"> <type>fifo</type></node>
|
||||
<node path="/prio/fair/sys/mutations"> <type>fifo</type></node>
|
||||
<node path="/prio/fair/prod_merges"> <type>fifo</type><weight>9</weight></node>
|
||||
<node path="/prio/fair/prod_mutations"> <type>fifo</type><weight>9</weight></node>
|
||||
<node path="/prio/fair/dev_merges"> <type>fifo</type><weight>9</weight></node>
|
||||
<node path="/prio/fair/dev_mutations"> <type>fifo</type><weight>9</weight></node>
|
||||
</network_read>
|
||||
<network_write>
|
||||
<node path="/"> <type>inflight_limit</type><max_cost>1000000</max_cost></node>
|
||||
<node path="/prio"> <type>priority</type></node>
|
||||
<node path="/prio/admin"> <type>fifo</type><priority>0</priority></node>
|
||||
<node path="/prio/fair"> <type>fair</type><priority>1</priority></node>
|
||||
<node path="/prio/fair/prod"> <type>fifo</type><weight>9</weight></node>
|
||||
<node path="/prio/fair/dev"> <type>fifo</type><weight>1</weight></node>
|
||||
<node path="/prio/fair/sys"> <type>fair</type><weight>90</weight></node>
|
||||
<node path="/prio/fair/sys/merges"> <type>fifo</type></node>
|
||||
<node path="/prio/fair/sys/mutations"> <type>fifo</type></node>
|
||||
<node path="/prio/fair/prod_merges"> <type>fifo</type><weight>9</weight></node>
|
||||
<node path="/prio/fair/prod_mutations"> <type>fifo</type><weight>9</weight></node>
|
||||
<node path="/prio/fair/dev_merges"> <type>fifo</type><weight>9</weight></node>
|
||||
<node path="/prio/fair/dev_mutations"> <type>fifo</type><weight>9</weight></node>
|
||||
</network_write>
|
||||
</resources>
|
||||
<workload_classifiers>
|
||||
<admin>
|
||||
<network_read>/prio/admin</network_read>
|
||||
<network_write>/prio/admin</network_write>
|
||||
</admin>
|
||||
<production>
|
||||
<network_read>/prio/fair/prod</network_read>
|
||||
<network_write>/prio/fair/prod</network_write>
|
||||
</production>
|
||||
<development>
|
||||
<network_read>/prio/fair/dev</network_read>
|
||||
<network_write>/prio/fair/dev</network_write>
|
||||
</development>
|
||||
<default>
|
||||
<network_read>/prio/fair/dev</network_read>
|
||||
<network_write>/prio/fair/dev</network_write>
|
||||
</default>
|
||||
<sys_merges>
|
||||
<network_read>/prio/fair/sys/merges</network_read>
|
||||
<network_write>/prio/fair/sys/merges</network_write>
|
||||
</sys_merges>
|
||||
<sys_mutations>
|
||||
<network_read>/prio/fair/sys/mutations</network_read>
|
||||
<network_write>/prio/fair/sys/mutations</network_write>
|
||||
</sys_mutations>
|
||||
<prod_merges>
|
||||
<network_read>/prio/fair/prod_merges</network_read>
|
||||
<network_write>/prio/fair/prod_merges</network_write>
|
||||
</prod_merges>
|
||||
<prod_mutations>
|
||||
<network_read>/prio/fair/prod_mutations</network_read>
|
||||
<network_write>/prio/fair/prod_mutations</network_write>
|
||||
</prod_mutations>
|
||||
<dev_merges>
|
||||
<network_read>/prio/fair/dev_merges</network_read>
|
||||
<network_write>/prio/fair/dev_merges</network_write>
|
||||
</dev_merges>
|
||||
<dev_mutations>
|
||||
<network_read>/prio/fair/dev_mutations</network_read>
|
||||
<network_write>/prio/fair/dev_mutations</network_write>
|
||||
</dev_mutations>
|
||||
</workload_classifiers>
|
||||
</clickhouse>
|
@ -1,62 +0,0 @@
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<s3>
|
||||
<type>s3</type>
|
||||
<endpoint>http://minio1:9001/root/data/</endpoint>
|
||||
<access_key_id>minio</access_key_id>
|
||||
<secret_access_key>minio123</secret_access_key>
|
||||
<s3_max_single_part_upload_size>33554432</s3_max_single_part_upload_size>
|
||||
<s3_max_put_rps>10</s3_max_put_rps>
|
||||
<s3_max_get_rps>10</s3_max_get_rps>
|
||||
<read_resource>network_read</read_resource>
|
||||
<write_resource>network_write</write_resource>
|
||||
</s3>
|
||||
</disks>
|
||||
<policies>
|
||||
<s3>
|
||||
<volumes>
|
||||
<main>
|
||||
<disk>s3</disk>
|
||||
</main>
|
||||
</volumes>
|
||||
</s3>
|
||||
</policies>
|
||||
</storage_configuration>
|
||||
<resources>
|
||||
<network_read>
|
||||
<node path="/"> <type>inflight_limit</type><max_cost>1000000</max_cost></node>
|
||||
<node path="/prio"> <type>priority</type></node>
|
||||
<node path="/prio/admin"> <type>fifo</type><priority>0</priority></node>
|
||||
<node path="/prio/fair"> <type>fair</type><priority>1</priority></node>
|
||||
<node path="/prio/fair/prod"><type>fifo</type><weight>9</weight></node>
|
||||
<node path="/prio/fair/dev"> <type>fifo</type><weight>1</weight></node>
|
||||
</network_read>
|
||||
<network_write>
|
||||
<node path="/"> <type>inflight_limit</type><max_cost>1000000</max_cost></node>
|
||||
<node path="/prio"> <type>priority</type></node>
|
||||
<node path="/prio/admin"> <type>fifo</type><priority>0</priority></node>
|
||||
<node path="/prio/fair"> <type>fair</type><priority>1</priority></node>
|
||||
<node path="/prio/fair/prod"><type>fifo</type><weight>9</weight></node>
|
||||
<node path="/prio/fair/dev"> <type>fifo</type><weight>1</weight></node>
|
||||
</network_write>
|
||||
</resources>
|
||||
<workload_classifiers>
|
||||
<admin>
|
||||
<network_read>/prio/admin</network_read>
|
||||
<network_write>/prio/admin</network_write>
|
||||
</admin>
|
||||
<production>
|
||||
<network_read>/prio/fair/prod</network_read>
|
||||
<network_write>/prio/fair/prod</network_write>
|
||||
</production>
|
||||
<development>
|
||||
<network_read>/prio/fair/dev</network_read>
|
||||
<network_write>/prio/fair/dev</network_write>
|
||||
</development>
|
||||
<default>
|
||||
<network_read>/prio/fair/dev</network_read>
|
||||
<network_write>/prio/fair/dev</network_write>
|
||||
</default>
|
||||
</workload_classifiers>
|
||||
</clickhouse>
|
@ -0,0 +1,26 @@
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<s3>
|
||||
<type>s3</type>
|
||||
<endpoint>http://minio1:9001/root/data/</endpoint>
|
||||
<access_key_id>minio</access_key_id>
|
||||
<secret_access_key>minio123</secret_access_key>
|
||||
<s3_max_single_part_upload_size>33554432</s3_max_single_part_upload_size>
|
||||
<s3_max_put_rps>10</s3_max_put_rps>
|
||||
<s3_max_get_rps>10</s3_max_get_rps>
|
||||
<read_resource>network_read</read_resource>
|
||||
<write_resource>network_write</write_resource>
|
||||
</s3>
|
||||
</disks>
|
||||
<policies>
|
||||
<s3>
|
||||
<volumes>
|
||||
<main>
|
||||
<disk>s3</disk>
|
||||
</main>
|
||||
</volumes>
|
||||
</s3>
|
||||
</policies>
|
||||
</storage_configuration>
|
||||
</clickhouse>
|
3
tests/integration/test_scheduler/configs/workloads.xml
Normal file
3
tests/integration/test_scheduler/configs/workloads.xml
Normal file
@ -0,0 +1,3 @@
|
||||
<clickhouse>
|
||||
<!-- Will be overwritten by the test -->
|
||||
</clickhouse>
|
@ -0,0 +1,4 @@
|
||||
<clickhouse>
|
||||
<merge_workload>sys_merges</merge_workload>
|
||||
<mutation_workload>sys_mutations</mutation_workload>
|
||||
</clickhouse>
|
@ -13,7 +13,13 @@ cluster = ClickHouseCluster(__file__)
|
||||
node = cluster.add_instance(
|
||||
"node",
|
||||
stay_alive=True,
|
||||
main_configs=["configs/scheduler.xml"],
|
||||
main_configs=[
|
||||
"configs/storage_configuration.xml",
|
||||
"configs/resources.xml",
|
||||
"configs/resources.xml.default",
|
||||
"configs/workloads.xml",
|
||||
"configs/workloads.xml.default",
|
||||
],
|
||||
with_minio=True,
|
||||
)
|
||||
|
||||
@ -27,6 +33,41 @@ def start_cluster():
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
@pytest.fixture(scope="function", autouse=True)
def set_default_configs():
    """Restore the default resources/workloads configs and reload before every test.

    Tests overwrite these files (see update_workloads_config), so each test
    starts from the pristine *.default copies shipped with the instance.
    """
    for config in ("resources.xml", "workloads.xml"):
        node.exec_in_container(
            [
                "bash",
                "-c",
                f"cp /etc/clickhouse-server/config.d/{config}.default /etc/clickhouse-server/config.d/{config}",
            ]
        )
    node.query("system reload config")
    yield
|
||||
|
||||
|
||||
def update_workloads_config(**settings):
    """Overwrite workloads.xml on the node and reload the server config.

    Each keyword argument becomes one XML element, e.g.
    ``merge_workload="prod_merges"`` -> ``<merge_workload>prod_merges</merge_workload>``.
    The fixture set_default_configs restores the file between tests.
    """
    # Build the inner XML from the keyword arguments (was a manual += loop
    # followed by a leftover debug print(xml), now removed).
    xml = "".join(f"<{name}>{value}</{name}>" for name, value in settings.items())
    node.exec_in_container(
        [
            "bash",
            "-c",
            f"echo '<clickhouse>{xml}</clickhouse>' > /etc/clickhouse-server/config.d/workloads.xml",
        ]
    )
    node.query("system reload config")
|
||||
|
||||
|
||||
def test_s3_disk():
|
||||
node.query(
|
||||
f"""
|
||||
@ -110,3 +151,175 @@ def test_s3_disk():
|
||||
)
|
||||
== "1\n"
|
||||
)
|
||||
|
||||
|
||||
def test_merge_workload():
    """Merges on a table with no per-table setting should pass through the
    server-wide merge workload's scheduler queues (/prio/fair/sys/merges)."""
    node.query(
        f"""
        drop table if exists data;
        create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3';
        """
    )

    def dequeued(resource):
        # dequeued_requests counter of the sys merges queue for this resource.
        return int(
            node.query(
                f"select dequeued_requests from system.scheduler where resource='{resource}' and path='/prio/fair/sys/merges'"
            ).strip()
        )

    reads_before = dequeued("network_read")
    writes_before = dequeued("network_write")

    node.query("insert into data select * from numbers(1e4)")
    node.query("insert into data select * from numbers(2e4)")
    node.query("insert into data select * from numbers(3e4)")
    node.query("optimize table data final")

    reads_after = dequeued("network_read")
    writes_after = dequeued("network_write")

    assert reads_before < reads_after
    assert writes_before < writes_after
|
||||
|
||||
|
||||
def test_merge_workload_override():
    """Per-table merge_workload settings should route each table's merge IO
    through its own workload queues (prod_merges vs dev_merges)."""
    node.query(
        f"""
        drop table if exists prod_data;
        drop table if exists dev_data;
        create table prod_data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3', merge_workload='prod_merges';
        create table dev_data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3', merge_workload='dev_merges';
        """
    )

    def dequeued(resource, env):
        # dequeued_requests counter of the env-specific merges queue.
        return int(
            node.query(
                f"select dequeued_requests from system.scheduler where resource='{resource}' and path='/prio/fair/{env}_merges'"
            ).strip()
        )

    prod_reads_before = dequeued("network_read", "prod")
    prod_writes_before = dequeued("network_write", "prod")
    dev_reads_before = dequeued("network_read", "dev")
    dev_writes_before = dequeued("network_write", "dev")

    for table in ("prod_data", "dev_data"):
        node.query(f"insert into {table} select * from numbers(1e4)")
        node.query(f"insert into {table} select * from numbers(2e4)")
        node.query(f"insert into {table} select * from numbers(3e4)")
    node.query("optimize table prod_data final")
    node.query("optimize table dev_data final")

    assert prod_reads_before < dequeued("network_read", "prod")
    assert prod_writes_before < dequeued("network_write", "prod")
    assert dev_reads_before < dequeued("network_read", "dev")
    assert dev_writes_before < dequeued("network_write", "dev")
|
||||
|
||||
|
||||
def test_mutate_workload():
    """Mutations on a table with no per-table setting should pass through the
    server-wide mutation workload's queues (/prio/fair/sys/mutations)."""
    node.query(
        f"""
        drop table if exists data;
        create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3';
        """
    )

    # Populate and compact so the subsequent ALTER UPDATE has a part to rewrite.
    node.query("insert into data select * from numbers(1e4)")
    node.query("optimize table data final")

    def dequeued(resource):
        # dequeued_requests counter of the sys mutations queue for this resource.
        return int(
            node.query(
                f"select dequeued_requests from system.scheduler where resource='{resource}' and path='/prio/fair/sys/mutations'"
            ).strip()
        )

    reads_before = dequeued("network_read")
    writes_before = dequeued("network_write")

    node.query("alter table data update key = 1 where key = 42")
    node.query("optimize table data final")

    reads_after = dequeued("network_read")
    writes_after = dequeued("network_write")

    assert reads_before < reads_after
    assert writes_before < writes_after
|
||||
|
||||
|
||||
def test_mutation_workload_override():
    """Per-table mutation_workload settings should route each table's mutation
    IO through its own workload queues (prod_mutations vs dev_mutations)."""
    node.query(
        f"""
        drop table if exists prod_data;
        drop table if exists dev_data;
        create table prod_data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3', mutation_workload='prod_mutations';
        create table dev_data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3', mutation_workload='dev_mutations';
        """
    )

    # Populate and compact each table before measuring mutation traffic.
    for table in ("prod_data", "dev_data"):
        node.query(f"insert into {table} select * from numbers(1e4)")
        node.query(f"optimize table {table} final")

    def dequeued(resource, env):
        # dequeued_requests counter of the env-specific mutations queue.
        return int(
            node.query(
                f"select dequeued_requests from system.scheduler where resource='{resource}' and path='/prio/fair/{env}_mutations'"
            ).strip()
        )

    prod_reads_before = dequeued("network_read", "prod")
    prod_writes_before = dequeued("network_write", "prod")
    dev_reads_before = dequeued("network_read", "dev")
    dev_writes_before = dequeued("network_write", "dev")

    for table in ("prod_data", "dev_data"):
        node.query(f"alter table {table} update key = 1 where key = 42")
        node.query(f"optimize table {table} final")

    assert prod_reads_before < dequeued("network_read", "prod")
    assert prod_writes_before < dequeued("network_write", "prod")
    assert dev_reads_before < dequeued("network_read", "dev")
    assert dev_writes_before < dequeued("network_write", "dev")
|
||||
|
||||
|
||||
def test_merge_workload_change():
    """Changing the server-wide merge_workload at runtime (config reload)
    should redirect merge IO to the newly configured workload's queues."""
    node.query(
        f"""
        drop table if exists data;
        create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3';
        """
    )

    for env in ("prod", "dev"):
        update_workloads_config(merge_workload=f"{env}_merges")

        def dequeued(resource):
            # dequeued_requests counter of this env's merges queue.
            return int(
                node.query(
                    f"select dequeued_requests from system.scheduler where resource='{resource}' and path='/prio/fair/{env}_merges'"
                ).strip()
            )

        reads_before = dequeued("network_read")
        writes_before = dequeued("network_write")

        node.query("insert into data select * from numbers(1e4)")
        node.query("insert into data select * from numbers(2e4)")
        node.query("insert into data select * from numbers(3e4)")
        node.query("optimize table data final")

        reads_after = dequeued("network_read")
        writes_after = dequeued("network_write")

        assert reads_before < reads_after
        assert writes_before < writes_after
|
||||
|
||||
def test_mutation_workload_change():
    """Changing the server-wide mutation_workload at runtime (config reload)
    should redirect mutation IO to the newly configured workload's queues.

    Fix: removed a leftover debugging ``breakpoint()`` call that would hang
    any non-interactive (CI) test run waiting for pdb input.
    """
    node.query(
        f"""
        drop table if exists data;
        create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3';
        """
    )

    for env in ["prod", "dev"]:
        update_workloads_config(mutation_workload=f"{env}_mutations")

        # Populate and compact so the ALTER UPDATE below has a part to rewrite.
        node.query(f"insert into data select * from numbers(1e4)")
        node.query(f"optimize table data final")

        reads_before = int(node.query(f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/{env}_mutations'").strip())
        writes_before = int(node.query(f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/{env}_mutations'").strip())

        node.query(f"alter table data update key = 1 where key = 42")
        node.query(f"optimize table data final")

        reads_after = int(node.query(f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/{env}_mutations'").strip())
        writes_after = int(node.query(f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/{env}_mutations'").strip())

        # Mutation traffic must have flowed through the env-specific queues.
        assert reads_before < reads_after
        assert writes_before < writes_after
|
||||
|
Loading…
Reference in New Issue
Block a user