mirror of https://github.com/ClickHouse/ClickHouse.git
Add test case for 'engine', 'settings' configuration
This commit is contained in: parent 6e8136e6f4, commit 01158e783a
@@ -1,55 +0,0 @@
-<clickhouse>
-    <query_log>
-        <order_by>event_date, event_time, initial_query_id</order_by>
-    </query_log>
-    <query_thread_log>
-        <order_by>event_date, event_time, query_id</order_by>
-    </query_thread_log>
-    <!--
-    <part_log>
-        <order_by>event_date, event_time, event_type</order_by>
-    </part_log>
-    <trace_log>
-        <order_by>event_date, event_time, trace_type</order_by>
-    </trace_log>
-    <crash_log>
-        <order_by>event_date, event_time</order_by>
-    </crash_log>
-    <text_log>
-        <order_by>event_date, event_time, query_id</order_by>
-    </text_log>
-    <metric_log>
-        <order_by>event_date, event_time, event_time_microseconds</order_by>
-    </metric_log>
-    <filesystem_cache_log>
-        <order_by>event_date, event_time</order_by>
-    </filesystem_cache_log>
-    <filesystem_read_prefetches_log>
-        <order_by>event_date, event_time</order_by>
-    </filesystem_read_prefetches_log>
-    <asynchronous_metric_log>
-        <order_by>event_date, event_time, metric</order_by>
-    </asynchronous_metric_log>
-    <opentelemetry_span_log>
-        <order_by>event_date, event_time</order_by>
-    </opentelemetry_span_log>
-    <query_views_log>
-        <order_by>event_date, event_time</order_by>
-    </query_views_log>
-    <zookeeper_log>
-        <order_by>event_date, event_time</order_by>
-    </zookeeper_log>
-    <session_log>
-        <order_by>event_date, event_time</order_by>
-    </session_log>
-    <transactions_info_log>
-        <order_by>event_date, event_time</order_by>
-    </transactions_info_log>
-    <processors_profile_log>
-        <order_by>event_date, event_time</order_by>
-    </processors_profile_log>
-    <asynchronous_insert_log>
-        <order_by>event_date, event_time</order_by>
-    </asynchronous_insert_log>
-    -->
-</clickhouse>
tests/integration/test_system_logs/configs/config.d/disks.xml (new file)
@@ -0,0 +1,28 @@
+<clickhouse>
+    <storage_configuration>
+        <disks>
+            <disk1>
+                <path>/var/lib/clickhouse1/</path>
+            </disk1>
+            <disk2>
+                <path>/var/lib/clickhouse2/</path>
+            </disk2>
+        </disks>
+        <policies>
+            <policy1>
+                <volumes>
+                    <volume1>
+                        <disk>disk1</disk>
+                    </volume1>
+                </volumes>
+            </policy1>
+            <policy2>
+                <volumes>
+                    <volume1>
+                        <disk>disk2</disk>
+                    </volume1>
+                </volumes>
+            </policy2>
+        </policies>
+    </storage_configuration>
+</clickhouse>
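Note (not part of the commit): disks.xml defines two single-disk volumes and wraps each in its own storage policy, policy1 and policy2, which the engine/settings configs below refer to. A minimal sketch of how the loaded policies could be verified, assuming a started instance such as node2 from the test file later in this diff:

    # Sketch only: node2 is defined in test_system_logs.py below.
    policies = node2.query("SELECT DISTINCT policy_name FROM system.storage_policies")
    assert "policy1" in policies and "policy2" in policies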
tests/integration/test_system_logs/configs/system_logs_engine.xml (new file)
@@ -0,0 +1,5 @@
+<clickhouse>
+    <query_log>
+        <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day SETTINGS storage_policy='policy2', ttl_only_drop_parts=1</engine>
+    </query_log>
+</clickhouse>
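Note (not part of the commit): the <engine> element hands ClickHouse the full ENGINE clause as written; the server then reports a normalized rendering of it in system.tables.engine_full, which is what the new test matches against. A sketch of that round trip, reusing node2 from the test file below:

    # Sketch only: the raw "INTERVAL 30 day" from the XML comes back canonicalized.
    engine_full = node2.query(
        "SELECT engine_full FROM system.tables WHERE database='system' AND name='query_log'"
    )
    assert "TTL event_date + toIntervalDay(30)" in engine_full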
tests/integration/test_system_logs/configs/config_order_by.xml (new file)
@@ -0,0 +1,8 @@
+<clickhouse>
+    <query_log>
+        <order_by>event_date, event_time, initial_query_id</order_by>
+    </query_log>
+    <query_thread_log>
+        <order_by>event_date, event_time, query_id</order_by>
+    </query_thread_log>
+</clickhouse>
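Note (not part of the commit): the <order_by> value becomes the sorting key of the corresponding system table, which is how the first test below checks it via system.tables.sorting_key. A sketch, reusing node1 from the test file:

    # Sketch only: query() returns the value with a trailing newline.
    sorting_key = node1.query(
        "SELECT sorting_key FROM system.tables WHERE database='system' AND name='query_log'"
    )
    assert sorting_key == "event_date, event_time, initial_query_id\n"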
tests/integration/test_system_logs/configs/system_logs_settings.xml (new file)
@@ -0,0 +1,10 @@
+<clickhouse>
+    <query_log>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <ttl>event_date + INTERVAL 30 DAY DELETE</ttl>
+        <order_by>event_date, event_time, initial_query_id</order_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <storage_policy>policy1</storage_policy>
+        <settings>storage_policy='policy2', ttl_only_drop_parts=1</settings>
+    </query_log>
+</clickhouse>
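Note (not part of the commit): this config sets the storage policy twice, once through the dedicated <storage_policy> tag (policy1) and once inside <settings> (policy2). The new test expects both occurrences to survive into engine_full; which of the two actually takes effect is not asserted. A sketch, reusing node3 from the test file:

    # Sketch only: both settings are kept, in configuration order.
    engine_full = node3.query(
        "SELECT engine_full FROM system.tables WHERE database='system' AND name='query_log'"
    )
    assert "storage_policy = 'policy1', storage_policy = 'policy2'" in engine_full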
@@ -1,34 +0,0 @@
-# pylint: disable=line-too-long
-# pylint: disable=unused-argument
-# pylint: disable=redefined-outer-name
-
-import pytest
-from helpers.cluster import ClickHouseCluster
-
-cluster = ClickHouseCluster(__file__)
-node = cluster.add_instance(
-    "node_default",
-    main_configs=["configs/config.d/config.xml"],
-    stay_alive=True,
-)
-
-
-@pytest.fixture(scope="module", autouse=True)
-def start_cluster():
-    try:
-        cluster.start()
-        yield cluster
-    finally:
-        cluster.shutdown()
-
-def test_system_logs_order_by_expr(start_cluster):
-    node.query("SET log_query_threads = 1")
-    node.query("SELECT count() FROM system.tables")
-    node.query("SYSTEM FLUSH LOGS")
-
-    # system.query_log
-    assert node.query("SELECT sorting_key FROM system.tables WHERE database='system' and name='query_log'") \
-        == "event_date, event_time, initial_query_id\n"
-    # system.query_thread_log
-    assert node.query("SELECT sorting_key FROM system.tables WHERE database='system' and name='query_thread_log'") \
-        == "event_date, event_time, query_id\n"
tests/integration/test_system_logs/test_system_logs.py (new file, 73 lines)
@@ -0,0 +1,73 @@
+# pylint: disable=line-too-long
+# pylint: disable=unused-argument
+# pylint: disable=redefined-outer-name
+
+import pytest
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+
+node1 = cluster.add_instance(
+    "node1",
+    main_configs=["configs/config_order_by.xml"],
+    stay_alive=True,
+)
+
+node2 = cluster.add_instance(
+    "node2",
+    main_configs=[
+        "configs/system_logs_engine.xml",
+        "configs/config.d/disks.xml",
+    ],
+    stay_alive=True,
+)
+
+node3 = cluster.add_instance(
+    "node3",
+    main_configs=[
+        "configs/system_logs_settings.xml",
+        "configs/config.d/disks.xml",
+    ],
+    stay_alive=True,
+)
+
+@pytest.fixture(scope="module", autouse=True)
+def start_cluster():
+    try:
+        cluster.start()
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+def test_system_logs_order_by_expr(start_cluster):
+    node1.query("SET log_query_threads = 1")
+    node1.query("SELECT count() FROM system.tables")
+    node1.query("SYSTEM FLUSH LOGS")
+
+    # Check 'sorting_key' of system.query_log.
+    assert node1.query("SELECT sorting_key FROM system.tables WHERE database='system' and name='query_log'") \
+        == "event_date, event_time, initial_query_id\n"
+
+    # Check 'sorting_key' of system.query_thread_log.
+    assert node1.query("SELECT sorting_key FROM system.tables WHERE database='system' and name='query_thread_log'") \
+        == "event_date, event_time, query_id\n"
+
+
+def test_system_logs_engine_expr(start_cluster):
+    node2.query("SET log_query_threads = 1")
+    node2.query("SELECT count() FROM system.tables")
+    node2.query("SYSTEM FLUSH LOGS")
+
+    # Check 'engine_full' of system.query_log.
+    expected_result = "MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + toIntervalDay(30) SETTINGS storage_policy = 'policy2', ttl_only_drop_parts = 1"
+    assert expected_result in node2.query("SELECT engine_full FROM system.tables WHERE database='system' and name='query_log'")
+
+
+def test_system_logs_settings_expr(start_cluster):
+    node3.query("SET log_query_threads = 1")
+    node3.query("SELECT count() FROM system.tables")
+    node3.query("SYSTEM FLUSH LOGS")
+
+    # Check 'engine_full' of system.query_log.
+    expected_result = "MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time, initial_query_id) TTL event_date + toIntervalDay(30) SETTINGS storage_policy = 'policy1', storage_policy = 'policy2', ttl_only_drop_parts = 1"
+    assert expected_result in node3.query("SELECT engine_full FROM system.tables WHERE database='system' and name='query_log'")
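Note (not part of the commit): like other ClickHouse integration tests, this suite is normally run through the Docker-based runner under tests/integration, e.g. cd tests/integration && ./runner 'test_system_logs'; the exact invocation and flags depend on the local setup.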