# pylint: disable=line-too-long
# pylint: disable=unused-argument
# pylint: disable=redefined-outer-name

import pytest

from helpers.cluster import ClickHouseCluster
|
cluster = ClickHouseCluster(__file__)

# node1: overrides the ORDER BY expression of the system log tables.
node1 = cluster.add_instance(
    "node1",
    base_config_dir="configs",
    main_configs=["configs/config.d/system_logs_order_by.xml"],
    stay_alive=True,
)

# node2: overrides the full ENGINE expression of the system log tables;
# disks.xml supplies the storage policies referenced by that engine clause.
node2 = cluster.add_instance(
    "node2",
    base_config_dir="configs",
    main_configs=[
        "configs/config.d/system_logs_engine.xml",
        "configs/config.d/disks.xml",
    ],
    stay_alive=True,
)

# node3: configures system log tables via individual settings rather than
# a single engine expression; also needs the storage policies from disks.xml.
node3 = cluster.add_instance(
    "node3",
    base_config_dir="configs",
    main_configs=[
        "configs/config.d/system_logs_settings.xml",
        "configs/config.d/disks.xml",
    ],
    stay_alive=True,
)
|
@pytest.fixture(scope="module", autouse=True)
def start_cluster():
    """Start the cluster once for the whole module and always shut it down."""
    try:
        cluster.start()
        yield cluster
    finally:
        # Runs even if startup or a test fails, so containers are not leaked.
        cluster.shutdown()
|
|
|
def test_system_logs_order_by_expr(start_cluster):
    """Check that the custom ORDER BY from system_logs_order_by.xml becomes
    the sorting key of system.query_log and system.query_thread_log on node1."""
    node1.query("SET log_query_threads = 1")
    node1.query("SELECT count() FROM system.tables")
    # Force creation/flush of the system log tables before inspecting them.
    node1.query("SYSTEM FLUSH LOGS")

    # Check 'sorting_key' of system.query_log.
    query_log_key = node1.query(
        "SELECT sorting_key FROM system.tables WHERE database='system' and name='query_log'"
    )
    assert query_log_key == "event_date, event_time, initial_query_id\n"

    # Check 'sorting_key' of system.query_thread_log.
    thread_log_key = node1.query(
        "SELECT sorting_key FROM system.tables WHERE database='system' and name='query_thread_log'"
    )
    assert thread_log_key == "event_date, event_time, query_id\n"
|
|
|
def test_system_logs_engine_expr(start_cluster):
    """Check that the full ENGINE expression from system_logs_engine.xml is
    applied to system.query_log on node2."""
    node2.query("SET log_query_threads = 1")
    node2.query("SELECT count() FROM system.tables")
    # Force creation/flush of the system log tables before inspecting them.
    node2.query("SYSTEM FLUSH LOGS")

    # Check 'engine_full' of system.query_log.
    # (Implicit concatenation; the runtime string is unchanged.)
    expected = (
        "MergeTree PARTITION BY event_date ORDER BY event_time"
        " TTL event_date + toIntervalDay(30)"
        " SETTINGS storage_policy = \\'policy2\\', ttl_only_drop_parts = 1"
    )
    engine_full = node2.query(
        "SELECT engine_full FROM system.tables WHERE database='system' and name='query_log'"
    )
    assert expected in engine_full
|
|
|
def test_system_logs_settings_expr(start_cluster):
    """Check that per-setting configuration from system_logs_settings.xml is
    reflected in the engine_full of system.query_log on node3."""
    node3.query("SET log_query_threads = 1")
    node3.query("SELECT count() FROM system.tables")
    # Force creation/flush of the system log tables before inspecting them.
    node3.query("SYSTEM FLUSH LOGS")

    # Check 'engine_full' of system.query_log.
    # NOTE(review): 'storage_policy' appears twice (policy1 and policy2) —
    # presumably the expected outcome of merging the configured settings;
    # confirm against system_logs_settings.xml.
    # (Implicit concatenation; the runtime string is unchanged.)
    expected = (
        "MergeTree PARTITION BY toYYYYMM(event_date)"
        " ORDER BY (event_date, event_time, initial_query_id)"
        " TTL event_date + toIntervalDay(30)"
        " SETTINGS storage_policy = \\'policy1\\', storage_policy = \\'policy2\\',"
        " ttl_only_drop_parts = 1"
    )
    engine_full = node3.query(
        "SELECT engine_full FROM system.tables WHERE database='system' and name='query_log'"
    )
    assert expected in engine_full