diff --git a/tests/integration/test_system_logs/configs/config.d/config.xml b/tests/integration/test_system_logs/configs/config.d/config.xml
deleted file mode 100644
index 46e60164ce6..00000000000
--- a/tests/integration/test_system_logs/configs/config.d/config.xml
+++ /dev/null
@@ -1,55 +0,0 @@
-
-
- event_date, event_time, initial_query_id
-
-
- event_date, event_time, query_id
-
-
-
diff --git a/tests/integration/test_system_logs/configs/config.d/disks.xml b/tests/integration/test_system_logs/configs/config.d/disks.xml
new file mode 100644
index 00000000000..90a1b110326
--- /dev/null
+++ b/tests/integration/test_system_logs/configs/config.d/disks.xml
@@ -0,0 +1,28 @@
+<clickhouse>
+    <storage_configuration>
+        <disks>
+            <disk1>
+                <path>/var/lib/clickhouse1/</path>
+            </disk1>
+            <disk2>
+                <path>/var/lib/clickhouse2/</path>
+            </disk2>
+        </disks>
+        <policies>
+            <policy1>
+                <volumes>
+                    <volume1>
+                        <disk>disk1</disk>
+                    </volume1>
+                </volumes>
+            </policy1>
+            <policy2>
+                <volumes>
+                    <volume1>
+                        <disk>disk2</disk>
+                    </volume1>
+                </volumes>
+            </policy2>
+        </policies>
+    </storage_configuration>
+</clickhouse>
\ No newline at end of file
diff --git a/tests/integration/test_system_logs/configs/system_logs_engine.xml b/tests/integration/test_system_logs/configs/system_logs_engine.xml
new file mode 100644
index 00000000000..8afb2b84c63
--- /dev/null
+++ b/tests/integration/test_system_logs/configs/system_logs_engine.xml
@@ -0,0 +1,5 @@
+<clickhouse>
+    <query_log>
+        <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day SETTINGS storage_policy='policy2', ttl_only_drop_parts=1</engine>
+    </query_log>
+</clickhouse>
diff --git a/tests/integration/test_system_logs/configs/system_logs_order_by.xml b/tests/integration/test_system_logs/configs/system_logs_order_by.xml
new file mode 100644
index 00000000000..1015f53c06c
--- /dev/null
+++ b/tests/integration/test_system_logs/configs/system_logs_order_by.xml
@@ -0,0 +1,8 @@
+<clickhouse>
+    <query_log>
+        <order_by>event_date, event_time, initial_query_id</order_by>
+    </query_log>
+    <query_thread_log>
+        <order_by>event_date, event_time, query_id</order_by>
+    </query_thread_log>
+</clickhouse>
diff --git a/tests/integration/test_system_logs/configs/system_logs_settings.xml b/tests/integration/test_system_logs/configs/system_logs_settings.xml
new file mode 100644
index 00000000000..a219f8b2fee
--- /dev/null
+++ b/tests/integration/test_system_logs/configs/system_logs_settings.xml
@@ -0,0 +1,10 @@
+<clickhouse>
+    <query_log>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <ttl>event_date + INTERVAL 30 DAY DELETE</ttl>
+        <order_by>event_date, event_time, initial_query_id</order_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <storage_policy>policy1</storage_policy>
+        <settings>storage_policy='policy2', ttl_only_drop_parts=1</settings>
+    </query_log>
+</clickhouse>
diff --git a/tests/integration/test_system_logs/test.py b/tests/integration/test_system_logs/test.py
deleted file mode 100644
index 252b167a262..00000000000
--- a/tests/integration/test_system_logs/test.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# pylint: disable=line-too-long
-# pylint: disable=unused-argument
-# pylint: disable=redefined-outer-name
-
-import pytest
-from helpers.cluster import ClickHouseCluster
-
-cluster = ClickHouseCluster(__file__)
-node = cluster.add_instance(
- "node_default",
- main_configs=["configs/config.d/config.xml"],
- stay_alive=True,
-)
-
-
-@pytest.fixture(scope="module", autouse=True)
-def start_cluster():
- try:
- cluster.start()
- yield cluster
- finally:
- cluster.shutdown()
-
-def test_system_logs_order_by_expr(start_cluster):
- node.query("SET log_query_threads = 1")
- node.query("SELECT count() FROM system.tables")
- node.query("SYSTEM FLUSH LOGS")
-
- # system.query_log
- assert node.query("SELECT sorting_key FROM system.tables WHERE database='system' and name='query_log'")
- == "event_date, event_time, initial_query_id\n"
- # system.query_thread_log
- assert node.query("SELECT sorting_key FROM system.tables WHERE database='system' and name='query_thread_log'")
- == "event_date, event_time, query_id\n"
diff --git a/tests/integration/test_system_logs/test_system_logs.py b/tests/integration/test_system_logs/test_system_logs.py
new file mode 100644
index 00000000000..1636e16a1a8
--- /dev/null
+++ b/tests/integration/test_system_logs/test_system_logs.py
@@ -0,0 +1,73 @@
+# pylint: disable=line-too-long
+# pylint: disable=unused-argument
+# pylint: disable=redefined-outer-name
+
+import pytest
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+
+# node1 checks ORDER BY overrides; config file name must match the file added
+# in this change (configs/system_logs_order_by.xml).
+node1 = cluster.add_instance(
+    "node1",
+    main_configs=["configs/system_logs_order_by.xml"],
+    stay_alive=True,
+)
+
+# node2 checks a full ENGINE override; needs the disks/policies config too.
+node2 = cluster.add_instance(
+    "node2",
+    main_configs=[
+        "configs/system_logs_engine.xml",
+        "configs/config.d/disks.xml",
+    ],
+    stay_alive=True,
+)
+
+# node3 checks per-setting overrides (partition_by, ttl, order_by, ...).
+node3 = cluster.add_instance(
+    "node3",
+    main_configs=[
+        "configs/system_logs_settings.xml",
+        "configs/config.d/disks.xml",
+    ],
+    stay_alive=True,
+)
+
+@pytest.fixture(scope="module", autouse=True)
+def start_cluster():
+    try:
+        cluster.start()
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+def test_system_logs_order_by_expr(start_cluster):
+    node1.query("SET log_query_threads = 1")
+    node1.query("SELECT count() FROM system.tables")
+    node1.query("SYSTEM FLUSH LOGS")
+
+    # Check 'sorting_key' of system.query_log.
+    sorting_key = node1.query("SELECT sorting_key FROM system.tables WHERE database='system' and name='query_log'")
+    assert sorting_key == "event_date, event_time, initial_query_id\n"
+
+    # Check 'sorting_key' of system.query_thread_log.
+    sorting_key = node1.query("SELECT sorting_key FROM system.tables WHERE database='system' and name='query_thread_log'")
+    assert sorting_key == "event_date, event_time, query_id\n"
+
+
+def test_system_logs_engine_expr(start_cluster):
+    node2.query("SET log_query_threads = 1")
+    node2.query("SELECT count() FROM system.tables")
+    node2.query("SYSTEM FLUSH LOGS")
+
+    # Check 'engine_full' of system.query_log.
+    expected_result = "MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + toIntervalDay(30) SETTINGS storage_policy = 'policy2', ttl_only_drop_parts = 1"
+    assert expected_result in node2.query("SELECT engine_full FROM system.tables WHERE database='system' and name='query_log'")
+
+
+def test_system_logs_settings_expr(start_cluster):
+    node3.query("SET log_query_threads = 1")
+    node3.query("SELECT count() FROM system.tables")
+    node3.query("SYSTEM FLUSH LOGS")
+
+    # Check 'engine_full' of system.query_log.
+    expected_result = "MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time, initial_query_id) TTL event_date + toIntervalDay(30) SETTINGS storage_policy = 'policy1', storage_policy = 'policy2', ttl_only_drop_parts = 1"
+    assert expected_result in node3.query("SELECT engine_full FROM system.tables WHERE database='system' and name='query_log'")