Merge pull request #11029 from azat/system_tables_lazy_load

Disable system_tables_lazy_load by default
alexey-milovidov 2020-05-20 21:15:09 +03:00 committed by GitHub
commit 07924f0ae1
9 changed files with 52 additions and 42 deletions
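
The server config key system_tables_lazy_load controls whether the system.*_log tables (query_log, query_thread_log, trace_log, metric_log and, if configured, part_log and text_log) are created at server startup or only when the first entries are flushed into them. This commit flips the compiled-in default from true to false, so the log tables now exist as soon as the server is up. A minimal sketch of the observable difference, written with the same tests/integration helpers this PR touches; node names are illustrative and configs/config_lazy.xml is the override file added later in this diff:

# Sketch, not part of the diff: contrasts the new eager default with the old lazy behaviour.
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node_eager = cluster.add_instance('node_eager')   # new default: log tables created at startup
node_lazy = cluster.add_instance('node_lazy', main_configs=['configs/config_lazy.xml'])  # old behaviour re-enabled

def check_lazy_vs_eager():
    cluster.start()
    try:
        # Eager node: system.query_log exists (empty) right after startup.
        node_eager.query('SELECT count() FROM system.query_log')
        # Lazy node: the table is only created on the first flush, so this errors out.
        error = node_lazy.query_and_get_error('SELECT count() FROM system.query_log')
        assert "Table system.query_log doesn't exist" in error
    finally:
        cluster.shutdown()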

View File

@@ -405,6 +405,9 @@
         </prometheus>
     -->

+    <!-- Lazy system.*_log table creation -->
+    <!-- <system_tables_lazy_load>false</system_tables_lazy_load> -->
+
     <!-- Query log. Used only for queries with setting log_queries = 1. -->
     <query_log>
         <!-- What table to insert data. If table is not exist, it will be created.

View File

@@ -97,7 +97,7 @@ SystemLogs::SystemLogs(Context & global_context, const Poco::Util::AbstractConfiguration & config)
     if (metric_log)
         logs.emplace_back(metric_log.get());

-    bool lazy_load = config.getBool("system_tables_lazy_load", true);
+    bool lazy_load = config.getBool("system_tables_lazy_load", false);

     try
     {
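
At the server level the whole change is this one default: config.getBool reads the top-level <system_tables_lazy_load> key from the merged server configuration and falls back to its second argument when the key is absent, which is now false (create the system.*_log tables eagerly at startup). A rough Python rendering of that lookup; the config path and the boolean parsing here are illustrative, Poco's parser is the authority:

# Sketch only: mimics config.getBool("system_tables_lazy_load", false)
# against a local config.xml; path and value parsing are illustrative.
import xml.etree.ElementTree as ET

def system_tables_lazy_load(config_path='config.xml'):
    root = ET.parse(config_path).getroot()           # <yandex> root element
    node = root.find('system_tables_lazy_load')
    if node is None or node.text is None:
        return False                                  # new compiled-in default: eager creation
    return node.text.strip().lower() in ('1', 'true', 'yes', 'on')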

View File

@@ -1,6 +1,6 @@
 <yandex>
     <part_log>
-        <database>database_name</database>
-        <table>table_name</table>
+        <database>database_name</database> <!-- ignored -->
+        <table>own_part_log</table>
     </part_log>
 </yandex>
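
This fixture config gives the part log a distinctive table name. The new <!-- ignored --> note reflects that the log always ends up in the system database regardless of <database>, which is why the updated test below reads system.own_part_log. A minimal check, assuming a node3 started with this config as in the test that follows:

# Assumes node3 uses the override above: the table is created as
# system.own_part_log and the <database>database_name</database> value is ignored.
node3.query("SYSTEM FLUSH LOGS")
node3.query("SELECT * FROM system.own_part_log")   # resolves; empty until parts are logged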

View File

@@ -21,22 +21,21 @@ def test_config_without_part_log(start_cluster):
     node1.query("CREATE TABLE test_table(word String, value UInt64) ENGINE=MergeTree() ORDER BY value")
     assert "Table system.part_log doesn't exist" in node1.query_and_get_error("SELECT * FROM system.part_log")
     node1.query("INSERT INTO test_table VALUES ('name', 1)")
-    time.sleep(10)
+    node1.query("SYSTEM FLUSH LOGS")
     assert "Table system.part_log doesn't exist" in node1.query_and_get_error("SELECT * FROM system.part_log")

 def test_config_with_standard_part_log(start_cluster):
-    assert "Table system.part_log doesn't exist" in node2.query_and_get_error("SELECT * FROM system.part_log")
+    assert node2.query("SELECT * FROM system.part_log") == ''
     node2.query("CREATE TABLE test_table(word String, value UInt64) ENGINE=MergeTree() Order by value")
-    assert "Table system.part_log doesn't exist" in node2.query_and_get_error("SELECT * FROM system.part_log")
+    assert node2.query("SELECT * FROM system.part_log") == ''
     node2.query("INSERT INTO test_table VALUES ('name', 1)")
-    time.sleep(10)
-    assert node2.query("SELECT * FROM system.part_log") != ""
+    node2.query("SYSTEM FLUSH LOGS")
+    assert int(node2.query("SELECT count() FROM system.part_log")) == 1

 def test_config_with_non_standard_part_log(start_cluster):
-    assert "Table system.table_name doesn't exist" in node3.query_and_get_error("SELECT * FROM system.table_name")
+    assert node3.query("SELECT * FROM system.own_part_log") == ''
     node3.query("CREATE TABLE test_table(word String, value UInt64) ENGINE=MergeTree() Order by value")
-    assert "Table system.table_name doesn't exist" in node3.query_and_get_error("SELECT * FROM system.table_name")
+    assert node3.query("SELECT * FROM system.own_part_log") == ''
     node3.query("INSERT INTO test_table VALUES ('name', 1)")
-    time.sleep(10)
-    assert node3.query("SELECT * FROM system.table_name") != ""
+    node3.query("SYSTEM FLUSH LOGS")
+    assert int(node3.query("SELECT count() FROM system.own_part_log")) == 1
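
The substance of the test change: the fixed time.sleep(10), which waited for the periodic background flush, is replaced by SYSTEM FLUSH LOGS, which forces the in-memory log buffers into the *_log tables immediately, and the loose non-empty checks become exact row counts. The resulting pattern, sketched with a placeholder node handle:

# Sketch of the flush-then-count pattern used above (node stands in for node2/node3).
node.query("INSERT INTO test_table VALUES ('name', 1)")
node.query("SYSTEM FLUSH LOGS")                       # deterministic, no sleep needed
assert int(node.query("SELECT count() FROM system.part_log")) == 1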

View File

@@ -30,7 +30,7 @@ def test_sophisticated_default(started_cluster):

 def test_partially_dropped_tables(started_cluster):
     instance = started_cluster.instances['dummy']
-    assert instance.exec_in_container(['bash', '-c', 'find /var/lib/clickhouse -name *.sql* | sort'], privileged=True, user='root') \
+    assert instance.exec_in_container(['bash', '-c', 'find /var/lib/clickhouse/*/default -name *.sql* | sort'], privileged=True, user='root') \
         == "/var/lib/clickhouse/metadata/default/should_be_restored.sql\n" \
            "/var/lib/clickhouse/metadata/default/sophisticated_default.sql\n"
     assert instance.query("SELECT n FROM should_be_restored") == "1\n2\n3\n"
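
This hunk is collateral from the new default: with the log tables created eagerly, their metadata appears under the system database directory at startup, so the old unrestricted find over /var/lib/clickhouse would no longer return only the two default-database .sql files. Narrowing the glob to /var/lib/clickhouse/*/default keeps the assertion scoped to the default database. Roughly; the paths in the comment are illustrative, not captured output:

# Illustrative only: with system_tables_lazy_load disabled, an unrestricted find
# would now also match system-log metadata such as
#   /var/lib/clickhouse/metadata/system/query_log.sql
# so the test limits itself to the `default` database directories.
instance.exec_in_container(
    ['bash', '-c', 'find /var/lib/clickhouse/*/default -name *.sql* | sort'],
    privileged=True, user='root')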

View File

@@ -0,0 +1,4 @@
+<?xml version="1.0"?>
+<yandex>
+    <system_tables_lazy_load>true</system_tables_lazy_load>
+</yandex>

View File

@@ -0,0 +1,32 @@
+import pytest
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+node_default = cluster.add_instance('node_default')
+# main_configs is mandatory, since system_tables_lazy_load is read earlier than config_lazy.xml would otherwise be parsed
+node_lazy = cluster.add_instance('node_lazy', config_dir='configs', main_configs=['configs/config_lazy.xml'])
+
+system_logs = [
+    # disabled by default
+    # ('system.part_log'),
+    # ('system.text_log'),
+    # enabled by default
+    ('system.query_log'),
+    ('system.query_thread_log'),
+    ('system.trace_log'),
+    ('system.metric_log'),
+]
+
+@pytest.fixture(scope='module')
+def start_cluster():
+    try:
+        cluster.start()
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+@pytest.mark.parametrize('table', system_logs)
+def test_system_table(start_cluster, table):
+    node_default.query('SELECT * FROM {}'.format(table))
+    assert "Table {} doesn't exist".format(table) in node_lazy.query_and_get_error('SELECT * FROM {}'.format(table))
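
part_log and text_log are commented out of the list because they are created only when explicitly configured, so even with eager loading a default server never has them; the four logs that are enabled by default are the ones whose startup creation this test pins down. A sketch of the complementary check against the default node, reusing the fixtures above; it is not part of this diff:

# Sketch, not part of the diff: logs that are not enabled in the server config
# (part_log, text_log on a default server) are never created, eager load or not.
def test_disabled_logs_absent(start_cluster):
    for table in ('system.part_log', 'system.text_log'):
        error = node_default.query_and_get_error('SELECT * FROM {}'.format(table))
        assert "Table {} doesn't exist".format(table) in error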

View File

@ -1,4 +0,0 @@
<?xml version="1.0"?>
<yandex>
<system_tables_lazy_load>false</system_tables_lazy_load>
</yandex>

View File

@ -1,24 +0,0 @@
import time
import pytest
import os
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', config_dir="configs")
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_system_tables_non_lazy_load(start_cluster):
assert node1.query_and_get_error("SELECT * FROM system.part_log") == ""
assert node1.query_and_get_error("SELECT * FROM system.query_log") == ""
assert node1.query_and_get_error("SELECT * FROM system.query_thread_log") == ""
assert node1.query_and_get_error("SELECT * FROM system.text_log") == ""
assert node1.query_and_get_error("SELECT * FROM system.trace_log") == ""
assert node1.query_and_get_error("SELECT * FROM system.metric_log") == ""