From e39c4cdf7121a4e766ddebd777ee9979b06ec004 Mon Sep 17 00:00:00 2001
From: Mikhail Filimonov
Date: Thu, 28 May 2020 19:41:57 +0200
Subject: [PATCH 01/14] Add _timestamp_ms virtual columns to Kafka engine

---
 src/Storages/Kafka/KafkaBlockInputStream.cpp | 12 ++++---
 src/Storages/Kafka/StorageKafka.cpp          |  5 ++-
 tests/integration/test_storage_kafka/test.py | 34 ++++++++++----------
 3 files changed, 28 insertions(+), 23 deletions(-)

diff --git a/src/Storages/Kafka/KafkaBlockInputStream.cpp b/src/Storages/Kafka/KafkaBlockInputStream.cpp
index a2403e66c50..6ae7e2606b6 100644
--- a/src/Storages/Kafka/KafkaBlockInputStream.cpp
+++ b/src/Storages/Kafka/KafkaBlockInputStream.cpp
@@ -19,8 +19,8 @@ KafkaBlockInputStream::KafkaBlockInputStream(
     , column_names(columns)
     , max_block_size(max_block_size_)
     , commit_in_suffix(commit_in_suffix_)
-    , non_virtual_header(storage.getSampleBlockNonMaterialized()) /// FIXME: add materialized columns support
-    , virtual_header(storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition", "_timestamp"}))
+    , non_virtual_header(storage.getSampleBlockNonMaterialized())
+    , virtual_header(storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition", "_timestamp","_timestamp_ms"}))

 {
     context.setSetting("input_format_skip_unknown_fields", 1u); // Always skip unknown fields regardless of the context (JSON or TSKV)
@@ -141,8 +141,7 @@ Block KafkaBlockInputStream::readImpl()
         auto offset = buffer->currentOffset();
         auto partition = buffer->currentPartition();
         auto timestamp_raw = buffer->currentTimestamp();
-        auto timestamp = timestamp_raw ? std::chrono::duration_cast<std::chrono::seconds>(timestamp_raw->get_timestamp()).count()
-                                       : 0;
+
         for (size_t i = 0; i < new_rows; ++i)
         {
             virtual_columns[0]->insert(topic);
@@ -151,11 +150,14 @@ Block KafkaBlockInputStream::readImpl()
             virtual_columns[3]->insert(partition);
             if (timestamp_raw)
             {
-                virtual_columns[4]->insert(timestamp);
+                auto ts = timestamp_raw->get_timestamp();
+                virtual_columns[4]->insert(std::chrono::duration_cast<std::chrono::seconds>(ts).count());
+                virtual_columns[5]->insert(DecimalField<Decimal64>(std::chrono::duration_cast<std::chrono::milliseconds>(ts).count(),3));
             }
             else
             {
                 virtual_columns[4]->insertDefault();
+                virtual_columns[5]->insertDefault();
             }
         }
diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp
index 7731cf3c06a..c2d26dfa300 100644
--- a/src/Storages/Kafka/StorageKafka.cpp
+++ b/src/Storages/Kafka/StorageKafka.cpp
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include <DataTypes/DataTypeDateTime64.h>
 #include
 #include
 #include
@@ -36,6 +37,7 @@
 #include

+
 namespace DB
 {
@@ -724,7 +726,8 @@ NamesAndTypesList StorageKafka::getVirtuals() const
         {"_key", std::make_shared<DataTypeString>()},
         {"_offset", std::make_shared<DataTypeUInt64>()},
         {"_partition", std::make_shared<DataTypeUInt64>()},
-        {"_timestamp", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeDateTime>())}
+        {"_timestamp", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeDateTime>())},
+        {"_timestamp_ms", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeDateTime64>(3))}
     };
 }
diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py
index 13577864870..d89684e2131 100644
--- a/tests/integration/test_storage_kafka/test.py
+++ b/tests/integration/test_storage_kafka/test.py
@@ -840,28 +840,28 @@ def test_kafka_virtual_columns2(kafka_cluster):
                      kafka_format = 'JSONEachRow';

         CREATE MATERIALIZED VIEW test.view Engine=Log AS
-        SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
+        SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms) FROM test.kafka;
         ''')

     producer = KafkaProducer(bootstrap_servers="localhost:9092")

-    producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801000)
-    producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802000)
+    producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001)
+    producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002)
     producer.flush()
     time.sleep(1)

-    producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803000)
-    producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804000)
+    producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003)
+    producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004)
     producer.flush()
     time.sleep(1)

-    producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805000)
-    producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806000)
+    producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
+    producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
     producer.flush()
     time.sleep(1)

-    producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807000)
-    producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808000)
+    producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
+    producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
     producer.flush()

     time.sleep(10)

@@ -869,14 +869,14 @@ def test_kafka_virtual_columns2(kafka_cluster):
     result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)

     expected = '''\
-1 k1 virt2_0 0 0 1577836801
-2 k2 virt2_0 0 1 1577836802
-3 k3 virt2_0 1 0 1577836803
-4 k4 virt2_0 1 1 1577836804
-5 k5 virt2_1 0 0 1577836805
-6 k6 virt2_1 0 1 1577836806
-7 k7 virt2_1 1 0 1577836807
-8 k8 virt2_1 1 1 1577836808
+1 k1 virt2_0 0 0 1577836801 1577836801001
+2 k2 virt2_0 0 1 1577836802 1577836802002
+3 k3 virt2_0 1 0 1577836803 1577836803003
+4 k4 virt2_0 1 1 1577836804 1577836804004
+5 k5 virt2_1 0 0 1577836805 1577836805005
+6 k6 virt2_1 0 1 1577836806 1577836806006
+7 k7 virt2_1 1 0 1577836807 1577836807007
+8 k8 virt2_1 1 1 1577836808 1577836808008
'''

     assert TSV(result) == TSV(expected)
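Usage note: the patch itself only touches engine internals and an integration test, so for orientation, here is a minimal sketch of reading the new column from SQL. The broker address, topic, and consumer-group names below are illustrative placeholders, not values taken from the patch:

    CREATE TABLE test.kafka_queue (value UInt64)
    ENGINE = Kafka
    SETTINGS kafka_broker_list = 'kafka:9092',
             kafka_topic_list = 'events',
             kafka_group_name = 'example_group',
             kafka_format = 'JSONEachRow';

    -- _timestamp is Nullable(DateTime) with second precision;
    -- _timestamp_ms is Nullable(DateTime64(3)) and preserves the milliseconds.
    SELECT value, _timestamp, _timestamp_ms, toUnixTimestamp64Milli(_timestamp_ms) AS ms
    FROM test.kafka_queue;

Selecting from a Kafka table directly consumes the messages, which is why the test above routes them through a materialized view instead.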
From 7211af9404c47a527f74ae8f4c5c3c256f0d6319 Mon Sep 17 00:00:00 2001
From: Mikhail Filimonov
Date: Thu, 28 May 2020 20:19:10 +0200
Subject: [PATCH 02/14] Fixing style

---
 src/Storages/Kafka/StorageKafka.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp
index c2d26dfa300..d1f350b02a6 100644
--- a/src/Storages/Kafka/StorageKafka.cpp
+++ b/src/Storages/Kafka/StorageKafka.cpp
@@ -37,7 +37,6 @@
 #include

-
 namespace DB
 {
From 86959ca97f36217c60f3c5b8a86161793736e460 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Fri, 29 May 2020 01:23:29 +0300
Subject: [PATCH 03/14] Addition to #11184

---
 tests/config/log_queries.xml | 7 -------
 1 file changed, 7 deletions(-)
 delete mode 100644 tests/config/log_queries.xml

diff --git a/tests/config/log_queries.xml b/tests/config/log_queries.xml
deleted file mode 100644
index 25261072ade..00000000000
--- a/tests/config/log_queries.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<yandex>
-    <profiles>
-        <default>
-            <log_queries>1</log_queries>
-        </default>
-    </profiles>
-</yandex>

From b885337454edf2f690535c3e76f455a357c0bc89 Mon Sep 17 00:00:00 2001
From: Mikhail Filimonov
Date: Fri, 29 May 2020 10:44:10 +0200
Subject: [PATCH 04/14] Virtual columns for accessing kafka message headers

---
 src/Storages/Kafka/KafkaBlockInputStream.cpp | 20 ++++++++++++--
 .../Kafka/ReadBufferFromKafkaConsumer.h      |  1 +
 src/Storages/Kafka/StorageKafka.cpp          |  5 +++-
 tests/integration/test_storage_kafka/test.py | 26 +++++++++----------
 4 files changed, 36 insertions(+), 16 deletions(-)

diff --git a/src/Storages/Kafka/KafkaBlockInputStream.cpp b/src/Storages/Kafka/KafkaBlockInputStream.cpp
index 6ae7e2606b6..3e4533f8bb2 100644
--- a/src/Storages/Kafka/KafkaBlockInputStream.cpp
+++ b/src/Storages/Kafka/KafkaBlockInputStream.cpp
@@ -20,8 +20,7 @@ KafkaBlockInputStream::KafkaBlockInputStream(
     , max_block_size(max_block_size_)
     , commit_in_suffix(commit_in_suffix_)
     , non_virtual_header(storage.getSampleBlockNonMaterialized())
-    , virtual_header(storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition", "_timestamp","_timestamp_ms"}))
-
+    , virtual_header(storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition", "_timestamp","_timestamp_ms","_headers.name","_headers.value"}))
 {
     context.setSetting("input_format_skip_unknown_fields", 1u); // Always skip unknown fields regardless of the context (JSON or TSKV)
     context.setSetting("input_format_allow_errors_ratio", 0.);
@@ -141,6 +140,21 @@ Block KafkaBlockInputStream::readImpl()
         auto offset = buffer->currentOffset();
         auto partition = buffer->currentPartition();
         auto timestamp_raw = buffer->currentTimestamp();
+        auto header_list = buffer->currentHeaderList();
+
+        Array headers_names;
+        Array headers_values;
+
+        if (!header_list.empty())
+        {
+            headers_names.reserve(header_list.size());
+            headers_values.reserve(header_list.size());
+            for (const auto & header : header_list)
+            {
+                headers_names.emplace_back(header.get_name());
+                headers_values.emplace_back(static_cast<std::string>(header.get_value()));
+            }
+        }

         for (size_t i = 0; i < new_rows; ++i)
         {
@@ -159,6 +173,8 @@ Block KafkaBlockInputStream::readImpl()
                 virtual_columns[4]->insertDefault();
                 virtual_columns[5]->insertDefault();
             }
+            virtual_columns[6]->insert(headers_names);
+            virtual_columns[7]->insert(headers_values);
         }

         total_rows = total_rows + new_rows;
diff --git a/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h b/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h
index e90e3b48881..7449f58c838 100644
--- a/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h
+++ b/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h
@@ -49,6 +49,7 @@ public:
     auto currentOffset() const { return current[-1].get_offset(); }
     auto currentPartition() const { return current[-1].get_partition(); }
     auto currentTimestamp() const { return current[-1].get_timestamp(); }
+    const auto & currentHeaderList() const { return current[-1].get_header_list(); }

 private:
     using Messages = std::vector<cppkafka::Message>;
diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp
index d1f350b02a6..d1014fdb0f8 100644
--- a/src/Storages/Kafka/StorageKafka.cpp
+++ b/src/Storages/Kafka/StorageKafka.cpp
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <DataTypes/DataTypeArray.h>
 #include
 #include
 #include
@@ -726,7 +727,9 @@ NamesAndTypesList StorageKafka::getVirtuals() const
         {"_offset", std::make_shared<DataTypeUInt64>()},
         {"_partition", std::make_shared<DataTypeUInt64>()},
         {"_timestamp", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeDateTime>())},
-        {"_timestamp_ms", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeDateTime64>(3))}
+        {"_timestamp_ms", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeDateTime64>(3))},
+        {"_headers.name", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
+        {"_headers.value", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}
     };
 }
diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py
index d89684e2131..82b409aa85e 100644
--- a/tests/integration/test_storage_kafka/test.py
+++ b/tests/integration/test_storage_kafka/test.py
@@ -840,18 +840,18 @@ def test_kafka_virtual_columns2(kafka_cluster):
                      kafka_format = 'JSONEachRow';

         CREATE MATERIALIZED VIEW test.view Engine=Log AS
-        SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms) FROM test.kafka;
+        SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
         ''')

     producer = KafkaProducer(bootstrap_servers="localhost:9092")

-    producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001)
-    producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002)
+    producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001, headers=[('content-encoding', b'base64')])
+    producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002, headers=[('empty_value', ''),('', 'empty name'), ('',''), ('repetition', '1'), ('repetition', '2')])
     producer.flush()
     time.sleep(1)

-    producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003)
-    producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004)
+    producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003, headers=[('b', 'b'),('a', 'a')])
+    producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004, headers=[('a', 'a'),('b', 'b')])
     producer.flush()
     time.sleep(1)

@@ -869,14 +869,14 @@ def test_kafka_virtual_columns2(kafka_cluster):
     result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)

     expected = '''\
-1 k1 virt2_0 0 0 1577836801 1577836801001
-2 k2 virt2_0 0 1 1577836802 1577836802002
-3 k3 virt2_0 1 0 1577836803 1577836803003
-4 k4 virt2_0 1 1 1577836804 1577836804004
-5 k5 virt2_1 0 0 1577836805 1577836805005
-6 k6 virt2_1 0 1 1577836806 1577836806006
-7 k7 virt2_1 1 0 1577836807 1577836807007
-8 k8 virt2_1 1 1 1577836808 1577836808008
+1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
+2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
+3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
+4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
+5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
+6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
+7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
+8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''

     assert TSV(result) == TSV(expected)
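The two header columns arrive as parallel arrays of equal length, and names may repeat (the test above exercises exactly that), so a specific header has to be fetched by position. A sketch of one way to do the lookup, reusing the test's test.kafka table and the 'content-encoding' header it sends:

    -- indexOf returns 0 when the name is absent, and indexing an
    -- Array(String) at position 0 yields the type's default value ''.
    SELECT
        value,
        _headers.name,
        _headers.value,
        _headers.value[indexOf(_headers.name, 'content-encoding')] AS content_encoding
    FROM test.kafka;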
From 07205519b5b2649665700ebb13daabeb87bd7181 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Fri, 29 May 2020 16:31:27 +0300
Subject: [PATCH 05/14] Addition to #11184

---
 docker/test/stateful/Dockerfile            | 2 --
 docker/test/stateful_with_coverage/run.sh  | 2 --
 docker/test/stateless/Dockerfile           | 2 --
 docker/test/stateless_with_coverage/run.sh | 2 --
 docker/test/stress/Dockerfile              | 1 -
 tests/config/metric_log.xml                | 8 --------
 6 files changed, 17 deletions(-)
 delete mode 100644 tests/config/metric_log.xml

diff --git a/docker/test/stateful/Dockerfile b/docker/test/stateful/Dockerfile
index e51efadf653..ace9e0d46a5 100644
--- a/docker/test/stateful/Dockerfile
+++ b/docker/test/stateful/Dockerfile
@@ -24,8 +24,6 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
     ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
     ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
     ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
     ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
     ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
     ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
diff --git a/docker/test/stateful_with_coverage/run.sh b/docker/test/stateful_with_coverage/run.sh
index b946f5b187d..5530aadb4ca 100755
--- a/docker/test/stateful_with_coverage/run.sh
+++ b/docker/test/stateful_with_coverage/run.sh
@@ -59,9 +59,7 @@ ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/con
 ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
 ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
 ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
 ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
 ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
 ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
 ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile
index 35a8a5a9d3d..ad64e2e9880 100644
--- a/docker/test/stateless/Dockerfile
+++ b/docker/test/stateless/Dockerfile
@@ -62,9 +62,7 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
     ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
     ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
     ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
     ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
-    ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
     ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
     ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \
     ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
diff --git a/docker/test/stateless_with_coverage/run.sh b/docker/test/stateless_with_coverage/run.sh
index 185dc95c783..12ed7a25b75 100755
--- a/docker/test/stateless_with_coverage/run.sh
+++ b/docker/test/stateless_with_coverage/run.sh
@@ -50,9 +50,7 @@ ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/con
 ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
 ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
 ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
 ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
 ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
 ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \
 ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
diff --git a/docker/test/stress/Dockerfile b/docker/test/stress/Dockerfile
index a5aa3bbf004..66f5135a4a4 100644
--- a/docker/test/stress/Dockerfile
+++ b/docker/test/stress/Dockerfile
@@ -31,7 +31,6 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
     dpkg -i package_folder/clickhouse-server_*.deb; \
     dpkg -i package_folder/clickhouse-client_*.deb; \
     dpkg -i package_folder/clickhouse-test_*.deb; \
-    ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
     ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
     ln -s /usr/lib/llvm-9/bin/llvm-symbolizer /usr/bin/llvm-symbolizer; \
     echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_modules=1 verbosity=1'" >> /etc/environment; \
diff --git a/tests/config/metric_log.xml b/tests/config/metric_log.xml
deleted file mode 100644
index 0ca9f162416..00000000000
--- a/tests/config/metric_log.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-<yandex>
-    <metric_log>
-        <database>system</database>
-        <table>metric_log</table>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
-    </metric_log>
-</yandex>
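Patch 05, like patch 03, deletes these test-only configs rather than relocating them: the reference to #11184 suggests that query logging and the metric log are now enabled in the default configuration, which would make the per-image symlinks redundant. A sketch of the kind of sanity check this enables (the table names are the standard system tables; the exact predicates are illustrative):

    -- Both should return non-trivial counts on a default-configured server.
    SELECT count() FROM system.metric_log;
    SELECT count() FROM system.query_log WHERE event_date = today();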
From 496fed5ed3e073ee2d19bec3abf00c5569894271 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 30 May 2020 17:24:09 +0300
Subject: [PATCH 06/14] Remove "system_tables_lazy_load" option

---
 programs/server/config.xml                               |  5 +--
 src/Interpreters/SystemLog.cpp                           |  6 ----
 .../configs/config_lazy.xml                              |  4 ---
 .../test_system_tables_lazy_load/test.py                 | 32 -------------------
 4 files changed, 1 insertion(+), 46 deletions(-)
 delete mode 100644 tests/integration/test_system_tables_lazy_load/configs/config_lazy.xml
 delete mode 100644 tests/integration/test_system_tables_lazy_load/test.py

diff --git a/programs/server/config.xml b/programs/server/config.xml
index e16af9d75d7..21605edeb36 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -191,7 +191,7 @@
     /var/lib/clickhouse/access/
-
+    users.xml
@@ -405,9 +405,6 @@
     -->
-
-
-
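With the setting removed there is a single, non-configurable behavior for creating the system log tables, which is why the dedicated lazy-load integration test and its config_lazy.xml override can be deleted outright. For orientation, a sketch of how to inspect which log tables a running server has materialized (purely illustrative):

    SELECT name
    FROM system.tables
    WHERE database = 'system' AND name LIKE '%_log'
    ORDER BY name;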