From 0a26afd1c8513074c9bef3c84d5e61eb7f41f3d6 Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Mon, 10 Oct 2022 18:54:35 -0300 Subject: [PATCH 001/112] Add mutex around ares fd processing --- src/Common/CaresPTRResolver.cpp | 6 ++++++ src/Common/CaresPTRResolver.h | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/Common/CaresPTRResolver.cpp b/src/Common/CaresPTRResolver.cpp index a02909309b6..32d9fd65e1c 100644 --- a/src/Common/CaresPTRResolver.cpp +++ b/src/Common/CaresPTRResolver.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include "ares.h" #include "netdb.h" @@ -40,6 +41,8 @@ namespace DB } } + std::mutex CaresPTRResolver::mutex; + CaresPTRResolver::CaresPTRResolver(CaresPTRResolver::provider_token) : channel(nullptr) { /* @@ -119,6 +122,9 @@ namespace DB { FD_ZERO(&read_fds); FD_ZERO(&write_fds); + + std::lock_guard guard {mutex}; + nfds = ares_fds(channel, &read_fds,&write_fds); if (nfds == 0) { diff --git a/src/Common/CaresPTRResolver.h b/src/Common/CaresPTRResolver.h index e5182d34682..47c3aaf7bec 100644 --- a/src/Common/CaresPTRResolver.h +++ b/src/Common/CaresPTRResolver.h @@ -20,7 +20,6 @@ namespace DB * Allow only DNSPTRProvider to instantiate this class * */ struct provider_token {}; - public: explicit CaresPTRResolver(provider_token); ~CaresPTRResolver() override; @@ -37,6 +36,8 @@ namespace DB void resolve_v6(const std::string & ip, std::unordered_set & response); ares_channel channel; + + static std::mutex mutex; }; } From 6dae76ff8809e11a4e11350df173a735ab000bed Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Tue, 11 Oct 2022 12:47:27 -0300 Subject: [PATCH 002/112] add mutex include for release builds --- src/Common/CaresPTRResolver.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Common/CaresPTRResolver.h b/src/Common/CaresPTRResolver.h index 47c3aaf7bec..a012e7bed35 100644 --- a/src/Common/CaresPTRResolver.h +++ b/src/Common/CaresPTRResolver.h @@ -1,5 +1,6 @@ #pragma once +#include #include "DNSPTRResolver.h" using ares_channel = struct ares_channeldata *; From 9abf13bf78cd3fe32c50c1e8b9096f1d98fa7c45 Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Wed, 12 Oct 2022 13:56:00 -0300 Subject: [PATCH 003/112] Use poll instead of select for c-ares --- src/Common/CaresPTRResolver.cpp | 83 +++++++++++++++++++++++++++------ src/Common/CaresPTRResolver.h | 12 ++++- 2 files changed, 78 insertions(+), 17 deletions(-) diff --git a/src/Common/CaresPTRResolver.cpp b/src/Common/CaresPTRResolver.cpp index 32d9fd65e1c..79b78a734aa 100644 --- a/src/Common/CaresPTRResolver.cpp +++ b/src/Common/CaresPTRResolver.cpp @@ -41,8 +41,6 @@ namespace DB } } - std::mutex CaresPTRResolver::mutex; - CaresPTRResolver::CaresPTRResolver(CaresPTRResolver::provider_token) : channel(nullptr) { /* @@ -113,26 +111,81 @@ namespace DB void CaresPTRResolver::wait() { - timeval * tvp, tv; - fd_set read_fds; - fd_set write_fds; - int nfds; + int sockets[ARES_GETSOCK_MAXNUM]; + pollfd pollfd[ARES_GETSOCK_MAXNUM]; - for (;;) + while(true) { - FD_ZERO(&read_fds); - FD_ZERO(&write_fds); + auto readable_sockets = get_readable_sockets(sockets, pollfd); + auto timeout = calculate_timeout(); - std::lock_guard guard {mutex}; + int number_of_fds_ready = 0; + if(!readable_sockets.empty()) + number_of_fds_ready = poll(readable_sockets.data(), readable_sockets.size(), timeout); - nfds = ares_fds(channel, &read_fds,&write_fds); - if (nfds == 0) + if(!number_of_fds_ready) + { + process_possible_timeout(); + break; + } + else + { + process_readable_sockets(readable_sockets); + } + 
}
+    }
+
+    std::span<pollfd> CaresPTRResolver::get_readable_sockets(int * sockets, pollfd * pollfd)
+    {
+        int sockets_bitmask = ares_getsock(channel, sockets, ARES_GETSOCK_MAXNUM);
+
+        int number_of_sockets_to_poll = 0;
+
+        for(int i = 0; i < ARES_GETSOCK_MAXNUM; i++, number_of_sockets_to_poll++)
+        {
+            pollfd[i].events = 0;
+            pollfd[i].revents = 0;
+
+            if(ARES_GETSOCK_READABLE(sockets_bitmask, i))
+            {
+                pollfd[i].fd = sockets[i];
+                pollfd[i].events = POLLIN;
+            }
+            else
             {
                 break;
             }
-            tvp = ares_timeout(channel, nullptr, &tv);
-            select(nfds, &read_fds, &write_fds, nullptr, tvp);
-            ares_process(channel, &read_fds, &write_fds);
+        }
+
+        return std::span(pollfd, number_of_sockets_to_poll);
+    }
+
+    int64_t CaresPTRResolver::calculate_timeout()
+    {
+        timeval tv;
+        if (auto * tvp = ares_timeout(channel, nullptr, &tv))
+        {
+            auto timeout = tvp->tv_sec * 1000 + tvp->tv_usec / 1000;
+
+            return timeout;
+        }
+
+        return 0;
+    }
+
+    void CaresPTRResolver::process_possible_timeout()
+    {
+        /* Call ares_process() unconditonally here, even if we simply timed out
+        above, as otherwise the ares name resolve won't timeout! */
+        ares_process_fd(channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
+    }
+
+    void CaresPTRResolver::process_readable_sockets(std::span<pollfd> readable_sockets)
+    {
+        for (auto readable_socket : readable_sockets)
+        {
+            auto fd = readable_socket.revents & POLLIN ? readable_socket.fd : ARES_SOCKET_BAD;
+            ares_process_fd(channel, fd, ARES_SOCKET_BAD);
+        }
+    }
 }
diff --git a/src/Common/CaresPTRResolver.h b/src/Common/CaresPTRResolver.h
index a012e7bed35..654f2af9008 100644
--- a/src/Common/CaresPTRResolver.h
+++ b/src/Common/CaresPTRResolver.h
@@ -1,6 +1,7 @@
 #pragma once
 
-#include 
+#include 
+#include 
 #include "DNSPTRResolver.h"
 
 using ares_channel = struct ares_channeldata *;
@@ -36,8 +37,15 @@ namespace DB
 
         void resolve_v6(const std::string & ip, std::unordered_set<std::string> & response);
 
-        ares_channel channel;
+        std::span<pollfd> get_readable_sockets(int * sockets, pollfd * pollfd);
 
+        int64_t calculate_timeout();
+
+        void process_possible_timeout();
+
+        void process_readable_sockets(std::span<pollfd> readable_sockets);
+
+        ares_channel channel;
         static std::mutex mutex;
     };
 }

From d98eac11eb5684d91fd94df737bd0e61f9c6fd0d Mon Sep 17 00:00:00 2001
From: Arthur Passos 
Date: Wed, 12 Oct 2022 17:47:56 -0300
Subject: [PATCH 004/112] remove mutex declaration

---
 src/Common/CaresPTRResolver.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/Common/CaresPTRResolver.h b/src/Common/CaresPTRResolver.h
index 654f2af9008..38344d75ade 100644
--- a/src/Common/CaresPTRResolver.h
+++ b/src/Common/CaresPTRResolver.h
@@ -46,7 +46,6 @@ namespace DB
         void process_readable_sockets(std::span<pollfd> readable_sockets);
 
         ares_channel channel;
-        static std::mutex mutex;
     };
 }

From 72ba210b24e35adcd5a6ebd52f2c60e6ef0d7ed3 Mon Sep 17 00:00:00 2001
From: Arthur Passos 
Date: Wed, 12 Oct 2022 20:23:29 -0300
Subject: [PATCH 005/112] working on style

---
 src/Common/CaresPTRResolver.cpp | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/src/Common/CaresPTRResolver.cpp b/src/Common/CaresPTRResolver.cpp
index 79b78a734aa..cf0e1661322 100644
--- a/src/Common/CaresPTRResolver.cpp
+++ b/src/Common/CaresPTRResolver.cpp
@@ -121,16 +121,18 @@
 
             int number_of_fds_ready = 0;
             if(!readable_sockets.empty())
-            number_of_fds_ready = poll(readable_sockets.data(), readable_sockets.size(), timeout);
-
-            if(!number_of_fds_ready)
             {
-                process_possible_timeout();
-                break;
+                number_of_fds_ready = poll(readable_sockets.data(), readable_sockets.size(), timeout);
+            }
+
+            
if(number_of_fds_ready > 0) + { + process_readable_sockets(readable_sockets); } else { - process_readable_sockets(readable_sockets); + process_possible_timeout(); + break; } } } From f588830e245a0b1a564a01ad9965c4a5780706d0 Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Wed, 12 Oct 2022 20:35:17 -0300 Subject: [PATCH 006/112] working on style --- src/Common/CaresPTRResolver.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Common/CaresPTRResolver.cpp b/src/Common/CaresPTRResolver.cpp index cf0e1661322..a3eeb3d24ad 100644 --- a/src/Common/CaresPTRResolver.cpp +++ b/src/Common/CaresPTRResolver.cpp @@ -114,18 +114,18 @@ namespace DB int sockets[ARES_GETSOCK_MAXNUM]; pollfd pollfd[ARES_GETSOCK_MAXNUM]; - while(true) + while (true) { auto readable_sockets = get_readable_sockets(sockets, pollfd); auto timeout = calculate_timeout(); int number_of_fds_ready = 0; - if(!readable_sockets.empty()) + if (!readable_sockets.empty()) { number_of_fds_ready = poll(readable_sockets.data(), readable_sockets.size(), timeout); } - if(number_of_fds_ready > 0) + if (number_of_fds_ready > 0) { process_readable_sockets(readable_sockets); } @@ -143,7 +143,7 @@ namespace DB int number_of_sockets_to_poll = 0; - for(int i = 0; i < ARES_GETSOCK_MAXNUM; i++, number_of_sockets_to_poll++) + for (int i = 0; i < ARES_GETSOCK_MAXNUM; i++, number_of_sockets_to_poll++) { pollfd[i].events = 0; pollfd[i].revents = 0; From 91560dd60708af4fded8bf55c00fdafc0057fbe8 Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Wed, 12 Oct 2022 21:28:32 -0300 Subject: [PATCH 007/112] working on style --- src/Common/CaresPTRResolver.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/CaresPTRResolver.cpp b/src/Common/CaresPTRResolver.cpp index a3eeb3d24ad..f04c6fb6de7 100644 --- a/src/Common/CaresPTRResolver.cpp +++ b/src/Common/CaresPTRResolver.cpp @@ -148,7 +148,7 @@ namespace DB pollfd[i].events = 0; pollfd[i].revents = 0; - if(ARES_GETSOCK_READABLE(sockets_bitmask, i)) + if (ARES_GETSOCK_READABLE(sockets_bitmask, i)) { pollfd[i].fd = sockets[i]; pollfd[i].events = POLLIN; From 6d22bb78e0db5deebe88444cee5475516c7d322d Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Fri, 14 Oct 2022 12:39:08 -0300 Subject: [PATCH 008/112] custom py script in integ tests --- docker/test/integration/base/Dockerfile | 5 ++ .../__init__.py | 0 .../configs/host_regexp.xml | 11 +++ .../configs/listen_host.xml | 5 ++ .../coredns_config/Corefile | 8 ++ .../coredns_config/example.com | 1 + .../scripts/stress_test.py | 56 +++++++++++++ .../test.py | 83 +++++++++++++++++++ 8 files changed, 169 insertions(+) create mode 100644 tests/integration/test_host_regexp_multiple_ptr_records_concurrent/__init__.py create mode 100644 tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/host_regexp.xml create mode 100644 tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/listen_host.xml create mode 100644 tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/Corefile create mode 100644 tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/example.com create mode 100644 tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py create mode 100644 tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile index 9b6318a5426..a2d86187a23 100644 --- 
a/docker/test/integration/base/Dockerfile +++ b/docker/test/integration/base/Dockerfile @@ -27,9 +27,14 @@ RUN apt-get update \ tar \ tzdata \ unixodbc \ + python3-pip \ + libcurl4-openssl-dev \ + libssl-dev \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* +RUN pip3 install pycurl + # Architecture of the image when BuildKit/buildx is used ARG TARGETARCH diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/__init__.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/host_regexp.xml b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/host_regexp.xml new file mode 100644 index 00000000000..7a2141e6c7e --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/host_regexp.xml @@ -0,0 +1,11 @@ + + + + + + test1\.example\.com$ + + default + + + \ No newline at end of file diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/listen_host.xml b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/listen_host.xml new file mode 100644 index 00000000000..58ef55cd3f3 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/listen_host.xml @@ -0,0 +1,5 @@ + + :: + 0.0.0.0 + 1 + diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/Corefile b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/Corefile new file mode 100644 index 00000000000..0dd198441dc --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/Corefile @@ -0,0 +1,8 @@ +. { + hosts /example.com { + reload "200ms" + fallthrough + } + forward . 
127.0.0.11 + log +} diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/example.com b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/example.com new file mode 100644 index 00000000000..9beb415c290 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/example.com @@ -0,0 +1 @@ +filled in runtime, but needs to exist in order to be volume mapped in docker \ No newline at end of file diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py new file mode 100644 index 00000000000..1a840246ccf --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py @@ -0,0 +1,56 @@ +import pycurl +import threading +from io import BytesIO +import sys + +server_ip = sys.argv[1] + +mutex = threading.Lock() +success_counter = 0 +number_of_threads = 1 +number_of_iterations = 400 + +def perform_request(): + + buffer = BytesIO() + crl = pycurl.Curl() + crl.setopt(pycurl.INTERFACE, '192.168.0.157') + crl.setopt(crl.WRITEDATA, buffer) + crl.setopt(crl.URL, f'http://{server_ip}:8123/?query=select+1&user=test_dns') + + crl.perform() + + # End curl session + crl.close() + + str_response = buffer.getvalue().decode('iso-8859-1') + expected_response = "1\n" + + mutex.acquire() + + global success_counter + + if (str_response == expected_response): + success_counter += 1 + + mutex.release() + + # print(buffer.getvalue().decode('iso-8859-1')) + +def perform_multiple_requests(n): + for i in range(n): + perform_request() + +threads = [] + + +for i in range(number_of_threads): + thread = threading.Thread(target=perform_multiple_requests, args=(number_of_iterations,)) + thread.start() + threads.append(thread) + + +for thread in threads: + thread.join() + +exit(success_counter == number_of_threads * number_of_iterations) diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py new file mode 100644 index 00000000000..566eb5c0b2b --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py @@ -0,0 +1,83 @@ +import pytest +from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check +from time import sleep +import os + +DOCKER_COMPOSE_PATH = get_docker_compose_path() +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) + +cluster = ClickHouseCluster(__file__) + +ch_server = cluster.add_instance( + "clickhouse-server", + with_coredns=True, + main_configs=["configs/listen_host.xml"], + user_configs=["configs/host_regexp.xml"], + ipv6_address="2001:3984:3989::1:1111", +) + +client = cluster.add_instance( + "clickhouse-client", + ipv6_address="2001:3984:3989::1:1112", +) + + +@pytest.fixture(scope="module") +def started_cluster(): + global cluster + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def setup_dns_server(ip): + domains_string = "test3.example.com test2.example.com test1.example.com" + example_file_path = f'{ch_server.env_variables["COREDNS_CONFIG_DIR"]}/example.com' + run_and_check(f"echo '{ip} {domains_string}' > {example_file_path}", shell=True) + + +def setup_ch_server(dns_server_ip): + ch_server.exec_in_container( + (["bash", "-c", f"echo 'nameserver {dns_server_ip}' > /etc/resolv.conf"]) + ) + 
ch_server.exec_in_container( + (["bash", "-c", "echo 'options ndots:0' >> /etc/resolv.conf"]) + ) + ch_server.query("SYSTEM DROP DNS CACHE") + + +def build_endpoint_v4(ip): + return f"'http://{ip}:8123/?query=SELECT+1&user=test_dns'" + + +def build_endpoint_v6(ip): + return build_endpoint_v4(f"[{ip}]") + + +def test_host_regexp_multiple_ptr_v4(started_cluster): + server_ip = cluster.get_instance_ip("clickhouse-server") + client_ip = cluster.get_instance_ip("clickhouse-client") + dns_server_ip = cluster.get_instance_ip(cluster.coredns_host) + + setup_dns_server(client_ip) + setup_ch_server(dns_server_ip) + + current_dir = os.path.dirname(__file__) + client.copy_file_to_container(os.path.join(current_dir, "scripts", "stress_test.py"), "stress_test.py") + + assert "1\n" == client.exec_in_container(["python3", f"stress_test.py", server_ip]) + + # benchmark_command = f"echo 'select 1' | clickhouse benchmark -h {server_ip} --user test_dns -c 10 --reconnect" + + # assert "1\n" == client.exec_in_container((["bash", "-c", benchmark_command])) +# container_id = cluster.get_container_id("resolver") +# current_dir = os.path.dirname(__file__) +# cluster.copy_file_to_container( +# container_id, +# os.path.join(current_dir, "s3_endpoint", "endpoint.py"), +# "endpoint.py", +# ) +# cluster.exec_in_container(container_id, ["python", "endpoint.py"], detach=True) From 299d8484737f436166bc05ca3b864368b90cde90 Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Fri, 14 Oct 2022 15:23:10 -0300 Subject: [PATCH 009/112] style fix? --- .../scripts/stress_test.py | 18 ++++++++++-------- .../test.py | 15 +++------------ 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py index 1a840246ccf..2d8a70edc57 100644 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py @@ -10,45 +10,47 @@ success_counter = 0 number_of_threads = 1 number_of_iterations = 400 + def perform_request(): buffer = BytesIO() crl = pycurl.Curl() - crl.setopt(pycurl.INTERFACE, '192.168.0.157') + crl.setopt(pycurl.INTERFACE, "192.168.0.157") crl.setopt(crl.WRITEDATA, buffer) - crl.setopt(crl.URL, f'http://{server_ip}:8123/?query=select+1&user=test_dns') + crl.setopt(crl.URL, f"http://{server_ip}:8123/?query=select+1&user=test_dns") crl.perform() # End curl session crl.close() - str_response = buffer.getvalue().decode('iso-8859-1') + str_response = buffer.getvalue().decode("iso-8859-1") expected_response = "1\n" mutex.acquire() global success_counter - if (str_response == expected_response): + if str_response == expected_response: success_counter += 1 mutex.release() - - # print(buffer.getvalue().decode('iso-8859-1')) + def perform_multiple_requests(n): for i in range(n): perform_request() + threads = [] for i in range(number_of_threads): - thread = threading.Thread(target=perform_multiple_requests, args=(number_of_iterations,)) + thread = threading.Thread( + target=perform_multiple_requests, args=(number_of_iterations,) + ) thread.start() threads.append(thread) - for thread in threads: thread.join() diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py index 566eb5c0b2b..ef1c615023d 100644 --- 
a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py @@ -66,18 +66,9 @@ def test_host_regexp_multiple_ptr_v4(started_cluster): setup_ch_server(dns_server_ip) current_dir = os.path.dirname(__file__) - client.copy_file_to_container(os.path.join(current_dir, "scripts", "stress_test.py"), "stress_test.py") + client.copy_file_to_container( + os.path.join(current_dir, "scripts", "stress_test.py"), "stress_test.py" + ) assert "1\n" == client.exec_in_container(["python3", f"stress_test.py", server_ip]) - # benchmark_command = f"echo 'select 1' | clickhouse benchmark -h {server_ip} --user test_dns -c 10 --reconnect" - - # assert "1\n" == client.exec_in_container((["bash", "-c", benchmark_command])) -# container_id = cluster.get_container_id("resolver") -# current_dir = os.path.dirname(__file__) -# cluster.copy_file_to_container( -# container_id, -# os.path.join(current_dir, "s3_endpoint", "endpoint.py"), -# "endpoint.py", -# ) -# cluster.exec_in_container(container_id, ["python", "endpoint.py"], detach=True) From f414edebae69c5d247f2657c5a312c2e06b13e98 Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Fri, 14 Oct 2022 15:54:48 -0300 Subject: [PATCH 010/112] fix black style --- .../test_host_regexp_multiple_ptr_records_concurrent/test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py index ef1c615023d..7ead30275a5 100644 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py @@ -71,4 +71,3 @@ def test_host_regexp_multiple_ptr_v4(started_cluster): ) assert "1\n" == client.exec_in_container(["python3", f"stress_test.py", server_ip]) - From 3d1ed2969ae2d25038e4f6b1e974225f90e94625 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 17 Oct 2022 18:21:03 +0000 Subject: [PATCH 011/112] Fix possible LOGICAL_ERROR in binary ariphmetics monotonicity. Fix invalid evaluation of binary monotonic function in KeyCondition. --- src/Functions/FunctionBinaryArithmetic.h | 18 ++++++++++-------- src/Storages/MergeTree/KeyCondition.cpp | 2 ++ ...2461_mullable_pk_monotonicity_bug.reference | 4 ++++ .../02461_mullable_pk_monotonicity_bug.sql | 6 ++++++ 4 files changed, 22 insertions(+), 8 deletions(-) create mode 100644 tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference create mode 100644 tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index baa3c65537d..cf752bbcbf8 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -1782,21 +1782,24 @@ public: { ColumnsWithTypeAndName columns_with_constant = {{left.column->cloneResized(1), left.type, left.name}, - {right.type->createColumnConst(1, point), right.type, right.name}}; + {removeNullable(right.type)->createColumnConst(1, point), removeNullable(right.type), right.name}}; + /// This is a bit dangerous to call Base::executeImpl cause it ignores `use Default Implementation For XXX` flags. + /// It was possible to check monotonicity for nullable right type which result to exception. + /// Adding removeNullable above fixes the issue, but some other inconsistency may left. 
auto col = Base::executeImpl(columns_with_constant, return_type, 1); Field point_transformed; col->get(0, point_transformed); return point_transformed; }; - transform(left_point); - transform(right_point); + + bool is_positive_monotonicity = applyVisitor(FieldVisitorAccurateLess(), left_point, right_point) + == applyVisitor(FieldVisitorAccurateLess(), transform(left_point), transform(right_point)); if (name_view == "plus") { // Check if there is an overflow - if (applyVisitor(FieldVisitorAccurateLess(), left_point, right_point) - == applyVisitor(FieldVisitorAccurateLess(), transform(left_point), transform(right_point))) + if (is_positive_monotonicity) return {true, true, false, true}; else return {false, true, false, false}; @@ -1804,8 +1807,7 @@ public: else { // Check if there is an overflow - if (applyVisitor(FieldVisitorAccurateLess(), left_point, right_point) - != applyVisitor(FieldVisitorAccurateLess(), transform(left_point), transform(right_point))) + if (!is_positive_monotonicity) return {true, false, false, true}; else return {false, false, false, false}; @@ -1817,7 +1819,7 @@ public: auto transform = [&](const Field & point) { ColumnsWithTypeAndName columns_with_constant - = {{left.type->createColumnConst(1, point), left.type, left.name}, + = {{removeNullable(left.type)->createColumnConst(1, point), removeNullable(left.type), left.name}, {right.column->cloneResized(1), right.type, right.name}}; auto col = Base::executeImpl(columns_with_constant, return_type, 1); diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index d7c33c8663b..ec5454973c7 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -1407,6 +1407,7 @@ public: ColumnsWithTypeAndName new_arguments; new_arguments.reserve(arguments.size() + 1); new_arguments.push_back(const_arg); + new_arguments.front().column = new_arguments.front().column->cloneResized(input_rows_count); for (const auto & arg : arguments) new_arguments.push_back(arg); return func->prepare(new_arguments)->execute(new_arguments, result_type, input_rows_count, dry_run); @@ -1415,6 +1416,7 @@ public: { auto new_arguments = arguments; new_arguments.push_back(const_arg); + new_arguments.back().column = new_arguments.back().column->cloneResized(input_rows_count); return func->prepare(new_arguments)->execute(new_arguments, result_type, input_rows_count, dry_run); } else diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference new file mode 100644 index 00000000000..099b7d91c92 --- /dev/null +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference @@ -0,0 +1,4 @@ +1 +2 +1 +2 diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql new file mode 100644 index 00000000000..56ea054cc47 --- /dev/null +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql @@ -0,0 +1,6 @@ +create table tab (x Nullable(UInt8)) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select number from numbers(4); + +set max_rows_to_read = 2; +SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; From c457f7dff3d100c29c24e1592f665a9d03ecffed Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 18 Oct 2022 12:21:16 +0000 Subject: [PATCH 012/112] Fix and more tests. 
--- src/Functions/FunctionBinaryArithmetic.h | 15 +++++++++---- ...461_mullable_pk_monotonicity_bug.reference | 20 ++++++++++++++++++ .../02461_mullable_pk_monotonicity_bug.sql | 21 +++++++++++++++++++ 3 files changed, 52 insertions(+), 4 deletions(-) diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index cf752bbcbf8..05f092b5060 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -39,6 +39,7 @@ #include #include #include +#include #include #if USE_EMBEDDED_COMPILER @@ -1778,11 +1779,14 @@ public: // const +|- variable if (left.column && isColumnConst(*left.column)) { + auto left_type = removeLowCardinality(removeNullable(left.type)); + auto right_type = removeLowCardinality(removeNullable(right.type)); + auto transform = [&](const Field & point) { ColumnsWithTypeAndName columns_with_constant - = {{left.column->cloneResized(1), left.type, left.name}, - {removeNullable(right.type)->createColumnConst(1, point), removeNullable(right.type), right.name}}; + = {{left_type->createColumnConst(1, (*left.column)[0]), left_type, left.name}, + {right_type->createColumnConst(1, point), right_type, right.name}}; /// This is a bit dangerous to call Base::executeImpl cause it ignores `use Default Implementation For XXX` flags. /// It was possible to check monotonicity for nullable right type which result to exception. @@ -1816,11 +1820,14 @@ public: // variable +|- constant else if (right.column && isColumnConst(*right.column)) { + auto left_type = removeLowCardinality(removeNullable(left.type)); + auto right_type = removeLowCardinality(removeNullable(right.type)); + auto transform = [&](const Field & point) { ColumnsWithTypeAndName columns_with_constant - = {{removeNullable(left.type)->createColumnConst(1, point), removeNullable(left.type), left.name}, - {right.column->cloneResized(1), right.type, right.name}}; + = {{left_type->createColumnConst(1, point), left_type, left.name}, + {right_type->createColumnConst(1, (*right.column)[0]), right_type, right.name}}; auto col = Base::executeImpl(columns_with_constant, return_type, 1); Field point_transformed; diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference index 099b7d91c92..c2983d46447 100644 --- a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference @@ -2,3 +2,23 @@ 2 1 2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql index 56ea054cc47..798868fe566 100644 --- a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql @@ -1,6 +1,27 @@ create table tab (x Nullable(UInt8)) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; insert into tab select number from numbers(4); +set allow_suspicious_low_cardinality_types=1; +set max_rows_to_read = 2; + +SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM 
tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; + +drop table tab; + +set max_rows_to_read = 100; + +create table tab (x LowCardinality(UInt8)) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select number from numbers(4); + set max_rows_to_read = 2; SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; From 4645d465b12013d99d88956020034cbfa47d9455 Mon Sep 17 00:00:00 2001 From: Boris Kuschel Date: Tue, 18 Oct 2022 17:38:06 -0500 Subject: [PATCH 013/112] update zlib-ng to latest --- contrib/zlib-ng | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index bffad6f6fe7..50f0eae1a41 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit bffad6f6fe74d6a2f92e2668390664a926c68733 +Subproject commit 50f0eae1a411764cd6d1e85b3ce471438acd3c1c From 7f0a2d32c852874ebb521f7883542e6adbc63dad Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Tue, 18 Oct 2022 20:15:58 -0300 Subject: [PATCH 014/112] fix test errors --- .../configs/config.xml | 4 ++++ .../scripts/stress_test.py | 17 +++++++++++------ .../test.py | 6 ++---- 3 files changed, 17 insertions(+), 10 deletions(-) create mode 100644 tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/config.xml diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/config.xml b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/config.xml new file mode 100644 index 00000000000..42a1f962705 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/config.xml @@ -0,0 +1,4 @@ + + 1 + 250 + diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py index 2d8a70edc57..81dfb4cc511 100644 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py @@ -3,19 +3,20 @@ import threading from io import BytesIO import sys -server_ip = sys.argv[1] +client_ip = sys.argv[1] +server_ip = sys.argv[2] mutex = threading.Lock() success_counter = 0 -number_of_threads = 1 -number_of_iterations = 400 +number_of_threads = 200 +number_of_iterations = 200 def perform_request(): buffer = BytesIO() crl = pycurl.Curl() - crl.setopt(pycurl.INTERFACE, "192.168.0.157") + crl.setopt(pycurl.INTERFACE, client_ip) crl.setopt(crl.WRITEDATA, buffer) crl.setopt(crl.URL, f"http://{server_ip}:8123/?query=select+1&user=test_dns") @@ -38,7 +39,7 @@ def perform_request(): def perform_multiple_requests(n): - for i in range(n): + for request_number in range(n): perform_request() @@ -55,4 +56,8 @@ for i in range(number_of_threads): for thread in threads: thread.join() -exit(success_counter == number_of_threads * number_of_iterations) + +if success_counter == number_of_threads * number_of_iterations: + exit(0) + +exit(1) diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py 
b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py index 7ead30275a5..62f47579612 100644 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py @@ -11,14 +11,12 @@ cluster = ClickHouseCluster(__file__) ch_server = cluster.add_instance( "clickhouse-server", with_coredns=True, - main_configs=["configs/listen_host.xml"], + main_configs=["configs/config.xml", "configs/listen_host.xml"], user_configs=["configs/host_regexp.xml"], - ipv6_address="2001:3984:3989::1:1111", ) client = cluster.add_instance( "clickhouse-client", - ipv6_address="2001:3984:3989::1:1112", ) @@ -70,4 +68,4 @@ def test_host_regexp_multiple_ptr_v4(started_cluster): os.path.join(current_dir, "scripts", "stress_test.py"), "stress_test.py" ) - assert "1\n" == client.exec_in_container(["python3", f"stress_test.py", server_ip]) + client.exec_in_container(["python3", f"stress_test.py", client_ip, server_ip]) From 549597fe804353800b6505c2b03c029a073f9782 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Wed, 19 Oct 2022 12:15:27 +0800 Subject: [PATCH 015/112] Fix misbehavior of key analysis. When key types cannot be inside Nullable, it can break key analysis. See https://github.com/ClickHouse/ClickHouse/issues/42456. --- src/Interpreters/castColumn.cpp | 10 ++++++++++ src/Interpreters/castColumn.h | 1 + src/Storages/MergeTree/KeyCondition.cpp | 6 ++++-- ...dition_with_types_that_cannot_be_nullable.reference | 1 + ...ey_condition_with_types_that_cannot_be_nullable.sql | 9 +++++++++ 5 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/02457_key_condition_with_types_that_cannot_be_nullable.reference create mode 100644 tests/queries/0_stateless/02457_key_condition_with_types_that_cannot_be_nullable.sql diff --git a/src/Interpreters/castColumn.cpp b/src/Interpreters/castColumn.cpp index dc9882b84b0..744cfff2527 100644 --- a/src/Interpreters/castColumn.cpp +++ b/src/Interpreters/castColumn.cpp @@ -51,4 +51,14 @@ ColumnPtr castColumnAccurateOrNull(const ColumnWithTypeAndName & arg, const Data return castColumn(arg, type); } +ColumnPtr tryCastColumnAccurate(const ColumnWithTypeAndName & arg, const DataTypePtr & type) +try +{ + return castColumn(arg, type); +} +catch (...) 
+{ + return nullptr; +} + } diff --git a/src/Interpreters/castColumn.h b/src/Interpreters/castColumn.h index fcbea0f4646..c0a2cfbefbc 100644 --- a/src/Interpreters/castColumn.h +++ b/src/Interpreters/castColumn.h @@ -8,5 +8,6 @@ namespace DB ColumnPtr castColumn(const ColumnWithTypeAndName & arg, const DataTypePtr & type); ColumnPtr castColumnAccurate(const ColumnWithTypeAndName & arg, const DataTypePtr & type); ColumnPtr castColumnAccurateOrNull(const ColumnWithTypeAndName & arg, const DataTypePtr & type); +ColumnPtr tryCastColumnAccurate(const ColumnWithTypeAndName & arg, const DataTypePtr & type); } diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 9f5f2873b98..3ecaff2c19a 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -1182,11 +1182,13 @@ bool KeyCondition::transformConstantWithValidFunctions( { auto const_type = cur_node->result_type; auto const_column = out_type->createColumnConst(1, out_value); - auto const_value = (*castColumnAccurateOrNull({const_column, out_type, ""}, const_type))[0]; + auto maybe_const_value_column = tryCastColumnAccurate({const_column, out_type, ""}, const_type); - if (const_value.isNull()) + if (maybe_const_value_column == nullptr) return false; + auto const_value = (*maybe_const_value_column)[0]; + while (!chain.empty()) { const auto * func = chain.top(); diff --git a/tests/queries/0_stateless/02457_key_condition_with_types_that_cannot_be_nullable.reference b/tests/queries/0_stateless/02457_key_condition_with_types_that_cannot_be_nullable.reference new file mode 100644 index 00000000000..13b65c29f05 --- /dev/null +++ b/tests/queries/0_stateless/02457_key_condition_with_types_that_cannot_be_nullable.reference @@ -0,0 +1 @@ +printer1 diff --git a/tests/queries/0_stateless/02457_key_condition_with_types_that_cannot_be_nullable.sql b/tests/queries/0_stateless/02457_key_condition_with_types_that_cannot_be_nullable.sql new file mode 100644 index 00000000000..690ec6c70e0 --- /dev/null +++ b/tests/queries/0_stateless/02457_key_condition_with_types_that_cannot_be_nullable.sql @@ -0,0 +1,9 @@ +drop table if exists test; + +create table test (Printer LowCardinality(String), IntervalStart DateTime) engine MergeTree partition by (hiveHash(Printer), toYear(IntervalStart)) order by (Printer, IntervalStart); + +insert into test values ('printer1', '2006-02-07 06:28:15'); + +select Printer from test where Printer='printer1'; + +drop table test; From c8f9e7bd1f028be15d233571c5cff4ed2442ad64 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 19 Oct 2022 14:33:37 +0000 Subject: [PATCH 016/112] Update test. 
--- src/Functions/FunctionBinaryArithmetic.h | 8 ++++---- .../02461_mullable_pk_monotonicity_bug.reference | 8 ++++++++ .../0_stateless/02461_mullable_pk_monotonicity_bug.sql | 4 ++++ 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index 05f092b5060..3ffe054a439 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -1779,8 +1779,8 @@ public: // const +|- variable if (left.column && isColumnConst(*left.column)) { - auto left_type = removeLowCardinality(removeNullable(left.type)); - auto right_type = removeLowCardinality(removeNullable(right.type)); + auto left_type = removeNullable(removeLowCardinality(left.type)); + auto right_type = removeNullable(removeLowCardinality(right.type)); auto transform = [&](const Field & point) { @@ -1820,8 +1820,8 @@ public: // variable +|- constant else if (right.column && isColumnConst(*right.column)) { - auto left_type = removeLowCardinality(removeNullable(left.type)); - auto right_type = removeLowCardinality(removeNullable(right.type)); + auto left_type = removeNullable(removeLowCardinality(left.type)); + auto right_type = removeNullable(removeLowCardinality(right.type)); auto transform = [&](const Field & point) { diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference index c2983d46447..5ee6e6c67c4 100644 --- a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference @@ -22,3 +22,11 @@ 2 1 2 +1 +2 +1 +2 +1 +2 +1 +2 diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql index 798868fe566..4f20ec0cd35 100644 --- a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql @@ -7,9 +7,11 @@ set max_rows_to_read = 2; SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; drop table tab; @@ -22,6 +24,8 @@ set max_rows_to_read = 2; SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; From 58b6194a2d3ff7361617217538c573db7c529b10 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 19 Oct 2022 15:08:00 +0000 Subject: [PATCH 017/112] Fix another one case. 
--- src/Functions/FunctionsConversion.h | 4 ++++ .../0_stateless/02461_mullable_pk_monotonicity_bug.sql | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 8cbe3b0e532..ebf8417aea6 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -2209,6 +2209,10 @@ struct ToNumberMonotonicity /// Integer cases. + /// Do not support 128 bit integers and decimals for now. + if (!isNativeInteger(type)) + return {}; + const bool from_is_unsigned = type.isValueRepresentedByUnsignedInteger(); const bool to_is_unsigned = is_unsigned_v; diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql index 4f20ec0cd35..ce190694514 100644 --- a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql @@ -29,3 +29,12 @@ SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; + +drop table tab; + +set max_rows_to_read = 100; + +create table tab (x UInt128) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select number from numbers(4); + +SELECT x + 1 FROM tab WHERE (x + 1::LowCardinality(UInt8)) <= -9223372036854775808 order by x; From 2499ab3b3a9bb9fcf9ff09172fb0b869f7db4e19 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Thu, 20 Oct 2022 17:53:52 +0800 Subject: [PATCH 018/112] Use convertFieldToType --- src/Storages/MergeTree/KeyCondition.cpp | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 3ecaff2c19a..194cfbdabfc 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -1181,14 +1180,10 @@ bool KeyCondition::transformConstantWithValidFunctions( if (is_valid_chain) { auto const_type = cur_node->result_type; - auto const_column = out_type->createColumnConst(1, out_value); - auto maybe_const_value_column = tryCastColumnAccurate({const_column, out_type, ""}, const_type); - - if (maybe_const_value_column == nullptr) + auto const_value = convertFieldToType(out_value, *const_type); + if (const_value.isNull()) return false; - auto const_value = (*maybe_const_value_column)[0]; - while (!chain.empty()) { const auto * func = chain.top(); From 820e6b427697211d15988a38aaefbc6fd6bb7e39 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 20 Oct 2022 10:52:43 +0000 Subject: [PATCH 019/112] Build with libcxx(abi) 15 --- contrib/libcxx | 2 +- contrib/libcxx-cmake/CMakeLists.txt | 2 + contrib/libcxxabi | 2 +- contrib/libcxxabi-cmake/CMakeLists.txt | 1 + contrib/rocksdb | 2 +- .../tests/gtest_compressionCodec.cpp | 2 +- .../tests/gtest_json_parser.cpp | 2 +- .../Algorithms/AggregatingSortedAlgorithm.cpp | 66 ------------------ .../Algorithms/AggregatingSortedAlgorithm.h | 67 ++++++++++++++++++- .../Algorithms/SummingSortedAlgorithm.cpp | 8 +-- 10 files changed, 77 insertions(+), 77 deletions(-) diff --git a/contrib/libcxx b/contrib/libcxx index 172b2ae074f..19330eed499 160000 --- a/contrib/libcxx +++ b/contrib/libcxx 
@@ -1 +1 @@ -Subproject commit 172b2ae074f6755145b91c53a95c8540c1468239 +Subproject commit 19330eed499f0f2011437a92b7e8567ea36fe082 diff --git a/contrib/libcxx-cmake/CMakeLists.txt b/contrib/libcxx-cmake/CMakeLists.txt index 6f42a479588..53c6ff58f83 100644 --- a/contrib/libcxx-cmake/CMakeLists.txt +++ b/contrib/libcxx-cmake/CMakeLists.txt @@ -25,6 +25,7 @@ set(SRCS "${LIBCXX_SOURCE_DIR}/src/ios.cpp" "${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp" "${LIBCXX_SOURCE_DIR}/src/iostream.cpp" +"${LIBCXX_SOURCE_DIR}/src/legacy_debug_handler.cpp" "${LIBCXX_SOURCE_DIR}/src/legacy_pointer_safety.cpp" "${LIBCXX_SOURCE_DIR}/src/locale.cpp" "${LIBCXX_SOURCE_DIR}/src/memory.cpp" @@ -49,6 +50,7 @@ set(SRCS "${LIBCXX_SOURCE_DIR}/src/valarray.cpp" "${LIBCXX_SOURCE_DIR}/src/variant.cpp" "${LIBCXX_SOURCE_DIR}/src/vector.cpp" +"${LIBCXX_SOURCE_DIR}/src/verbose_abort.cpp" ) add_library(cxx ${SRCS}) diff --git a/contrib/libcxxabi b/contrib/libcxxabi index 6eb7cc7a7bd..a736a6b3c6a 160000 --- a/contrib/libcxxabi +++ b/contrib/libcxxabi @@ -1 +1 @@ -Subproject commit 6eb7cc7a7bdd779e6734d1b9fb451df2274462d7 +Subproject commit a736a6b3c6a7b8aae2ebad629ca21b2c55b4820e diff --git a/contrib/libcxxabi-cmake/CMakeLists.txt b/contrib/libcxxabi-cmake/CMakeLists.txt index bf1ede8a60e..221a18de6e5 100644 --- a/contrib/libcxxabi-cmake/CMakeLists.txt +++ b/contrib/libcxxabi-cmake/CMakeLists.txt @@ -9,6 +9,7 @@ set(SRCS "${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_noexception.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp" diff --git a/contrib/rocksdb b/contrib/rocksdb index e7c2b2f7bcf..2c8998e26c6 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit e7c2b2f7bcf3b4b33892a1a6d25c32a93edfbdb9 +Subproject commit 2c8998e26c6d46b27c710d7829c3a15e34959f70 diff --git a/src/Compression/tests/gtest_compressionCodec.cpp b/src/Compression/tests/gtest_compressionCodec.cpp index 2df3edb23ad..633c2426d2c 100644 --- a/src/Compression/tests/gtest_compressionCodec.cpp +++ b/src/Compression/tests/gtest_compressionCodec.cpp @@ -1043,7 +1043,7 @@ INSTANTIATE_TEST_SUITE_P(RandomInt, ::testing::Combine( DefaultCodecsToTest, ::testing::Values( - generateSeq(G(RandomGenerator(0))), + generateSeq(G(RandomGenerator(0))), generateSeq(G(RandomGenerator(0))), generateSeq(G(RandomGenerator(0, 0, 1000'000'000))), generateSeq(G(RandomGenerator(0, 0, 1000'000'000))) diff --git a/src/DataTypes/Serializations/tests/gtest_json_parser.cpp b/src/DataTypes/Serializations/tests/gtest_json_parser.cpp index 4dddb3cd03d..9b0c8e44d02 100644 --- a/src/DataTypes/Serializations/tests/gtest_json_parser.cpp +++ b/src/DataTypes/Serializations/tests/gtest_json_parser.cpp @@ -69,7 +69,7 @@ static std::ostream & operator<<(std::ostream & ostr, const JSONPathAndValue & p bool first = true; for (const auto & part : path_and_value.path.getParts()) { - ostr << (first ? "{" : ", {") << part.key << ", " << part.is_nested << ", " << part.anonymous_array_level << "}"; + ostr << (first ? 
"{" : ", {") << part.key << ", " << part.is_nested << ", " << static_cast(part.anonymous_array_level) << "}"; first = false; } diff --git a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp index ebc1b37074b..7a0847bcbc5 100644 --- a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp @@ -1,7 +1,5 @@ #include -#include -#include #include #include #include @@ -18,70 +16,6 @@ AggregatingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition() = default; AggregatingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition(ColumnsDefinition &&) noexcept = default; AggregatingSortedAlgorithm::ColumnsDefinition::~ColumnsDefinition() = default; -/// Stores information for aggregation of AggregateFunction columns -struct AggregatingSortedAlgorithm::AggregateDescription -{ - ColumnAggregateFunction * column = nullptr; - const size_t column_number = 0; /// Position in header. - - AggregateDescription() = default; - explicit AggregateDescription(size_t col_number) : column_number(col_number) {} -}; - -/// Stores information for aggregation of SimpleAggregateFunction columns -struct AggregatingSortedAlgorithm::SimpleAggregateDescription -{ - /// An aggregate function 'anyLast', 'sum'... - AggregateFunctionPtr function; - IAggregateFunction::AddFunc add_function = nullptr; - - size_t column_number = 0; - IColumn * column = nullptr; - - /// For LowCardinality, convert is converted to nested type. nested_type is nullptr if no conversion needed. - const DataTypePtr nested_type; /// Nested type for LowCardinality, if it is. - const DataTypePtr real_type; /// Type in header. - - AlignedBuffer state; - bool created = false; - - SimpleAggregateDescription( - AggregateFunctionPtr function_, const size_t column_number_, - DataTypePtr nested_type_, DataTypePtr real_type_) - : function(std::move(function_)), column_number(column_number_) - , nested_type(std::move(nested_type_)), real_type(std::move(real_type_)) - { - add_function = function->getAddressOfAddFunction(); - state.reset(function->sizeOfData(), function->alignOfData()); - } - - void createState() - { - if (created) - return; - function->create(state.data()); - created = true; - } - - void destroyState() - { - if (!created) - return; - function->destroy(state.data()); - created = false; - } - - /// Explicitly destroy aggregation state if the stream is terminated - ~SimpleAggregateDescription() - { - destroyState(); - } - - SimpleAggregateDescription() = default; - SimpleAggregateDescription(SimpleAggregateDescription &&) = default; - SimpleAggregateDescription(const SimpleAggregateDescription &) = delete; -}; - static AggregatingSortedAlgorithm::ColumnsDefinition defineColumns( const Block & header, const SortDescription & description) { diff --git a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h index e572ed7d526..0ffbd1262d3 100644 --- a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h @@ -1,5 +1,7 @@ #pragma once +#include +#include #include #include @@ -23,8 +25,69 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; - struct SimpleAggregateDescription; - struct AggregateDescription; + /// Stores information for aggregation of SimpleAggregateFunction columns + struct SimpleAggregateDescription + { + 
/// An aggregate function 'anyLast', 'sum'... + AggregateFunctionPtr function; + IAggregateFunction::AddFunc add_function = nullptr; + + size_t column_number = 0; + IColumn * column = nullptr; + + /// For LowCardinality, convert is converted to nested type. nested_type is nullptr if no conversion needed. + const DataTypePtr nested_type; /// Nested type for LowCardinality, if it is. + const DataTypePtr real_type; /// Type in header. + + AlignedBuffer state; + bool created = false; + + SimpleAggregateDescription( + AggregateFunctionPtr function_, const size_t column_number_, + DataTypePtr nested_type_, DataTypePtr real_type_) + : function(std::move(function_)), column_number(column_number_) + , nested_type(std::move(nested_type_)), real_type(std::move(real_type_)) + { + add_function = function->getAddressOfAddFunction(); + state.reset(function->sizeOfData(), function->alignOfData()); + } + + void createState() + { + if (created) + return; + function->create(state.data()); + created = true; + } + + void destroyState() + { + if (!created) + return; + function->destroy(state.data()); + created = false; + } + + /// Explicitly destroy aggregation state if the stream is terminated + ~SimpleAggregateDescription() + { + destroyState(); + } + + SimpleAggregateDescription() = default; + SimpleAggregateDescription(SimpleAggregateDescription &&) = default; + SimpleAggregateDescription(const SimpleAggregateDescription &) = delete; + }; + + /// Stores information for aggregation of AggregateFunction columns + struct AggregateDescription + { + ColumnAggregateFunction * column = nullptr; + const size_t column_number = 0; /// Position in header. + + AggregateDescription() = default; + explicit AggregateDescription(size_t col_number) : column_number(col_number) {} + }; /// This structure define columns into one of three types: /// * columns which are not aggregate functions and not needed to be aggregated diff --git a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp index 8636813132d..c79c667a988 100644 --- a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp @@ -23,10 +23,6 @@ namespace ErrorCodes extern const int CORRUPTED_DATA; } -SummingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition() = default; -SummingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition(ColumnsDefinition &&) noexcept = default; -SummingSortedAlgorithm::ColumnsDefinition::~ColumnsDefinition() = default; - /// Stores numbers of key-columns and value-columns. 
struct SummingSortedAlgorithm::MapDescription { @@ -777,4 +773,8 @@ IMergingAlgorithm::Status SummingSortedAlgorithm::merge() return Status(merged_data.pull(), true); } +SummingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition() = default; +SummingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition(ColumnsDefinition &&) noexcept = default; +SummingSortedAlgorithm::ColumnsDefinition::~ColumnsDefinition() = default; + } From cdae7082a30da62e8b2df4430360887494a44148 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 20 Oct 2022 11:09:32 +0000 Subject: [PATCH 020/112] Up-inline ShardPartition, ShardPartitionPiece and TaskCluster --- programs/copier/CMakeLists.txt | 5 +- programs/copier/ShardPartition.cpp | 63 +++++++++++++++++++++ programs/copier/ShardPartition.h | 57 +------------------ programs/copier/ShardPartitionPiece.cpp | 56 +++++++++++++++++++ programs/copier/ShardPartitionPiece.h | 53 +----------------- programs/copier/TaskCluster.cpp | 74 +++++++++++++++++++++++++ programs/copier/TaskCluster.h | 66 +--------------------- 7 files changed, 200 insertions(+), 174 deletions(-) create mode 100644 programs/copier/ShardPartition.cpp create mode 100644 programs/copier/ShardPartitionPiece.cpp create mode 100644 programs/copier/TaskCluster.cpp diff --git a/programs/copier/CMakeLists.txt b/programs/copier/CMakeLists.txt index 57e0996ed78..394983d51b2 100644 --- a/programs/copier/CMakeLists.txt +++ b/programs/copier/CMakeLists.txt @@ -1,7 +1,10 @@ set(CLICKHOUSE_COPIER_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp") + "${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartition.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartitionPiece.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/TaskCluster.cpp") set (CLICKHOUSE_COPIER_LINK PRIVATE diff --git a/programs/copier/ShardPartition.cpp b/programs/copier/ShardPartition.cpp new file mode 100644 index 00000000000..fb482163377 --- /dev/null +++ b/programs/copier/ShardPartition.cpp @@ -0,0 +1,63 @@ +#include "ShardPartition.h" + +namespace DB +{ + +ShardPartition::ShardPartition(TaskShard &parent, String name_quoted_, size_t number_of_splits) + : task_shard(parent), name(std::move(name_quoted_)) { pieces.reserve(number_of_splits); } + +String ShardPartition::getPartitionCleanStartPath() const +{ + return getPartitionPath() + "/clean_start"; +} + +String ShardPartition::getPartitionPieceCleanStartPath(size_t current_piece_number) const +{ + assert(current_piece_number < task_shard.task_table.number_of_splits); + return getPartitionPiecePath(current_piece_number) + "/clean_start"; +} + +String ShardPartition::getPartitionPath() const +{ + return task_shard.task_table.getPartitionPath(name); +} + +String ShardPartition::getPartitionPiecePath(size_t current_piece_number) const +{ + assert(current_piece_number < task_shard.task_table.number_of_splits); + return task_shard.task_table.getPartitionPiecePath(name, current_piece_number); +} + +String ShardPartition::getShardStatusPath() const +{ + // schema: //tables///shards/ + // e.g. 
/root/table_test.hits/201701/shards/1 + return getPartitionShardsPath() + "/" + toString(task_shard.numberInCluster()); +} + +String ShardPartition::getPartitionShardsPath() const +{ + return getPartitionPath() + "/shards"; +} + +String ShardPartition::getPartitionActiveWorkersPath() const +{ + return getPartitionPath() + "/partition_active_workers"; +} + +String ShardPartition::getActiveWorkerPath() const +{ + return getPartitionActiveWorkersPath() + "/" + toString(task_shard.numberInCluster()); +} + +String ShardPartition::getCommonPartitionIsDirtyPath() const +{ + return getPartitionPath() + "/is_dirty"; +} + +String ShardPartition::getCommonPartitionIsCleanedPath() const +{ + return getCommonPartitionIsDirtyPath() + "/cleaned"; +} + +} diff --git a/programs/copier/ShardPartition.h b/programs/copier/ShardPartition.h index 7de381977f9..a78790c0db1 100644 --- a/programs/copier/ShardPartition.h +++ b/programs/copier/ShardPartition.h @@ -12,8 +12,7 @@ namespace DB /// This class describes a partition (name) that is stored on the shard (parent). struct ShardPartition { - ShardPartition(TaskShard &parent, String name_quoted_, size_t number_of_splits = 10) - : task_shard(parent), name(std::move(name_quoted_)) { pieces.reserve(number_of_splits); } + ShardPartition(TaskShard &parent, String name_quoted_, size_t number_of_splits = 10); String getPartitionPath() const; @@ -45,58 +44,4 @@ struct ShardPartition String name; }; -inline String ShardPartition::getPartitionCleanStartPath() const -{ - return getPartitionPath() + "/clean_start"; -} - -inline String ShardPartition::getPartitionPieceCleanStartPath(size_t current_piece_number) const -{ - assert(current_piece_number < task_shard.task_table.number_of_splits); - return getPartitionPiecePath(current_piece_number) + "/clean_start"; -} - -inline String ShardPartition::getPartitionPath() const -{ - return task_shard.task_table.getPartitionPath(name); -} - -inline String ShardPartition::getPartitionPiecePath(size_t current_piece_number) const -{ - assert(current_piece_number < task_shard.task_table.number_of_splits); - return task_shard.task_table.getPartitionPiecePath(name, current_piece_number); -} - -inline String ShardPartition::getShardStatusPath() const -{ - // schema: //tables/
//shards/ - // e.g. /root/table_test.hits/201701/shards/1 - return getPartitionShardsPath() + "/" + toString(task_shard.numberInCluster()); -} - -inline String ShardPartition::getPartitionShardsPath() const -{ - return getPartitionPath() + "/shards"; -} - -inline String ShardPartition::getPartitionActiveWorkersPath() const -{ - return getPartitionPath() + "/partition_active_workers"; -} - -inline String ShardPartition::getActiveWorkerPath() const -{ - return getPartitionActiveWorkersPath() + "/" + toString(task_shard.numberInCluster()); -} - -inline String ShardPartition::getCommonPartitionIsDirtyPath() const -{ - return getPartitionPath() + "/is_dirty"; -} - -inline String ShardPartition::getCommonPartitionIsCleanedPath() const -{ - return getCommonPartitionIsDirtyPath() + "/cleaned"; -} - } diff --git a/programs/copier/ShardPartitionPiece.cpp b/programs/copier/ShardPartitionPiece.cpp new file mode 100644 index 00000000000..c060a955c9a --- /dev/null +++ b/programs/copier/ShardPartitionPiece.cpp @@ -0,0 +1,56 @@ +#include "ShardPartitionPiece.h" + +namespace DB +{ + +ShardPartitionPiece::ShardPartitionPiece(ShardPartition &parent, size_t current_piece_number_, bool is_present_piece_) + : is_absent_piece(!is_present_piece_), current_piece_number(current_piece_number_), + shard_partition(parent) {} + +String ShardPartitionPiece::getPartitionPiecePath() const +{ + return shard_partition.getPartitionPath() + "/piece_" + toString(current_piece_number); +} + +String ShardPartitionPiece::getPartitionPieceCleanStartPath() const +{ + return getPartitionPiecePath() + "/clean_start"; +} + +String ShardPartitionPiece::getPartitionPieceIsDirtyPath() const +{ + return getPartitionPiecePath() + "/is_dirty"; +} + +String ShardPartitionPiece::getPartitionPieceIsCleanedPath() const +{ + return getPartitionPieceIsDirtyPath() + "/cleaned"; +} + +String ShardPartitionPiece::getPartitionPieceActiveWorkersPath() const +{ + return getPartitionPiecePath() + "/partition_piece_active_workers"; +} + +String ShardPartitionPiece::getActiveWorkerPath() const +{ + return getPartitionPieceActiveWorkersPath() + "/" + toString(shard_partition.task_shard.numberInCluster()); +} + +/// On what shards do we have current partition. 
+String ShardPartitionPiece::getPartitionPieceShardsPath() const +{ + return getPartitionPiecePath() + "/shards"; +} + +String ShardPartitionPiece::getShardStatusPath() const +{ + return getPartitionPieceShardsPath() + "/" + toString(shard_partition.task_shard.numberInCluster()); +} + +String ShardPartitionPiece::getPartitionPieceCleanerPath() const +{ + return getPartitionPieceIsDirtyPath() + "/cleaner"; +} + +} diff --git a/programs/copier/ShardPartitionPiece.h b/programs/copier/ShardPartitionPiece.h index a21fd531da4..243cb7439b8 100644 --- a/programs/copier/ShardPartitionPiece.h +++ b/programs/copier/ShardPartitionPiece.h @@ -7,10 +7,7 @@ namespace DB struct ShardPartitionPiece { - - ShardPartitionPiece(ShardPartition &parent, size_t current_piece_number_, bool is_present_piece_) - : is_absent_piece(!is_present_piece_), current_piece_number(current_piece_number_), - shard_partition(parent) {} + ShardPartitionPiece(ShardPartition &parent, size_t current_piece_number_, bool is_present_piece_); String getPartitionPiecePath() const; @@ -37,52 +34,4 @@ struct ShardPartitionPiece ShardPartition & shard_partition; }; - -inline String ShardPartitionPiece::getPartitionPiecePath() const -{ - return shard_partition.getPartitionPath() + "/piece_" + toString(current_piece_number); -} - -inline String ShardPartitionPiece::getPartitionPieceCleanStartPath() const -{ - return getPartitionPiecePath() + "/clean_start"; -} - -inline String ShardPartitionPiece::getPartitionPieceIsDirtyPath() const -{ - return getPartitionPiecePath() + "/is_dirty"; -} - -inline String ShardPartitionPiece::getPartitionPieceIsCleanedPath() const -{ - return getPartitionPieceIsDirtyPath() + "/cleaned"; -} - -inline String ShardPartitionPiece::getPartitionPieceActiveWorkersPath() const -{ - return getPartitionPiecePath() + "/partition_piece_active_workers"; -} - -inline String ShardPartitionPiece::getActiveWorkerPath() const -{ - return getPartitionPieceActiveWorkersPath() + "/" + toString(shard_partition.task_shard.numberInCluster()); -} - -/// On what shards do we have current partition. -inline String ShardPartitionPiece::getPartitionPieceShardsPath() const -{ - return getPartitionPiecePath() + "/shards"; -} - -inline String ShardPartitionPiece::getShardStatusPath() const -{ - return getPartitionPieceShardsPath() + "/" + toString(shard_partition.task_shard.numberInCluster()); -} - -inline String ShardPartitionPiece::getPartitionPieceCleanerPath() const -{ - return getPartitionPieceIsDirtyPath() + "/cleaner"; -} - - } diff --git a/programs/copier/TaskCluster.cpp b/programs/copier/TaskCluster.cpp new file mode 100644 index 00000000000..6b7911f56f2 --- /dev/null +++ b/programs/copier/TaskCluster.cpp @@ -0,0 +1,74 @@ +#include "TaskCluster.h" + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +TaskCluster::TaskCluster(const String & task_zookeeper_path_, const String & default_local_database_) + : task_zookeeper_path(task_zookeeper_path_) + , default_local_database(default_local_database_) +{} + +void DB::TaskCluster::loadTasks(const Poco::Util::AbstractConfiguration & config, const String & base_key) +{ + String prefix = base_key.empty() ? 
"" : base_key + "."; + + clusters_prefix = prefix + "remote_servers"; + if (!config.has(clusters_prefix)) + throw Exception("You should specify list of clusters in " + clusters_prefix, ErrorCodes::BAD_ARGUMENTS); + + Poco::Util::AbstractConfiguration::Keys tables_keys; + config.keys(prefix + "tables", tables_keys); + + for (const auto & table_key : tables_keys) + { + table_tasks.emplace_back(*this, config, prefix + "tables", table_key); + } +} + +void DB::TaskCluster::reloadSettings(const Poco::Util::AbstractConfiguration & config, const String & base_key) +{ + String prefix = base_key.empty() ? "" : base_key + "."; + + max_workers = config.getUInt64(prefix + "max_workers"); + + settings_common = Settings(); + if (config.has(prefix + "settings")) + settings_common.loadSettingsFromConfig(prefix + "settings", config); + + settings_common.prefer_localhost_replica = 0; + + settings_pull = settings_common; + if (config.has(prefix + "settings_pull")) + settings_pull.loadSettingsFromConfig(prefix + "settings_pull", config); + + settings_push = settings_common; + if (config.has(prefix + "settings_push")) + settings_push.loadSettingsFromConfig(prefix + "settings_push", config); + + auto set_default_value = [] (auto && setting, auto && default_value) + { + setting = setting.changed ? setting.value : default_value; + }; + + /// Override important settings + settings_pull.readonly = 1; + settings_pull.prefer_localhost_replica = false; + settings_push.insert_distributed_sync = true; + settings_push.prefer_localhost_replica = false; + + set_default_value(settings_pull.load_balancing, LoadBalancing::NEAREST_HOSTNAME); + set_default_value(settings_pull.max_threads, 1); + set_default_value(settings_pull.max_block_size, 8192UL); + set_default_value(settings_pull.preferred_block_size_bytes, 0); + + set_default_value(settings_push.insert_distributed_timeout, 0); + set_default_value(settings_push.replication_alter_partitions_sync, 2); +} + +} + diff --git a/programs/copier/TaskCluster.h b/programs/copier/TaskCluster.h index 7d8f01ba15f..c59c7bff4d7 100644 --- a/programs/copier/TaskCluster.h +++ b/programs/copier/TaskCluster.h @@ -5,17 +5,10 @@ namespace DB { -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} struct TaskCluster { - TaskCluster(const String & task_zookeeper_path_, const String & default_local_database_) - : task_zookeeper_path(task_zookeeper_path_) - , default_local_database(default_local_database_) - {} + TaskCluster(const String & task_zookeeper_path_, const String & default_local_database_); void loadTasks(const Poco::Util::AbstractConfiguration & config, const String & base_key = ""); @@ -50,61 +43,4 @@ struct TaskCluster pcg64 random_engine; }; -inline void DB::TaskCluster::loadTasks(const Poco::Util::AbstractConfiguration & config, const String & base_key) -{ - String prefix = base_key.empty() ? "" : base_key + "."; - - clusters_prefix = prefix + "remote_servers"; - if (!config.has(clusters_prefix)) - throw Exception("You should specify list of clusters in " + clusters_prefix, ErrorCodes::BAD_ARGUMENTS); - - Poco::Util::AbstractConfiguration::Keys tables_keys; - config.keys(prefix + "tables", tables_keys); - - for (const auto & table_key : tables_keys) - { - table_tasks.emplace_back(*this, config, prefix + "tables", table_key); - } -} - -inline void DB::TaskCluster::reloadSettings(const Poco::Util::AbstractConfiguration & config, const String & base_key) -{ - String prefix = base_key.empty() ? 
"" : base_key + "."; - - max_workers = config.getUInt64(prefix + "max_workers"); - - settings_common = Settings(); - if (config.has(prefix + "settings")) - settings_common.loadSettingsFromConfig(prefix + "settings", config); - - settings_common.prefer_localhost_replica = 0; - - settings_pull = settings_common; - if (config.has(prefix + "settings_pull")) - settings_pull.loadSettingsFromConfig(prefix + "settings_pull", config); - - settings_push = settings_common; - if (config.has(prefix + "settings_push")) - settings_push.loadSettingsFromConfig(prefix + "settings_push", config); - - auto set_default_value = [] (auto && setting, auto && default_value) - { - setting = setting.changed ? setting.value : default_value; - }; - - /// Override important settings - settings_pull.readonly = 1; - settings_pull.prefer_localhost_replica = false; - settings_push.insert_distributed_sync = true; - settings_push.prefer_localhost_replica = false; - - set_default_value(settings_pull.load_balancing, LoadBalancing::NEAREST_HOSTNAME); - set_default_value(settings_pull.max_threads, 1); - set_default_value(settings_pull.max_block_size, 8192UL); - set_default_value(settings_pull.preferred_block_size_bytes, 0); - - set_default_value(settings_push.insert_distributed_timeout, 0); - set_default_value(settings_push.replication_alter_partitions_sync, 2); -} - } From f6de964eaa8dd4f14bcfbb28a999a9ae7bed384f Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 20 Oct 2022 11:20:18 +0000 Subject: [PATCH 021/112] Fix static_assert in random generator --- programs/copier/TaskTableAndShard.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/copier/TaskTableAndShard.h b/programs/copier/TaskTableAndShard.h index cef9b669971..e643d66d1c6 100644 --- a/programs/copier/TaskTableAndShard.h +++ b/programs/copier/TaskTableAndShard.h @@ -361,7 +361,7 @@ template inline void TaskTable::initShards(RandomEngine && random_engine) { const String & fqdn_name = getFQDNOrHostName(); - std::uniform_int_distribution get_urand(0, std::numeric_limits::max()); + std::uniform_int_distribution get_urand(0, std::numeric_limits::max()); // Compute the priority for (const auto & shard_info : cluster_pull->getShardsInfo()) From 0df9d2f5ccd8f88e967e830d6a7bf5c9e07e830e Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 20 Oct 2022 13:41:27 +0000 Subject: [PATCH 022/112] Fix tests. --- src/Functions/FunctionsConversion.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index ebf8417aea6..b801cc070b6 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -2209,16 +2209,16 @@ struct ToNumberMonotonicity /// Integer cases. - /// Do not support 128 bit integers and decimals for now. - if (!isNativeInteger(type)) - return {}; - const bool from_is_unsigned = type.isValueRepresentedByUnsignedInteger(); const bool to_is_unsigned = is_unsigned_v; const size_t size_of_from = type.getSizeOfValueInMemory(); const size_t size_of_to = sizeof(T); + /// Do not support 128 bit integers and decimals for now. + if (size_of_from > sizeof(Int64)) + return {}; + const bool left_in_first_half = left.isNull() ? 
from_is_unsigned : (left.get() >= 0); From df318d871964c30f0c303ba50077c491c54d89b3 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 20 Oct 2022 11:45:15 +0000 Subject: [PATCH 023/112] Un-inline TaskTableAndShard --- programs/copier/CMakeLists.txt | 3 +- programs/copier/TaskTableAndShard.cpp | 241 ++++++++++++++++++++++++++ programs/copier/TaskTableAndShard.h | 229 +----------------------- 3 files changed, 249 insertions(+), 224 deletions(-) create mode 100644 programs/copier/TaskTableAndShard.cpp diff --git a/programs/copier/CMakeLists.txt b/programs/copier/CMakeLists.txt index 394983d51b2..69019843d05 100644 --- a/programs/copier/CMakeLists.txt +++ b/programs/copier/CMakeLists.txt @@ -4,7 +4,8 @@ set(CLICKHOUSE_COPIER_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartition.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartitionPiece.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/TaskCluster.cpp") + "${CMAKE_CURRENT_SOURCE_DIR}/TaskCluster.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/TaskTableAndShard.cpp") set (CLICKHOUSE_COPIER_LINK PRIVATE diff --git a/programs/copier/TaskTableAndShard.cpp b/programs/copier/TaskTableAndShard.cpp new file mode 100644 index 00000000000..36aff4f4cd8 --- /dev/null +++ b/programs/copier/TaskTableAndShard.cpp @@ -0,0 +1,241 @@ +#include "TaskTableAndShard.h" + +namespace DB +{ +namespace ErrorCodes +{ + extern const int UNKNOWN_ELEMENT_IN_CONFIG; + extern const int LOGICAL_ERROR; +} + +TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, + const String & prefix_, const String & table_key) + : task_cluster(parent) +{ + String table_prefix = prefix_ + "." + table_key + "."; + + name_in_config = table_key; + + number_of_splits = config.getUInt64(table_prefix + "number_of_splits", 3); + + allow_to_copy_alias_and_materialized_columns = config.getBool(table_prefix + "allow_to_copy_alias_and_materialized_columns", false); + allow_to_drop_target_partitions = config.getBool(table_prefix + "allow_to_drop_target_partitions", false); + + cluster_pull_name = config.getString(table_prefix + "cluster_pull"); + cluster_push_name = config.getString(table_prefix + "cluster_push"); + + table_pull.first = config.getString(table_prefix + "database_pull"); + table_pull.second = config.getString(table_prefix + "table_pull"); + + table_push.first = config.getString(table_prefix + "database_push"); + table_push.second = config.getString(table_prefix + "table_push"); + + /// Used as node name in ZooKeeper + table_id = escapeForFileName(cluster_push_name) + + "." + escapeForFileName(table_push.first) + + "." 
+ escapeForFileName(table_push.second); + + engine_push_str = config.getString(table_prefix + "engine", "rand()"); + + { + ParserStorage parser_storage; + engine_push_ast = parseQuery(parser_storage, engine_push_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + engine_push_partition_key_ast = extractPartitionKey(engine_push_ast); + primary_key_comma_separated = boost::algorithm::join(extractPrimaryKeyColumnNames(engine_push_ast), ", "); + is_replicated_table = isReplicatedTableEngine(engine_push_ast); + } + + sharding_key_str = config.getString(table_prefix + "sharding_key"); + + auxiliary_engine_split_asts.reserve(number_of_splits); + { + ParserExpressionWithOptionalAlias parser_expression(false); + sharding_key_ast = parseQuery(parser_expression, sharding_key_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + main_engine_split_ast = createASTStorageDistributed(cluster_push_name, table_push.first, table_push.second, + sharding_key_ast); + + for (const auto piece_number : collections::range(0, number_of_splits)) + { + auxiliary_engine_split_asts.emplace_back + ( + createASTStorageDistributed(cluster_push_name, table_push.first, + table_push.second + "_piece_" + toString(piece_number), sharding_key_ast) + ); + } + } + + where_condition_str = config.getString(table_prefix + "where_condition", ""); + if (!where_condition_str.empty()) + { + ParserExpressionWithOptionalAlias parser_expression(false); + where_condition_ast = parseQuery(parser_expression, where_condition_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + + // Will use canonical expression form + where_condition_str = queryToString(where_condition_ast); + } + + String enabled_partitions_prefix = table_prefix + "enabled_partitions"; + has_enabled_partitions = config.has(enabled_partitions_prefix); + + if (has_enabled_partitions) + { + Strings keys; + config.keys(enabled_partitions_prefix, keys); + + if (keys.empty()) + { + /// Parse list of partition from space-separated string + String partitions_str = config.getString(table_prefix + "enabled_partitions"); + boost::trim_if(partitions_str, isWhitespaceASCII); + boost::split(enabled_partitions, partitions_str, isWhitespaceASCII, boost::token_compress_on); + } + else + { + /// Parse sequence of ... + for (const String &key : keys) + { + if (!startsWith(key, "partition")) + throw Exception("Unknown key " + key + " in " + enabled_partitions_prefix, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG); + + enabled_partitions.emplace_back(config.getString(enabled_partitions_prefix + "." 
+ key)); + } + } + + std::copy(enabled_partitions.begin(), enabled_partitions.end(), std::inserter(enabled_partitions_set, enabled_partitions_set.begin())); + } +} + + +String TaskTable::getPartitionPath(const String & partition_name) const +{ + return task_cluster.task_zookeeper_path // root + + "/tables/" + table_id // tables/dst_cluster.merge.hits + + "/" + escapeForFileName(partition_name); // 201701 +} + +String TaskTable::getPartitionAttachIsActivePath(const String & partition_name) const +{ + return getPartitionPath(partition_name) + "/attach_active"; +} + +String TaskTable::getPartitionAttachIsDonePath(const String & partition_name) const +{ + return getPartitionPath(partition_name) + "/attach_is_done"; +} + +String TaskTable::getPartitionPiecePath(const String & partition_name, size_t piece_number) const +{ + assert(piece_number < number_of_splits); + return getPartitionPath(partition_name) + "/piece_" + toString(piece_number); // 1...number_of_splits +} + +String TaskTable::getCertainPartitionIsDirtyPath(const String &partition_name) const +{ + return getPartitionPath(partition_name) + "/is_dirty"; +} + +String TaskTable::getCertainPartitionPieceIsDirtyPath(const String & partition_name, const size_t piece_number) const +{ + return getPartitionPiecePath(partition_name, piece_number) + "/is_dirty"; +} + +String TaskTable::getCertainPartitionIsCleanedPath(const String & partition_name) const +{ + return getCertainPartitionIsDirtyPath(partition_name) + "/cleaned"; +} + +String TaskTable::getCertainPartitionPieceIsCleanedPath(const String & partition_name, const size_t piece_number) const +{ + return getCertainPartitionPieceIsDirtyPath(partition_name, piece_number) + "/cleaned"; +} + +String TaskTable::getCertainPartitionTaskStatusPath(const String & partition_name) const +{ + return getPartitionPath(partition_name) + "/shards"; +} + +String TaskTable::getCertainPartitionPieceTaskStatusPath(const String & partition_name, const size_t piece_number) const +{ + return getPartitionPiecePath(partition_name, piece_number) + "/shards"; +} + +bool TaskTable::isReplicatedTable() const +{ + return is_replicated_table; +} + +String TaskTable::getStatusAllPartitionCount() const +{ + return task_cluster.task_zookeeper_path + "/status/all_partitions_count"; +} + +String TaskTable::getStatusProcessedPartitionsCount() const +{ + return task_cluster.task_zookeeper_path + "/status/processed_partitions_count"; +} + +ASTPtr TaskTable::rewriteReplicatedCreateQueryToPlain() const +{ + ASTPtr prev_engine_push_ast = engine_push_ast->clone(); + + auto & new_storage_ast = prev_engine_push_ast->as(); + auto & new_engine_ast = new_storage_ast.engine->as(); + + /// Remove "Replicated" from name + new_engine_ast.name = new_engine_ast.name.substr(10); + + if (new_engine_ast.arguments) + { + auto & replicated_table_arguments = new_engine_ast.arguments->children; + + + /// In some cases of Atomic database engine usage ReplicatedMergeTree tables + /// could be created without arguments. + if (!replicated_table_arguments.empty()) + { + /// Delete first two arguments of Replicated...MergeTree() table. 
+ replicated_table_arguments.erase(replicated_table_arguments.begin()); + replicated_table_arguments.erase(replicated_table_arguments.begin()); + } + } + + return new_storage_ast.clone(); +} + +ClusterPartition & TaskTable::getClusterPartition(const String & partition_name) +{ + auto it = cluster_partitions.find(partition_name); + if (it == cluster_partitions.end()) + throw Exception("There are no cluster partition " + partition_name + " in " + table_id, + ErrorCodes::LOGICAL_ERROR); + return it->second; +} + +TaskShard::TaskShard(TaskTable & parent, const ShardInfo & info_) : task_table(parent), info(info_) +{ + list_of_split_tables_on_shard.assign(task_table.number_of_splits, DatabaseAndTableName()); +} + +UInt32 TaskShard::numberInCluster() const +{ + return info.shard_num; +} + +UInt32 TaskShard::indexInCluster() const +{ + return info.shard_num - 1; +} + +String DB::TaskShard::getDescription() const +{ + return fmt::format("N{} (having a replica {}, pull table {} of cluster {}", + numberInCluster(), getHostNameExample(), getQuotedTable(task_table.table_pull), task_table.cluster_pull_name); +} + +String DB::TaskShard::getHostNameExample() const +{ + const auto & replicas = task_table.cluster_pull->getShardsAddresses().at(indexInCluster()); + return replicas.at(0).readableString(); +} + +} diff --git a/programs/copier/TaskTableAndShard.h b/programs/copier/TaskTableAndShard.h index e643d66d1c6..1fe5b0a255e 100644 --- a/programs/copier/TaskTableAndShard.h +++ b/programs/copier/TaskTableAndShard.h @@ -13,18 +13,12 @@ namespace DB { -namespace ErrorCodes -{ - extern const int UNKNOWN_ELEMENT_IN_CONFIG; - extern const int LOGICAL_ERROR; -} struct TaskShard; struct TaskTable { - TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, const String & prefix, - const String & table_key); + TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, const String & prefix, const String & table_key); TaskCluster & task_cluster; @@ -51,7 +45,7 @@ struct TaskTable String getCertainPartitionPieceTaskStatusPath(const String & partition_name, size_t piece_number) const; - bool isReplicatedTable() const { return is_replicated_table; } + bool isReplicatedTable() const; /// These nodes are used for check-status option String getStatusAllPartitionCount() const; @@ -136,14 +130,7 @@ struct TaskTable /// Partition names to process in user-specified order Strings ordered_partition_names; - ClusterPartition & getClusterPartition(const String & partition_name) - { - auto it = cluster_partitions.find(partition_name); - if (it == cluster_partitions.end()) - throw Exception("There are no cluster partition " + partition_name + " in " + table_id, - ErrorCodes::LOGICAL_ERROR); - return it->second; - } + ClusterPartition & getClusterPartition(const String & partition_name); Stopwatch watch; UInt64 bytes_copied = 0; @@ -156,18 +143,15 @@ struct TaskTable struct TaskShard { - TaskShard(TaskTable & parent, const ShardInfo & info_) : task_table(parent), info(info_) - { - list_of_split_tables_on_shard.assign(task_table.number_of_splits, DatabaseAndTableName()); - } + TaskShard(TaskTable & parent, const ShardInfo & info_); TaskTable & task_table; ShardInfo info; - UInt32 numberInCluster() const { return info.shard_num; } + UInt32 numberInCluster() const; - UInt32 indexInCluster() const { return info.shard_num - 1; } + UInt32 indexInCluster() const; String getDescription() const; @@ -197,166 +181,6 @@ struct TaskShard }; -inline String TaskTable::getPartitionPath(const String & 
partition_name) const -{ - return task_cluster.task_zookeeper_path // root - + "/tables/" + table_id // tables/dst_cluster.merge.hits - + "/" + escapeForFileName(partition_name); // 201701 -} - -inline String TaskTable::getPartitionAttachIsActivePath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/attach_active"; -} - -inline String TaskTable::getPartitionAttachIsDonePath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/attach_is_done"; -} - -inline String TaskTable::getPartitionPiecePath(const String & partition_name, size_t piece_number) const -{ - assert(piece_number < number_of_splits); - return getPartitionPath(partition_name) + "/piece_" + toString(piece_number); // 1...number_of_splits -} - -inline String TaskTable::getCertainPartitionIsDirtyPath(const String &partition_name) const -{ - return getPartitionPath(partition_name) + "/is_dirty"; -} - -inline String TaskTable::getCertainPartitionPieceIsDirtyPath(const String & partition_name, const size_t piece_number) const -{ - return getPartitionPiecePath(partition_name, piece_number) + "/is_dirty"; -} - -inline String TaskTable::getCertainPartitionIsCleanedPath(const String & partition_name) const -{ - return getCertainPartitionIsDirtyPath(partition_name) + "/cleaned"; -} - -inline String TaskTable::getCertainPartitionPieceIsCleanedPath(const String & partition_name, const size_t piece_number) const -{ - return getCertainPartitionPieceIsDirtyPath(partition_name, piece_number) + "/cleaned"; -} - -inline String TaskTable::getCertainPartitionTaskStatusPath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/shards"; -} - -inline String TaskTable::getCertainPartitionPieceTaskStatusPath(const String & partition_name, const size_t piece_number) const -{ - return getPartitionPiecePath(partition_name, piece_number) + "/shards"; -} - -inline String TaskTable::getStatusAllPartitionCount() const -{ - return task_cluster.task_zookeeper_path + "/status/all_partitions_count"; -} - -inline String TaskTable::getStatusProcessedPartitionsCount() const -{ - return task_cluster.task_zookeeper_path + "/status/processed_partitions_count"; -} - -inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, - const String & prefix_, const String & table_key) - : task_cluster(parent) -{ - String table_prefix = prefix_ + "." + table_key + "."; - - name_in_config = table_key; - - number_of_splits = config.getUInt64(table_prefix + "number_of_splits", 3); - - allow_to_copy_alias_and_materialized_columns = config.getBool(table_prefix + "allow_to_copy_alias_and_materialized_columns", false); - allow_to_drop_target_partitions = config.getBool(table_prefix + "allow_to_drop_target_partitions", false); - - cluster_pull_name = config.getString(table_prefix + "cluster_pull"); - cluster_push_name = config.getString(table_prefix + "cluster_push"); - - table_pull.first = config.getString(table_prefix + "database_pull"); - table_pull.second = config.getString(table_prefix + "table_pull"); - - table_push.first = config.getString(table_prefix + "database_push"); - table_push.second = config.getString(table_prefix + "table_push"); - - /// Used as node name in ZooKeeper - table_id = escapeForFileName(cluster_push_name) - + "." + escapeForFileName(table_push.first) - + "." 
+ escapeForFileName(table_push.second); - - engine_push_str = config.getString(table_prefix + "engine", "rand()"); - - { - ParserStorage parser_storage; - engine_push_ast = parseQuery(parser_storage, engine_push_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - engine_push_partition_key_ast = extractPartitionKey(engine_push_ast); - primary_key_comma_separated = boost::algorithm::join(extractPrimaryKeyColumnNames(engine_push_ast), ", "); - is_replicated_table = isReplicatedTableEngine(engine_push_ast); - } - - sharding_key_str = config.getString(table_prefix + "sharding_key"); - - auxiliary_engine_split_asts.reserve(number_of_splits); - { - ParserExpressionWithOptionalAlias parser_expression(false); - sharding_key_ast = parseQuery(parser_expression, sharding_key_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - main_engine_split_ast = createASTStorageDistributed(cluster_push_name, table_push.first, table_push.second, - sharding_key_ast); - - for (const auto piece_number : collections::range(0, number_of_splits)) - { - auxiliary_engine_split_asts.emplace_back - ( - createASTStorageDistributed(cluster_push_name, table_push.first, - table_push.second + "_piece_" + toString(piece_number), sharding_key_ast) - ); - } - } - - where_condition_str = config.getString(table_prefix + "where_condition", ""); - if (!where_condition_str.empty()) - { - ParserExpressionWithOptionalAlias parser_expression(false); - where_condition_ast = parseQuery(parser_expression, where_condition_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - - // Will use canonical expression form - where_condition_str = queryToString(where_condition_ast); - } - - String enabled_partitions_prefix = table_prefix + "enabled_partitions"; - has_enabled_partitions = config.has(enabled_partitions_prefix); - - if (has_enabled_partitions) - { - Strings keys; - config.keys(enabled_partitions_prefix, keys); - - if (keys.empty()) - { - /// Parse list of partition from space-separated string - String partitions_str = config.getString(table_prefix + "enabled_partitions"); - boost::trim_if(partitions_str, isWhitespaceASCII); - boost::split(enabled_partitions, partitions_str, isWhitespaceASCII, boost::token_compress_on); - } - else - { - /// Parse sequence of ... - for (const String &key : keys) - { - if (!startsWith(key, "partition")) - throw Exception("Unknown key " + key + " in " + enabled_partitions_prefix, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG); - - enabled_partitions.emplace_back(config.getString(enabled_partitions_prefix + "." + key)); - } - } - - std::copy(enabled_partitions.begin(), enabled_partitions.end(), std::inserter(enabled_partitions_set, enabled_partitions_set.begin())); - } -} - template inline void TaskTable::initShards(RandomEngine && random_engine) { @@ -390,45 +214,4 @@ inline void TaskTable::initShards(RandomEngine && random_engine) local_shards.assign(all_shards.begin(), it_first_remote); } -inline ASTPtr TaskTable::rewriteReplicatedCreateQueryToPlain() const -{ - ASTPtr prev_engine_push_ast = engine_push_ast->clone(); - - auto & new_storage_ast = prev_engine_push_ast->as(); - auto & new_engine_ast = new_storage_ast.engine->as(); - - /// Remove "Replicated" from name - new_engine_ast.name = new_engine_ast.name.substr(10); - - if (new_engine_ast.arguments) - { - auto & replicated_table_arguments = new_engine_ast.arguments->children; - - - /// In some cases of Atomic database engine usage ReplicatedMergeTree tables - /// could be created without arguments. 
- if (!replicated_table_arguments.empty()) - { - /// Delete first two arguments of Replicated...MergeTree() table. - replicated_table_arguments.erase(replicated_table_arguments.begin()); - replicated_table_arguments.erase(replicated_table_arguments.begin()); - } - } - - return new_storage_ast.clone(); -} - - -inline String DB::TaskShard::getDescription() const -{ - return fmt::format("N{} (having a replica {}, pull table {} of cluster {}", - numberInCluster(), getHostNameExample(), getQuotedTable(task_table.table_pull), task_table.cluster_pull_name); -} - -inline String DB::TaskShard::getHostNameExample() const -{ - const auto & replicas = task_table.cluster_pull->getShardsAddresses().at(indexInCluster()); - return replicas.at(0).readableString(); -} - } From 50c932e566cc9b268afe5709a50cb6650ead8d7a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 20 Oct 2022 13:56:40 +0000 Subject: [PATCH 024/112] Separate TaskShard and TaskTable --- programs/copier/CMakeLists.txt | 3 +- programs/copier/ClusterCopier.h | 3 +- programs/copier/ShardPartition.h | 2 +- programs/copier/TaskShard.cpp | 37 ++++++++++++ programs/copier/TaskShard.h | 58 +++++++++++++++++++ .../{TaskTableAndShard.cpp => TaskTable.cpp} | 29 +--------- .../{TaskTableAndShard.h => TaskTable.h} | 43 -------------- 7 files changed, 101 insertions(+), 74 deletions(-) create mode 100644 programs/copier/TaskShard.cpp create mode 100644 programs/copier/TaskShard.h rename programs/copier/{TaskTableAndShard.cpp => TaskTable.cpp} (90%) rename programs/copier/{TaskTableAndShard.h => TaskTable.h} (83%) diff --git a/programs/copier/CMakeLists.txt b/programs/copier/CMakeLists.txt index 69019843d05..fb4c848270f 100644 --- a/programs/copier/CMakeLists.txt +++ b/programs/copier/CMakeLists.txt @@ -5,7 +5,8 @@ set(CLICKHOUSE_COPIER_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartition.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartitionPiece.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/TaskCluster.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/TaskTableAndShard.cpp") + "${CMAKE_CURRENT_SOURCE_DIR}/TaskShard.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/TaskTable.cpp") set (CLICKHOUSE_COPIER_LINK PRIVATE diff --git a/programs/copier/ClusterCopier.h b/programs/copier/ClusterCopier.h index b354fc59eee..063b13e9078 100644 --- a/programs/copier/ClusterCopier.h +++ b/programs/copier/ClusterCopier.h @@ -3,7 +3,8 @@ #include "Aliases.h" #include "Internals.h" #include "TaskCluster.h" -#include "TaskTableAndShard.h" +#include "TaskShard.h" +#include "TaskTable.h" #include "ShardPartition.h" #include "ShardPartitionPiece.h" #include "ZooKeeperStaff.h" diff --git a/programs/copier/ShardPartition.h b/programs/copier/ShardPartition.h index a78790c0db1..3487c0e51ac 100644 --- a/programs/copier/ShardPartition.h +++ b/programs/copier/ShardPartition.h @@ -1,7 +1,7 @@ #pragma once #include "Aliases.h" -#include "TaskTableAndShard.h" +#include "TaskShard.h" namespace DB { diff --git a/programs/copier/TaskShard.cpp b/programs/copier/TaskShard.cpp new file mode 100644 index 00000000000..af21848b384 --- /dev/null +++ b/programs/copier/TaskShard.cpp @@ -0,0 +1,37 @@ +#include "TaskShard.h" + +#include "TaskTable.h" + +namespace DB +{ + +TaskShard::TaskShard(TaskTable & parent, const ShardInfo & info_) + : task_table(parent) + , info(info_) +{ + list_of_split_tables_on_shard.assign(task_table.number_of_splits, DatabaseAndTableName()); +} + +UInt32 TaskShard::numberInCluster() const +{ + return info.shard_num; +} + +UInt32 TaskShard::indexInCluster() const +{ + return info.shard_num - 1; +} + +String 
DB::TaskShard::getDescription() const +{ + return fmt::format("N{} (having a replica {}, pull table {} of cluster {}", + numberInCluster(), getHostNameExample(), getQuotedTable(task_table.table_pull), task_table.cluster_pull_name); +} + +String DB::TaskShard::getHostNameExample() const +{ + const auto & replicas = task_table.cluster_pull->getShardsAddresses().at(indexInCluster()); + return replicas.at(0).readableString(); +} + +} diff --git a/programs/copier/TaskShard.h b/programs/copier/TaskShard.h new file mode 100644 index 00000000000..970ca21612f --- /dev/null +++ b/programs/copier/TaskShard.h @@ -0,0 +1,58 @@ +#pragma once + +#include "Aliases.h" +#include "Internals.h" +#include "ClusterPartition.h" + +#include +#include + +#include +#include + + +namespace DB +{ + +struct TaskTable; + +struct TaskShard +{ + TaskShard(TaskTable & parent, const ShardInfo & info_); + + TaskTable & task_table; + + ShardInfo info; + + UInt32 numberInCluster() const; + + UInt32 indexInCluster() const; + + String getDescription() const; + + String getHostNameExample() const; + + /// Used to sort clusters by their proximity + ShardPriority priority; + + /// Column with unique destination partitions (computed from engine_push_partition_key expr.) in the shard + ColumnWithTypeAndName partition_key_column; + + /// There is a task for each destination partition + TasksPartition partition_tasks; + + /// Which partitions have been checked for existence + /// If some partition from this lists is exists, it is in partition_tasks + std::set checked_partitions; + + /// Last CREATE TABLE query of the table of the shard + ASTPtr current_pull_table_create_query; + ASTPtr current_push_table_create_query; + + /// Internal distributed tables + DatabaseAndTableName table_read_shard; + DatabaseAndTableName main_table_split_shard; + ListOfDatabasesAndTableNames list_of_split_tables_on_shard; +}; + +} diff --git a/programs/copier/TaskTableAndShard.cpp b/programs/copier/TaskTable.cpp similarity index 90% rename from programs/copier/TaskTableAndShard.cpp rename to programs/copier/TaskTable.cpp index 36aff4f4cd8..2f282842db6 100644 --- a/programs/copier/TaskTableAndShard.cpp +++ b/programs/copier/TaskTable.cpp @@ -1,4 +1,4 @@ -#include "TaskTableAndShard.h" +#include "TaskTable.h" namespace DB { @@ -211,31 +211,4 @@ ClusterPartition & TaskTable::getClusterPartition(const String & partition_name) return it->second; } -TaskShard::TaskShard(TaskTable & parent, const ShardInfo & info_) : task_table(parent), info(info_) -{ - list_of_split_tables_on_shard.assign(task_table.number_of_splits, DatabaseAndTableName()); -} - -UInt32 TaskShard::numberInCluster() const -{ - return info.shard_num; -} - -UInt32 TaskShard::indexInCluster() const -{ - return info.shard_num - 1; -} - -String DB::TaskShard::getDescription() const -{ - return fmt::format("N{} (having a replica {}, pull table {} of cluster {}", - numberInCluster(), getHostNameExample(), getQuotedTable(task_table.table_pull), task_table.cluster_pull_name); -} - -String DB::TaskShard::getHostNameExample() const -{ - const auto & replicas = task_table.cluster_pull->getShardsAddresses().at(indexInCluster()); - return replicas.at(0).readableString(); -} - } diff --git a/programs/copier/TaskTableAndShard.h b/programs/copier/TaskTable.h similarity index 83% rename from programs/copier/TaskTableAndShard.h rename to programs/copier/TaskTable.h index 1fe5b0a255e..32b8759cec1 100644 --- a/programs/copier/TaskTableAndShard.h +++ b/programs/copier/TaskTable.h @@ -14,8 +14,6 @@ namespace DB { 
-struct TaskShard; - struct TaskTable { TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, const String & prefix, const String & table_key); @@ -140,47 +138,6 @@ struct TaskTable void initShards(RandomEngine &&random_engine); }; - -struct TaskShard -{ - TaskShard(TaskTable & parent, const ShardInfo & info_); - - TaskTable & task_table; - - ShardInfo info; - - UInt32 numberInCluster() const; - - UInt32 indexInCluster() const; - - String getDescription() const; - - String getHostNameExample() const; - - /// Used to sort clusters by their proximity - ShardPriority priority; - - /// Column with unique destination partitions (computed from engine_push_partition_key expr.) in the shard - ColumnWithTypeAndName partition_key_column; - - /// There is a task for each destination partition - TasksPartition partition_tasks; - - /// Which partitions have been checked for existence - /// If some partition from this lists is exists, it is in partition_tasks - std::set checked_partitions; - - /// Last CREATE TABLE query of the table of the shard - ASTPtr current_pull_table_create_query; - ASTPtr current_push_table_create_query; - - /// Internal distributed tables - DatabaseAndTableName table_read_shard; - DatabaseAndTableName main_table_split_shard; - ListOfDatabasesAndTableNames list_of_split_tables_on_shard; -}; - - template inline void TaskTable::initShards(RandomEngine && random_engine) { From f435e4fbe2c270ae9cbf5b54ede38045d0398f37 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 20 Oct 2022 14:37:27 +0000 Subject: [PATCH 025/112] Un-clusterf$ck incomplete type errors --- programs/copier/Aliases.h | 23 ++------- programs/copier/CMakeLists.txt | 1 + programs/copier/ClusterPartition.h | 25 ++++++---- programs/copier/ShardPartition.cpp | 11 ++++- programs/copier/ShardPartition.h | 11 ++++- programs/copier/ShardPartitionPiece.cpp | 14 ++++-- programs/copier/ShardPartitionPiece.h | 8 +++- programs/copier/StatusAccumulator.cpp | 48 +++++++++++++++++++ programs/copier/StatusAccumulator.h | 62 +++++-------------------- programs/copier/TaskCluster.h | 8 +++- programs/copier/TaskShard.cpp | 2 +- programs/copier/TaskShard.h | 14 +++--- programs/copier/TaskTable.cpp | 7 +++ programs/copier/TaskTable.h | 15 +++--- 14 files changed, 144 insertions(+), 105 deletions(-) create mode 100644 programs/copier/StatusAccumulator.cpp diff --git a/programs/copier/Aliases.h b/programs/copier/Aliases.h index c4d9c40d9f1..02be3441acd 100644 --- a/programs/copier/Aliases.h +++ b/programs/copier/Aliases.h @@ -1,6 +1,10 @@ #pragma once -#include +#include + +#include + +#include namespace DB { @@ -8,21 +12,4 @@ namespace DB using DatabaseAndTableName = std::pair; using ListOfDatabasesAndTableNames = std::vector; - - /// Hierarchical description of the tasks - struct ShardPartitionPiece; - struct ShardPartition; - struct TaskShard; - struct TaskTable; - struct TaskCluster; - struct ClusterPartition; - - using PartitionPieces = std::vector; - using TasksPartition = std::map>; - using ShardInfo = Cluster::ShardInfo; - using TaskShardPtr = std::shared_ptr; - using TasksShard = std::vector; - using TasksTable = std::list; - using ClusterPartitions = std::map>; } - diff --git a/programs/copier/CMakeLists.txt b/programs/copier/CMakeLists.txt index fb4c848270f..2c17e70bc5e 100644 --- a/programs/copier/CMakeLists.txt +++ b/programs/copier/CMakeLists.txt @@ -4,6 +4,7 @@ set(CLICKHOUSE_COPIER_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartition.cpp" 
"${CMAKE_CURRENT_SOURCE_DIR}/ShardPartitionPiece.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/StatusAccumulator.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/TaskCluster.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/TaskShard.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/TaskTable.cpp") diff --git a/programs/copier/ClusterPartition.h b/programs/copier/ClusterPartition.h index ed69bfa8c26..22063989e22 100644 --- a/programs/copier/ClusterPartition.h +++ b/programs/copier/ClusterPartition.h @@ -1,17 +1,22 @@ #pragma once -#include "Aliases.h" +#include +#include namespace DB { - /// Contains info about all shards that contain a partition - struct ClusterPartition - { - double elapsed_time_seconds = 0; - UInt64 bytes_copied = 0; - UInt64 rows_copied = 0; - UInt64 blocks_copied = 0; - UInt64 total_tries = 0; - }; +/// Contains info about all shards that contain a partition +struct ClusterPartition +{ + double elapsed_time_seconds = 0; + UInt64 bytes_copied = 0; + UInt64 rows_copied = 0; + UInt64 blocks_copied = 0; + + UInt64 total_tries = 0; +}; + +using ClusterPartitions = std::map>; + } diff --git a/programs/copier/ShardPartition.cpp b/programs/copier/ShardPartition.cpp index fb482163377..4c962fc807d 100644 --- a/programs/copier/ShardPartition.cpp +++ b/programs/copier/ShardPartition.cpp @@ -1,10 +1,17 @@ #include "ShardPartition.h" +#include "TaskShard.h" +#include "TaskTable.h" + namespace DB { -ShardPartition::ShardPartition(TaskShard &parent, String name_quoted_, size_t number_of_splits) - : task_shard(parent), name(std::move(name_quoted_)) { pieces.reserve(number_of_splits); } +ShardPartition::ShardPartition(TaskShard & parent, String name_quoted_, size_t number_of_splits) + : task_shard(parent) + , name(std::move(name_quoted_)) +{ + pieces.reserve(number_of_splits); +} String ShardPartition::getPartitionCleanStartPath() const { diff --git a/programs/copier/ShardPartition.h b/programs/copier/ShardPartition.h index 3487c0e51ac..2457213733c 100644 --- a/programs/copier/ShardPartition.h +++ b/programs/copier/ShardPartition.h @@ -1,11 +1,16 @@ #pragma once -#include "Aliases.h" -#include "TaskShard.h" +#include "ShardPartitionPiece.h" + +#include + +#include namespace DB { +struct TaskShard; + /// Just destination partition of a shard /// I don't know what this comment means. 
/// In short, when we discovered what shards contain currently processing partition, @@ -44,4 +49,6 @@ struct ShardPartition String name; }; +using TasksPartition = std::map>; + } diff --git a/programs/copier/ShardPartitionPiece.cpp b/programs/copier/ShardPartitionPiece.cpp index c060a955c9a..36d1621e012 100644 --- a/programs/copier/ShardPartitionPiece.cpp +++ b/programs/copier/ShardPartitionPiece.cpp @@ -1,11 +1,19 @@ #include "ShardPartitionPiece.h" +#include "ShardPartition.h" +#include "TaskShard.h" + +#include + namespace DB { -ShardPartitionPiece::ShardPartitionPiece(ShardPartition &parent, size_t current_piece_number_, bool is_present_piece_) - : is_absent_piece(!is_present_piece_), current_piece_number(current_piece_number_), - shard_partition(parent) {} +ShardPartitionPiece::ShardPartitionPiece(ShardPartition & parent, size_t current_piece_number_, bool is_present_piece_) + : is_absent_piece(!is_present_piece_) + , current_piece_number(current_piece_number_) + , shard_partition(parent) +{ +} String ShardPartitionPiece::getPartitionPiecePath() const { diff --git a/programs/copier/ShardPartitionPiece.h b/programs/copier/ShardPartitionPiece.h index 243cb7439b8..aba378d466d 100644 --- a/programs/copier/ShardPartitionPiece.h +++ b/programs/copier/ShardPartitionPiece.h @@ -1,13 +1,15 @@ #pragma once -#include "Internals.h" +#include namespace DB { +struct ShardPartition; + struct ShardPartitionPiece { - ShardPartitionPiece(ShardPartition &parent, size_t current_piece_number_, bool is_present_piece_); + ShardPartitionPiece(ShardPartition & parent, size_t current_piece_number_, bool is_present_piece_); String getPartitionPiecePath() const; @@ -34,4 +36,6 @@ struct ShardPartitionPiece ShardPartition & shard_partition; }; +using PartitionPieces = std::vector; + } diff --git a/programs/copier/StatusAccumulator.cpp b/programs/copier/StatusAccumulator.cpp new file mode 100644 index 00000000000..77adeac708c --- /dev/null +++ b/programs/copier/StatusAccumulator.cpp @@ -0,0 +1,48 @@ +#include "StatusAccumulator.h" + +#include +#include +#include +#include + +#include + +namespace DB +{ + +StatusAccumulator::MapPtr StatusAccumulator::fromJSON(String state_json) +{ + Poco::JSON::Parser parser; + auto state = parser.parse(state_json).extract(); + MapPtr result_ptr = std::make_shared(); + for (const auto & table_name : state->getNames()) + { + auto table_status_json = state->getValue(table_name); + auto table_status = parser.parse(table_status_json).extract(); + /// Map entry will be created if it is absent + auto & map_table_status = (*result_ptr)[table_name]; + map_table_status.all_partitions_count += table_status->getValue("all_partitions_count"); + map_table_status.processed_partitions_count += table_status->getValue("processed_partitions_count"); + } + return result_ptr; +} + +String StatusAccumulator::serializeToJSON(MapPtr statuses) +{ + Poco::JSON::Object result_json; + for (const auto & [table_name, table_status] : *statuses) + { + Poco::JSON::Object status_json; + status_json.set("all_partitions_count", table_status.all_partitions_count); + status_json.set("processed_partitions_count", table_status.processed_partitions_count); + + result_json.set(table_name, status_json); + } + std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM + oss.exceptions(std::ios::failbit); + Poco::JSON::Stringifier::stringify(result_json, oss); + auto result = oss.str(); + return result; +} + +} diff --git a/programs/copier/StatusAccumulator.h b/programs/copier/StatusAccumulator.h index 
6e20e3dc95d..d420b611602 100644 --- a/programs/copier/StatusAccumulator.h +++ b/programs/copier/StatusAccumulator.h @@ -1,65 +1,27 @@ #pragma once +#include -#include -#include -#include -#include - -#include #include -#include -#include +#include namespace DB { class StatusAccumulator { - public: - struct TableStatus - { - size_t all_partitions_count; - size_t processed_partitions_count; - }; +public: + struct TableStatus + { + size_t all_partitions_count; + size_t processed_partitions_count; + }; - using Map = std::unordered_map; - using MapPtr = std::shared_ptr; + using Map = std::unordered_map; + using MapPtr = std::shared_ptr; - static MapPtr fromJSON(std::string state_json) - { - Poco::JSON::Parser parser; - auto state = parser.parse(state_json).extract(); - MapPtr result_ptr = std::make_shared(); - for (const auto & table_name : state->getNames()) - { - auto table_status_json = state->getValue(table_name); - auto table_status = parser.parse(table_status_json).extract(); - /// Map entry will be created if it is absent - auto & map_table_status = (*result_ptr)[table_name]; - map_table_status.all_partitions_count += table_status->getValue("all_partitions_count"); - map_table_status.processed_partitions_count += table_status->getValue("processed_partitions_count"); - } - return result_ptr; - } - - static std::string serializeToJSON(MapPtr statuses) - { - Poco::JSON::Object result_json; - for (const auto & [table_name, table_status] : *statuses) - { - Poco::JSON::Object status_json; - status_json.set("all_partitions_count", table_status.all_partitions_count); - status_json.set("processed_partitions_count", table_status.processed_partitions_count); - - result_json.set(table_name, status_json); - } - std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM - oss.exceptions(std::ios::failbit); - Poco::JSON::Stringifier::stringify(result_json, oss); - auto result = oss.str(); - return result; - } + static MapPtr fromJSON(String state_json); + static String serializeToJSON(MapPtr statuses); }; } diff --git a/programs/copier/TaskCluster.h b/programs/copier/TaskCluster.h index c59c7bff4d7..fc1c8a663ec 100644 --- a/programs/copier/TaskCluster.h +++ b/programs/copier/TaskCluster.h @@ -1,8 +1,14 @@ #pragma once -#include "Aliases.h" +#include "TaskTable.h" + +#include +#include + #include +#include + namespace DB { diff --git a/programs/copier/TaskShard.cpp b/programs/copier/TaskShard.cpp index af21848b384..d156f451a84 100644 --- a/programs/copier/TaskShard.cpp +++ b/programs/copier/TaskShard.cpp @@ -5,7 +5,7 @@ namespace DB { -TaskShard::TaskShard(TaskTable & parent, const ShardInfo & info_) +TaskShard::TaskShard(TaskTable & parent, const Cluster::ShardInfo & info_) : task_table(parent) , info(info_) { diff --git a/programs/copier/TaskShard.h b/programs/copier/TaskShard.h index 970ca21612f..05d652077ea 100644 --- a/programs/copier/TaskShard.h +++ b/programs/copier/TaskShard.h @@ -3,12 +3,7 @@ #include "Aliases.h" #include "Internals.h" #include "ClusterPartition.h" - -#include -#include - -#include -#include +#include "ShardPartition.h" namespace DB @@ -18,11 +13,11 @@ struct TaskTable; struct TaskShard { - TaskShard(TaskTable & parent, const ShardInfo & info_); + TaskShard(TaskTable & parent, const Cluster::ShardInfo & info_); TaskTable & task_table; - ShardInfo info; + Cluster::ShardInfo info; UInt32 numberInCluster() const; @@ -55,4 +50,7 @@ struct TaskShard ListOfDatabasesAndTableNames list_of_split_tables_on_shard; }; +using TaskShardPtr = std::shared_ptr; +using TasksShard = 
std::vector; + } diff --git a/programs/copier/TaskTable.cpp b/programs/copier/TaskTable.cpp index 2f282842db6..5b09a9c99a7 100644 --- a/programs/copier/TaskTable.cpp +++ b/programs/copier/TaskTable.cpp @@ -1,5 +1,12 @@ #include "TaskTable.h" +#include "ClusterPartition.h" +#include "TaskCluster.h" + +#include + +#include + namespace DB { namespace ErrorCodes diff --git a/programs/copier/TaskTable.h b/programs/copier/TaskTable.h index 32b8759cec1..2bb7f078bc6 100644 --- a/programs/copier/TaskTable.h +++ b/programs/copier/TaskTable.h @@ -1,19 +1,15 @@ #pragma once #include "Aliases.h" -#include "Internals.h" -#include "ClusterPartition.h" - -#include -#include - -#include -#include +#include "TaskShard.h" namespace DB { +struct ClusterPartition; +struct TaskCluster; + struct TaskTable { TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, const String & prefix, const String & table_key); @@ -138,6 +134,9 @@ struct TaskTable void initShards(RandomEngine &&random_engine); }; +using TasksTable = std::list; + + template inline void TaskTable::initShards(RandomEngine && random_engine) { From eda3955f010ddb49136633043cf0e9adbfe297be Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 20 Oct 2022 14:50:51 +0000 Subject: [PATCH 026/112] Update libcxx submodule --- contrib/libcxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libcxx b/contrib/libcxx index 19330eed499..4db7f838afd 160000 --- a/contrib/libcxx +++ b/contrib/libcxx @@ -1 +1 @@ -Subproject commit 19330eed499f0f2011437a92b7e8567ea36fe082 +Subproject commit 4db7f838afd3139eb3761694b04d31275df45d2d From 2fe59e7e8f03db75c42c40f7e266581d307bc97a Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Thu, 20 Oct 2022 13:30:20 -0300 Subject: [PATCH 027/112] lower thread and iterations number --- .../scripts/stress_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py index 81dfb4cc511..b8bafb3d0c1 100644 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py @@ -8,8 +8,8 @@ server_ip = sys.argv[2] mutex = threading.Lock() success_counter = 0 -number_of_threads = 200 -number_of_iterations = 200 +number_of_threads = 100 +number_of_iterations = 100 def perform_request(): From 1a462fddea3c518e51c68f74a2f7c6193644042a Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Fri, 21 Oct 2022 13:28:24 +0800 Subject: [PATCH 028/112] Fix test --- src/Interpreters/convertFieldToType.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 4e7562ef451..fdbae838ab4 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -218,10 +218,11 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } if (which_type.isDateTime64() - && (which_from_type.isNativeInt() || which_from_type.isNativeUInt() || which_from_type.isDate() || which_from_type.isDate32() || which_from_type.isDateTime() || which_from_type.isDateTime64())) + && (src.getType() == Field::Types::UInt64 || src.getType() == Field::Types::Int64 || src.getType() == Field::Types::Decimal64)) { const auto scale = static_cast(type).getScale(); - 
const auto decimal_value = DecimalUtils::decimalFromComponents(applyVisitor(FieldVisitorConvertToNumber(), src), 0, scale); + const auto decimal_value + = DecimalUtils::decimalFromComponents(applyVisitor(FieldVisitorConvertToNumber(), src), 0, scale); return Field(DecimalField(decimal_value, scale)); } } From 2c41c0357d4b25e81479af487dd66e71b34e99d4 Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Fri, 21 Oct 2022 14:20:49 -0300 Subject: [PATCH 029/112] Make CaresPTRResolver completely thread-safe --- src/Common/CaresPTRResolver.h | 3 +++ src/Common/DNSPTRResolverProvider.cpp | 5 ++++- src/Common/LockedDNSPTRResolver.cpp | 25 +++++++++++++++++++++++++ src/Common/LockedDNSPTRResolver.h | 25 +++++++++++++++++++++++++ 4 files changed, 57 insertions(+), 1 deletion(-) create mode 100644 src/Common/LockedDNSPTRResolver.cpp create mode 100644 src/Common/LockedDNSPTRResolver.h diff --git a/src/Common/CaresPTRResolver.h b/src/Common/CaresPTRResolver.h index 38344d75ade..9df6d7aeb72 100644 --- a/src/Common/CaresPTRResolver.h +++ b/src/Common/CaresPTRResolver.h @@ -2,6 +2,7 @@ #include #include +#include #include "DNSPTRResolver.h" using ares_channel = struct ares_channeldata *; @@ -46,6 +47,8 @@ namespace DB void process_readable_sockets(std::span readable_sockets); ares_channel channel; + + static std::mutex mutex; }; } diff --git a/src/Common/DNSPTRResolverProvider.cpp b/src/Common/DNSPTRResolverProvider.cpp index 97d601a3a78..63d23612fba 100644 --- a/src/Common/DNSPTRResolverProvider.cpp +++ b/src/Common/DNSPTRResolverProvider.cpp @@ -1,12 +1,15 @@ #include "DNSPTRResolverProvider.h" +#include "LockedDNSPTRResolver.h" #include "CaresPTRResolver.h" namespace DB { std::shared_ptr DNSPTRResolverProvider::get() { - return std::make_shared( + static auto resolver = std::make_shared( CaresPTRResolver::provider_token {} ); + + return resolver; } } diff --git a/src/Common/LockedDNSPTRResolver.cpp b/src/Common/LockedDNSPTRResolver.cpp new file mode 100644 index 00000000000..947a109860d --- /dev/null +++ b/src/Common/LockedDNSPTRResolver.cpp @@ -0,0 +1,25 @@ +#include "LockedDNSPTRResolver.h" + +namespace DB { + + std::mutex LockedPTRResolver::mutex; + + LockedPTRResolver::LockedPTRResolver(std::unique_ptr resolver_) + : resolver(std::move(resolver_)) + {} + + std::unordered_set LockedPTRResolver::resolve(const std::string & ip) + { + std::lock_guard guard(mutex); + + return resolver->resolve(ip); + } + + std::unordered_set LockedPTRResolver::resolve_v6(const std::string & ip) + { + std::lock_guard guard(mutex); + + return resolver->resolve_v6(ip); + } + +} diff --git a/src/Common/LockedDNSPTRResolver.h b/src/Common/LockedDNSPTRResolver.h new file mode 100644 index 00000000000..82fdd189744 --- /dev/null +++ b/src/Common/LockedDNSPTRResolver.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +#include + +namespace DB { + class LockedPTRResolver : public DNSPTRResolver + { + public: + + LockedPTRResolver(std::unique_ptr resolver); + + std::unordered_set resolve(const std::string & ip) override; + + std::unordered_set resolve_v6(const std::string & ip) override; + + private: + // this needs to be owned + std::unique_ptr resolver; + + static std::mutex mutex; + }; +} + From e2fcd51c935ce5eb46b9af7e6a4c4ef49d93a106 Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Fri, 21 Oct 2022 14:43:08 -0300 Subject: [PATCH 030/112] what --- src/Common/CaresPTRResolver.cpp | 6 ++++++ src/Common/DNSPTRResolverProvider.cpp | 1 - 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/Common/CaresPTRResolver.cpp 
b/src/Common/CaresPTRResolver.cpp index f04c6fb6de7..c6fe70a09fa 100644 --- a/src/Common/CaresPTRResolver.cpp +++ b/src/Common/CaresPTRResolver.cpp @@ -41,6 +41,8 @@ namespace DB } } + std::mutex CaresPTRResolver::mutex; + CaresPTRResolver::CaresPTRResolver(CaresPTRResolver::provider_token) : channel(nullptr) { /* @@ -74,6 +76,8 @@ namespace DB std::unordered_set CaresPTRResolver::resolve(const std::string & ip) { + std::lock_guard guard(mutex); + std::unordered_set ptr_records; resolve(ip, ptr_records); @@ -84,6 +88,8 @@ namespace DB std::unordered_set CaresPTRResolver::resolve_v6(const std::string & ip) { + std::lock_guard guard(mutex); + std::unordered_set ptr_records; resolve_v6(ip, ptr_records); diff --git a/src/Common/DNSPTRResolverProvider.cpp b/src/Common/DNSPTRResolverProvider.cpp index 63d23612fba..91ce4dbb938 100644 --- a/src/Common/DNSPTRResolverProvider.cpp +++ b/src/Common/DNSPTRResolverProvider.cpp @@ -1,5 +1,4 @@ #include "DNSPTRResolverProvider.h" -#include "LockedDNSPTRResolver.h" #include "CaresPTRResolver.h" namespace DB From 615b73773ae321a0c024dd5bcc05fcf96d55a4f3 Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Fri, 21 Oct 2022 14:46:10 -0300 Subject: [PATCH 031/112] removing unwanted files --- src/Common/LockedDNSPTRResolver.cpp | 25 ------------------------- src/Common/LockedDNSPTRResolver.h | 25 ------------------------- 2 files changed, 50 deletions(-) delete mode 100644 src/Common/LockedDNSPTRResolver.cpp delete mode 100644 src/Common/LockedDNSPTRResolver.h diff --git a/src/Common/LockedDNSPTRResolver.cpp b/src/Common/LockedDNSPTRResolver.cpp deleted file mode 100644 index 947a109860d..00000000000 --- a/src/Common/LockedDNSPTRResolver.cpp +++ /dev/null @@ -1,25 +0,0 @@ -#include "LockedDNSPTRResolver.h" - -namespace DB { - - std::mutex LockedPTRResolver::mutex; - - LockedPTRResolver::LockedPTRResolver(std::unique_ptr resolver_) - : resolver(std::move(resolver_)) - {} - - std::unordered_set LockedPTRResolver::resolve(const std::string & ip) - { - std::lock_guard guard(mutex); - - return resolver->resolve(ip); - } - - std::unordered_set LockedPTRResolver::resolve_v6(const std::string & ip) - { - std::lock_guard guard(mutex); - - return resolver->resolve_v6(ip); - } - -} diff --git a/src/Common/LockedDNSPTRResolver.h b/src/Common/LockedDNSPTRResolver.h deleted file mode 100644 index 82fdd189744..00000000000 --- a/src/Common/LockedDNSPTRResolver.h +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include - -#include - -namespace DB { - class LockedPTRResolver : public DNSPTRResolver - { - public: - - LockedPTRResolver(std::unique_ptr resolver); - - std::unordered_set resolve(const std::string & ip) override; - - std::unordered_set resolve_v6(const std::string & ip) override; - - private: - // this needs to be owned - std::unique_ptr resolver; - - static std::mutex mutex; - }; -} - From f104dd08cdf820382a375a32fb5b53ab2507fd63 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Sat, 22 Oct 2022 15:47:13 +0800 Subject: [PATCH 032/112] Only fix LowCardinality for now --- src/Interpreters/castColumn.cpp | 10 ---------- src/Interpreters/castColumn.h | 1 - src/Storages/MergeTree/KeyCondition.cpp | 8 ++++++-- 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/src/Interpreters/castColumn.cpp b/src/Interpreters/castColumn.cpp index 744cfff2527..dc9882b84b0 100644 --- a/src/Interpreters/castColumn.cpp +++ b/src/Interpreters/castColumn.cpp @@ -51,14 +51,4 @@ ColumnPtr castColumnAccurateOrNull(const ColumnWithTypeAndName & arg, const Data return castColumn(arg, type); } 
-ColumnPtr tryCastColumnAccurate(const ColumnWithTypeAndName & arg, const DataTypePtr & type) -try -{ - return castColumn(arg, type); -} -catch (...) -{ - return nullptr; -} - } diff --git a/src/Interpreters/castColumn.h b/src/Interpreters/castColumn.h index c0a2cfbefbc..fcbea0f4646 100644 --- a/src/Interpreters/castColumn.h +++ b/src/Interpreters/castColumn.h @@ -8,6 +8,5 @@ namespace DB ColumnPtr castColumn(const ColumnWithTypeAndName & arg, const DataTypePtr & type); ColumnPtr castColumnAccurate(const ColumnWithTypeAndName & arg, const DataTypePtr & type); ColumnPtr castColumnAccurateOrNull(const ColumnWithTypeAndName & arg, const DataTypePtr & type); -ColumnPtr tryCastColumnAccurate(const ColumnWithTypeAndName & arg, const DataTypePtr & type); } diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 194cfbdabfc..cc5f6998955 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -1179,8 +1180,11 @@ bool KeyCondition::transformConstantWithValidFunctions( if (is_valid_chain) { - auto const_type = cur_node->result_type; - auto const_value = convertFieldToType(out_value, *const_type); + out_type = removeLowCardinality(out_type); + auto const_type = removeLowCardinality(cur_node->result_type); + auto const_column = out_type->createColumnConst(1, out_value); + auto const_value = (*castColumnAccurateOrNull({const_column, out_type, ""}, const_type))[0]; + if (const_value.isNull()) return false; From 715b923bf9bbace82be7414b59c52afc9e5b7dc0 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Sat, 22 Oct 2022 13:09:47 +0000 Subject: [PATCH 033/112] Add fuzzed test. --- .../0_stateless/01902_table_function_merge_db_repr.sql | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql index 3801a83e247..460ce16ccad 100644 --- a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql +++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql @@ -62,6 +62,11 @@ SHOW CREATE TABLE 01902_db.t_merge_1; SELECT 'SELECT _database, _table, n FROM merge(currentDatabase(), ^t) ORDER BY _database, _table, n'; SELECT _database, _table, n FROM merge(currentDatabase(), '^t') ORDER BY _database, _table, n; +--fuzzed LOGICAL_ERROR +CREATE TABLE 01902_db.t4 (n Date) ENGINE=MergeTree ORDER BY n; +INSERT INTO 01902_db.t4 SELECT * FROM numbers(10); +SELECT NULL FROM 01902_db.t_merge WHERE n ORDER BY _table DESC; + DROP DATABASE 01902_db; DROP DATABASE 01902_db1; DROP DATABASE 01902_db2; From cd7a48215ccbff4bbce45a1e43f9c69e59e88c93 Mon Sep 17 00:00:00 2001 From: flynn Date: Sat, 22 Oct 2022 16:17:05 +0000 Subject: [PATCH 034/112] Fix create set with wrong header when data type is lowcardinality --- src/Interpreters/Set.cpp | 1 + ...467_set_with_lowcardinality_type.reference | 2 ++ .../02467_set_with_lowcardinality_type.sql | 30 +++++++++++++++++++ 3 files changed, 33 insertions(+) create mode 100644 tests/queries/0_stateless/02467_set_with_lowcardinality_type.reference create mode 100644 tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp index ded8b04a589..e75232aa0f5 100644 --- a/src/Interpreters/Set.cpp +++ b/src/Interpreters/Set.cpp @@ -131,6 +131,7 @@ void Set::setHeader(const ColumnsWithTypeAndName & header) if (const 
auto * low_cardinality_type = typeid_cast(data_types.back().get())) { data_types.back() = low_cardinality_type->getDictionaryType(); + set_elements_types.back() = low_cardinality_type->getDictionaryType(); materialized_columns.emplace_back(key_columns.back()->convertToFullColumnIfLowCardinality()); key_columns.back() = materialized_columns.back().get(); } diff --git a/tests/queries/0_stateless/02467_set_with_lowcardinality_type.reference b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.reference new file mode 100644 index 00000000000..b3f28057554 --- /dev/null +++ b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.reference @@ -0,0 +1,2 @@ +1 test +1 test diff --git a/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql new file mode 100644 index 00000000000..7b572df73f5 --- /dev/null +++ b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS bloom_filter_nullable_index__fuzz_0; +CREATE TABLE bloom_filter_nullable_index__fuzz_0 +( + `order_key` UInt64, + `str` Nullable(String), + INDEX idx str TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree ORDER BY order_key SETTINGS index_granularity = 6; + +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (1, 'test'); +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (2, 'test2'); + +DROP TABLE IF EXISTS bloom_filter_nullable_index__fuzz_1; +CREATE TABLE bloom_filter_nullable_index__fuzz_1 +( + `order_key` UInt64, + `str` Nullable(String), + INDEX idx str TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree ORDER BY order_key SETTINGS index_granularity = 6; + +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (1, 'test'); +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (2, 'test2'); + +DROP TABLE IF EXISTS nullable_string_value__fuzz_2; +CREATE TABLE nullable_string_value__fuzz_2 (`value` LowCardinality(String)) ENGINE = TinyLog; +INSERT INTO nullable_string_value__fuzz_2 VALUES ('test'); + +SELECT * FROM bloom_filter_nullable_index__fuzz_0 WHERE str IN (SELECT value FROM nullable_string_value__fuzz_2); +SELECT * FROM bloom_filter_nullable_index__fuzz_1 WHERE str IN (SELECT value FROM nullable_string_value__fuzz_2); From 1aa8a986dd8f7a7b2a17f5466cc94fac1637e9db Mon Sep 17 00:00:00 2001 From: flynn Date: Sat, 22 Oct 2022 16:19:49 +0000 Subject: [PATCH 035/112] update test --- tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql index 7b572df73f5..92519a48bea 100644 --- a/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql +++ b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql @@ -1,3 +1,4 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/42460 DROP TABLE IF EXISTS bloom_filter_nullable_index__fuzz_0; CREATE TABLE bloom_filter_nullable_index__fuzz_0 ( From 5ac7538e92e32b63a635587aa7ee34f9cc5493fe Mon Sep 17 00:00:00 2001 From: flynn Date: Sat, 22 Oct 2022 16:21:28 +0000 Subject: [PATCH 036/112] update test --- .../queries/0_stateless/02467_set_with_lowcardinality_type.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql index 92519a48bea..dee6f7de74a 100644 --- 
a/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql +++ b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql @@ -15,7 +15,7 @@ DROP TABLE IF EXISTS bloom_filter_nullable_index__fuzz_1; CREATE TABLE bloom_filter_nullable_index__fuzz_1 ( `order_key` UInt64, - `str` Nullable(String), + `str` String, INDEX idx str TYPE bloom_filter GRANULARITY 1 ) ENGINE = MergeTree ORDER BY order_key SETTINGS index_granularity = 6; From 2fc91fd33802ba69193e3d8cb1caa154dcefcd24 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Sat, 22 Oct 2022 18:27:24 +0200 Subject: [PATCH 037/112] fix behaviour of max_rows_to_read for trival limit queries --- src/Interpreters/InterpreterSelectQuery.cpp | 17 +++++++++----- .../QueryPlan/ReadFromMergeTree.cpp | 19 +++++++++++++++- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 4 ++++ .../MergeTree/MergeTreeSelectProcessor.cpp | 8 ------- src/Storages/SelectQueryInfo.h | 3 +++ src/Storages/System/StorageSystemNumbers.cpp | 17 +++++++++++--- ...5_limit_trivial_max_rows_to_read.reference | 7 ++++++ .../02465_limit_trivial_max_rows_to_read.sql | 22 +++++++++++++++++++ 8 files changed, 80 insertions(+), 17 deletions(-) create mode 100644 tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.reference create mode 100644 tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.sql diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 79deb38317c..adf341f5ffd 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -2143,6 +2143,8 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc auto [limit_length, limit_offset] = getLimitLengthAndOffset(query, context); + auto local_limits = getStorageLimits(*context, options); + /** Optimization - if not specified DISTINCT, WHERE, GROUP, HAVING, ORDER, JOIN, LIMIT BY, WITH TIES * but LIMIT is specified, and limit + offset < max_block_size, * then as the block size we will use limit + offset (not to read more from the table than requested), @@ -2161,17 +2163,22 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc && !query_analyzer->hasAggregation() && !query_analyzer->hasWindow() && query.limitLength() - && limit_length <= std::numeric_limits::max() - limit_offset - && limit_length + limit_offset < max_block_size) + && limit_length <= std::numeric_limits::max() - limit_offset) { - max_block_size = std::max(1, limit_length + limit_offset); - max_threads_execute_query = max_streams = 1; + if (limit_length + limit_offset < max_block_size) + { + max_block_size = std::max(1, limit_length + limit_offset); + max_threads_execute_query = max_streams = 1; + } + if (limit_length + limit_offset < local_limits.local_limits.size_limits.max_rows) + { + query_info.limit = limit_length + limit_offset; + } } if (!max_block_size) throw Exception("Setting 'max_block_size' cannot be zero", ErrorCodes::PARAMETER_OUT_OF_BOUND); - auto local_limits = getStorageLimits(*context, options); storage_limits.emplace_back(local_limits); /// Initialize the initial data streams to which the query transforms are superimposed. Table or subquery or prepared input? 
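As an illustration of what the new query_info.limit path above changes, here is a minimal sketch distilled from the 02465_limit_trivial_max_rows_to_read test added later in this patch (settings and queries mirror that test; it is illustrative only, not part of the commit):

SET max_block_size = 10;
SET max_rows_to_read = 20;
SELECT number FROM numbers(30);           -- rejected: would read 30 rows
SELECT number FROM numbers(30) LIMIT 21;  -- rejected: limit + offset still exceeds max_rows_to_read
SELECT number FROM numbers(30) LIMIT 1;   -- accepted: the row estimate is now capped by the trivial LIMIT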
diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index b340073e73d..164ec8777de 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -173,6 +173,9 @@ Pipe ReadFromMergeTree::readFromPool( total_rows += part.getRowsCount(); } + if (query_info.limit > 0 && query_info.limit < total_rows) + total_rows = query_info.limit; + const auto & settings = context->getSettingsRef(); const auto & client_info = context->getClientInfo(); MergeTreeReadPool::BackoffSettings backoff_settings(settings); @@ -246,10 +249,24 @@ ProcessorPtr ReadFromMergeTree::createSource( }; } - return std::make_shared( + auto total_rows = part.getRowsCount(); + if (query_info.limit > 0 && query_info.limit < total_rows) + total_rows = query_info.limit; + + auto source = std::make_shared( data, storage_snapshot, part.data_part, max_block_size, preferred_block_size_bytes, preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache, prewhere_info, actions_settings, reader_settings, virt_column_names, part.part_index_in_query, has_limit_below_one_block, std::move(extension)); + + /// Actually it means that parallel reading from replicas enabled + /// and we have to collaborate with initiator. + /// In this case we won't set approximate rows, because it will be accounted multiple times. + /// Also do not count amount of read rows if we read in order of sorting key, + /// because we don't know actual amount of read rows in case when limit is set. + if (!extension.has_value() && !reader_settings.read_in_order) + source -> addTotalRowsApprox(total_rows); + + return source; } Pipe ReadFromMergeTree::readInOrder( diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 91ecb3a37a0..0eddaac2fac 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -1061,6 +1061,10 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd auto current_rows_estimate = ranges.getRowsCount(); size_t prev_total_rows_estimate = total_rows.fetch_add(current_rows_estimate); size_t total_rows_estimate = current_rows_estimate + prev_total_rows_estimate; + if (query_info.limit > 0 && total_rows_estimate > query_info.limit) + { + total_rows_estimate = query_info.limit; + } limits.check(total_rows_estimate, 0, "rows (controlled by 'max_rows_to_read' setting)", ErrorCodes::TOO_MANY_ROWS); leaf_limits.check( total_rows_estimate, 0, "rows (controlled by 'max_rows_to_read_leaf' setting)", ErrorCodes::TOO_MANY_ROWS); diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 59cbae3f914..2490eb77772 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -38,14 +38,6 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( has_limit_below_one_block(has_limit_below_one_block_), total_rows(data_part->index_granularity.getRowsCountInRanges(all_mark_ranges)) { - /// Actually it means that parallel reading from replicas enabled - /// and we have to collaborate with initiator. - /// In this case we won't set approximate rows, because it will be accounted multiple times. 
- /// Also do not count amount of read rows if we read in order of sorting key, - /// because we don't know actual amount of read rows in case when limit is set. - if (!extension_.has_value() && !reader_settings.read_in_order) - addTotalRowsApprox(total_rows); - ordered_names = header_without_virtual_columns.getNames(); } diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index f2835ab4dbf..94a8c1143f3 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -220,6 +220,9 @@ struct SelectQueryInfo Block minmax_count_projection_block; MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr; + // If limit is not 0, that means it's a trival limit query. + UInt64 limit = 0; + InputOrderInfoPtr getInputOrderInfo() const { return input_order_info ? input_order_info : (projection ? projection->input_order_info : nullptr); diff --git a/src/Storages/System/StorageSystemNumbers.cpp b/src/Storages/System/StorageSystemNumbers.cpp index 523ec25b89c..fbcd449051f 100644 --- a/src/Storages/System/StorageSystemNumbers.cpp +++ b/src/Storages/System/StorageSystemNumbers.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include @@ -125,7 +126,7 @@ StorageSystemNumbers::StorageSystemNumbers(const StorageID & table_id, bool mult Pipe StorageSystemNumbers::read( const Names & column_names, const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo &, + SelectQueryInfo & query_info, ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, @@ -154,7 +155,12 @@ Pipe StorageSystemNumbers::read( auto source = std::make_shared(state, max_block_size, max_counter); if (i == 0) - source->addTotalRowsApprox(*limit); + { + auto rows_appr = *limit; + if (query_info.limit > 0 && query_info.limit < rows_appr) + rows_appr = query_info.limit; + source->addTotalRowsApprox(rows_appr); + } pipe.addSource(std::move(source)); } @@ -167,7 +173,12 @@ Pipe StorageSystemNumbers::read( auto source = std::make_shared(max_block_size, offset + i * max_block_size, num_streams * max_block_size); if (limit && i == 0) - source->addTotalRowsApprox(*limit); + { + auto rows_appr = *limit; + if (query_info.limit > 0 && query_info.limit < rows_appr) + rows_appr = query_info.limit; + source->addTotalRowsApprox(rows_appr); + } pipe.addSource(std::move(source)); } diff --git a/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.reference b/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.reference new file mode 100644 index 00000000000..87370760038 --- /dev/null +++ b/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.reference @@ -0,0 +1,7 @@ +0 +0 +1 +2 +3 +4 +0 diff --git a/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.sql b/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.sql new file mode 100644 index 00000000000..ee7a4e6b6b5 --- /dev/null +++ b/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t_max_rows_to_read; + +CREATE TABLE t_max_rows_to_read (a UInt64) +ENGINE = MergeTree ORDER BY a +SETTINGS index_granularity = 4; + +INSERT INTO t_max_rows_to_read SELECT number FROM numbers(100); + +SET max_block_size = 10; +SET max_rows_to_read = 20; +SET read_overflow_mode = 'throw'; + +SELECT number FROM numbers(30); -- { serverError 158 } +SELECT number FROM numbers(30) LIMIT 21; -- { serverError 158 } +SELECT number FROM numbers(30) LIMIT 1; +SELECT number FROM numbers(5); + +SELECT a FROM t_max_rows_to_read 
LIMIT 1; +SELECT a FROM t_max_rows_to_read LIMIT 11 offset 11; -- { serverError 158 } +SELECT a FROM t_max_rows_to_read WHERE a > 50 LIMIT 1; -- { serverError 158 } + +DROP TABLE t_max_rows_to_read; From 56e5daba0c97f6e55ad556c895670ea42efc0296 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Sat, 22 Oct 2022 22:51:59 +0000 Subject: [PATCH 038/112] remove DataPartStorageBuilder --- .../DiskObjectStorageTransaction.cpp | 2 +- .../MergeTree/DataPartStorageOnDisk.cpp | 182 ++++++++---------- .../MergeTree/DataPartStorageOnDisk.h | 67 +++---- src/Storages/MergeTree/DataPartsExchange.cpp | 73 +++---- src/Storages/MergeTree/DataPartsExchange.h | 5 +- src/Storages/MergeTree/IDataPartStorage.h | 55 ++---- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 29 ++- src/Storages/MergeTree/IMergeTreeDataPart.h | 20 +- .../IMergeTreeDataPartInfoForReader.h | 5 +- .../MergeTree/IMergeTreeDataPartWriter.cpp | 4 +- .../MergeTree/IMergeTreeDataPartWriter.h | 6 +- .../MergeTree/IMergedBlockOutputStream.cpp | 5 +- .../MergeTree/IMergedBlockOutputStream.h | 6 +- .../LoadedMergeTreeDataPartInfoForReader.h | 5 +- .../MergeTree/MergeFromLogEntryTask.cpp | 4 +- .../MergeTree/MergePlainMergeTreeTask.cpp | 3 +- src/Storages/MergeTree/MergeTask.cpp | 19 +- src/Storages/MergeTree/MergeTask.h | 9 - src/Storages/MergeTree/MergeTreeData.cpp | 59 ++---- src/Storages/MergeTree/MergeTreeData.h | 24 +-- .../MergeTree/MergeTreeDataMergerMutator.cpp | 7 +- .../MergeTree/MergeTreeDataMergerMutator.h | 4 +- .../MergeTree/MergeTreeDataPartCompact.cpp | 9 +- .../MergeTree/MergeTreeDataPartCompact.h | 7 +- .../MergeTree/MergeTreeDataPartInMemory.cpp | 50 ++--- .../MergeTree/MergeTreeDataPartInMemory.h | 14 +- .../MergeTree/MergeTreeDataPartWide.cpp | 11 +- .../MergeTree/MergeTreeDataPartWide.h | 7 +- .../MergeTreeDataPartWriterCompact.cpp | 9 +- .../MergeTreeDataPartWriterCompact.h | 3 +- .../MergeTreeDataPartWriterInMemory.cpp | 4 +- .../MergeTreeDataPartWriterInMemory.h | 4 +- .../MergeTreeDataPartWriterOnDisk.cpp | 20 +- .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 5 +- .../MergeTree/MergeTreeDataPartWriterWide.cpp | 17 +- .../MergeTree/MergeTreeDataPartWriterWide.h | 3 +- .../MergeTree/MergeTreeDataWriter.cpp | 34 +--- src/Storages/MergeTree/MergeTreeDataWriter.h | 5 - src/Storages/MergeTree/MergeTreePartition.cpp | 8 +- src/Storages/MergeTree/MergeTreePartition.h | 8 +- .../MergeTree/MergeTreePartsMover.cpp | 21 +- src/Storages/MergeTree/MergeTreeSink.cpp | 2 +- .../MergeTree/MergeTreeWriteAheadLog.cpp | 6 +- .../MergeTree/MergedBlockOutputStream.cpp | 41 ++-- .../MergeTree/MergedBlockOutputStream.h | 3 +- .../MergedColumnOnlyOutputStream.cpp | 8 +- .../MergeTree/MergedColumnOnlyOutputStream.h | 3 +- .../MergeTree/MutateFromLogEntryTask.cpp | 7 +- .../MergeTree/MutatePlainMergeTreeTask.cpp | 7 +- src/Storages/MergeTree/MutateTask.cpp | 58 ++---- src/Storages/MergeTree/MutateTask.h | 2 +- .../MergeTree/ReplicatedMergeTreeSink.cpp | 10 +- .../MergeTree/ReplicatedMergeTreeSink.h | 1 - src/Storages/StorageMergeTree.cpp | 14 +- src/Storages/StorageReplicatedMergeTree.cpp | 72 +++---- src/Storages/StorageReplicatedMergeTree.h | 6 +- 56 files changed, 432 insertions(+), 640 deletions(-) diff --git a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp index 2a75668dd76..b55fb2c4fa5 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp @@ -599,7 +599,7 @@ std::unique_ptr 
DiskObjectStorageTransaction::writeFile auto write_operation = std::make_unique(object_storage, metadata_storage, object); std::function create_metadata_callback; - if (autocommit) + if (autocommit) { create_metadata_callback = [tx = shared_from_this(), mode, path, blob_name] (size_t count) { diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp index efc7710f640..765c62ba903 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp @@ -12,6 +12,7 @@ #include #include #include +#include namespace DB { @@ -29,6 +30,11 @@ DataPartStorageOnDisk::DataPartStorageOnDisk(VolumePtr volume_, std::string root { } +std::shared_ptr DataPartStorageOnDisk::clone() const +{ + return std::make_shared(volume, root_path, part_dir); +} + std::string DataPartStorageOnDisk::getFullPath() const { return fs::path(volume->getDisk()->getPath()) / root_path / part_dir / ""; @@ -54,6 +60,11 @@ DataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name return std::make_shared(volume, std::string(fs::path(root_path) / part_dir), name); } +MutableDataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) +{ + return std::make_shared(volume, std::string(fs::path(root_path) / part_dir), name); +} + bool DataPartStorageOnDisk::exists() const { return volume->getDisk()->exists(fs::path(root_path) / part_dir); @@ -199,11 +210,6 @@ void DataPartStorageOnDisk::checkConsistency(const MergeTreeDataPartChecksums & checksums.checkSizes(volume->getDisk(), getRelativePath()); } -DataPartStorageBuilderPtr DataPartStorageOnDisk::getBuilder() const -{ - return std::make_shared(volume, root_path, part_dir); -} - void DataPartStorageOnDisk::remove( CanRemoveCallback && can_remove_callback, const MergeTreeDataPartChecksums & checksums, @@ -488,11 +494,6 @@ bool DataPartStorageOnDisk::looksLikeBrokenDetachedPartHasTheSameContent(const S return original_files_list == detached_files_list; } -void DataPartStorageBuilderOnDisk::setRelativePath(const std::string & path) -{ - part_dir = path; -} - std::string DataPartStorageOnDisk::getDiskName() const { return volume->getDisk()->getName(); @@ -548,7 +549,7 @@ DataPartStorageOnDisk::DisksSet::const_iterator DataPartStorageOnDisk::isStoredO return disks.find(volume->getDisk()); } -ReservationPtr DataPartStorageOnDisk::reserve(UInt64 bytes) const +ReservationPtr DataPartStorageOnDisk::reserve(UInt64 bytes) { auto res = volume->reserve(bytes); if (!res) @@ -557,7 +558,7 @@ ReservationPtr DataPartStorageOnDisk::reserve(UInt64 bytes) const return res; } -ReservationPtr DataPartStorageOnDisk::tryReserve(UInt64 bytes) const +ReservationPtr DataPartStorageOnDisk::tryReserve(UInt64 bytes) { return volume->reserve(bytes); } @@ -798,7 +799,7 @@ void DataPartStorageOnDisk::backup( } } -DataPartStoragePtr DataPartStorageOnDisk::freeze( +MutableDataPartStoragePtr DataPartStorageOnDisk::freeze( const std::string & to, const std::string & dir_path, bool make_source_readonly, @@ -822,7 +823,7 @@ DataPartStoragePtr DataPartStorageOnDisk::freeze( return std::make_shared(single_disk_volume, to, dir_path); } -DataPartStoragePtr DataPartStorageOnDisk::clone( +MutableDataPartStoragePtr DataPartStorageOnDisk::clonePart( const std::string & to, const std::string & dir_path, const DiskPtr & disk, @@ -835,6 +836,7 @@ DataPartStoragePtr DataPartStorageOnDisk::clone( LOG_WARNING(log, "Path {} already exists. 
Will remove it and clone again.", fullPath(disk, path_to_clone)); disk->removeRecursive(path_to_clone); } + disk->createDirectories(to); volume->getDisk()->copy(getRelativePath(), disk, to); volume->getDisk()->removeFileIfExists(fs::path(path_to_clone) / "delete-on-destroy.txt"); @@ -849,7 +851,7 @@ void DataPartStorageOnDisk::onRename(const std::string & new_root_path, const st root_path = new_root_path; } -void DataPartStorageBuilderOnDisk::rename( +void DataPartStorageOnDisk::rename( const std::string & new_root_path, const std::string & new_part_dir, Poco::Logger * log, @@ -870,7 +872,7 @@ void DataPartStorageBuilderOnDisk::rename( "Part directory {} already exists and contains {} files. Removing it.", fullPath(volume->getDisk(), to), files.size()); - transaction->removeRecursive(to); + executeOperation([&](auto & disk) { disk.removeRecursive(to); }); } else { @@ -884,8 +886,12 @@ void DataPartStorageBuilderOnDisk::rename( String from = getRelativePath(); /// Why? - transaction->setLastModified(from, Poco::Timestamp::fromEpochTime(time(nullptr))); - transaction->moveDirectory(from, to); + executeOperation([&](auto & disk) + { + disk.setLastModified(from, Poco::Timestamp::fromEpochTime(time(nullptr))); + disk.moveDirectory(from, to); + }); + part_dir = new_part_dir; root_path = new_root_path; @@ -920,51 +926,52 @@ void DataPartStorageOnDisk::changeRootPath(const std::string & from_root, const root_path = to_root.substr(0, dst_size) + root_path.substr(prefix_size); } -DataPartStorageBuilderOnDisk::DataPartStorageBuilderOnDisk( - VolumePtr volume_, - std::string root_path_, - std::string part_dir_) - : volume(std::move(volume_)) - , root_path(std::move(root_path_)) - , part_dir(std::move(part_dir_)) - , transaction(volume->getDisk()->createTransaction()) -{ -} - -std::unique_ptr DataPartStorageBuilderOnDisk::writeFile( - const String & name, - size_t buf_size, - const WriteSettings & settings) -{ - return transaction->writeFile(fs::path(root_path) / part_dir / name, buf_size, WriteMode::Rewrite, settings, /* autocommit = */ false); -} - -void DataPartStorageBuilderOnDisk::removeFile(const String & name) -{ - transaction->removeFile(fs::path(root_path) / part_dir / name); -} - -void DataPartStorageBuilderOnDisk::removeFileIfExists(const String & name) -{ - transaction->removeFileIfExists(fs::path(root_path) / part_dir / name); -} - -void DataPartStorageBuilderOnDisk::removeRecursive() -{ - transaction->removeRecursive(fs::path(root_path) / part_dir); -} - -void DataPartStorageBuilderOnDisk::removeSharedRecursive(bool keep_in_remote_fs) -{ - transaction->removeSharedRecursive(fs::path(root_path) / part_dir, keep_in_remote_fs, {}); -} - -SyncGuardPtr DataPartStorageBuilderOnDisk::getDirectorySyncGuard() const +SyncGuardPtr DataPartStorageOnDisk::getDirectorySyncGuard() const { return volume->getDisk()->getDirectorySyncGuard(fs::path(root_path) / part_dir); } -void DataPartStorageBuilderOnDisk::createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) const +template +void DataPartStorageOnDisk::executeOperation(Op && op) +{ + if (transaction) + op(*transaction); + else + op(*volume->getDisk()); +} + +std::unique_ptr DataPartStorageOnDisk::writeFile( + const String & name, + size_t buf_size, + const WriteSettings & settings) +{ + if (transaction) + return transaction->writeFile(fs::path(root_path) / part_dir / name, buf_size, WriteMode::Rewrite, settings, /* autocommit = */ false); + + return volume->getDisk()->writeFile(fs::path(root_path) / 
part_dir / name, buf_size, WriteMode::Rewrite, settings); +} + +void DataPartStorageOnDisk::removeFile(const String & name) +{ + executeOperation([&](auto & disk) { disk.removeFile(fs::path(root_path) / part_dir / name); }); +} + +void DataPartStorageOnDisk::removeFileIfExists(const String & name) +{ + executeOperation([&](auto & disk) { disk.removeFileIfExists(fs::path(root_path) / part_dir / name); }); +} + +void DataPartStorageOnDisk::removeRecursive() +{ + executeOperation([&](auto & disk) { disk.removeRecursive(fs::path(root_path) / part_dir); }); +} + +void DataPartStorageOnDisk::removeSharedRecursive(bool keep_in_remote_fs) +{ + executeOperation([&](auto & disk) { disk.removeSharedRecursive(fs::path(root_path) / part_dir, keep_in_remote_fs, {}); }); +} + +void DataPartStorageOnDisk::createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) { const auto * source_on_disk = typeid_cast(&source); if (!source_on_disk) @@ -973,58 +980,39 @@ void DataPartStorageBuilderOnDisk::createHardLinkFrom(const IDataPartStorage & s "Cannot create hardlink from different storage. Expected DataPartStorageOnDisk, got {}", typeid(source).name()); - transaction->createHardLink( - fs::path(source_on_disk->getRelativePath()) / from, - fs::path(root_path) / part_dir / to); + executeOperation([&](auto & disk) + { + disk.createHardLink( + fs::path(source_on_disk->getRelativePath()) / from, + fs::path(root_path) / part_dir / to); + }); } -bool DataPartStorageBuilderOnDisk::exists() const +void DataPartStorageOnDisk::createDirectories() { - return volume->getDisk()->exists(fs::path(root_path) / part_dir); + executeOperation([&](auto & disk) { disk.createDirectories(fs::path(root_path) / part_dir); }); } -std::string DataPartStorageBuilderOnDisk::getFullPath() const +void DataPartStorageOnDisk::createProjection(const std::string & name) { - return fs::path(volume->getDisk()->getPath()) / root_path / part_dir; + executeOperation([&](auto & disk) { disk.createDirectory(fs::path(root_path) / part_dir / name); }); } -std::string DataPartStorageBuilderOnDisk::getRelativePath() const +void DataPartStorageOnDisk::beginTransaction() { - return fs::path(root_path) / part_dir; + if (transaction) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Uncommitted transaction already exists"); + + transaction = volume->getDisk()->createTransaction(); } -void DataPartStorageBuilderOnDisk::createDirectories() +void DataPartStorageOnDisk::commitTransaction() { - transaction->createDirectories(fs::path(root_path) / part_dir); -} + if (!transaction) + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no uncommitted transaction"); -void DataPartStorageBuilderOnDisk::createProjection(const std::string & name) -{ - transaction->createDirectory(fs::path(root_path) / part_dir / name); -} - -ReservationPtr DataPartStorageBuilderOnDisk::reserve(UInt64 bytes) -{ - auto res = volume->reserve(bytes); - if (!res) - throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Cannot reserve {}, not enough space", ReadableSize(bytes)); - - return res; -} - -DataPartStorageBuilderPtr DataPartStorageBuilderOnDisk::getProjection(const std::string & name) const -{ - return std::make_shared(volume, std::string(fs::path(root_path) / part_dir), name); -} - -DataPartStoragePtr DataPartStorageBuilderOnDisk::getStorage() const -{ - return std::make_shared(volume, root_path, part_dir); -} - -void DataPartStorageBuilderOnDisk::commit() -{ transaction->commit(); + transaction.reset(); } } diff --git 
a/src/Storages/MergeTree/DataPartStorageOnDisk.h b/src/Storages/MergeTree/DataPartStorageOnDisk.h index d325049f056..3ce063ca990 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.h +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.h @@ -15,6 +15,7 @@ class DataPartStorageOnDisk final : public IDataPartStorage { public: DataPartStorageOnDisk(VolumePtr volume_, std::string root_path_, std::string part_dir_); + std::shared_ptr clone() const override; std::string getFullPath() const override; std::string getRelativePath() const override; @@ -22,6 +23,7 @@ public: std::string getFullRootPath() const override; DataPartStoragePtr getProjection(const std::string & name) const override; + MutableDataPartStoragePtr getProjection(const std::string & name) override; bool exists() const override; bool exists(const std::string & name) const override; @@ -75,8 +77,8 @@ public: DisksSet::const_iterator isStoredOnDisk(const DisksSet & disks) const override; - ReservationPtr reserve(UInt64 bytes) const override; - ReservationPtr tryReserve(UInt64 bytes) const override; + ReservationPtr reserve(UInt64 bytes) override; + ReservationPtr tryReserve(UInt64 bytes) override; size_t getVolumeIndex(const IStoragePolicy &) const override; void writeChecksums(const MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const override; @@ -100,7 +102,7 @@ public: bool make_temporary_hard_links, TemporaryFilesOnDisks * temp_dirs) const override; - DataPartStoragePtr freeze( + MutableDataPartStoragePtr freeze( const std::string & to, const std::string & dir_path, bool make_source_readonly, @@ -108,7 +110,7 @@ public: bool copy_instead_of_hardlink, const NameSet & files_to_copy_instead_of_hardlinks) const override; - DataPartStoragePtr clone( + MutableDataPartStoragePtr clonePart( const std::string & to, const std::string & dir_path, const DiskPtr & disk, @@ -116,40 +118,9 @@ public: void changeRootPath(const std::string & from_root, const std::string & to_root) override; - DataPartStorageBuilderPtr getBuilder() const override; -private: - VolumePtr volume; - std::string root_path; - std::string part_dir; - - void clearDirectory( - const std::string & dir, - bool can_remove_shared_data, - const NameSet & names_not_to_remove, - const MergeTreeDataPartChecksums & checksums, - const std::unordered_set & skip_directories, - bool is_temp, - MergeTreeDataPartState state, - Poco::Logger * log, - bool is_projection) const; -}; - -class DataPartStorageBuilderOnDisk final : public IDataPartStorageBuilder -{ -public: - DataPartStorageBuilderOnDisk(VolumePtr volume_, std::string root_path_, std::string part_dir_); - - void setRelativePath(const std::string & path) override; - - bool exists() const override; - void createDirectories() override; void createProjection(const std::string & name) override; - std::string getPartDirectory() const override { return part_dir; } - std::string getFullPath() const override; - std::string getRelativePath() const override; - std::unique_ptr writeFile( const String & name, size_t buf_size, @@ -162,13 +133,7 @@ public: SyncGuardPtr getDirectorySyncGuard() const override; - void createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) const override; - - ReservationPtr reserve(UInt64 bytes) override; - - DataPartStorageBuilderPtr getProjection(const std::string & name) const override; - - DataPartStoragePtr getStorage() const override; + void createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const 
std::string & to) override; void rename( const std::string & new_root_path, @@ -177,13 +142,29 @@ public: bool remove_new_dir_if_exists, bool fsync_part_dir) override; - void commit() override; + void beginTransaction() override; + void commitTransaction() override; + bool hasActiveTransaction() const override { return transaction != nullptr; } private: VolumePtr volume; std::string root_path; std::string part_dir; DiskTransactionPtr transaction; + + template + void executeOperation(Op && op); + + void clearDirectory( + const std::string & dir, + bool can_remove_shared_data, + const NameSet & names_not_to_remove, + const MergeTreeDataPartChecksums & checksums, + const std::unordered_set & skip_directories, + bool is_temp, + MergeTreeDataPartState state, + Poco::Logger * log, + bool is_projection) const; }; } diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 475461aa0d6..3398839131c 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -13,9 +13,9 @@ #include #include #include +#include #include #include -#include #include #include #include @@ -728,13 +728,9 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory( data.getRelativeDataPath(), part_name); - auto data_part_storage_builder = std::make_shared( - volume, - data.getRelativeDataPath(), - part_name); - MergeTreeData::MutableDataPartPtr new_data_part = std::make_shared(data, part_name, data_part_storage); + new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr); for (auto i = 0ul; i < projections; ++i) @@ -750,7 +746,6 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory( throttler->add(block.bytes()); auto projection_part_storage = data_part_storage->getProjection(projection_name + ".proj"); - auto projection_part_storage_builder = data_part_storage_builder->getProjection(projection_name + ".proj"); MergeTreePartInfo new_part_info("all", 0, 0, 0); MergeTreeData::MutableDataPartPtr new_projection_part = @@ -764,7 +759,6 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory( MergedBlockOutputStream part_out( new_projection_part, - projection_part_storage_builder, metadata_snapshot->projections.get(projection_name).metadata, block.getNamesAndTypesList(), {}, @@ -792,7 +786,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory( new_data_part->partition.create(metadata_snapshot, block, 0, context); MergedBlockOutputStream part_out( - new_data_part, data_part_storage_builder, metadata_snapshot, block.getNamesAndTypesList(), {}, + new_data_part, metadata_snapshot, block.getNamesAndTypesList(), {}, CompressionCodecFactory::instance().get("NONE", {}), NO_TRANSACTION_PTR); part_out.write(block); @@ -804,7 +798,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory( void Fetcher::downloadBasePartOrProjectionPartToDiskRemoteMeta( const String & replica_path, - DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, PooledReadWriteBufferFromHTTP & in, MergeTreeData::DataPart::Checksums & checksums, ThrottlerPtr throttler) const @@ -820,7 +814,7 @@ void Fetcher::downloadBasePartOrProjectionPartToDiskRemoteMeta( readStringBinary(file_name, in); readBinary(file_size, in); - String metadata_file = fs::path(data_part_storage_builder->getFullPath()) / file_name; + String metadata_file = fs::path(data_part_storage->getFullPath()) / file_name; { auto file_out = std::make_unique(metadata_file, 
DBMS_DEFAULT_BUFFER_SIZE, -1, 0666, nullptr, 0); @@ -834,8 +828,8 @@ void Fetcher::downloadBasePartOrProjectionPartToDiskRemoteMeta( /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, /// performing a poll with a not very large timeout. /// And now we check it only between read chunks (in the `copyData` function). - data_part_storage_builder->removeSharedRecursive(true); - data_part_storage_builder->commit(); + data_part_storage->removeSharedRecursive(true); + data_part_storage->commitTransaction(); throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); } @@ -861,7 +855,7 @@ void Fetcher::downloadBasePartOrProjectionPartToDiskRemoteMeta( void Fetcher::downloadBaseOrProjectionPartToDisk( const String & replica_path, - DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, bool sync, PooledReadWriteBufferFromHTTP & in, MergeTreeData::DataPart::Checksums & checksums, @@ -880,14 +874,14 @@ void Fetcher::downloadBaseOrProjectionPartToDisk( /// File must be inside "absolute_part_path" directory. /// Otherwise malicious ClickHouse replica may force us to write to arbitrary path. - String absolute_file_path = fs::weakly_canonical(fs::path(data_part_storage_builder->getRelativePath()) / file_name); - if (!startsWith(absolute_file_path, fs::weakly_canonical(data_part_storage_builder->getRelativePath()).string())) + String absolute_file_path = fs::weakly_canonical(fs::path(data_part_storage->getRelativePath()) / file_name); + if (!startsWith(absolute_file_path, fs::weakly_canonical(data_part_storage->getRelativePath()).string())) throw Exception(ErrorCodes::INSECURE_PATH, "File path ({}) doesn't appear to be inside part path ({}). " "This may happen if we are trying to download part from malicious replica or logical error.", - absolute_file_path, data_part_storage_builder->getRelativePath()); + absolute_file_path, data_part_storage->getRelativePath()); - auto file_out = data_part_storage_builder->writeFile(file_name, std::min(file_size, DBMS_DEFAULT_BUFFER_SIZE), {}); + auto file_out = data_part_storage->writeFile(file_name, std::min(file_size, DBMS_DEFAULT_BUFFER_SIZE), {}); HashingWriteBuffer hashing_out(*file_out); copyDataWithThrottler(in, hashing_out, file_size, blocker.getCounter(), throttler); @@ -896,7 +890,7 @@ void Fetcher::downloadBaseOrProjectionPartToDisk( /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, /// performing a poll with a not very large timeout. /// And now we check it only between read chunks (in the `copyData` function). 
- data_part_storage_builder->removeRecursive(); + data_part_storage->removeRecursive(); throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); } @@ -906,7 +900,7 @@ void Fetcher::downloadBaseOrProjectionPartToDisk( if (expected_hash != hashing_out.getHash()) throw Exception(ErrorCodes::CHECKSUM_DOESNT_MATCH, "Checksum mismatch for file {} transferred from {}", - (fs::path(data_part_storage_builder->getFullPath()) / file_name).string(), + (fs::path(data_part_storage->getFullPath()) / file_name).string(), replica_path); if (file_name != "checksums.txt" && @@ -951,15 +945,12 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( part_relative_path, part_dir); - DataPartStorageBuilderPtr data_part_storage_builder = std::make_shared( - volume, - part_relative_path, - part_dir); + data_part_storage->beginTransaction(); - if (data_part_storage_builder->exists()) + if (data_part_storage->exists()) { LOG_WARNING(log, "Directory {} already exists, probably result of a failed fetch. Will remove it before fetching part.", - data_part_storage_builder->getFullPath()); + data_part_storage->getFullPath()); /// Even if it's a temporary part it could be downloaded with zero copy replication and this function /// is executed as a callback. @@ -967,10 +958,10 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( /// We don't control the amount of refs for temporary parts so we cannot decide can we remove blobs /// or not. So we are not doing it bool keep_shared = disk->supportZeroCopyReplication() && data_settings->allow_remote_fs_zero_copy_replication; - data_part_storage_builder->removeSharedRecursive(keep_shared); + data_part_storage->removeSharedRecursive(keep_shared); } - data_part_storage_builder->createDirectories(); + data_part_storage->createDirectories(); SyncGuardPtr sync_guard; if (data.getSettings()->fsync_part_directory) @@ -985,19 +976,18 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( MergeTreeData::DataPart::Checksums projection_checksum; auto projection_part_storage = data_part_storage->getProjection(projection_name + ".proj"); - auto projection_part_storage_builder = data_part_storage_builder->getProjection(projection_name + ".proj"); - - projection_part_storage_builder->createDirectories(); + projection_part_storage->createDirectories(); downloadBaseOrProjectionPartToDisk( - replica_path, projection_part_storage_builder, sync, in, projection_checksum, throttler); + replica_path, projection_part_storage, sync, in, projection_checksum, throttler); checksums.addFile( projection_name + ".proj", projection_checksum.getTotalSizeOnDisk(), projection_checksum.getTotalChecksumUInt128()); } // Download the base part - downloadBaseOrProjectionPartToDisk(replica_path, data_part_storage_builder, sync, in, checksums, throttler); + downloadBaseOrProjectionPartToDisk(replica_path, data_part_storage, sync, in, checksums, throttler); assertEOF(in); + data_part_storage->commitTransaction(); MergeTreeData::MutableDataPartPtr new_data_part = data.createPart(part_name, data_part_storage); new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr); new_data_part->is_temp = true; @@ -1043,17 +1033,14 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDiskRemoteMeta( part_relative_path, part_dir); - DataPartStorageBuilderPtr data_part_storage_builder = std::make_shared( - volume, - part_relative_path, - part_dir); + data_part_storage->beginTransaction(); if (data_part_storage->exists()) throw Exception(ErrorCodes::DIRECTORY_ALREADY_EXISTS, 
"Directory {} already exists.", data_part_storage->getFullPath()); CurrentMetrics::Increment metric_increment{CurrentMetrics::ReplicatedFetch}; - volume->getDisk()->createDirectories(data_part_storage->getFullPath()); + data_part_storage->createDirectories(); for (auto i = 0ul; i < projections; ++i) { @@ -1062,24 +1049,22 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDiskRemoteMeta( MergeTreeData::DataPart::Checksums projection_checksum; auto projection_part_storage = data_part_storage->getProjection(projection_name + ".proj"); - auto projection_part_storage_builder = data_part_storage_builder->getProjection(projection_name + ".proj"); - - projection_part_storage_builder->createDirectories(); + projection_part_storage->createDirectories(); downloadBasePartOrProjectionPartToDiskRemoteMeta( - replica_path, projection_part_storage_builder, in, projection_checksum, throttler); + replica_path, projection_part_storage, in, projection_checksum, throttler); checksums.addFile( projection_name + ".proj", projection_checksum.getTotalSizeOnDisk(), projection_checksum.getTotalChecksumUInt128()); } downloadBasePartOrProjectionPartToDiskRemoteMeta( - replica_path, data_part_storage_builder, in, checksums, throttler); + replica_path, data_part_storage, in, checksums, throttler); assertEOF(in); MergeTreeData::MutableDataPartPtr new_data_part; try { - data_part_storage_builder->commit(); + data_part_storage->commitTransaction(); new_data_part = data.createPart(part_name, data_part_storage); new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr); diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h index 9e453ffb422..59800756c34 100644 --- a/src/Storages/MergeTree/DataPartsExchange.h +++ b/src/Storages/MergeTree/DataPartsExchange.h @@ -94,7 +94,7 @@ public: private: void downloadBaseOrProjectionPartToDisk( const String & replica_path, - DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, bool sync, PooledReadWriteBufferFromHTTP & in, MergeTreeData::DataPart::Checksums & checksums, @@ -102,12 +102,11 @@ private: void downloadBasePartOrProjectionPartToDiskRemoteMeta( const String & replica_path, - DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, PooledReadWriteBufferFromHTTP & in, MergeTreeData::DataPart::Checksums & checksums, ThrottlerPtr throttler) const; - MergeTreeData::MutableDataPartPtr downloadPartToDisk( const String & part_name, const String & replica_path, diff --git a/src/Storages/MergeTree/IDataPartStorage.h b/src/Storages/MergeTree/IDataPartStorage.h index 03627938348..3b005942c54 100644 --- a/src/Storages/MergeTree/IDataPartStorage.h +++ b/src/Storages/MergeTree/IDataPartStorage.h @@ -4,6 +4,7 @@ #include #include #include +#include #include namespace DB @@ -18,6 +19,7 @@ struct CanRemoveDescription NameSet files_not_to_remove; }; + using CanRemoveCallback = std::function; class IDataPartStorageIterator @@ -61,16 +63,14 @@ struct WriteSettings; class TemporaryFileOnDisk; -class IDataPartStorageBuilder; -using DataPartStorageBuilderPtr = std::shared_ptr; - /// This is an abstraction of storage for data part files. /// Ideally, it is assumed to contains read-only methods from IDisk. /// It is not fulfilled now, but let's try our best. 
-class IDataPartStorage +class IDataPartStorage : public boost::noncopyable { public: virtual ~IDataPartStorage() = default; + virtual std::shared_ptr clone() const = 0; /// Methods to get path components of a data part. virtual std::string getFullPath() const = 0; /// '/var/lib/clickhouse/data/database/table/moving/all_1_5_1' @@ -81,7 +81,8 @@ public: /// virtual std::string getRelativeRootPath() const = 0; /// Get a storage for projection. - virtual std::shared_ptr getProjection(const std::string & name) const = 0; + virtual std::shared_ptr getProjection(const std::string & name) const = 0; + virtual std::shared_ptr getProjection(const std::string & name) = 0; /// Part directory exists. virtual bool exists() const = 0; @@ -155,8 +156,8 @@ public: /// Reserve space on the same disk. /// Probably we should try to remove it later. - virtual ReservationPtr reserve(UInt64 /*bytes*/) const { return nullptr; } - virtual ReservationPtr tryReserve(UInt64 /*bytes*/) const { return nullptr; } + virtual ReservationPtr reserve(UInt64 /*bytes*/) { return nullptr; } + virtual ReservationPtr tryReserve(UInt64 /*bytes*/) { return nullptr; } virtual size_t getVolumeIndex(const IStoragePolicy &) const { return 0; } /// Some methods which change data part internals possibly after creation. @@ -205,7 +206,7 @@ public: const NameSet & files_to_copy_instead_of_hardlinks) const = 0; /// Make a full copy of a data part into 'to/dir_path' (possibly to a different disk). - virtual std::shared_ptr clone( + virtual std::shared_ptr clonePart( const std::string & to, const std::string & dir_path, const DiskPtr & disk, @@ -215,29 +216,6 @@ public: /// Right now, this is needed for rename table query. virtual void changeRootPath(const std::string & from_root, const std::string & to_root) = 0; - /// Leak of abstraction as well. We should use builder as one-time object which allow - /// us to build parts, while storage should be read-only method to access part properties - /// related to disk. However our code is really tricky and sometimes we need ad-hoc builders. - virtual DataPartStorageBuilderPtr getBuilder() const = 0; -}; - -using DataPartStoragePtr = std::shared_ptr; - -/// This interface is needed to write data part. -class IDataPartStorageBuilder -{ -public: - virtual ~IDataPartStorageBuilder() = default; - - /// Reset part directory, used for im-memory parts - virtual void setRelativePath(const std::string & path) = 0; - - virtual std::string getPartDirectory() const = 0; - virtual std::string getFullPath() const = 0; - virtual std::string getRelativePath() const = 0; - - virtual bool exists() const = 0; - virtual void createDirectories() = 0; virtual void createProjection(const std::string & name) = 0; @@ -250,13 +228,7 @@ public: virtual SyncGuardPtr getDirectorySyncGuard() const { return nullptr; } - virtual void createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) const = 0; - - virtual ReservationPtr reserve(UInt64 /*bytes*/) { return nullptr; } - - virtual std::shared_ptr getProjection(const std::string & name) const = 0; - - virtual DataPartStoragePtr getStorage() const = 0; + virtual void createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) = 0; /// Rename part. /// Ideally, new_root_path should be the same as current root (but it is not true). 
@@ -271,7 +243,12 @@ public: bool remove_new_dir_if_exists, bool fsync_part_dir) = 0; - virtual void commit() = 0; + virtual void beginTransaction() = 0; + virtual void commitTransaction() = 0; + virtual bool hasActiveTransaction() const = 0; }; +using DataPartStoragePtr = std::shared_ptr; +using MutableDataPartStoragePtr = std::shared_ptr; + } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index cc9a14162f8..6a641f0c94e 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -101,7 +101,7 @@ void IMergeTreeDataPart::MinMaxIndex::load(const MergeTreeData & data, const Par } IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::store( - const MergeTreeData & data, const DataPartStorageBuilderPtr & data_part_storage_builder, Checksums & out_checksums) const + const MergeTreeData & data, const MutableDataPartStoragePtr & part_storage, Checksums & out_checksums) const { auto metadata_snapshot = data.getInMemoryMetadataPtr(); const auto & partition_key = metadata_snapshot->getPartitionKey(); @@ -109,20 +109,20 @@ IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::s auto minmax_column_names = data.getMinMaxColumnsNames(partition_key); auto minmax_column_types = data.getMinMaxColumnsTypes(partition_key); - return store(minmax_column_names, minmax_column_types, data_part_storage_builder, out_checksums); + return store(minmax_column_names, minmax_column_types, part_storage, out_checksums); } IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::store( const Names & column_names, const DataTypes & data_types, - const DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & part_storage, Checksums & out_checksums) const { if (!initialized) throw Exception( ErrorCodes::LOGICAL_ERROR, "Attempt to store uninitialized MinMax index for part {}. 
This is a bug", - data_part_storage_builder->getFullPath()); + part_storage->getFullPath()); WrittenFiles written_files; @@ -131,7 +131,7 @@ IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::s String file_name = "minmax_" + escapeForFileName(column_names[i]) + ".idx"; auto serialization = data_types.at(i)->getDefaultSerialization(); - auto out = data_part_storage_builder->writeFile(file_name, DBMS_DEFAULT_BUFFER_SIZE, {}); + auto out = part_storage->writeFile(file_name, DBMS_DEFAULT_BUFFER_SIZE, {}); HashingWriteBuffer out_hashing(*out); serialization->serializeBinary(hyperrectangle[i].left, out_hashing); serialization->serializeBinary(hyperrectangle[i].right, out_hashing); @@ -301,7 +301,7 @@ static void decrementTypeMetric(MergeTreeDataPartType type) IMergeTreeDataPart::IMergeTreeDataPart( const MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, Type part_type_, const IMergeTreeDataPart * parent_part_) : storage(storage_) @@ -315,6 +315,7 @@ IMergeTreeDataPart::IMergeTreeDataPart( { if (parent_part) state = MergeTreeDataPartState::Active; + incrementStateMetric(state); incrementTypeMetric(part_type); @@ -328,7 +329,7 @@ IMergeTreeDataPart::IMergeTreeDataPart( const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, Type part_type_, const IMergeTreeDataPart * parent_part_) : storage(storage_) @@ -1365,7 +1366,7 @@ bool IMergeTreeDataPart::shallParticipateInMerges(const StoragePolicyPtr & stora return data_part_storage->shallParticipateInMerges(*storage_policy); } -void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_new_dir_if_exists, DataPartStorageBuilderPtr builder) const +void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) const try { assertOnDisk(); @@ -1384,14 +1385,12 @@ try metadata_manager->deleteAll(true); metadata_manager->assertAllDeleted(true); - builder->rename(to.parent_path(), to.filename(), storage.log, remove_new_dir_if_exists, fsync_dir); + data_part_storage->rename(to.parent_path(), to.filename(), storage.log, remove_new_dir_if_exists, fsync_dir); data_part_storage->onRename(to.parent_path(), to.filename()); metadata_manager->updateAll(true); for (const auto & [p_name, part] : projection_parts) - { part->data_part_storage = data_part_storage->getProjection(p_name + ".proj"); - } } catch (...) 
{ @@ -1507,11 +1506,11 @@ std::optional IMergeTreeDataPart::getRelativePathForDetachedPart(const S return {}; } -void IMergeTreeDataPart::renameToDetached(const String & prefix, DataPartStorageBuilderPtr builder) const +void IMergeTreeDataPart::renameToDetached(const String & prefix) const { auto path_to_detach = getRelativePathForDetachedPart(prefix, /* broken */ false); assert(path_to_detach); - renameTo(path_to_detach.value(), true, builder); + renameTo(path_to_detach.value(), true); part_is_probably_removed_from_disk = true; } @@ -1539,7 +1538,7 @@ void IMergeTreeDataPart::makeCloneInDetached(const String & prefix, const Storag {}); } -DataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const +MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const { assertOnDisk(); @@ -1549,7 +1548,7 @@ DataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, con throw Exception("Can not clone data part " + name + " to empty directory.", ErrorCodes::LOGICAL_ERROR); String path_to_clone = fs::path(storage.relative_data_path) / directory_name / ""; - return data_part_storage->clone(path_to_clone, data_part_storage->getPartDirectory(), disk, storage.log); + return data_part_storage->clonePart(path_to_clone, data_part_storage->getPartDirectory(), disk, storage.log); } void IMergeTreeDataPart::checkConsistencyBase() const diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 6f034574fb4..0fe94b666b6 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -67,19 +67,18 @@ public: using uint128 = IPartMetadataManager::uint128; - IMergeTreeDataPart( const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, Type part_type_, const IMergeTreeDataPart * parent_part_); IMergeTreeDataPart( const MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, Type part_type_, const IMergeTreeDataPart * parent_part_); @@ -94,13 +93,12 @@ public: const ReadBufferFromFileBase::ProfileCallback & profile_callback_) const = 0; virtual MergeTreeWriterPtr getWriter( - DataPartStorageBuilderPtr data_part_storage_builder, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const = 0; + const MergeTreeIndexGranularity & computed_index_granularity) = 0; virtual bool isStoredOnDisk() const = 0; @@ -202,7 +200,7 @@ public: /// This is an object which encapsulates all the operations with disk. /// Contains a path to stored data. 
- DataPartStoragePtr data_part_storage; + MutableDataPartStoragePtr data_part_storage; MergeTreeIndexGranularityInfo index_granularity_info; @@ -289,8 +287,8 @@ public: using WrittenFiles = std::vector>; - [[nodiscard]] WrittenFiles store(const MergeTreeData & data, const DataPartStorageBuilderPtr & data_part_storage_builder, Checksums & checksums) const; - [[nodiscard]] WrittenFiles store(const Names & column_names, const DataTypes & data_types, const DataPartStorageBuilderPtr & data_part_storage_builder, Checksums & checksums) const; + [[nodiscard]] WrittenFiles store(const MergeTreeData & data, const MutableDataPartStoragePtr & part_storage, Checksums & checksums) const; + [[nodiscard]] WrittenFiles store(const Names & column_names, const DataTypes & data_types, const MutableDataPartStoragePtr & part_storage, Checksums & checksums) const; void update(const Block & block, const Names & column_names); void merge(const MinMaxIndex & other); @@ -321,17 +319,17 @@ public: size_t getFileSizeOrZero(const String & file_name) const; /// Moves a part to detached/ directory and adds prefix to its name - void renameToDetached(const String & prefix, DataPartStorageBuilderPtr builder) const; + void renameToDetached(const String & prefix) const; /// Makes checks and move part to new directory /// Changes only relative_dir_name, you need to update other metadata (name, is_temp) explicitly - virtual void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists, DataPartStorageBuilderPtr builder) const; + virtual void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) const; /// Makes clone of a part in detached/ directory via hard links virtual void makeCloneInDetached(const String & prefix, const StorageMetadataPtr & metadata_snapshot) const; /// Makes full clone of part in specified subdirectory (relative to storage data directory, e.g. "detached") on another disk - DataPartStoragePtr makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const; + MutableDataPartStoragePtr makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const; /// Checks that .bin and .mrk files exist. 
/// diff --git a/src/Storages/MergeTree/IMergeTreeDataPartInfoForReader.h b/src/Storages/MergeTree/IMergeTreeDataPartInfoForReader.h index 28f834d661d..2e4972c2788 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartInfoForReader.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartInfoForReader.h @@ -7,7 +7,8 @@ namespace DB { class IDataPartStorage; -using DataPartStoragePtr = std::shared_ptr; +using DataPartStoragePtr = std::shared_ptr; + class MergeTreeIndexGranularity; struct MergeTreeDataPartChecksums; struct MergeTreeIndexGranularityInfo; @@ -36,7 +37,7 @@ public: virtual bool isProjectionPart() const = 0; - virtual const DataPartStoragePtr & getDataPartStorage() const = 0; + virtual DataPartStoragePtr getDataPartStorage() const = 0; virtual const NamesAndTypesList & getColumns() const = 0; diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp index 84d0b50ae2f..2488c63e309 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp @@ -38,14 +38,12 @@ Block permuteBlockIfNeeded(const Block & block, const IColumn::Permutation * per } IMergeTreeDataPartWriter::IMergeTreeDataPartWriter( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const MergeTreeWriterSettings & settings_, const MergeTreeIndexGranularity & index_granularity_) : data_part(data_part_) - , data_part_storage_builder(std::move(data_part_storage_builder_)) , storage(data_part_->storage) , metadata_snapshot(metadata_snapshot_) , columns_list(columns_list_) diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index 417e2713180..fa3c675f7da 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -22,8 +22,7 @@ class IMergeTreeDataPartWriter : private boost::noncopyable { public: IMergeTreeDataPartWriter( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const MergeTreeWriterSettings & settings_, @@ -42,8 +41,7 @@ public: protected: - const MergeTreeData::DataPartPtr data_part; - DataPartStorageBuilderPtr data_part_storage_builder; + const MergeTreeMutableDataPartPtr data_part; const MergeTreeData & storage; const StorageMetadataPtr metadata_snapshot; const NamesAndTypesList columns_list; diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp index 5af9bbd3ed8..54f393a65a2 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp @@ -6,14 +6,13 @@ namespace DB { IMergedBlockOutputStream::IMergedBlockOutputStream( - DataPartStorageBuilderPtr data_part_storage_builder_, - const MergeTreeDataPartPtr & data_part, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const NamesAndTypesList & columns_list, bool reset_columns_) : storage(data_part->storage) , metadata_snapshot(metadata_snapshot_) - , data_part_storage_builder(std::move(data_part_storage_builder_)) + , data_part_storage(data_part->data_part_storage) , 
reset_columns(reset_columns_) { if (reset_columns) diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h index dbcca1443b5..ca4e3899b29 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -1,5 +1,6 @@ #pragma once +#include "Storages/MergeTree/IDataPartStorage.h" #include #include #include @@ -12,8 +13,7 @@ class IMergedBlockOutputStream { public: IMergedBlockOutputStream( - DataPartStorageBuilderPtr data_part_storage_builder_, - const MergeTreeDataPartPtr & data_part, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const NamesAndTypesList & columns_list, bool reset_columns_); @@ -42,7 +42,7 @@ protected: const MergeTreeData & storage; StorageMetadataPtr metadata_snapshot; - DataPartStorageBuilderPtr data_part_storage_builder; + MutableDataPartStoragePtr data_part_storage; IMergeTreeDataPart::MergeTreeWriterPtr writer; bool reset_columns = false; diff --git a/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h b/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h index a16aaa728ae..ee265ee6fb1 100644 --- a/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h +++ b/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h @@ -12,7 +12,8 @@ public: explicit LoadedMergeTreeDataPartInfoForReader(MergeTreeData::DataPartPtr data_part_) : IMergeTreeDataPartInfoForReader(data_part_->storage.getContext()) , data_part(data_part_) - {} + { + } bool isCompactPart() const override { return DB::isCompactPart(data_part); } @@ -22,7 +23,7 @@ public: bool isProjectionPart() const override { return data_part->isProjectionPart(); } - const DataPartStoragePtr & getDataPartStorage() const override { return data_part->data_part_storage; } + DataPartStoragePtr getDataPartStorage() const override { return data_part->data_part_storage; } const NamesAndTypesList & getColumns() const override { return data_part->getColumns(); } diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index 18982c3bbf4..182d5df4960 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -294,12 +294,10 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() bool MergeFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWriter write_part_log) { part = merge_task->getFuture().get(); - auto builder = merge_task->getBuilder(); /// Task is not needed merge_task.reset(); - - storage.merger_mutator.renameMergedTemporaryPart(part, parts, NO_TRANSACTION_PTR, *transaction_ptr, builder); + storage.merger_mutator.renameMergedTemporaryPart(part, parts, NO_TRANSACTION_PTR, *transaction_ptr); try { diff --git a/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp index 0dcdd927e7b..cc5e87956a1 100644 --- a/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp @@ -115,10 +115,9 @@ void MergePlainMergeTreeTask::prepare() void MergePlainMergeTreeTask::finish() { new_part = merge_task->getFuture().get(); - auto builder = merge_task->getBuilder(); MergeTreeData::Transaction transaction(storage, txn.get()); - storage.merger_mutator.renameMergedTemporaryPart(new_part, future_part->parts, txn, transaction, builder); + storage.merger_mutator.renameMergedTemporaryPart(new_part, 
future_part->parts, txn, transaction); transaction.commit(); write_part_log({}); diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index c247d2d2476..130d156e53c 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -1,3 +1,4 @@ +#include "Storages/MergeTree/IDataPartStorage.h" #include #include @@ -125,23 +126,26 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() ctx->disk = global_ctx->space_reservation->getDisk(); String local_tmp_part_basename = local_tmp_prefix + global_ctx->future_part->name + local_tmp_suffix; + MutableDataPartStoragePtr data_part_storage; - if (global_ctx->parent_path_storage_builder) + if (global_ctx->parent_part) { - global_ctx->data_part_storage_builder = global_ctx->parent_path_storage_builder->getProjection(local_tmp_part_basename); + data_part_storage = global_ctx->parent_part->data_part_storage->getProjection(local_tmp_part_basename); } else { auto local_single_disk_volume = std::make_shared("volume_" + global_ctx->future_part->name, ctx->disk, 0); - global_ctx->data_part_storage_builder = std::make_shared( + data_part_storage = std::make_shared( local_single_disk_volume, global_ctx->data->relative_data_path, local_tmp_part_basename); } - if (global_ctx->data_part_storage_builder->exists()) - throw Exception("Directory " + global_ctx->data_part_storage_builder->getFullPath() + " already exists", ErrorCodes::DIRECTORY_ALREADY_EXISTS); + data_part_storage->beginTransaction(); + + if (data_part_storage->exists()) + throw Exception("Directory " + data_part_storage->getFullPath() + " already exists", ErrorCodes::DIRECTORY_ALREADY_EXISTS); if (!global_ctx->parent_part) global_ctx->temporary_directory_lock = global_ctx->data->getTemporaryPartDirectoryHolder(local_tmp_part_basename); @@ -163,8 +167,6 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() global_ctx->merging_columns, global_ctx->merging_column_names); - auto data_part_storage = global_ctx->data_part_storage_builder->getStorage(); - global_ctx->new_data_part = global_ctx->data->createPart( global_ctx->future_part->name, global_ctx->future_part->type, @@ -302,7 +304,6 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() global_ctx->to = std::make_shared( global_ctx->new_data_part, - global_ctx->data_part_storage_builder, global_ctx->metadata_snapshot, global_ctx->merging_columns, MergeTreeIndexFactory::instance().getMany(global_ctx->metadata_snapshot->getSecondaryIndices()), @@ -501,7 +502,6 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const ctx->executor = std::make_unique(ctx->column_parts_pipeline); ctx->column_to = std::make_unique( - global_ctx->data_part_storage_builder, global_ctx->new_data_part, global_ctx->metadata_snapshot, ctx->executor->getHeader(), @@ -654,7 +654,6 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c global_ctx->deduplicate_by_columns, projection_merging_params, global_ctx->new_data_part.get(), - global_ctx->data_part_storage_builder.get(), ".proj", NO_TRANSACTION_PTR, global_ctx->data, diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 43aba602052..07d46460423 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -60,7 +60,6 @@ public: Names deduplicate_by_columns_, MergeTreeData::MergingParams merging_params_, const IMergeTreeDataPart * parent_part_, - const IDataPartStorageBuilder * parent_path_storage_builder_, String 
suffix_, MergeTreeTransactionPtr txn, MergeTreeData * data_, @@ -82,7 +81,6 @@ public: global_ctx->deduplicate = std::move(deduplicate_); global_ctx->deduplicate_by_columns = std::move(deduplicate_by_columns_); global_ctx->parent_part = std::move(parent_part_); - global_ctx->parent_path_storage_builder = std::move(parent_path_storage_builder_); global_ctx->data = std::move(data_); global_ctx->mutator = std::move(mutator_); global_ctx->merges_blocker = std::move(merges_blocker_); @@ -102,11 +100,6 @@ public: return global_ctx->promise.get_future(); } - DataPartStorageBuilderPtr getBuilder() - { - return global_ctx->data_part_storage_builder; - } - bool execute(); private: @@ -142,7 +135,6 @@ private: FutureMergedMutatedPartPtr future_part{nullptr}; /// This will be either nullptr or new_data_part, so raw pointer is ok. const IMergeTreeDataPart * parent_part{nullptr}; - const IDataPartStorageBuilder * parent_path_storage_builder{nullptr}; ContextPtr context{nullptr}; time_t time_of_merge{0}; ReservationSharedPtr space_reservation{nullptr}; @@ -168,7 +160,6 @@ private: std::unique_ptr merging_executor; MergeTreeData::MutableDataPartPtr new_data_part{nullptr}; - DataPartStorageBuilderPtr data_part_storage_builder; /// If lightweight delete mask is present then some input rows are filtered out right after reading. std::shared_ptr> input_rows_filtered{std::make_shared>(0)}; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 66950734d5f..9e9d90b6b1e 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1082,7 +1082,6 @@ void MergeTreeData::loadDataPartsFromDisk( if (size_of_part.has_value()) part_size_str = formatReadableSizeWithBinarySuffix(*size_of_part); - LOG_ERROR(log, "Detaching broken part {}{} (size: {}). " "If it happened after update, it is likely because of backward incompatibility. 
" @@ -1397,11 +1396,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) } for (auto & part : broken_parts_to_detach) - { - auto builder = part->data_part_storage->getBuilder(); - part->renameToDetached("broken-on-start", builder); /// detached parts must not have '_' in prefixes - builder->commit(); - } + part->renameToDetached("broken-on-start"); /// detached parts must not have '_' in prefixes for (auto & part : duplicate_parts_to_remove) part->remove(); @@ -2726,7 +2721,7 @@ MergeTreeDataPartType MergeTreeData::choosePartTypeOnDisk(size_t bytes_uncompres MergeTreeData::MutableDataPartPtr MergeTreeData::createPart(const String & name, MergeTreeDataPartType type, const MergeTreePartInfo & part_info, - const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const + const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const { if (type == MergeTreeDataPartType::Compact) return std::make_shared(*this, name, part_info, data_part_storage, parent_part); @@ -2739,14 +2734,14 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::createPart(const String & name, } MergeTreeData::MutableDataPartPtr MergeTreeData::createPart( - const String & name, const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const + const String & name, const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const { return createPart(name, MergeTreePartInfo::fromPartName(name, format_version), data_part_storage, parent_part); } MergeTreeData::MutableDataPartPtr MergeTreeData::createPart( const String & name, const MergeTreePartInfo & part_info, - const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const + const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const { MergeTreeDataPartType type; auto mrk_ext = MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(data_part_storage); @@ -2943,12 +2938,11 @@ MergeTreeData::DataPartsVector MergeTreeData::getActivePartsToReplace( bool MergeTreeData::renameTempPartAndAdd( MutableDataPartPtr & part, Transaction & out_transaction, - DataPartStorageBuilderPtr builder, DataPartsLock & lock) { DataPartsVector covered_parts; - if (!renameTempPartAndReplaceImpl(part, out_transaction, lock, builder, &covered_parts)) + if (!renameTempPartAndReplaceImpl(part, out_transaction, lock, &covered_parts)) return false; if (!covered_parts.empty()) @@ -2982,7 +2976,7 @@ void MergeTreeData::checkPartCanBeAddedToTable(MutableDataPartPtr & part, DataPa } } -void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, DataPartStorageBuilderPtr builder) +void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction) { part->is_temp = false; part->setState(DataPartState::PreActive); @@ -2994,17 +2988,16 @@ void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction return !may_be_cleaned_up || temporary_parts.contains(dir_name); }()); - part->renameTo(part->name, true, builder); + part->renameTo(part->name, true); data_parts_indexes.insert(part); - out_transaction.addPart(part, builder); + out_transaction.addPart(part); } bool MergeTreeData::renameTempPartAndReplaceImpl( MutableDataPartPtr & part, Transaction & out_transaction, DataPartsLock & lock, - DataPartStorageBuilderPtr builder, DataPartsVector * out_covered_parts) { LOG_TRACE(log, "Renaming temporary part {} to {}.", 
part->data_part_storage->getPartDirectory(), part->name); @@ -3029,7 +3022,7 @@ bool MergeTreeData::renameTempPartAndReplaceImpl( /// All checks are passed. Now we can rename the part on disk. /// So, we maintain invariant: if a non-temporary part in filesystem then it is in data_parts - preparePartForCommit(part, out_transaction, builder); + preparePartForCommit(part, out_transaction); if (out_covered_parts) { @@ -3045,21 +3038,19 @@ bool MergeTreeData::renameTempPartAndReplaceImpl( MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplaceUnlocked( MutableDataPartPtr & part, Transaction & out_transaction, - DataPartStorageBuilderPtr builder, DataPartsLock & lock) { DataPartsVector covered_parts; - renameTempPartAndReplaceImpl(part, out_transaction, lock, builder, &covered_parts); + renameTempPartAndReplaceImpl(part, out_transaction, lock, &covered_parts); return covered_parts; } MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace( MutableDataPartPtr & part, - Transaction & out_transaction, - DataPartStorageBuilderPtr builder) + Transaction & out_transaction) { auto part_lock = lockParts(); - return renameTempPartAndReplaceUnlocked(part, out_transaction, builder, part_lock); + return renameTempPartAndReplaceUnlocked(part, out_transaction, part_lock); } void MergeTreeData::removePartsFromWorkingSet(MergeTreeTransaction * txn, const MergeTreeData::DataPartsVector & remove, bool clear_without_timeout, DataPartsLock & acquired_lock) @@ -3280,9 +3271,7 @@ void MergeTreeData::forcefullyMovePartToDetachedAndRemoveFromMemory(const MergeT modifyPartState(it_part, DataPartState::Deleting); - auto builder = part->data_part_storage->getBuilder(); - part->renameToDetached(prefix, builder); - builder->commit(); + part->renameToDetached(prefix); data_parts_indexes.erase(it_part); @@ -4911,19 +4900,13 @@ ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, SpacePtr space) return checkAndReturnReservation(expected_size, std::move(reservation)); } -ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, const DataPartStoragePtr & data_part_storage) +ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, const MutableDataPartStoragePtr & data_part_storage) { expected_size = std::max(RESERVATION_MIN_ESTIMATION_SIZE, expected_size); return data_part_storage->reserve(expected_size); } -ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, const DataPartStorageBuilderPtr & data_part_storage_builder) -{ - expected_size = std::max(RESERVATION_MIN_ESTIMATION_SIZE, expected_size); - return data_part_storage_builder->reserve(expected_size); -} - -ReservationPtr MergeTreeData::tryReserveSpace(UInt64 expected_size, const DataPartStoragePtr & data_part_storage) +ReservationPtr MergeTreeData::tryReserveSpace(UInt64 expected_size, const MutableDataPartStoragePtr & data_part_storage) { expected_size = std::max(RESERVATION_MIN_ESTIMATION_SIZE, expected_size); return data_part_storage->tryReserve(expected_size); @@ -5162,12 +5145,11 @@ void MergeTreeData::Transaction::rollbackPartsToTemporaryState() clear(); } -void MergeTreeData::Transaction::addPart(MutableDataPartPtr & part, DataPartStorageBuilderPtr builder) +void MergeTreeData::Transaction::addPart(MutableDataPartPtr & part) { precommitted_parts.insert(part); if (asInMemoryPart(part)) has_in_memory_parts = true; - part_builders.push_back(builder); } void MergeTreeData::Transaction::rollback() @@ -5205,8 +5187,9 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData: 
auto parts_lock = acquired_parts_lock ? MergeTreeData::DataPartsLock() : data.lockParts(); auto * owing_parts_lock = acquired_parts_lock ? acquired_parts_lock : &parts_lock; - for (auto & builder : part_builders) - builder->commit(); + for (const auto & part : precommitted_parts) + if (part->data_part_storage->hasActiveTransaction()) + part->data_part_storage->commitTransaction(); bool commit_to_wal = has_in_memory_parts && settings->in_memory_parts_enable_wal; if (txn || commit_to_wal) @@ -5215,7 +5198,7 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData: if (commit_to_wal) wal = data.getWriteAheadLog(); - for (const DataPartPtr & part : precommitted_parts) + for (const auto & part : precommitted_parts) { if (txn) { @@ -5240,7 +5223,7 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData: size_t reduce_rows = 0; size_t reduce_parts = 0; - for (const DataPartPtr & part : precommitted_parts) + for (const auto & part : precommitted_parts) { DataPartPtr covering_part; DataPartsVector covered_parts = data.getActivePartsToReplace(part->info, part->name, covering_part, *owing_parts_lock); diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index c4a5d66ccbe..327718d15ed 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -225,15 +225,15 @@ public: /// After this method setColumns must be called MutableDataPartPtr createPart(const String & name, MergeTreeDataPartType type, const MergeTreePartInfo & part_info, - const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; + const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; /// Create part, that already exists on filesystem. /// After this methods 'loadColumnsChecksumsIndexes' must be called. MutableDataPartPtr createPart(const String & name, - const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; + const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; MutableDataPartPtr createPart(const String & name, const MergeTreePartInfo & part_info, - const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; + const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; /// Auxiliary object to add a set of parts into the working set in two steps: /// * First, as PreActive parts (the parts are ready, but not yet in the active set). 
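The Transaction::commit() change a few hunks above is the other half of this header change: instead of collecting DataPartStorageBuilderPtr objects in a part_builders vector, the table-level transaction now asks each pre-committed part's storage to finish its own pending transaction, begun where the temporary part directory was created (MergeTask::prepare in this patch). A self-contained toy model of that protocol follows; the types are stand-ins, not the real MergeTreeData classes, and only the beginTransaction / hasActiveTransaction / commitTransaction trio mirrors the new interface:

#include <cassert>
#include <memory>
#include <vector>

// Stand-in for the transaction surface added to IDataPartStorage in this patch.
struct PartStorage
{
    void beginTransaction() { active = true; }
    void commitTransaction() { active = false; ++committed; }
    bool hasActiveTransaction() const { return active; }

    bool active = false;
    int committed = 0;
};

struct Part
{
    std::shared_ptr<PartStorage> storage = std::make_shared<PartStorage>();
};

int main()
{
    // Where the temporary part directory is created, the storage opens its transaction.
    std::vector<std::shared_ptr<Part>> precommitted_parts;
    for (int i = 0; i < 2; ++i)
    {
        auto part = std::make_shared<Part>();
        part->storage->beginTransaction();
        precommitted_parts.push_back(part);
    }

    // What the table-level commit now does, with no separate vector of builders.
    for (const auto & part : precommitted_parts)
        if (part->storage->hasActiveTransaction())
            part->storage->commitTransaction();

    for (const auto & part : precommitted_parts)
        assert(part->storage->committed == 1);
    return 0;
}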
@@ -247,7 +247,7 @@ public: DataPartsVector commit(MergeTreeData::DataPartsLock * acquired_parts_lock = nullptr); - void addPart(MutableDataPartPtr & part, DataPartStorageBuilderPtr builder); + void addPart(MutableDataPartPtr & part); void rollback(); @@ -276,7 +276,6 @@ public: MergeTreeData & data; MergeTreeTransaction * txn; DataParts precommitted_parts; - std::vector part_builders; DataParts locked_parts; bool has_in_memory_parts = false; @@ -414,9 +413,8 @@ public: SelectQueryInfo & info) const override; ReservationPtr reserveSpace(UInt64 expected_size, VolumePtr & volume) const; - static ReservationPtr tryReserveSpace(UInt64 expected_size, const DataPartStoragePtr & data_part_storage); - static ReservationPtr reserveSpace(UInt64 expected_size, const DataPartStoragePtr & data_part_storage); - static ReservationPtr reserveSpace(UInt64 expected_size, const DataPartStorageBuilderPtr & data_part_storage_builder); + static ReservationPtr tryReserveSpace(UInt64 expected_size, const MutableDataPartStoragePtr & data_part_storage); + static ReservationPtr reserveSpace(UInt64 expected_size, const MutableDataPartStoragePtr & data_part_storage); static bool partsContainSameProjections(const DataPartPtr & left, const DataPartPtr & right); @@ -555,21 +553,18 @@ public: bool renameTempPartAndAdd( MutableDataPartPtr & part, Transaction & transaction, - DataPartStorageBuilderPtr builder, DataPartsLock & lock); /// The same as renameTempPartAndAdd but the block range of the part can contain existing parts. /// Returns all parts covered by the added part (in ascending order). DataPartsVector renameTempPartAndReplace( MutableDataPartPtr & part, - Transaction & out_transaction, - DataPartStorageBuilderPtr builder); + Transaction & out_transaction); /// Unlocked version of previous one. Useful when added multiple parts with a single lock. DataPartsVector renameTempPartAndReplaceUnlocked( MutableDataPartPtr & part, Transaction & out_transaction, - DataPartStorageBuilderPtr builder, DataPartsLock & lock); /// Remove parts from working set immediately (without wait for background @@ -979,7 +974,7 @@ public: /// Fetch part only if some replica has it on shared storage like S3 /// Overridden in StorageReplicatedMergeTree - virtual DataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) { return nullptr; } + virtual bool tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) { return false; } /// Check shared data usage on other replicas for detached/freezed part /// Remove local files and remote files if needed @@ -1270,7 +1265,7 @@ private: /// Preparing itself to be committed in memory: fill some fields inside part, add it to data_parts_indexes /// in precommitted state and to transaction - void preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, DataPartStorageBuilderPtr builder); + void preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction); /// Low-level method for preparing parts for commit (in-memory). 
/// FIXME Merge MergeTreeTransaction and Transaction @@ -1278,7 +1273,6 @@ private: MutableDataPartPtr & part, Transaction & out_transaction, DataPartsLock & lock, - DataPartStorageBuilderPtr builder, DataPartsVector * out_covered_parts); /// RAII Wrapper for atomic work with currently moving parts diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 1a5c94a2e26..c96003c8938 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -483,7 +483,6 @@ MergeTaskPtr MergeTreeDataMergerMutator::mergePartsToTemporaryPart( const MergeTreeData::MergingParams & merging_params, const MergeTreeTransactionPtr & txn, const IMergeTreeDataPart * parent_part, - const IDataPartStorageBuilder * parent_path_storage_builder, const String & suffix) { return std::make_shared( @@ -498,7 +497,6 @@ MergeTaskPtr MergeTreeDataMergerMutator::mergePartsToTemporaryPart( deduplicate_by_columns, merging_params, parent_part, - parent_path_storage_builder, suffix, txn, &data, @@ -540,8 +538,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart MergeTreeData::MutableDataPartPtr & new_data_part, const MergeTreeData::DataPartsVector & parts, const MergeTreeTransactionPtr & txn, - MergeTreeData::Transaction & out_transaction, - DataPartStorageBuilderPtr builder) + MergeTreeData::Transaction & out_transaction) { /// Some of source parts was possibly created in transaction, so non-transactional merge may break isolation. if (data.transactions_enabled.load(std::memory_order_relaxed) && !txn) @@ -549,7 +546,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart "but transactions were enabled for this table"); /// Rename new part, add to the set and remove original parts. - auto replaced_parts = data.renameTempPartAndReplace(new_data_part, out_transaction, builder); + auto replaced_parts = data.renameTempPartAndReplace(new_data_part, out_transaction); /// Let's check that all original parts have been deleted and only them. if (replaced_parts.size() != parts.size()) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index 14eb82c641c..d1b9d3c99e7 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -114,7 +114,6 @@ public: const MergeTreeData::MergingParams & merging_params, const MergeTreeTransactionPtr & txn, const IMergeTreeDataPart * parent_part = nullptr, - const IDataPartStorageBuilder * parent_path_storage_builder = nullptr, const String & suffix = ""); /// Mutate a single data part with the specified commands. Will create and return a temporary part. @@ -133,8 +132,7 @@ public: MergeTreeData::MutableDataPartPtr & new_data_part, const MergeTreeData::DataPartsVector & parts, const MergeTreeTransactionPtr & txn, - MergeTreeData::Transaction & out_transaction, - DataPartStorageBuilderPtr builder); + MergeTreeData::Transaction & out_transaction); /// The approximate amount of disk space needed for merge or mutation. With a surplus. 
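The next group of hunks applies the same simplification to the part writers: getWriter() stops taking a DataPartStorageBuilderPtr and is no longer const, because the writer now reaches the part directory through the part's own mutable storage pointer. A toy sketch of that ownership shape is below; the types are simplified stand-ins (real writer settings, columns, and checksums are omitted), and only the "writer borrows the part's mutable storage" relationship mirrors the patch:

#include <memory>
#include <string>
#include <utility>
#include <vector>

struct PartStorage
{
    void createDirectories() { created = true; }
    // Simplified stand-in for writeFile(name, buffer_size, settings).
    void writeFile(const std::string & name) { files.push_back(name); }

    bool created = false;
    std::vector<std::string> files;
};

struct Writer
{
    // The writer keeps the mutable storage it writes into, taken from the part.
    explicit Writer(std::shared_ptr<PartStorage> storage_) : storage(std::move(storage_))
    {
        if (!storage->created)
            storage->createDirectories();
    }

    void write(const std::string & column) { storage->writeFile(column + ".bin"); }

    std::shared_ptr<PartStorage> storage;
};

struct Part
{
    // Non-const, mirroring the new getWriter(): creating a writer mutates the part's storage.
    std::unique_ptr<Writer> getWriter() { return std::make_unique<Writer>(storage); }

    std::shared_ptr<PartStorage> storage = std::make_shared<PartStorage>();
};

int main()
{
    Part part;
    auto writer = part.getWriter();
    writer->write("x");
    return part.storage->files.size() == 1 ? 0 : 1;
}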
diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp index 9298e841072..4fedacee13c 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp @@ -22,7 +22,7 @@ namespace ErrorCodes MergeTreeDataPartCompact::MergeTreeDataPartCompact( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, data_part_storage_, Type::Compact, parent_part_) { @@ -32,7 +32,7 @@ MergeTreeDataPartCompact::MergeTreeDataPartCompact( const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, info_, data_part_storage_, Type::Compact, parent_part_) { @@ -58,13 +58,12 @@ IMergeTreeDataPart::MergeTreeReaderPtr MergeTreeDataPartCompact::getReader( } IMergeTreeDataPart::MergeTreeWriterPtr MergeTreeDataPartCompact::getWriter( - DataPartStorageBuilderPtr data_part_storage_builder, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const + const MergeTreeIndexGranularity & computed_index_granularity) { NamesAndTypesList ordered_columns_list; std::copy_if(columns_list.begin(), columns_list.end(), std::back_inserter(ordered_columns_list), @@ -75,7 +74,7 @@ IMergeTreeDataPart::MergeTreeWriterPtr MergeTreeDataPartCompact::getWriter( { return *getColumnPosition(lhs.name) < *getColumnPosition(rhs.name); }); return std::make_unique( - shared_from_this(), std::move(data_part_storage_builder), ordered_columns_list, metadata_snapshot, + shared_from_this(), ordered_columns_list, metadata_snapshot, indices_to_recalc, getMarksFileExtension(), default_codec_, writer_settings, computed_index_granularity); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.h b/src/Storages/MergeTree/MergeTreeDataPartCompact.h index d3ac71cb02a..7c3fe012616 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.h @@ -25,13 +25,13 @@ public: const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeDataPartCompact( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeReaderPtr getReader( @@ -45,13 +45,12 @@ public: const ReadBufferFromFileBase::ProfileCallback & profile_callback) const override; MergeTreeWriterPtr getWriter( - DataPartStorageBuilderPtr data_part_storage_builder, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const override; + const 
MergeTreeIndexGranularity & computed_index_granularity) override; bool isStoredOnDisk() const override { return true; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp b/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp index 7a3c5f11c81..c30efbc7969 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp @@ -1,9 +1,10 @@ -#include "MergeTreeDataPartInMemory.h" +#include #include #include #include #include #include +#include #include #include #include @@ -21,7 +22,7 @@ namespace ErrorCodes MergeTreeDataPartInMemory::MergeTreeDataPartInMemory( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, data_part_storage_, Type::InMemory, parent_part_) { @@ -32,7 +33,7 @@ MergeTreeDataPartInMemory::MergeTreeDataPartInMemory( const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, info_, data_part_storage_, Type::InMemory, parent_part_) { @@ -56,27 +57,27 @@ IMergeTreeDataPart::MergeTreeReaderPtr MergeTreeDataPartInMemory::getReader( } IMergeTreeDataPart::MergeTreeWriterPtr MergeTreeDataPartInMemory::getWriter( - DataPartStorageBuilderPtr data_part_storage_builder_, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & /* indices_to_recalc */, const CompressionCodecPtr & /* default_codec */, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & /* computed_index_granularity */) const + const MergeTreeIndexGranularity & /* computed_index_granularity */) { - data_part_storage_builder = data_part_storage_builder_; - auto ptr = std::static_pointer_cast(shared_from_this()); + auto ptr = std::static_pointer_cast(shared_from_this()); return std::make_unique( ptr, columns_list, metadata_snapshot, writer_settings); } -DataPartStoragePtr MergeTreeDataPartInMemory::flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const +MutableDataPartStoragePtr MergeTreeDataPartInMemory::flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const { - auto current_full_path = data_part_storage_builder->getFullPath(); - data_part_storage_builder->setRelativePath(new_relative_path); + auto current_full_path = data_part_storage->getFullPath(); + auto new_data_part_storage = data_part_storage->clone(); + + new_data_part_storage->setRelativePath(new_relative_path); + new_data_part_storage->beginTransaction(); auto new_type = storage.choosePartTypeOnDisk(block.bytes(), rows_count); - auto new_data_part_storage = data_part_storage_builder->getStorage(); auto new_data_part = storage.createPart(name, new_type, info, new_data_part_storage); new_data_part->uuid = uuid; @@ -84,50 +85,50 @@ DataPartStoragePtr MergeTreeDataPartInMemory::flushToDisk(const String & new_rel new_data_part->partition.value = partition.value; new_data_part->minmax_idx = minmax_idx; - if (data_part_storage_builder->exists()) + if (new_data_part_storage->exists()) { throw Exception( ErrorCodes::DIRECTORY_ALREADY_EXISTS, "Could not flush part {}. 
Part in {} already exists", quoteString(current_full_path), - data_part_storage_builder->getFullPath()); + new_data_part_storage->getFullPath()); } - data_part_storage_builder->createDirectories(); + new_data_part_storage->createDirectories(); auto compression_codec = storage.getContext()->chooseCompressionCodec(0, 0); auto indices = MergeTreeIndexFactory::instance().getMany(metadata_snapshot->getSecondaryIndices()); - MergedBlockOutputStream out(new_data_part, data_part_storage_builder, metadata_snapshot, columns, indices, compression_codec, NO_TRANSACTION_PTR); + MergedBlockOutputStream out(new_data_part, metadata_snapshot, columns, indices, compression_codec, NO_TRANSACTION_PTR); out.write(block); const auto & projections = metadata_snapshot->getProjections(); for (const auto & [projection_name, projection] : projection_parts) { if (projections.has(projection_name)) { - auto projection_part_storage_builder = data_part_storage_builder->getProjection(projection_name + ".proj"); - if (projection_part_storage_builder->exists()) + auto projection_part_storage = new_data_part_storage->getProjection(projection_name + ".proj"); + if (projection_part_storage->exists()) { throw Exception( ErrorCodes::DIRECTORY_ALREADY_EXISTS, "Could not flush projection part {}. Projection part in {} already exists", projection_name, - projection_part_storage_builder->getFullPath()); + projection_part_storage->getFullPath()); } auto projection_part = asInMemoryPart(projection); auto projection_type = storage.choosePartTypeOnDisk(projection_part->block.bytes(), rows_count); MergeTreePartInfo projection_info("all", 0, 0, 0); auto projection_data_part - = storage.createPart(projection_name, projection_type, projection_info, projection_part_storage_builder->getStorage(), parent_part); + = storage.createPart(projection_name, projection_type, projection_info, projection_part_storage, parent_part); projection_data_part->is_temp = false; // clean up will be done on parent part projection_data_part->setColumns(projection->getColumns(), {}); - projection_part_storage_builder->createDirectories(); + projection_part_storage->createDirectories(); const auto & desc = projections.get(name); auto projection_compression_codec = storage.getContext()->chooseCompressionCodec(0, 0); auto projection_indices = MergeTreeIndexFactory::instance().getMany(desc.metadata->getSecondaryIndices()); MergedBlockOutputStream projection_out( - projection_data_part, projection_part_storage_builder, desc.metadata, projection_part->columns, projection_indices, + projection_data_part, desc.metadata, projection_part->columns, projection_indices, projection_compression_codec, NO_TRANSACTION_PTR); projection_out.write(projection_part->block); @@ -137,6 +138,7 @@ DataPartStoragePtr MergeTreeDataPartInMemory::flushToDisk(const String & new_rel } out.finalizePart(new_data_part, false); + new_data_part_storage->commitTransaction(); return new_data_part_storage; } @@ -146,12 +148,12 @@ void MergeTreeDataPartInMemory::makeCloneInDetached(const String & prefix, const flushToDisk(detached_path, metadata_snapshot); } -void MergeTreeDataPartInMemory::renameTo(const String & new_relative_path, bool /* remove_new_dir_if_exists */, DataPartStorageBuilderPtr) const +void MergeTreeDataPartInMemory::renameTo(const String & new_relative_path, bool /* remove_new_dir_if_exists */) const { data_part_storage->setRelativePath(new_relative_path); - if (data_part_storage_builder) - data_part_storage_builder->setRelativePath(new_relative_path); + if (data_part_storage) + 
data_part_storage->setRelativePath(new_relative_path); } void MergeTreeDataPartInMemory::calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const diff --git a/src/Storages/MergeTree/MergeTreeDataPartInMemory.h b/src/Storages/MergeTree/MergeTreeDataPartInMemory.h index d985c7f055e..49bc5eff1ea 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartInMemory.h +++ b/src/Storages/MergeTree/MergeTreeDataPartInMemory.h @@ -14,13 +14,13 @@ public: const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeDataPartInMemory( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeReaderPtr getReader( @@ -34,29 +34,27 @@ public: const ReadBufferFromFileBase::ProfileCallback & profile_callback) const override; MergeTreeWriterPtr getWriter( - DataPartStorageBuilderPtr data_part_storage_builder_, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const override; + const MergeTreeIndexGranularity & computed_index_granularity) override; bool isStoredOnDisk() const override { return false; } bool isStoredOnRemoteDisk() const override { return false; } bool isStoredOnRemoteDiskWithZeroCopySupport() const override { return false; } bool hasColumnFiles(const NameAndTypePair & column) const override { return !!getColumnPosition(column.getNameInStorage()); } String getFileNameForColumn(const NameAndTypePair & /* column */) const override { return ""; } - void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists, DataPartStorageBuilderPtr) const override; + void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) const override; void makeCloneInDetached(const String & prefix, const StorageMetadataPtr & metadata_snapshot) const override; - DataPartStoragePtr flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const; + MutableDataPartStoragePtr flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const; /// Returns hash of parts's block Checksum calculateBlockChecksum() const; mutable Block block; - mutable DataPartStorageBuilderPtr data_part_storage_builder; private: mutable std::condition_variable is_merged; @@ -66,6 +64,8 @@ private: }; using DataPartInMemoryPtr = std::shared_ptr; +using MutableDataPartInMemoryPtr = std::shared_ptr; + DataPartInMemoryPtr asInMemoryPart(const MergeTreeDataPartPtr & part); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index 170d1b1d703..18467f2cef7 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -21,7 +21,7 @@ namespace ErrorCodes MergeTreeDataPartWide::MergeTreeDataPartWide( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, 
data_part_storage_, Type::Wide, parent_part_) { @@ -31,7 +31,7 @@ MergeTreeDataPartWide::MergeTreeDataPartWide( const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, info_, data_part_storage_, Type::Wide, parent_part_) { @@ -56,17 +56,16 @@ IMergeTreeDataPart::MergeTreeReaderPtr MergeTreeDataPartWide::getReader( } IMergeTreeDataPart::MergeTreeWriterPtr MergeTreeDataPartWide::getWriter( - DataPartStorageBuilderPtr data_part_storage_builder, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const + const MergeTreeIndexGranularity & computed_index_granularity) { return std::make_unique( - shared_from_this(), data_part_storage_builder, - columns_list, metadata_snapshot, indices_to_recalc, + shared_from_this(), columns_list, + metadata_snapshot, indices_to_recalc, getMarksFileExtension(), default_codec_, writer_settings, computed_index_granularity); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.h b/src/Storages/MergeTree/MergeTreeDataPartWide.h index 52afa9e82d4..4343148b175 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.h @@ -19,13 +19,13 @@ public: const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeDataPartWide( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeReaderPtr getReader( @@ -39,13 +39,12 @@ public: const ReadBufferFromFileBase::ProfileCallback & profile_callback) const override; MergeTreeWriterPtr getWriter( - DataPartStorageBuilderPtr data_part_storage_builder, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const override; + const MergeTreeIndexGranularity & computed_index_granularity) override; bool isStoredOnDisk() const override { return true; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 457aad55023..6821c52f0d2 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -10,8 +10,7 @@ namespace ErrorCodes } MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const std::vector & indices_to_recalc_, @@ -19,16 +18,16 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings 
& settings_, const MergeTreeIndexGranularity & index_granularity_) - : MergeTreeDataPartWriterOnDisk(data_part_, std::move(data_part_storage_builder_), columns_list_, metadata_snapshot_, + : MergeTreeDataPartWriterOnDisk(data_part_, columns_list_, metadata_snapshot_, indices_to_recalc_, marks_file_extension_, default_codec_, settings_, index_granularity_) - , plain_file(data_part_storage_builder->writeFile( + , plain_file(data_part_->data_part_storage->writeFile( MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION, settings.max_compress_block_size, settings_.query_write_settings)) , plain_hashing(*plain_file) { - marks_file = data_part_storage_builder->writeFile( + marks_file = data_part_->data_part_storage->writeFile( MergeTreeDataPartCompact::DATA_FILE_NAME + marks_file_extension_, 4096, settings_.query_write_settings); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h index 7b68f61925f..06f8122393f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h @@ -11,8 +11,7 @@ class MergeTreeDataPartWriterCompact : public MergeTreeDataPartWriterOnDisk { public: MergeTreeDataPartWriterCompact( - const MergeTreeData::DataPartPtr & data_part, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot_, const std::vector & indices_to_recalc, diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.cpp index e1145868ce2..8066a097499 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.cpp @@ -11,11 +11,11 @@ namespace ErrorCodes } MergeTreeDataPartWriterInMemory::MergeTreeDataPartWriterInMemory( - const DataPartInMemoryPtr & part_, + const MutableDataPartInMemoryPtr & part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const MergeTreeWriterSettings & settings_) - : IMergeTreeDataPartWriter(part_, nullptr, columns_list_, metadata_snapshot_, settings_) + : IMergeTreeDataPartWriter(part_, columns_list_, metadata_snapshot_, settings_) , part_in_memory(part_) {} void MergeTreeDataPartWriterInMemory::write( diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.h b/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.h index 233ca81a697..9e1e868beac 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.h @@ -10,7 +10,7 @@ class MergeTreeDataPartWriterInMemory : public IMergeTreeDataPartWriter { public: MergeTreeDataPartWriterInMemory( - const DataPartInMemoryPtr & part_, + const MutableDataPartInMemoryPtr & part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot, const MergeTreeWriterSettings & settings_); @@ -24,7 +24,7 @@ public: private: void calculateAndSerializePrimaryIndex(const Block & primary_index_block); - DataPartInMemoryPtr part_in_memory; + MutableDataPartInMemoryPtr part_in_memory; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 1d2b095330e..383cd79734f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -48,7 +48,7 
@@ void MergeTreeDataPartWriterOnDisk::Stream::sync() const MergeTreeDataPartWriterOnDisk::Stream::Stream( const String & escaped_column_name_, - const DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, const String & data_path_, const std::string & data_file_extension_, const std::string & marks_path_, @@ -61,11 +61,11 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( escaped_column_name(escaped_column_name_), data_file_extension{data_file_extension_}, marks_file_extension{marks_file_extension_}, - plain_file(data_part_storage_builder->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), + plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), plain_hashing(*plain_file), compressor(plain_hashing, compression_codec_, max_compress_block_size_), compressed_hashing(compressor), - marks_file(data_part_storage_builder->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings)), + marks_file(data_part_storage->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings)), marks_hashing(*marks_file), marks_compressor(marks_hashing, marks_compression_codec_, marks_compress_block_size_), marks_compressed_hashing(marks_compressor), @@ -96,8 +96,7 @@ void MergeTreeDataPartWriterOnDisk::Stream::addToChecksums(MergeTreeData::DataPa MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const MergeTreeIndices & indices_to_recalc_, @@ -105,8 +104,7 @@ MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk( const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & settings_, const MergeTreeIndexGranularity & index_granularity_) - : IMergeTreeDataPartWriter(data_part_, std::move(data_part_storage_builder_), - columns_list_, metadata_snapshot_, settings_, index_granularity_) + : IMergeTreeDataPartWriter(data_part_, columns_list_, metadata_snapshot_, settings_, index_granularity_) , skip_indices(indices_to_recalc_) , marks_file_extension(marks_file_extension_) , default_codec(default_codec_) @@ -116,8 +114,8 @@ MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk( if (settings.blocks_are_granules_size && !index_granularity.empty()) throw Exception("Can't take information about index granularity from blocks, when non empty index_granularity array specified", ErrorCodes::LOGICAL_ERROR); - if (!data_part_storage_builder->exists()) - data_part_storage_builder->createDirectories(); + if (!data_part->data_part_storage->exists()) + data_part->data_part_storage->createDirectories(); if (settings.rewrite_primary_key) initPrimaryIndex(); @@ -178,7 +176,7 @@ void MergeTreeDataPartWriterOnDisk::initPrimaryIndex() if (metadata_snapshot->hasPrimaryKey()) { String index_name = "primary" + getIndexExtension(compress_primary_key); - index_file_stream = data_part_storage_builder->writeFile(index_name, DBMS_DEFAULT_BUFFER_SIZE, settings.query_write_settings); + index_file_stream = data_part->data_part_storage->writeFile(index_name, DBMS_DEFAULT_BUFFER_SIZE, settings.query_write_settings); index_file_hashing_stream = std::make_unique(*index_file_stream); if (compress_primary_key) @@ -204,7 +202,7 @@ void MergeTreeDataPartWriterOnDisk::initSkipIndices() 
skip_indices_streams.emplace_back( std::make_unique( stream_name, - data_part_storage_builder, + data_part->data_part_storage, stream_name, index_helper->getSerializedFileExtension(), stream_name, marks_file_extension, default_codec, settings.max_compress_block_size, diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 4b58224de78..ab1adfe7f59 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -50,7 +50,7 @@ public: { Stream( const String & escaped_column_name_, - const DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, const String & data_path_, const std::string & data_file_extension_, const std::string & marks_path_, @@ -92,8 +92,7 @@ public: using StreamPtr = std::unique_ptr; MergeTreeDataPartWriterOnDisk( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot_, const std::vector & indices_to_recalc, diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 70654f521a1..5c6e69abe4d 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -71,8 +71,7 @@ Granules getGranulesToWrite(const MergeTreeIndexGranularity & index_granularity, } MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const std::vector & indices_to_recalc_, @@ -80,7 +79,7 @@ MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & settings_, const MergeTreeIndexGranularity & index_granularity_) - : MergeTreeDataPartWriterOnDisk(data_part_, std::move(data_part_storage_builder_), columns_list_, metadata_snapshot_, + : MergeTreeDataPartWriterOnDisk(data_part_, columns_list_, metadata_snapshot_, indices_to_recalc_, marks_file_extension_, default_codec_, settings_, index_granularity_) { @@ -117,7 +116,7 @@ void MergeTreeDataPartWriterWide::addStreams( column_streams[stream_name] = std::make_unique( stream_name, - data_part_storage_builder, + data_part->data_part_storage, stream_name, DATA_FILE_EXTENSION, stream_name, marks_file_extension, compression_codec, @@ -421,20 +420,18 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai String mrk_path = escaped_name + marks_file_extension; String bin_path = escaped_name + DATA_FILE_EXTENSION; - auto data_part_storage = data_part_storage_builder->getStorage(); - /// Some columns may be removed because of ttl. Skip them. 
- if (!data_part_storage->exists(mrk_path)) + if (!data_part->data_part_storage->exists(mrk_path)) return; - auto mrk_file_in = data_part_storage->readFile(mrk_path, {}, std::nullopt, std::nullopt); + auto mrk_file_in = data_part->data_part_storage->readFile(mrk_path, {}, std::nullopt, std::nullopt); std::unique_ptr mrk_in; if (data_part->index_granularity_info.mark_type.compressed) mrk_in = std::make_unique(std::move(mrk_file_in)); else mrk_in = std::move(mrk_file_in); - DB::CompressedReadBufferFromFile bin_in(data_part_storage->readFile(bin_path, {}, std::nullopt, std::nullopt)); + DB::CompressedReadBufferFromFile bin_in(data_part->data_part_storage->readFile(bin_path, {}, std::nullopt, std::nullopt)); bool must_be_last = false; UInt64 offset_in_compressed_file = 0; UInt64 offset_in_decompressed_block = 0; @@ -485,7 +482,7 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai if (index_granularity_rows != index_granularity.getMarkRows(mark_num)) throw Exception( ErrorCodes::LOGICAL_ERROR, "Incorrect mark rows for part {} for mark #{} (compressed offset {}, decompressed offset {}), in-memory {}, on disk {}, total marks {}", - data_part_storage_builder->getFullPath(), mark_num, offset_in_compressed_file, offset_in_decompressed_block, index_granularity.getMarkRows(mark_num), index_granularity_rows, index_granularity.getMarksCount()); + data_part->data_part_storage->getFullPath(), mark_num, offset_in_compressed_file, offset_in_decompressed_block, index_granularity.getMarkRows(mark_num), index_granularity_rows, index_granularity.getMarksCount()); auto column = type->createColumn(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h index 08815d9930a..633b5119474 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h @@ -18,8 +18,7 @@ class MergeTreeDataPartWriterWide : public MergeTreeDataPartWriterOnDisk { public: MergeTreeDataPartWriterWide( - const MergeTreeData::DataPartPtr & data_part, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 7b99819340e..62d01a2d555 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -378,10 +378,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( data.relative_data_path, TMP_PREFIX + part_name); - auto data_part_storage_builder = std::make_shared( - data_part_volume, - data.relative_data_path, - TMP_PREFIX + part_name); + data_part_storage->beginTransaction(); auto new_data_part = data.createPart( part_name, @@ -413,10 +410,10 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( if (new_data_part->data_part_storage->exists()) { LOG_WARNING(log, "Removing old temporary directory {}", full_path); - data_part_storage_builder->removeRecursive(); + data_part_storage->removeRecursive(); } - data_part_storage_builder->createDirectories(); + data_part_storage->createDirectories(); if (data.getSettings()->fsync_part_directory) { @@ -448,7 +445,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( auto compression_codec = data.getContext()->chooseCompressionCodec(0, 0); const auto & 
index_factory = MergeTreeIndexFactory::instance(); - auto out = std::make_unique(new_data_part, data_part_storage_builder, metadata_snapshot, columns, + auto out = std::make_unique(new_data_part, metadata_snapshot, columns, index_factory.getMany(metadata_snapshot->getSecondaryIndices()), compression_codec, context->getCurrentTransaction(), false, false, context->getWriteSettings()); @@ -459,9 +456,8 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( auto projection_block = projection.calculate(block, context); if (projection_block.rows()) { - auto proj_temp_part = writeProjectionPart(data, log, projection_block, projection, data_part_storage_builder, new_data_part.get()); + auto proj_temp_part = writeProjectionPart(data, log, projection_block, projection, new_data_part.get()); new_data_part->addProjectionPart(projection.name, std::move(proj_temp_part.part)); - proj_temp_part.builder->commit(); for (auto & stream : proj_temp_part.streams) temp_part.streams.emplace_back(std::move(stream)); } @@ -473,7 +469,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( nullptr, nullptr); temp_part.part = new_data_part; - temp_part.builder = data_part_storage_builder; temp_part.streams.emplace_back(TemporaryPart::Stream{.stream = std::move(out), .finalizer = std::move(finalizer)}); ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterRows, block.rows()); @@ -487,7 +482,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( const String & part_name, MergeTreeDataPartType part_type, const String & relative_path, - const DataPartStorageBuilderPtr & data_part_storage_builder, bool is_temp, const IMergeTreeDataPart * parent_part, const MergeTreeData & data, @@ -498,6 +492,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( TemporaryPart temp_part; const StorageMetadataPtr & metadata_snapshot = projection.metadata; MergeTreePartInfo new_part_info("all", 0, 0, 0); + auto projection_part_storage = parent_part->data_part_storage->getProjection(relative_path); auto new_data_part = data.createPart( part_name, @@ -506,7 +501,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( projection_part_storage, parent_part); - auto projection_part_storage_builder = data_part_storage_builder->getProjection(relative_path); new_data_part->is_temp = is_temp; NamesAndTypesList columns = metadata_snapshot->getColumns().getAllPhysical().filter(block.getNames()); @@ -522,10 +516,10 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( if (projection_part_storage->exists()) { LOG_WARNING(log, "Removing old temporary directory {}", projection_part_storage->getFullPath()); - projection_part_storage_builder->removeRecursive(); + projection_part_storage->removeRecursive(); } - projection_part_storage_builder->createDirectories(); + projection_part_storage->createDirectories(); } /// If we need to calculate some columns to sort. 
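A minimal sketch of the write path these hunks converge on, kept outside the diff itself: everything goes through the mutable part storage directly instead of a separate builder object. It reuses only calls visible in this patch (beginTransaction, exists, removeRecursive, createDirectories, writeFile, HashingWriteBuffer, writeIntText, preFinalize); the DataPartStorageOnDisk type and the names writeCountFileSketch, volume, relative_path, part_name, rows_count and settings are placeholders/assumptions, not part of the change, and the final commit of the staged transaction is assumed to happen elsewhere.

// Illustrative sketch only, not part of the patch.
void writeCountFileSketch(
    const VolumePtr & volume,
    const String & relative_path,
    const String & part_name,
    size_t rows_count,
    const WriteSettings & settings,
    MergeTreeDataPartChecksums & checksums)
{
    // The storage object now plays the role the builder used to play.
    auto storage = std::make_shared<DataPartStorageOnDisk>(volume, relative_path, "tmp_" + part_name);

    storage->beginTransaction();           // stage writes instead of going through a builder

    if (storage->exists())                 // stale directory left from a previous attempt
        storage->removeRecursive();
    storage->createDirectories();

    // Same count.txt pattern as finalizePartOnDisk: hash while writing, then record checksums.
    auto out = storage->writeFile("count.txt", 4096, settings);
    HashingWriteBuffer out_hashing(*out);
    writeIntText(rows_count, out_hashing);
    out_hashing.next();

    checksums.files["count.txt"].file_size = out_hashing.count();
    checksums.files["count.txt"].file_hash = out_hashing.getHash();

    out->preFinalize();                    // finalization and transaction commit happen elsewhere
}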
@@ -569,7 +563,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( auto out = std::make_unique( new_data_part, - projection_part_storage_builder, metadata_snapshot, columns, MergeTreeIndices{}, @@ -580,7 +573,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( out->writeWithPermutation(block, perm_ptr); auto finalizer = out->finalizePartAsync(new_data_part, false); temp_part.part = new_data_part; - temp_part.builder = projection_part_storage_builder; temp_part.streams.emplace_back(TemporaryPart::Stream{.stream = std::move(out), .finalizer = std::move(finalizer)}); ProfileEvents::increment(ProfileEvents::MergeTreeDataProjectionWriterRows, block.rows()); @@ -595,7 +587,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPart( Poco::Logger * log, Block block, const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, const IMergeTreeDataPart * parent_part) { String part_name = projection.name; @@ -609,7 +600,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPart( /// Size of part would not be greater than block.bytes() + epsilon size_t expected_size = block.bytes(); // just check if there is enough space on parent volume - data.reserveSpace(expected_size, data_part_storage_builder); + data.reserveSpace(expected_size, parent_part->data_part_storage); part_type = data.choosePartTypeOnDisk(expected_size, block.rows()); } @@ -617,7 +608,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPart( part_name, part_type, part_name + ".proj" /* relative_path */, - data_part_storage_builder, false /* is_temp */, parent_part, data, @@ -633,7 +623,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempProjectionPart( Poco::Logger * log, Block block, const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, const IMergeTreeDataPart * parent_part, size_t block_num) { @@ -648,7 +637,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempProjectionPart( /// Size of part would not be greater than block.bytes() + epsilon size_t expected_size = block.bytes(); // just check if there is enough space on parent volume - data.reserveSpace(expected_size, data_part_storage_builder); + data.reserveSpace(expected_size, parent_part->data_part_storage); part_type = data.choosePartTypeOnDisk(expected_size, block.rows()); } @@ -656,7 +645,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempProjectionPart( part_name, part_type, part_name + ".tmp_proj" /* relative_path */, - data_part_storage_builder, true /* is_temp */, parent_part, data, @@ -670,14 +658,12 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeInMemoryProjectionP Poco::Logger * log, Block block, const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, const IMergeTreeDataPart * parent_part) { return writeProjectionPartImpl( projection.name, MergeTreeDataPartType::InMemory, projection.name + ".proj" /* relative_path */, - data_part_storage_builder, false /* is_temp */, parent_part, data, diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.h b/src/Storages/MergeTree/MergeTreeDataWriter.h index 00438a29fa1..72ceb8b38e3 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.h +++ b/src/Storages/MergeTree/MergeTreeDataWriter.h @@ -52,7 +52,6 @@ public: struct TemporaryPart { MergeTreeData::MutableDataPartPtr part; - 
DataPartStorageBuilderPtr builder; struct Stream { @@ -78,7 +77,6 @@ public: Poco::Logger * log, Block block, const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, const IMergeTreeDataPart * parent_part); /// For mutation: MATERIALIZE PROJECTION. @@ -87,7 +85,6 @@ public: Poco::Logger * log, Block block, const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, const IMergeTreeDataPart * parent_part, size_t block_num); @@ -97,7 +94,6 @@ public: Poco::Logger * log, Block block, const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, const IMergeTreeDataPart * parent_part); static Block mergeBlock( @@ -112,7 +108,6 @@ private: const String & part_name, MergeTreeDataPartType part_type, const String & relative_path, - const DataPartStorageBuilderPtr & data_part_storage_builder, bool is_temp, const IMergeTreeDataPart * parent_part, const MergeTreeData & data, diff --git a/src/Storages/MergeTree/MergeTreePartition.cpp b/src/Storages/MergeTree/MergeTreePartition.cpp index 4ea6ec11ecc..47f07e7bb08 100644 --- a/src/Storages/MergeTree/MergeTreePartition.cpp +++ b/src/Storages/MergeTree/MergeTreePartition.cpp @@ -382,20 +382,20 @@ void MergeTreePartition::load(const MergeTreeData & storage, const PartMetadataM partition_key_sample.getByPosition(i).type->getDefaultSerialization()->deserializeBinary(value[i], *file); } -std::unique_ptr MergeTreePartition::store(const MergeTreeData & storage, const DataPartStorageBuilderPtr & data_part_storage_builder, MergeTreeDataPartChecksums & checksums) const +std::unique_ptr MergeTreePartition::store(const MergeTreeData & storage, const MutableDataPartStoragePtr & data_part_storage, MergeTreeDataPartChecksums & checksums) const { auto metadata_snapshot = storage.getInMemoryMetadataPtr(); const auto & context = storage.getContext(); const auto & partition_key_sample = adjustPartitionKey(metadata_snapshot, storage.getContext()).sample_block; - return store(partition_key_sample, data_part_storage_builder, checksums, context->getWriteSettings()); + return store(partition_key_sample, data_part_storage, checksums, context->getWriteSettings()); } -std::unique_ptr MergeTreePartition::store(const Block & partition_key_sample, const DataPartStorageBuilderPtr & data_part_storage_builder, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const +std::unique_ptr MergeTreePartition::store(const Block & partition_key_sample, const MutableDataPartStoragePtr & data_part_storage, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const { if (!partition_key_sample) return nullptr; - auto out = data_part_storage_builder->writeFile("partition.dat", DBMS_DEFAULT_BUFFER_SIZE, settings); + auto out = data_part_storage->writeFile("partition.dat", DBMS_DEFAULT_BUFFER_SIZE, settings); HashingWriteBuffer out_hashing(*out); for (size_t i = 0; i < value.size(); ++i) { diff --git a/src/Storages/MergeTree/MergeTreePartition.h b/src/Storages/MergeTree/MergeTreePartition.h index 6394641dfa3..c9c6723df5a 100644 --- a/src/Storages/MergeTree/MergeTreePartition.h +++ b/src/Storages/MergeTree/MergeTreePartition.h @@ -15,10 +15,10 @@ class MergeTreeData; struct FormatSettings; struct MergeTreeDataPartChecksums; struct StorageInMemoryMetadata; -class IDataPartStorageBuilder; +class IDataPartStorage; using StorageMetadataPtr = std::shared_ptr; -using DataPartStorageBuilderPtr = std::shared_ptr; +using 
MutableDataPartStoragePtr = std::shared_ptr; /// This class represents a partition value of a single part and encapsulates its loading/storing logic. struct MergeTreePartition @@ -44,8 +44,8 @@ public: /// Store functions return write buffer with written but not finalized data. /// User must call finish() for returned object. - [[nodiscard]] std::unique_ptr store(const MergeTreeData & storage, const DataPartStorageBuilderPtr & data_part_storage_builder, MergeTreeDataPartChecksums & checksums) const; - [[nodiscard]] std::unique_ptr store(const Block & partition_key_sample, const DataPartStorageBuilderPtr & data_part_storage_builder, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const; + [[nodiscard]] std::unique_ptr store(const MergeTreeData & storage, const MutableDataPartStoragePtr & data_part_storage, MergeTreeDataPartChecksums & checksums) const; + [[nodiscard]] std::unique_ptr store(const Block & partition_key_sample, const MutableDataPartStoragePtr & data_part_storage, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const; void assign(const MergeTreePartition & other) { value = other.value; } diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index afeeacbe5d6..5a3f138d727 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -209,8 +209,7 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt auto disk = moving_part.reserved_space->getDisk(); LOG_DEBUG(log, "Cloning part {} from '{}' to '{}'", part->name, part->data_part_storage->getDiskName(), disk->getName()); - DataPartStoragePtr cloned_part_storage; - + MutableDataPartStoragePtr cloned_part_storage; if (disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication) { /// Try zero-copy replication and fallback to default copy if it's not possible @@ -225,12 +224,16 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt disk->createDirectories(path_to_clone); - cloned_part_storage = data->tryToFetchIfShared(*part, disk, fs::path(path_to_clone) / part->name); + bool is_fetched = data->tryToFetchIfShared(*part, disk, fs::path(path_to_clone) / part->name); - if (!cloned_part_storage) + if (!is_fetched) { LOG_INFO(log, "Part {} was not fetched, we are the first who move it to another disk, so we will copy it", part->name); - cloned_part_storage = part->data_part_storage->clone(path_to_clone, part->data_part_storage->getPartDirectory(), disk, log); + cloned_part_storage = part->data_part_storage->clonePart(path_to_clone, part->data_part_storage->getPartDirectory(), disk, log); + } + else + { + cloned_part_storage = part->data_part_storage->clone(); } } else @@ -238,14 +241,13 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt cloned_part_storage = part->makeCloneOnDisk(disk, MergeTreeData::MOVING_DIR_NAME); } - MergeTreeData::MutableDataPartPtr cloned_part = data->createPart(part->name, cloned_part_storage); + auto cloned_part = data->createPart(part->name, cloned_part_storage); LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part->data_part_storage->getFullPath()); cloned_part->loadColumnsChecksumsIndexes(true, true); cloned_part->loadVersionMetadata(); cloned_part->modification_time = cloned_part->data_part_storage->getLastModified().epochTime(); return cloned_part; - } @@ -263,11 +265,8 @@ void MergeTreePartsMover::swapClonedPart(const 
MergeTreeData::DataPartPtr & clon return; } - auto builder = cloned_part->data_part_storage->getBuilder(); /// Don't remove new directory but throw an error because it may contain part which is currently in use. - cloned_part->renameTo(active_part->name, false, builder); - - builder->commit(); + cloned_part->renameTo(active_part->name, false); /// TODO what happen if server goes down here? data->swapActivePart(cloned_part); diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index 5d00db861a8..6178af83913 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -160,7 +160,7 @@ void MergeTreeSink::finishDelayedChunk() } } - added = storage.renameTempPartAndAdd(part, transaction, partition.temp_part.builder, lock); + added = storage.renameTempPartAndAdd(part, transaction, lock); transaction.commit(&lock); } diff --git a/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp b/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp index 4735eae8fdd..85b41f6dd83 100644 --- a/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp +++ b/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp @@ -150,7 +150,6 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore( while (!in->eof()) { MergeTreeData::MutableDataPartPtr part; - DataPartStorageBuilderPtr data_part_storage_builder; UInt8 version; String part_name; Block block; @@ -177,7 +176,6 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore( { auto single_disk_volume = std::make_shared("volume_" + part_name, disk, 0); auto data_part_storage = std::make_shared(single_disk_volume, storage.getRelativeDataPath(), part_name); - data_part_storage_builder = std::make_shared(single_disk_volume, storage.getRelativeDataPath(), part_name); part = storage.createPart( part_name, @@ -222,7 +220,6 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore( { MergedBlockOutputStream part_out( part, - data_part_storage_builder, metadata_snapshot, block.getNamesAndTypesList(), {}, @@ -240,11 +237,12 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore( for (const auto & projection : metadata_snapshot->getProjections()) { auto projection_block = projection.calculate(block, context); - auto temp_part = MergeTreeDataWriter::writeInMemoryProjectionPart(storage, log, projection_block, projection, data_part_storage_builder, part.get()); + auto temp_part = MergeTreeDataWriter::writeInMemoryProjectionPart(storage, log, projection_block, projection, part.get()); temp_part.finalize(); if (projection_block.rows()) part->addProjectionPart(projection.name, std::move(temp_part.part)); } + part_out.finalizePart(part, false); min_block_number = std::min(min_block_number, part->info.min_block); diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 269a78977ad..1d0743a0429 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -14,8 +14,7 @@ namespace ErrorCodes MergedBlockOutputStream::MergedBlockOutputStream( - const MergeTreeDataPartPtr & data_part, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const NamesAndTypesList & columns_list_, const MergeTreeIndices & skip_indices, @@ -24,7 +23,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( bool reset_columns_, bool blocks_are_granules_size, const 
WriteSettings & write_settings_) - : IMergedBlockOutputStream(std::move(data_part_storage_builder_), data_part, metadata_snapshot_, columns_list_, reset_columns_) + : IMergedBlockOutputStream(data_part, metadata_snapshot_, columns_list_, reset_columns_) , columns_list(columns_list_) , default_codec(default_codec_) , write_settings(write_settings_) @@ -38,7 +37,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( blocks_are_granules_size); if (data_part->isStoredOnDisk()) - data_part_storage_builder->createDirectories(); + data_part_storage->createDirectories(); /// We should write version metadata on part creation to distinguish it from parts that were created without transaction. TransactionID tid = txn ? txn->tid : Tx::PrehistoricTID; @@ -47,7 +46,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( data_part->version.setCreationTID(tid, nullptr); data_part->storeVersionMetadata(); - writer = data_part->getWriter(data_part_storage_builder, columns_list, metadata_snapshot, skip_indices, default_codec, writer_settings, {}); + writer = data_part->getWriter(columns_list, metadata_snapshot, skip_indices, default_codec, writer_settings, {}); } /// If data is pre-sorted. @@ -68,17 +67,17 @@ struct MergedBlockOutputStream::Finalizer::Impl { IMergeTreeDataPartWriter & writer; MergeTreeData::MutableDataPartPtr part; - DataPartStorageBuilderPtr data_part_storage_builder; NameSet files_to_remove_after_finish; std::vector> written_files; bool sync; - Impl(IMergeTreeDataPartWriter & writer_, MergeTreeData::MutableDataPartPtr part_, DataPartStorageBuilderPtr data_part_storage_builder_, const NameSet & files_to_remove_after_finish_, bool sync_) + Impl(IMergeTreeDataPartWriter & writer_, MergeTreeData::MutableDataPartPtr part_, const NameSet & files_to_remove_after_finish_, bool sync_) : writer(writer_) , part(std::move(part_)) - , data_part_storage_builder(std::move(data_part_storage_builder_)) , files_to_remove_after_finish(files_to_remove_after_finish_) - , sync(sync_) {} + , sync(sync_) + { + } void finish(); }; @@ -95,7 +94,7 @@ void MergedBlockOutputStream::Finalizer::Impl::finish() writer.finish(sync); for (const auto & file_name : files_to_remove_after_finish) - data_part_storage_builder->removeFile(file_name); + part->data_part_storage->removeFile(file_name); for (auto & file : written_files) { @@ -165,7 +164,7 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( new_part->setColumns(part_columns, serialization_infos); } - auto finalizer = std::make_unique(*writer, new_part, data_part_storage_builder, files_to_remove_after_sync, sync); + auto finalizer = std::make_unique(*writer, new_part, files_to_remove_after_sync, sync); if (new_part->isStoredOnDisk()) finalizer->written_files = finalizePartOnDisk(new_part, checksums); @@ -192,7 +191,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING || isCompactPart(new_part)) { - auto count_out = data_part_storage_builder->writeFile("count.txt", 4096, write_settings); + auto count_out = new_part->data_part_storage->writeFile("count.txt", 4096, write_settings); HashingWriteBuffer count_out_hashing(*count_out); writeIntText(rows_count, count_out_hashing); count_out_hashing.next(); @@ -206,7 +205,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { if (new_part->uuid != UUIDHelpers::Nil) { - auto out = 
data_part_storage_builder->writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, write_settings); + auto out = new_part->data_part_storage->writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, write_settings); HashingWriteBuffer out_hashing(*out); writeUUIDText(new_part->uuid, out_hashing); checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_size = out_hashing.count(); @@ -217,12 +216,12 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING) { - if (auto file = new_part->partition.store(storage, data_part_storage_builder, checksums)) + if (auto file = new_part->partition.store(storage, new_part->data_part_storage, checksums)) written_files.emplace_back(std::move(file)); if (new_part->minmax_idx->initialized) { - auto files = new_part->minmax_idx->store(storage, data_part_storage_builder, checksums); + auto files = new_part->minmax_idx->store(storage, new_part->data_part_storage, checksums); for (auto & file : files) written_files.emplace_back(std::move(file)); } @@ -232,7 +231,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis } { - auto count_out = data_part_storage_builder->writeFile("count.txt", 4096, write_settings); + auto count_out = new_part->data_part_storage->writeFile("count.txt", 4096, write_settings); HashingWriteBuffer count_out_hashing(*count_out); writeIntText(rows_count, count_out_hashing); count_out_hashing.next(); @@ -246,7 +245,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (!new_part->ttl_infos.empty()) { /// Write a file with ttl infos in json format. - auto out = data_part_storage_builder->writeFile("ttl.txt", 4096, write_settings); + auto out = new_part->data_part_storage->writeFile("ttl.txt", 4096, write_settings); HashingWriteBuffer out_hashing(*out); new_part->ttl_infos.write(out_hashing); checksums.files["ttl.txt"].file_size = out_hashing.count(); @@ -257,7 +256,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (!new_part->getSerializationInfos().empty()) { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, write_settings); + auto out = new_part->data_part_storage->writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, write_settings); HashingWriteBuffer out_hashing(*out); new_part->getSerializationInfos().writeJSON(out_hashing); checksums.files[IMergeTreeDataPart::SERIALIZATION_FILE_NAME].file_size = out_hashing.count(); @@ -268,7 +267,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { /// Write a file with a description of columns. 
- auto out = data_part_storage_builder->writeFile("columns.txt", 4096, write_settings); + auto out = new_part->data_part_storage->writeFile("columns.txt", 4096, write_settings); new_part->getColumns().writeText(*out); out->preFinalize(); written_files.emplace_back(std::move(out)); @@ -276,7 +275,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (default_codec != nullptr) { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, write_settings); + auto out = new_part->data_part_storage->writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, write_settings); DB::writeText(queryToString(default_codec->getFullCodecDesc()), *out); out->preFinalize(); written_files.emplace_back(std::move(out)); @@ -289,7 +288,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { /// Write file with checksums. - auto out = data_part_storage_builder->writeFile("checksums.txt", 4096, write_settings); + auto out = new_part->data_part_storage->writeFile("checksums.txt", 4096, write_settings); checksums.write(*out); out->preFinalize(); written_files.emplace_back(std::move(out)); diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.h b/src/Storages/MergeTree/MergedBlockOutputStream.h index 92dcd8dd272..799bae8e94b 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.h +++ b/src/Storages/MergeTree/MergedBlockOutputStream.h @@ -15,8 +15,7 @@ class MergedBlockOutputStream final : public IMergedBlockOutputStream { public: MergedBlockOutputStream( - const MergeTreeDataPartPtr & data_part, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const NamesAndTypesList & columns_list_, const MergeTreeIndices & skip_indices, diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index dd75cddd380..310bc849ffe 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -11,8 +11,7 @@ namespace ErrorCodes } MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( - DataPartStorageBuilderPtr data_part_storage_builder_, - const MergeTreeDataPartPtr & data_part, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const Block & header_, CompressionCodecPtr default_codec, @@ -20,7 +19,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( WrittenOffsetColumns * offset_columns_, const MergeTreeIndexGranularity & index_granularity, const MergeTreeIndexGranularityInfo * index_granularity_info) - : IMergedBlockOutputStream(std::move(data_part_storage_builder_), data_part, metadata_snapshot_, header_.getNamesAndTypesList(), /*reset_columns=*/ true) + : IMergedBlockOutputStream(data_part, metadata_snapshot_, header_.getNamesAndTypesList(), /*reset_columns=*/ true) , header(header_) { const auto & global_settings = data_part->storage.getContext()->getSettings(); @@ -34,7 +33,6 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( /* rewrite_primary_key = */ false); writer = data_part->getWriter( - data_part_storage_builder, header.getNamesAndTypesList(), metadata_snapshot_, indices_to_recalc, @@ -81,7 +79,7 @@ MergedColumnOnlyOutputStream::fillChecksums( for (const String & removed_file : removed_files) { - data_part_storage_builder->removeFileIfExists(removed_file); + 
new_part->data_part_storage->removeFileIfExists(removed_file); if (all_checksums.files.contains(removed_file)) all_checksums.files.erase(removed_file); diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h index 1fd1c752226..f382b0fef60 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h @@ -14,8 +14,7 @@ public: /// Pass empty 'already_written_offset_columns' first time then and pass the same object to subsequent instances of MergedColumnOnlyOutputStream /// if you want to serialize elements of Nested data structure in different instances of MergedColumnOnlyOutputStream. MergedColumnOnlyOutputStream( - DataPartStorageBuilderPtr data_part_storage_builder_, - const MergeTreeDataPartPtr & data_part, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const Block & header_, CompressionCodecPtr default_codec_, diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index 549c4e7373f..a4a75a637a4 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -193,12 +193,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare() bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWriter write_part_log) { new_part = mutate_task->getFuture().get(); - auto builder = mutate_task->getBuilder(); - - if (!builder) - builder = new_part->data_part_storage->getBuilder(); - - storage.renameTempPartAndReplace(new_part, *transaction_ptr, builder); + storage.renameTempPartAndReplace(new_part, *transaction_ptr); try { diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 0cf10ee1935..b1714076a46 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -83,14 +83,9 @@ bool MutatePlainMergeTreeTask::executeStep() new_part = mutate_task->getFuture().get(); - auto builder = mutate_task->getBuilder(); - if (!builder) - builder = new_part->data_part_storage->getBuilder(); - - MergeTreeData::Transaction transaction(storage, merge_mutate_entry->txn.get()); /// FIXME Transactions: it's too optimistic, better to lock parts before starting transaction - storage.renameTempPartAndReplace(new_part, transaction, builder); + storage.renameTempPartAndReplace(new_part, transaction); transaction.commit(); storage.updateMutationEntriesErrors(future_part, true, ""); diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 3d964e60798..80bf7e3c7d9 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -619,7 +619,6 @@ static NameToNameVector collectFilesForRenames( /// Initialize and write to disk new part fields like checksums, columns, etc. 
void finalizeMutatedPart( const MergeTreeDataPartPtr & source_part, - const DataPartStorageBuilderPtr & data_part_storage_builder, MergeTreeData::MutableDataPartPtr new_data_part, ExecuteTTLType execute_ttl_type, const CompressionCodecPtr & codec, @@ -627,7 +626,7 @@ void finalizeMutatedPart( { if (new_data_part->uuid != UUIDHelpers::Nil) { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, context->getWriteSettings()); + auto out = new_data_part->data_part_storage->writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out); writeUUIDText(new_data_part->uuid, out_hashing); new_data_part->checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_size = out_hashing.count(); @@ -637,7 +636,7 @@ void finalizeMutatedPart( if (execute_ttl_type != ExecuteTTLType::NONE) { /// Write a file with ttl infos in json format. - auto out_ttl = data_part_storage_builder->writeFile("ttl.txt", 4096, context->getWriteSettings()); + auto out_ttl = new_data_part->data_part_storage->writeFile("ttl.txt", 4096, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out_ttl); new_data_part->ttl_infos.write(out_hashing); new_data_part->checksums.files["ttl.txt"].file_size = out_hashing.count(); @@ -646,7 +645,7 @@ void finalizeMutatedPart( if (!new_data_part->getSerializationInfos().empty()) { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, context->getWriteSettings()); + auto out = new_data_part->data_part_storage->writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out); new_data_part->getSerializationInfos().writeJSON(out_hashing); new_data_part->checksums.files[IMergeTreeDataPart::SERIALIZATION_FILE_NAME].file_size = out_hashing.count(); @@ -655,18 +654,18 @@ void finalizeMutatedPart( { /// Write file with checksums. - auto out_checksums = data_part_storage_builder->writeFile("checksums.txt", 4096, context->getWriteSettings()); + auto out_checksums = new_data_part->data_part_storage->writeFile("checksums.txt", 4096, context->getWriteSettings()); new_data_part->checksums.write(*out_checksums); } /// close fd { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, context->getWriteSettings()); + auto out = new_data_part->data_part_storage->writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, context->getWriteSettings()); DB::writeText(queryToString(codec->getFullCodecDesc()), *out); } /// close fd { /// Write a file with a description of columns. 
- auto out_columns = data_part_storage_builder->writeFile("columns.txt", 4096, context->getWriteSettings()); + auto out_columns = new_data_part->data_part_storage->writeFile("columns.txt", 4096, context->getWriteSettings()); new_data_part->getColumns().writeText(*out_columns); } /// close fd @@ -734,8 +733,6 @@ struct MutationContext = MutationsInterpreter::MutationKind::MutationKindEnum::MUTATE_UNKNOWN; MergeTreeData::MutableDataPartPtr new_data_part; - DataPartStorageBuilderPtr data_part_storage_builder; - IMergedBlockOutputStreamPtr out{nullptr}; String mrk_extension; @@ -816,11 +813,9 @@ public: if (next_level_parts.empty()) { LOG_DEBUG(log, "Merged a projection part in level {}", current_level); - auto builder = selected_parts[0]->data_part_storage->getBuilder(); - selected_parts[0]->renameTo(projection.name + ".proj", true, builder); + selected_parts[0]->renameTo(projection.name + ".proj", true); selected_parts[0]->name = projection.name; selected_parts[0]->is_temp = false; - builder->commit(); ctx->new_data_part->addProjectionPart(name, std::move(selected_parts[0])); /// Task is finished @@ -865,7 +860,6 @@ public: projection_merging_params, NO_TRANSACTION_PTR, ctx->new_data_part.get(), - ctx->data_part_storage_builder.get(), ".tmp_proj"); next_level_parts.push_back(executeHere(tmp_part_merge_task)); @@ -1025,8 +1019,7 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() if (projection_block) { auto tmp_part = MergeTreeDataWriter::writeTempProjectionPart( - *ctx->data, ctx->log, projection_block, projection, ctx->data_part_storage_builder, ctx->new_data_part.get(), ++block_num); - tmp_part.builder->commit(); + *ctx->data, ctx->log, projection_block, projection, ctx->new_data_part.get(), ++block_num); tmp_part.finalize(); projection_parts[projection.name].emplace_back(std::move(tmp_part.part)); } @@ -1048,8 +1041,7 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() if (projection_block) { auto temp_part = MergeTreeDataWriter::writeTempProjectionPart( - *ctx->data, ctx->log, projection_block, projection, ctx->data_part_storage_builder, ctx->new_data_part.get(), ++block_num); - temp_part.builder->commit(); + *ctx->data, ctx->log, projection_block, projection, ctx->new_data_part.get(), ++block_num); temp_part.finalize(); projection_parts[projection.name].emplace_back(std::move(temp_part.part)); } @@ -1149,7 +1141,7 @@ private: void prepare() { - ctx->data_part_storage_builder->createDirectories(); + ctx->new_data_part->data_part_storage->createDirectories(); /// Note: this is done before creating input streams, because otherwise data.data_parts_mutex /// (which is locked in data.getTotalActiveSizeInBytes()) @@ -1184,7 +1176,6 @@ private: ctx->out = std::make_shared( ctx->new_data_part, - ctx->data_part_storage_builder, ctx->metadata_snapshot, ctx->new_data_part->getColumns(), skip_part_indices, @@ -1280,7 +1271,7 @@ private: if (ctx->execute_ttl_type != ExecuteTTLType::NONE) ctx->files_to_skip.insert("ttl.txt"); - ctx->data_part_storage_builder->createDirectories(); + ctx->new_data_part->data_part_storage->createDirectories(); /// We should write version metadata on part creation to distinguish it from parts that were created without transaction. TransactionID tid = ctx->txn ? 
ctx->txn->tid : Tx::PrehistoricTID; @@ -1317,22 +1308,22 @@ private: if (it->isFile()) { - ctx->data_part_storage_builder->createHardLinkFrom( + ctx->new_data_part->data_part_storage->createHardLinkFrom( *ctx->source_part->data_part_storage, it->name(), destination); hardlinked_files.insert(it->name()); } else if (!endsWith(it->name(), ".tmp_proj")) // ignore projection tmp merge dir { // it's a projection part directory - ctx->data_part_storage_builder->createProjection(destination); + ctx->new_data_part->data_part_storage->createProjection(destination); - auto projection_data_part_storage = ctx->source_part->data_part_storage->getProjection(destination); - auto projection_data_part_storage_builder = ctx->data_part_storage_builder->getProjection(destination); + auto projection_data_part_storage_src = ctx->source_part->data_part_storage->getProjection(destination); + auto projection_data_part_storage_dst = ctx->new_data_part->data_part_storage->getProjection(destination); - for (auto p_it = projection_data_part_storage->iterate(); p_it->isValid(); p_it->next()) + for (auto p_it = projection_data_part_storage_src->iterate(); p_it->isValid(); p_it->next()) { - projection_data_part_storage_builder->createHardLinkFrom( - *projection_data_part_storage, p_it->name(), p_it->name()); + projection_data_part_storage_dst->createHardLinkFrom( + *projection_data_part_storage_src, p_it->name(), p_it->name()); hardlinked_files.insert(p_it->name()); } } @@ -1362,7 +1353,6 @@ private: builder.addTransform(std::make_shared(builder.getHeader(), *ctx->data, ctx->metadata_snapshot, ctx->new_data_part, ctx->time_of_mutation, true)); ctx->out = std::make_shared( - ctx->data_part_storage_builder, ctx->new_data_part, ctx->metadata_snapshot, ctx->updated_header, @@ -1414,7 +1404,7 @@ private: } } - MutationHelpers::finalizeMutatedPart(ctx->source_part, ctx->data_part_storage_builder, ctx->new_data_part, ctx->execute_ttl_type, ctx->compression_codec, ctx->context); + MutationHelpers::finalizeMutatedPart(ctx->source_part, ctx->new_data_part, ctx->execute_ttl_type, ctx->compression_codec, ctx->context); } @@ -1584,10 +1574,7 @@ bool MutateTask::prepare() ctx->data->getRelativeDataPath(), tmp_part_dir_name); - ctx->data_part_storage_builder = std::make_shared( - single_disk_volume, - ctx->data->getRelativeDataPath(), - tmp_part_dir_name); + data_part_storage->beginTransaction(); ctx->new_data_part = ctx->data->createPart( ctx->future_part->name, ctx->future_part->type, ctx->future_part->part_info, data_part_storage); @@ -1690,9 +1677,4 @@ const MergeTreeData::HardlinkedFiles & MutateTask::getHardlinkedFiles() const return ctx->hardlinked_files; } -DataPartStorageBuilderPtr MutateTask::getBuilder() const -{ - return ctx->data_part_storage_builder; -} - } diff --git a/src/Storages/MergeTree/MutateTask.h b/src/Storages/MergeTree/MutateTask.h index 1f2e8a6fd20..3df30670d7f 100644 --- a/src/Storages/MergeTree/MutateTask.h +++ b/src/Storages/MergeTree/MutateTask.h @@ -46,7 +46,7 @@ public: const MergeTreeData::HardlinkedFiles & getHardlinkedFiles() const; - DataPartStorageBuilderPtr getBuilder() const; + MutableDataPartStoragePtr getBuilder() const; private: diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 158cbfca9fd..293c679dcd0 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -268,7 +268,7 @@ void ReplicatedMergeTreeSink::finishDelayedChunk(zkutil::ZooKeeperPtr & zookeepe try 
{ - commitPart(zookeeper, part, partition.block_id, partition.temp_part.builder, delayed_chunk->replicas_num); + commitPart(zookeeper, part, partition.block_id, delayed_chunk->replicas_num); last_block_is_duplicate = last_block_is_duplicate || part->is_duplicate; @@ -301,7 +301,7 @@ void ReplicatedMergeTreeSink::writeExistingPart(MergeTreeData::MutableDataPartPt try { part->version.setCreationTID(Tx::PrehistoricTID, nullptr); - commitPart(zookeeper, part, "", part->data_part_storage->getBuilder(), replicas_num); + commitPart(zookeeper, part, "", replicas_num); PartLog::addNewPart(storage.getContext(), part, watch.elapsed()); } catch (...) @@ -315,7 +315,6 @@ void ReplicatedMergeTreeSink::commitPart( zkutil::ZooKeeperPtr & zookeeper, MergeTreeData::MutableDataPartPtr & part, const String & block_id, - DataPartStorageBuilderPtr builder, size_t replicas_num) { /// It is possible that we alter a part with different types of source columns. @@ -499,7 +498,7 @@ void ReplicatedMergeTreeSink::commitPart( try { auto lock = storage.lockParts(); - renamed = storage.renameTempPartAndAdd(part, transaction, builder, lock); + renamed = storage.renameTempPartAndAdd(part, transaction, lock); } catch (const Exception & e) { @@ -563,8 +562,7 @@ void ReplicatedMergeTreeSink::commitPart( transaction.rollbackPartsToTemporaryState(); part->is_temp = true; - part->renameTo(temporary_part_relative_path, false, builder); - builder->commit(); + part->renameTo(temporary_part_relative_path, false); /// If this part appeared on other replica than it's better to try to write it locally one more time. If it's our part /// than it will be ignored on the next itration. diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h index ab729e6edec..da87ddc0d63 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h @@ -79,7 +79,6 @@ private: zkutil::ZooKeeperPtr & zookeeper, MergeTreeData::MutableDataPartPtr & part, const String & block_id, - DataPartStorageBuilderPtr part_builder, size_t replicas_num); /// Wait for quorum to be satisfied on path (quorum_path) form part (part_name) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 7cfce882e7a..1aeca1343c2 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1519,9 +1519,8 @@ PartitionCommandsResultInfo StorageMergeTree::attachPartition( MergeTreeData::Transaction transaction(*this, local_context->getCurrentTransaction().get()); { auto lock = lockParts(); - auto builder = loaded_parts[i]->data_part_storage->getBuilder(); fillNewPartName(loaded_parts[i], lock); - renameTempPartAndAdd(loaded_parts[i], transaction, builder, lock); + renameTempPartAndAdd(loaded_parts[i], transaction, lock); transaction.commit(&lock); } @@ -1604,9 +1603,7 @@ void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, con for (auto part : dst_parts) { fillNewPartName(part, data_parts_lock); - - auto builder = part->data_part_storage->getBuilder(); - renameTempPartAndReplaceUnlocked(part, transaction, builder, data_parts_lock); + renameTempPartAndReplaceUnlocked(part, transaction, data_parts_lock); } /// Populate transaction transaction.commit(&data_parts_lock); @@ -1685,9 +1682,8 @@ void StorageMergeTree::movePartitionToTable(const StoragePtr & dest_table, const for (auto & part : dst_parts) { - auto builder = part->data_part_storage->getBuilder(); dest_table_storage->fillNewPartName(part, 
dest_data_parts_lock); - dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, builder, dest_data_parts_lock); + dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, dest_data_parts_lock); } @@ -1810,17 +1806,15 @@ BackupEntries StorageMergeTree::backupMutations(UInt64 version, const String & d void StorageMergeTree::attachRestoredParts(MutableDataPartsVector && parts) { - for (auto part : parts) { /// It's important to create it outside of lock scope because /// otherwise it can lock parts in destructor and deadlock is possible. MergeTreeData::Transaction transaction(*this, NO_TRANSACTION_RAW); - auto builder = part->data_part_storage->getBuilder(); { auto lock = lockParts(); fillNewPartName(part, lock); - renameTempPartAndAdd(part, transaction, builder, lock); + renameTempPartAndAdd(part, transaction, lock); transaction.commit(&lock); } } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index ef2d91fabfc..52a30dd37b3 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1454,6 +1454,7 @@ MergeTreeData::MutableDataPartPtr StorageReplicatedMergeTree::attachPartHelperFo const String part_new_name = actual_part_info.getPartName(); for (const DiskPtr & disk : getStoragePolicy()->getDisks()) + { for (const auto it = disk->iterateDirectory(fs::path(relative_data_path) / "detached/"); it->isValid(); it->next()) { const auto part_info = MergeTreePartInfo::tryParsePartName(it->name(), format_version); @@ -1490,6 +1491,7 @@ MergeTreeData::MutableDataPartPtr StorageReplicatedMergeTree::attachPartHelperFo return part; } } + } return {}; } @@ -1540,8 +1542,7 @@ bool StorageReplicatedMergeTree::executeLogEntry(LogEntry & entry) Transaction transaction(*this, NO_TRANSACTION_RAW); part->version.setCreationTID(Tx::PrehistoricTID, nullptr); - auto builder = part->data_part_storage->getBuilder(); - renameTempPartAndReplace(part, transaction, builder); + renameTempPartAndReplace(part, transaction); checkPartChecksumsAndCommit(transaction, part); writePartLog(PartLogElement::Type::NEW_PART, {}, 0 /** log entry is fake so we don't measure the time */, @@ -1780,7 +1781,7 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry, bool need_to_che } -DataPartStoragePtr StorageReplicatedMergeTree::executeFetchShared( +bool StorageReplicatedMergeTree::executeFetchShared( const String & source_replica, const String & new_part_name, const DiskPtr & disk, @@ -1789,7 +1790,7 @@ DataPartStoragePtr StorageReplicatedMergeTree::executeFetchShared( if (source_replica.empty()) { LOG_INFO(log, "No active replica has part {} on shared storage.", new_part_name); - return nullptr; + return false; } const auto storage_settings_ptr = getSettings(); @@ -2225,8 +2226,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) Coordination::Requests ops; for (PartDescriptionPtr & part_desc : final_parts) { - auto builder = part_desc->res_part->data_part_storage->getBuilder(); - renameTempPartAndReplace(part_desc->res_part, transaction, builder); + renameTempPartAndReplace(part_desc->res_part, transaction); getCommitPartOps(ops, part_desc->res_part); lockSharedData(*part_desc->res_part, false, part_desc->hardlinked_files); @@ -2323,9 +2323,7 @@ void StorageReplicatedMergeTree::executeClonePartFromShard(const LogEntry & entr part = get_part(); // The fetched part is valuable and should not be cleaned like a temp part. 
part->is_temp = false; - auto builder = part->data_part_storage->getBuilder(); - part->renameTo("detached/" + entry.new_part_name, true, builder); - builder->commit(); + part->renameTo("detached/" + entry.new_part_name, true); LOG_INFO(log, "Cloned part {} to detached directory", part->name); } @@ -3990,11 +3988,10 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora { part = get_part(); - auto builder = part->data_part_storage->getBuilder(); if (!to_detached) { Transaction transaction(*this, NO_TRANSACTION_RAW); - renameTempPartAndReplace(part, transaction, builder); + renameTempPartAndReplace(part, transaction); replaced_parts = checkPartChecksumsAndCommit(transaction, part, hardlinked_files); @@ -4036,8 +4033,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora { // The fetched part is valuable and should not be cleaned like a temp part. part->is_temp = false; - part->renameTo(fs::path("detached") / part_name, true, builder); - builder->commit(); + part->renameTo(fs::path("detached") / part_name, true); } } catch (const Exception & e) @@ -4071,7 +4067,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora } -DataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( +bool StorageReplicatedMergeTree::fetchExistsPart( const String & part_name, const StorageMetadataPtr & metadata_snapshot, const String & source_replica_path, @@ -4086,7 +4082,7 @@ DataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( LOG_DEBUG(log, "Part {} should be deleted after previous attempt before fetch", part->name); /// Force immediate parts cleanup to delete the part that was left from the previous fetch attempt. cleanup_thread.wakeup(); - return nullptr; + return false; } { @@ -4094,7 +4090,7 @@ DataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( if (!currently_fetching_parts.insert(part_name).second) { LOG_DEBUG(log, "Part {} is already fetching right now", part_name); - return nullptr; + return false; } } @@ -4150,10 +4146,7 @@ DataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( throw Exception("Part " + part->name + " fetched on wrong disk " + part->data_part_storage->getDiskName(), ErrorCodes::LOGICAL_ERROR); auto replaced_path = fs::path(replaced_part_path); - auto builder = part->data_part_storage->getBuilder(); - builder->rename(replaced_path.parent_path(), replaced_path.filename(), nullptr, true, false); - part->data_part_storage->onRename(replaced_path.parent_path(), replaced_path.filename()); - builder->commit(); + part->data_part_storage->rename(replaced_path.parent_path(), replaced_path.filename(), nullptr, true, false); } catch (const Exception & e) { @@ -4162,7 +4155,7 @@ DataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( if (e.code() == ErrorCodes::DIRECTORY_ALREADY_EXISTS) { LOG_TRACE(log, "Not fetching part: {}", e.message()); - return nullptr; + return false; } throw; @@ -4176,8 +4169,7 @@ DataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( ProfileEvents::increment(ProfileEvents::ReplicatedPartFetches); LOG_DEBUG(log, "Fetched part {} from {}", part_name, source_replica_path); - - return part->data_part_storage; + return true; } void StorageReplicatedMergeTree::startup() @@ -6592,10 +6584,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom( { auto data_parts_lock = lockParts(); for (auto & part : dst_parts) - { - auto builder = part->data_part_storage->getBuilder(); - renameTempPartAndReplaceUnlocked(part, transaction, builder, 
data_parts_lock); - } + renameTempPartAndReplaceUnlocked(part, transaction, data_parts_lock); } for (size_t i = 0; i < dst_parts.size(); ++i) @@ -6831,10 +6820,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta auto dest_data_parts_lock = dest_table_storage->lockParts(); for (auto & part : dst_parts) - { - auto builder = part->data_part_storage->getBuilder(); - dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, builder, dest_data_parts_lock); - } + dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, dest_data_parts_lock); for (size_t i = 0; i < dst_parts.size(); ++i) dest_table_storage->lockSharedData(*dst_parts[i], false, hardlinked_files_for_parts[i]); @@ -7888,7 +7874,7 @@ std::pair StorageReplicatedMergeTree::unlockSharedDataByID( } -DataPartStoragePtr StorageReplicatedMergeTree::tryToFetchIfShared( +bool StorageReplicatedMergeTree::tryToFetchIfShared( const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) @@ -7896,13 +7882,13 @@ DataPartStoragePtr StorageReplicatedMergeTree::tryToFetchIfShared( const auto settings = getSettings(); auto data_source_description = disk->getDataSourceDescription(); if (!(disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication)) - return nullptr; + return false; String replica = getSharedDataReplica(part, data_source_description.type); /// We can't fetch part when none replicas have this part on a same type remote disk if (replica.empty()) - return nullptr; + return false; return executeFetchShared(replica, part.name, disk, path); } @@ -8123,15 +8109,13 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP minmax_idx->update(block, getMinMaxColumnsNames(metadata_snapshot->getPartitionKey())); auto new_volume = createVolumeFromReservation(reservation, volume); + auto data_part_storage = std::make_shared( new_volume, relative_data_path, TMP_PREFIX + lost_part_name); - DataPartStorageBuilderPtr data_part_storage_builder = std::make_shared( - new_volume, - relative_data_path, - TMP_PREFIX + lost_part_name); + data_part_storage->beginTransaction(); auto new_data_part = createPart( lost_part_name, @@ -8174,16 +8158,16 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP if (new_data_part->isStoredOnDisk()) { /// The name could be non-unique in case of stale files from previous runs. 
- if (data_part_storage_builder->exists()) + if (data_part_storage->exists()) { LOG_WARNING(log, "Removing old temporary directory {}", new_data_part->data_part_storage->getFullPath()); - data_part_storage_builder->removeRecursive(); + data_part_storage->removeRecursive(); } - data_part_storage_builder->createDirectories(); + data_part_storage->createDirectories(); if (getSettings()->fsync_part_directory) - sync_guard = data_part_storage_builder->getDirectorySyncGuard(); + sync_guard = data_part_storage->getDirectorySyncGuard(); } /// This effectively chooses minimal compression method: @@ -8191,7 +8175,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP auto compression_codec = getContext()->chooseCompressionCodec(0, 0); const auto & index_factory = MergeTreeIndexFactory::instance(); - MergedBlockOutputStream out(new_data_part, data_part_storage_builder, metadata_snapshot, columns, + MergedBlockOutputStream out(new_data_part, metadata_snapshot, columns, index_factory.getMany(metadata_snapshot->getSecondaryIndices()), compression_codec, NO_TRANSACTION_PTR); bool sync_on_insert = settings->fsync_after_insert; @@ -8205,7 +8189,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP try { MergeTreeData::Transaction transaction(*this, NO_TRANSACTION_RAW); - auto replaced_parts = renameTempPartAndReplace(new_data_part, transaction, data_part_storage_builder); + auto replaced_parts = renameTempPartAndReplace(new_data_part, transaction); if (!replaced_parts.empty()) { diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index e10ffcce22c..c9af1ab5f93 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -263,7 +263,7 @@ public: bool canExecuteFetch(const ReplicatedMergeTreeLogEntry & entry, String & disable_reason) const; /// Fetch part only when it stored on shared storage like S3 - DataPartStoragePtr executeFetchShared(const String & source_replica, const String & new_part_name, const DiskPtr & disk, const String & path); + bool executeFetchShared(const String & source_replica, const String & new_part_name, const DiskPtr & disk, const String & path); /// Lock part in zookeeper for use shared data in several nodes void lockSharedData(const IMergeTreeDataPart & part, bool replace_existing_lock, std::optional hardlinked_files) const override; @@ -283,7 +283,7 @@ public: const String & zookeeper_path_old, MergeTreeDataFormatVersion data_format_version); /// Fetch part only if some replica has it on shared storage like S3 - DataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override; + bool tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override; /// Get best replica having this partition on a same type remote disk String getSharedDataReplica(const IMergeTreeDataPart & part, DataSourceType data_source_type) const; @@ -682,7 +682,7 @@ private: * Used for replace local part on the same s3-shared part in hybrid storage. * Returns false if part is already fetching right now. 
*/ - DataPartStoragePtr fetchExistsPart( + bool fetchExistsPart( const String & part_name, const StorageMetadataPtr & metadata_snapshot, const String & replica_path, From b40d9200d20c0ce92ef40c601087ddb576575ba7 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Sun, 23 Oct 2022 03:29:26 +0000 Subject: [PATCH 039/112] better semantic of constsness of DataPartStorage --- .../getCompressionCodecForFile.cpp | 4 +- src/Compression/getCompressionCodecForFile.h | 2 +- src/Interpreters/PartLog.cpp | 4 +- .../MergeTree/DataPartStorageOnDisk.cpp | 26 +--- .../MergeTree/DataPartStorageOnDisk.h | 11 +- src/Storages/MergeTree/DataPartsExchange.cpp | 24 +-- src/Storages/MergeTree/IDataPartStorage.h | 36 +++-- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 138 +++++++++--------- src/Storages/MergeTree/IMergeTreeDataPart.h | 18 +-- .../MergeTree/IMergedBlockOutputStream.cpp | 2 +- .../LoadedMergeTreeDataPartInfoForReader.h | 2 +- .../MergeTree/MergeFromLogEntryTask.cpp | 2 +- src/Storages/MergeTree/MergeList.cpp | 2 +- src/Storages/MergeTree/MergeTask.cpp | 2 +- src/Storages/MergeTree/MergeTask.h | 2 +- src/Storages/MergeTree/MergeTreeData.cpp | 131 +++++++++-------- src/Storages/MergeTree/MergeTreeData.h | 21 +-- .../MergeTree/MergeTreeDataMergerMutator.cpp | 2 +- .../MergeTree/MergeTreeDataMergerMutator.h | 2 +- .../MergeTree/MergeTreeDataPartCompact.cpp | 38 ++--- .../MergeTree/MergeTreeDataPartCompact.h | 2 +- .../MergeTree/MergeTreeDataPartInMemory.cpp | 20 ++- .../MergeTree/MergeTreeDataPartInMemory.h | 2 +- .../MergeTree/MergeTreeDataPartWide.cpp | 30 ++-- .../MergeTree/MergeTreeDataPartWide.h | 3 +- .../MergeTreeDataPartWriterCompact.cpp | 4 +- .../MergeTreeDataPartWriterOnDisk.cpp | 8 +- .../MergeTree/MergeTreeDataPartWriterWide.cpp | 10 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 6 +- .../MergeTree/MergeTreeDataWriter.cpp | 10 +- .../MergeTreeIndexGranularityInfo.cpp | 8 +- .../MergeTree/MergeTreeIndexGranularityInfo.h | 4 +- .../MergeTree/MergeTreeIndexMinMax.cpp | 6 +- src/Storages/MergeTree/MergeTreeIndexMinMax.h | 2 +- .../MergeTree/MergeTreeIndexReader.cpp | 4 +- src/Storages/MergeTree/MergeTreeIndices.h | 4 +- src/Storages/MergeTree/MergeTreePartition.cpp | 6 +- src/Storages/MergeTree/MergeTreePartition.h | 4 +- .../MergeTree/MergeTreePartsMover.cpp | 28 ++-- src/Storages/MergeTree/MergeTreePartsMover.h | 4 +- src/Storages/MergeTree/MergeTreeReadPool.cpp | 2 +- .../MergeTree/MergeTreeReaderCompact.cpp | 10 +- src/Storages/MergeTree/MergeTreeSink.cpp | 2 +- .../MergeTree/MergedBlockOutputStream.cpp | 40 ++--- .../MergeTree/MergedBlockOutputStream.h | 20 +-- .../MergedColumnOnlyOutputStream.cpp | 2 +- .../MergeTree/MutateFromLogEntryTask.cpp | 2 +- src/Storages/MergeTree/MutateTask.cpp | 28 ++-- .../MergeTree/PartMetadataManagerOrdinary.cpp | 16 +- .../MergeTree/PartMetadataManagerOrdinary.h | 2 +- .../PartMetadataManagerWithCache.cpp | 28 ++-- .../MergeTree/ReplicatedMergeTreeSink.cpp | 2 +- src/Storages/MergeTree/checkDataPart.cpp | 35 ++--- src/Storages/StorageMergeTree.cpp | 12 +- src/Storages/StorageReplicatedMergeTree.cpp | 58 ++++---- src/Storages/StorageReplicatedMergeTree.h | 6 +- src/Storages/System/StorageSystemParts.cpp | 4 +- .../System/StorageSystemPartsColumns.cpp | 4 +- .../System/StorageSystemProjectionParts.cpp | 4 +- .../StorageSystemProjectionPartsColumns.cpp | 4 +- 60 files changed, 456 insertions(+), 459 deletions(-) diff --git a/src/Compression/getCompressionCodecForFile.cpp b/src/Compression/getCompressionCodecForFile.cpp index f9365862c5b..027ee0ac57a 100644 
--- a/src/Compression/getCompressionCodecForFile.cpp +++ b/src/Compression/getCompressionCodecForFile.cpp @@ -13,9 +13,9 @@ namespace DB using Checksum = CityHash_v1_0_2::uint128; -CompressionCodecPtr getCompressionCodecForFile(const DataPartStoragePtr & data_part_storage, const String & relative_path) +CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path) { - auto read_buffer = data_part_storage->readFile(relative_path, {}, std::nullopt, std::nullopt); + auto read_buffer = data_part_storage.readFile(relative_path, {}, std::nullopt, std::nullopt); read_buffer->ignore(sizeof(Checksum)); UInt8 header_size = ICompressionCodec::getHeaderSize(); diff --git a/src/Compression/getCompressionCodecForFile.h b/src/Compression/getCompressionCodecForFile.h index ad855684128..b6f22750e4d 100644 --- a/src/Compression/getCompressionCodecForFile.h +++ b/src/Compression/getCompressionCodecForFile.h @@ -11,6 +11,6 @@ namespace DB /// clickhouse fashion (with checksums, headers for each block, etc). This /// method should be used as fallback when we cannot deduce compression codec /// from metadata. -CompressionCodecPtr getCompressionCodecForFile(const DataPartStoragePtr & data_part_storage, const String & relative_path); +CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path); } diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index 75e6d02d6e1..b35ee50b98e 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -207,8 +207,8 @@ bool PartLog::addNewParts( elem.table_name = table_id.table_name; elem.partition_id = part->info.partition_id; elem.part_name = part->name; - elem.disk_name = part->data_part_storage->getDiskName(); - elem.path_on_disk = part->data_part_storage->getFullPath(); + elem.disk_name = part->getDataPartStorage().getDiskName(); + elem.path_on_disk = part->getDataPartStorage().getFullPath(); elem.part_type = part->getType(); elem.bytes_compressed_on_disk = part->getBytesOnDisk(); diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp index 765c62ba903..e28aa359c99 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp @@ -30,11 +30,6 @@ DataPartStorageOnDisk::DataPartStorageOnDisk(VolumePtr volume_, std::string root { } -std::shared_ptr DataPartStorageOnDisk::clone() const -{ - return std::make_shared(volume, root_path, part_dir); -} - std::string DataPartStorageOnDisk::getFullPath() const { return fs::path(volume->getDisk()->getPath()) / root_path / part_dir / ""; @@ -55,12 +50,7 @@ std::string DataPartStorageOnDisk::getFullRootPath() const return fs::path(volume->getDisk()->getPath()) / root_path / ""; } -DataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) const -{ - return std::make_shared(volume, std::string(fs::path(root_path) / part_dir), name); -} - -MutableDataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) +MutableDataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) const { return std::make_shared(volume, std::string(fs::path(root_path) / part_dir), name); } @@ -279,7 +269,7 @@ void DataPartStorageOnDisk::remove( try { disk->moveDirectory(from, to); - onRename(root_path, part_dir_without_slash); + part_dir = part_dir_without_slash; } catch (const Exception & e) { @@ -524,7 +514,7 @@ bool 
DataPartStorageOnDisk::isBroken() const return volume->getDisk()->isBroken(); } -void DataPartStorageOnDisk::syncRevision(UInt64 revision) +void DataPartStorageOnDisk::syncRevision(UInt64 revision) const { volume->getDisk()->syncRevision(revision); } @@ -549,7 +539,7 @@ DataPartStorageOnDisk::DisksSet::const_iterator DataPartStorageOnDisk::isStoredO return disks.find(volume->getDisk()); } -ReservationPtr DataPartStorageOnDisk::reserve(UInt64 bytes) +ReservationPtr DataPartStorageOnDisk::reserve(UInt64 bytes) const { auto res = volume->reserve(bytes); if (!res) @@ -558,7 +548,7 @@ ReservationPtr DataPartStorageOnDisk::reserve(UInt64 bytes) return res; } -ReservationPtr DataPartStorageOnDisk::tryReserve(UInt64 bytes) +ReservationPtr DataPartStorageOnDisk::tryReserve(UInt64 bytes) const { return volume->reserve(bytes); } @@ -845,12 +835,6 @@ MutableDataPartStoragePtr DataPartStorageOnDisk::clonePart( return std::make_shared(single_disk_volume, to, dir_path); } -void DataPartStorageOnDisk::onRename(const std::string & new_root_path, const std::string & new_part_dir) -{ - part_dir = new_part_dir; - root_path = new_root_path; -} - void DataPartStorageOnDisk::rename( const std::string & new_root_path, const std::string & new_part_dir, diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.h b/src/Storages/MergeTree/DataPartStorageOnDisk.h index 3ce063ca990..80946c37f79 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.h +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.h @@ -15,15 +15,13 @@ class DataPartStorageOnDisk final : public IDataPartStorage { public: DataPartStorageOnDisk(VolumePtr volume_, std::string root_path_, std::string part_dir_); - std::shared_ptr clone() const override; std::string getFullPath() const override; std::string getRelativePath() const override; std::string getPartDirectory() const override { return part_dir; } std::string getFullRootPath() const override; - DataPartStoragePtr getProjection(const std::string & name) const override; - MutableDataPartStoragePtr getProjection(const std::string & name) override; + MutableDataPartStoragePtr getProjection(const std::string & name) const override; bool exists() const override; bool exists(const std::string & name) const override; @@ -62,7 +60,6 @@ public: std::optional & original_files_list) const; void setRelativePath(const std::string & path) override; - void onRename(const std::string & new_root_path, const std::string & new_part_dir) override; std::string getDiskName() const override; std::string getDiskType() const override; @@ -70,15 +67,15 @@ public: bool supportZeroCopyReplication() const override; bool supportParallelWrite() const override; bool isBroken() const override; - void syncRevision(UInt64 revision) override; + void syncRevision(UInt64 revision) const override; UInt64 getRevision() const override; std::unordered_map getSerializedMetadata(const std::vector & paths) const override; std::string getDiskPath() const override; DisksSet::const_iterator isStoredOnDisk(const DisksSet & disks) const override; - ReservationPtr reserve(UInt64 bytes) override; - ReservationPtr tryReserve(UInt64 bytes) override; + ReservationPtr reserve(UInt64 bytes) const override; + ReservationPtr tryReserve(UInt64 bytes) const override; size_t getVolumeIndex(const IStoragePolicy &) const override; void writeChecksums(const MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const override; diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 
3398839131c..88a745820e9 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -147,12 +147,12 @@ void Service::processQuery(const HTMLForm & params, ReadBuffer & /*body*/, Write CurrentMetrics::Increment metric_increment{CurrentMetrics::ReplicatedSend}; - if (part->data_part_storage->isStoredOnRemoteDisk()) + if (part->getDataPartStorage().isStoredOnRemoteDisk()) { UInt64 revision = parse(params.get("disk_revision", "0")); if (revision) - part->data_part_storage->syncRevision(revision); - revision = part->data_part_storage->getRevision(); + part->getDataPartStorage().syncRevision(revision); + revision = part->getDataPartStorage().getRevision(); if (revision) response.addCookie({"disk_revision", toString(revision)}); } @@ -184,8 +184,8 @@ void Service::processQuery(const HTMLForm & params, ReadBuffer & /*body*/, Write !isInMemoryPart(part) && client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_ZERO_COPY) { - auto disk_type = part->data_part_storage->getDiskType(); - if (part->data_part_storage->supportZeroCopyReplication() && std::find(capability.begin(), capability.end(), disk_type) != capability.end()) + auto disk_type = part->getDataPartStorage().getDiskType(); + if (part->getDataPartStorage().supportZeroCopyReplication() && std::find(capability.begin(), capability.end(), disk_type) != capability.end()) { /// Send metadata if the receiver's capability covers the source disk type. response.addCookie({"remote_fs_metadata", disk_type}); @@ -307,12 +307,12 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk( { String file_name = it.first; - UInt64 size = part->data_part_storage->getFileSize(file_name); + UInt64 size = part->getDataPartStorage().getFileSize(file_name); writeStringBinary(it.first, out); writeBinary(size, out); - auto file_in = part->data_part_storage->readFile(file_name, {}, std::nullopt, std::nullopt); + auto file_in = part->getDataPartStorage().readFile(file_name, {}, std::nullopt, std::nullopt); HashingWriteBuffer hashing_out(out); copyDataWithThrottler(*file_in, hashing_out, blocker.getCounter(), data.getSendsThrottler()); @@ -323,7 +323,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk( throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Unexpected size of file {}, expected {} got {}", - std::string(fs::path(part->data_part_storage->getRelativePath()) / file_name), + std::string(fs::path(part->getDataPartStorage().getRelativePath()) / file_name), hashing_out.count(), size); writePODBinary(hashing_out.getHash(), out); @@ -342,9 +342,9 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDiskRemoteMeta( bool send_part_id, const std::map> & projections) { - const auto * data_part_storage_on_disk = dynamic_cast(part->data_part_storage.get()); + const auto * data_part_storage_on_disk = dynamic_cast(&part->getDataPartStorage()); if (!data_part_storage_on_disk) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage '{}' doesn't support zero-copy replication", part->data_part_storage->getDiskName()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage '{}' doesn't support zero-copy replication", part->getDataPartStorage().getDiskName()); if (!data_part_storage_on_disk->supportZeroCopyReplication()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Disk '{}' doesn't support zero-copy replication", data_part_storage_on_disk->getDiskName()); @@ -365,7 +365,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDiskRemoteMeta( std::vector paths; 
paths.reserve(checksums.files.size()); for (const auto & it : checksums.files) - paths.push_back(fs::path(part->data_part_storage->getRelativePath()) / it.first); + paths.push_back(fs::path(part->getDataPartStorage().getRelativePath()) / it.first); /// Serialized metadatadatas with zero ref counts. auto metadatas = data_part_storage_on_disk->getSerializedMetadata(paths); @@ -399,7 +399,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDiskRemoteMeta( for (const auto & it : checksums.files) { const String & file_name = it.first; - String file_path_prefix = fs::path(part->data_part_storage->getRelativePath()) / file_name; + String file_path_prefix = fs::path(part->getDataPartStorage().getRelativePath()) / file_name; /// Just some additional checks String metadata_file_path = fs::path(data_part_storage_on_disk->getDiskPath()) / file_path_prefix; diff --git a/src/Storages/MergeTree/IDataPartStorage.h b/src/Storages/MergeTree/IDataPartStorage.h index 3b005942c54..9e3fbe7d13b 100644 --- a/src/Storages/MergeTree/IDataPartStorage.h +++ b/src/Storages/MergeTree/IDataPartStorage.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -70,7 +71,6 @@ class IDataPartStorage : public boost::noncopyable { public: virtual ~IDataPartStorage() = default; - virtual std::shared_ptr clone() const = 0; /// Methods to get path components of a data part. virtual std::string getFullPath() const = 0; /// '/var/lib/clickhouse/data/database/table/moving/all_1_5_1' @@ -81,8 +81,7 @@ public: /// virtual std::string getRelativeRootPath() const = 0; /// Get a storage for projection. - virtual std::shared_ptr getProjection(const std::string & name) const = 0; - virtual std::shared_ptr getProjection(const std::string & name) = 0; + virtual std::shared_ptr getProjection(const std::string & name) const = 0; /// Part directory exists. virtual bool exists() const = 0; @@ -132,10 +131,9 @@ public: /// TODO: remove it. virtual std::optional getRelativePathForPrefix(Poco::Logger * log, const String & prefix, bool detached, bool broken) const = 0; - /// Reset part directory, used for im-memory parts. + /// Reset part directory, used for in-memory parts. /// TODO: remove it. virtual void setRelativePath(const std::string & path) = 0; - virtual void onRename(const std::string & new_root_path, const std::string & new_part_dir) = 0; /// Some methods from IDisk. Needed to avoid getting internal IDisk interface. virtual std::string getDiskName() const = 0; @@ -144,7 +142,8 @@ public: virtual bool supportZeroCopyReplication() const { return false; } virtual bool supportParallelWrite() const = 0; virtual bool isBroken() const = 0; - virtual void syncRevision(UInt64 revision) = 0; + /// TODO: remove or at least remove const. + virtual void syncRevision(UInt64 revision) const = 0; virtual UInt64 getRevision() const = 0; virtual std::unordered_map getSerializedMetadata(const std::vector & paths) const = 0; /// Get a path for internal disk if relevant. It is used mainly for logging. @@ -156,8 +155,9 @@ public: /// Reserve space on the same disk. /// Probably we should try to remove it later. 
- virtual ReservationPtr reserve(UInt64 /*bytes*/) { return nullptr; } - virtual ReservationPtr tryReserve(UInt64 /*bytes*/) { return nullptr; } + /// TODO: remove constness + virtual ReservationPtr reserve(UInt64 /*bytes*/) const { return nullptr; } + virtual ReservationPtr tryReserve(UInt64 /*bytes*/) const { return nullptr; } virtual size_t getVolumeIndex(const IStoragePolicy &) const { return 0; } /// Some methods which change data part internals possibly after creation. @@ -234,8 +234,6 @@ public: /// Ideally, new_root_path should be the same as current root (but it is not true). /// Examples are: 'all_1_2_1' -> 'detached/all_1_2_1' /// 'moving/tmp_all_1_2_1' -> 'all_1_2_1' - /// - /// To notify storage also call onRename for it with first two args virtual void rename( const std::string & new_root_path, const std::string & new_part_dir, @@ -251,4 +249,22 @@ public: using DataPartStoragePtr = std::shared_ptr; using MutableDataPartStoragePtr = std::shared_ptr; +class DataPartStorageHolder : public boost::noncopyable +{ +public: + explicit DataPartStorageHolder(MutableDataPartStoragePtr storage_) + : storage(std::move(storage_)) + { + } + + IDataPartStorage & getDataPartStorage() { return *storage; } + const IDataPartStorage & getDataPartStorage() const { return *storage; } + + MutableDataPartStoragePtr getDataPartStoragePtr() { return storage; } + DataPartStoragePtr getDataPartStoragePtr() const { return storage; } + +private: + MutableDataPartStoragePtr storage; +}; + } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 6a641f0c94e..fdaa4231e9c 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1,4 +1,5 @@ #include "IMergeTreeDataPart.h" +#include "Storages/MergeTree/IDataPartStorage.h" #include #include @@ -101,7 +102,7 @@ void IMergeTreeDataPart::MinMaxIndex::load(const MergeTreeData & data, const Par } IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::store( - const MergeTreeData & data, const MutableDataPartStoragePtr & part_storage, Checksums & out_checksums) const + const MergeTreeData & data, IDataPartStorage & part_storage, Checksums & out_checksums) const { auto metadata_snapshot = data.getInMemoryMetadataPtr(); const auto & partition_key = metadata_snapshot->getPartitionKey(); @@ -115,14 +116,14 @@ IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::s IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::store( const Names & column_names, const DataTypes & data_types, - const MutableDataPartStoragePtr & part_storage, + IDataPartStorage & part_storage, Checksums & out_checksums) const { if (!initialized) throw Exception( ErrorCodes::LOGICAL_ERROR, "Attempt to store uninitialized MinMax index for part {}. 
This is a bug", - part_storage->getFullPath()); + part_storage.getFullPath()); WrittenFiles written_files; @@ -131,7 +132,7 @@ IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::s String file_name = "minmax_" + escapeForFileName(column_names[i]) + ".idx"; auto serialization = data_types.at(i)->getDefaultSerialization(); - auto out = part_storage->writeFile(file_name, DBMS_DEFAULT_BUFFER_SIZE, {}); + auto out = part_storage.writeFile(file_name, DBMS_DEFAULT_BUFFER_SIZE, {}); HashingWriteBuffer out_hashing(*out); serialization->serializeBinary(hyperrectangle[i].left, out_hashing); serialization->serializeBinary(hyperrectangle[i].right, out_hashing); @@ -304,10 +305,10 @@ IMergeTreeDataPart::IMergeTreeDataPart( const MutableDataPartStoragePtr & data_part_storage_, Type part_type_, const IMergeTreeDataPart * parent_part_) - : storage(storage_) + : DataPartStorageHolder(data_part_storage_) + , storage(storage_) , name(name_) , info(MergeTreePartInfo::fromPartName(name_, storage.format_version)) - , data_part_storage(parent_part_ ? parent_part_->data_part_storage : data_part_storage_) , index_granularity_info(storage_, part_type_) , part_type(part_type_) , parent_part(parent_part_) @@ -332,10 +333,10 @@ IMergeTreeDataPart::IMergeTreeDataPart( const MutableDataPartStoragePtr & data_part_storage_, Type part_type_, const IMergeTreeDataPart * parent_part_) - : storage(storage_) + : DataPartStorageHolder(data_part_storage_) + , storage(storage_) , name(name_) , info(info_) - , data_part_storage(data_part_storage_) , index_granularity_info(storage_, part_type_) , part_type(part_type_) , parent_part(parent_part_) @@ -343,6 +344,7 @@ IMergeTreeDataPart::IMergeTreeDataPart( { if (parent_part) state = MergeTreeDataPartState::Active; + incrementStateMetric(state); incrementTypeMetric(part_type); @@ -506,17 +508,17 @@ void IMergeTreeDataPart::removeIfNeeded() std::string path; try { - path = data_part_storage->getRelativePath(); + path = getDataPartStorage().getRelativePath(); - if (!data_part_storage->exists()) // path + if (!getDataPartStorage().exists()) // path return; if (is_temp) { - String file_name = fileName(data_part_storage->getPartDirectory()); + String file_name = fileName(getDataPartStorage().getPartDirectory()); if (file_name.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "relative_path {} of part {} is invalid or not set", data_part_storage->getPartDirectory(), name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "relative_path {} of part {} is invalid or not set", getDataPartStorage().getPartDirectory(), name); if (!startsWith(file_name, "tmp") && !endsWith(file_name, ".tmp_proj")) { @@ -621,7 +623,7 @@ String IMergeTreeDataPart::getColumnNameWithMinimumCompressedSize(bool with_subc } if (!minimum_size_column) - throw Exception("Could not find a column of minimum size in MergeTree, part " + data_part_storage->getFullPath(), ErrorCodes::LOGICAL_ERROR); + throw Exception("Could not find a column of minimum size in MergeTree, part " + getDataPartStorage().getFullPath(), ErrorCodes::LOGICAL_ERROR); return *minimum_size_column; } @@ -699,9 +701,9 @@ void IMergeTreeDataPart::loadProjections(bool require_columns_checksums, bool ch for (const auto & projection : metadata_snapshot->projections) { String path = /*getRelativePath() + */ projection.name + ".proj"; - if (data_part_storage->exists(path)) + if (getDataPartStorage().exists(path)) { - auto projection_part_storage = data_part_storage->getProjection(projection.name + ".proj"); + auto projection_part_storage = 
getDataPartStorage().getProjection(projection.name + ".proj"); auto part = storage.createPart(projection.name, {"all", 0, 0, 0}, projection_part_storage, this); part->loadColumnsChecksumsIndexes(require_columns_checksums, check_consistency); projection_parts.emplace(projection.name, std::move(part)); @@ -742,8 +744,8 @@ void IMergeTreeDataPart::loadIndex() loaded_index[i]->reserve(index_granularity.getMarksCount()); } - String index_name = "primary" + getIndexExtensionFromFilesystem(data_part_storage).value(); - String index_path = fs::path(data_part_storage->getRelativePath()) / index_name; + String index_name = "primary" + getIndexExtensionFromFilesystem(getDataPartStorage()).value(); + String index_path = fs::path(getDataPartStorage().getRelativePath()) / index_name; auto index_file = metadata_manager->read(index_name); size_t marks_count = index_granularity.getMarksCount(); @@ -782,7 +784,7 @@ void IMergeTreeDataPart::appendFilesOfIndex(Strings & files) const if (metadata_snapshot->hasPrimaryKey()) { - String index_name = "primary" + getIndexExtensionFromFilesystem(data_part_storage).value(); + String index_name = "primary" + getIndexExtensionFromFilesystem(getDataPartStorage()).value(); files.push_back(index_name); } } @@ -794,10 +796,10 @@ NameSet IMergeTreeDataPart::getFileNamesWithoutChecksums() const NameSet result = {"checksums.txt", "columns.txt"}; - if (data_part_storage->exists(DEFAULT_COMPRESSION_CODEC_FILE_NAME)) + if (getDataPartStorage().exists(DEFAULT_COMPRESSION_CODEC_FILE_NAME)) result.emplace(DEFAULT_COMPRESSION_CODEC_FILE_NAME); - if (data_part_storage->exists(TXN_VERSION_METADATA_FILE_NAME)) + if (getDataPartStorage().exists(TXN_VERSION_METADATA_FILE_NAME)) result.emplace(TXN_VERSION_METADATA_FILE_NAME); return result; @@ -812,7 +814,7 @@ void IMergeTreeDataPart::loadDefaultCompressionCodec() return; } - String path = fs::path(data_part_storage->getRelativePath()) / DEFAULT_COMPRESSION_CODEC_FILE_NAME; + String path = fs::path(getDataPartStorage().getRelativePath()) / DEFAULT_COMPRESSION_CODEC_FILE_NAME; bool exists = metadata_manager->exists(DEFAULT_COMPRESSION_CODEC_FILE_NAME); if (!exists) { @@ -881,7 +883,7 @@ CompressionCodecPtr IMergeTreeDataPart::detectDefaultCompressionCodec() const String candidate_path = /*fs::path(getRelativePath()) */ (ISerialization::getFileNameForStream(part_column, substream_path) + ".bin"); /// We can have existing, but empty .bin files. Example: LowCardinality(Nullable(...)) columns and column_name.dict.null.bin file. 
- if (data_part_storage->exists(candidate_path) && data_part_storage->getFileSize(candidate_path) != 0) + if (getDataPartStorage().exists(candidate_path) && getDataPartStorage().getFileSize(candidate_path) != 0) path_to_data_file = candidate_path; } }); @@ -892,7 +894,7 @@ CompressionCodecPtr IMergeTreeDataPart::detectDefaultCompressionCodec() const continue; } - result = getCompressionCodecForFile(data_part_storage, path_to_data_file); + result = getCompressionCodecForFile(getDataPartStorage(), path_to_data_file); break; } } @@ -937,7 +939,7 @@ void IMergeTreeDataPart::loadPartitionAndMinMaxIndex() String calculated_partition_id = partition.getID(metadata_snapshot->getPartitionKey().sample_block); if (calculated_partition_id != info.partition_id) throw Exception( - "While loading part " + data_part_storage->getFullPath() + ": calculated partition ID: " + calculated_partition_id + "While loading part " + getDataPartStorage().getFullPath() + ": calculated partition ID: " + calculated_partition_id + " differs from partition ID in part name: " + info.partition_id, ErrorCodes::CORRUPTED_DATA); } @@ -966,7 +968,7 @@ void IMergeTreeDataPart::loadChecksums(bool require) bytes_on_disk = checksums.getTotalSizeOnDisk(); } else - bytes_on_disk = data_part_storage->calculateTotalSizeOnDisk(); + bytes_on_disk = getDataPartStorage().calculateTotalSizeOnDisk(); } else { @@ -978,7 +980,7 @@ void IMergeTreeDataPart::loadChecksums(bool require) LOG_WARNING(storage.log, "Checksums for part {} not found. Will calculate them from data on disk.", name); checksums = checkDataPart(shared_from_this(), false); - data_part_storage->writeChecksums(checksums, {}); + getDataPartStorage().writeChecksums(checksums, {}); bytes_on_disk = checksums.getTotalSizeOnDisk(); } @@ -1063,7 +1065,7 @@ void IMergeTreeDataPart::loadRowsCount() } else { - if (data_part_storage->exists("count.txt")) + if (getDataPartStorage().exists("count.txt")) { read_rows_count(); return; @@ -1162,7 +1164,7 @@ void IMergeTreeDataPart::appendFilesOfUUID(Strings & files) void IMergeTreeDataPart::loadColumns(bool require) { - String path = fs::path(data_part_storage->getRelativePath()) / "columns.txt"; + String path = fs::path(getDataPartStorage().getRelativePath()) / "columns.txt"; auto metadata_snapshot = storage.getInMemoryMetadataPtr(); if (parent_part) metadata_snapshot = metadata_snapshot->projections.get(name).metadata; @@ -1173,18 +1175,18 @@ void IMergeTreeDataPart::loadColumns(bool require) { /// We can get list of columns only from columns.txt in compact parts. if (require || part_type == Type::Compact) - throw Exception("No columns.txt in part " + name + ", expected path " + path + " on drive " + data_part_storage->getDiskName(), + throw Exception("No columns.txt in part " + name + ", expected path " + path + " on drive " + getDataPartStorage().getDiskName(), ErrorCodes::NO_FILE_IN_DATA_PART); /// If there is no file with a list of columns, write it down. 
for (const NameAndTypePair & column : metadata_snapshot->getColumns().getAllPhysical()) - if (data_part_storage->exists(getFileNameForColumn(column) + ".bin")) + if (getDataPartStorage().exists(getFileNameForColumn(column) + ".bin")) loaded_columns.push_back(column); if (columns.empty()) throw Exception("No columns in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART); - data_part_storage->writeColumns(loaded_columns, {}); + getDataPartStorage().writeColumns(loaded_columns, {}); } else { @@ -1228,7 +1230,7 @@ void IMergeTreeDataPart::assertHasVersionMetadata(MergeTreeTransaction * txn) co name, storage.getStorageID().getNameForLogs(), version.creation_tid, txn ? txn->dumpDescription() : ""); assert(!txn || storage.supportsTransactions()); - assert(!txn || data_part_storage->exists(TXN_VERSION_METADATA_FILE_NAME)); + assert(!txn || getDataPartStorage().exists(TXN_VERSION_METADATA_FILE_NAME)); } void IMergeTreeDataPart::storeVersionMetadata(bool force) const @@ -1243,7 +1245,7 @@ void IMergeTreeDataPart::storeVersionMetadata(bool force) const throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Transactions are not supported for in-memory parts (table: {}, part: {})", storage.getStorageID().getNameForLogs(), name); - data_part_storage->writeVersionMetadata(version, storage.getSettings()->fsync_part_directory); + getDataPartStorage().writeVersionMetadata(version, storage.getSettings()->fsync_part_directory); } void IMergeTreeDataPart::appendCSNToVersionMetadata(VersionMetadata::WhichCSN which_csn) const @@ -1255,7 +1257,7 @@ void IMergeTreeDataPart::appendCSNToVersionMetadata(VersionMetadata::WhichCSN wh chassert(!(which_csn == VersionMetadata::WhichCSN::REMOVAL && version.removal_csn == 0)); chassert(isStoredOnDisk()); - data_part_storage->appendCSNToVersionMetadata(version, which_csn); + getDataPartStorage().appendCSNToVersionMetadata(version, which_csn); } void IMergeTreeDataPart::appendRemovalTIDToVersionMetadata(bool clear) const @@ -1278,13 +1280,13 @@ void IMergeTreeDataPart::appendRemovalTIDToVersionMetadata(bool clear) const else LOG_TEST(storage.log, "Appending removal TID for {} (creation: {}, removal {})", name, version.creation_tid, version.removal_tid); - data_part_storage->appendRemovalTIDToVersionMetadata(version, clear); + getDataPartStorage().appendRemovalTIDToVersionMetadata(version, clear); } void IMergeTreeDataPart::loadVersionMetadata() const try { - data_part_storage->loadVersionMetadata(version, storage.log); + getDataPartStorage().loadVersionMetadata(version, storage.log); } catch (Exception & e) { @@ -1321,15 +1323,15 @@ bool IMergeTreeDataPart::assertHasValidVersionMetadata() const if (state == MergeTreeDataPartState::Temporary) return true; - if (!data_part_storage->exists()) + if (!getDataPartStorage().exists()) return true; String content; String version_file_name = TXN_VERSION_METADATA_FILE_NAME; try { - size_t file_size = data_part_storage->getFileSize(TXN_VERSION_METADATA_FILE_NAME); - auto buf = data_part_storage->readFile(TXN_VERSION_METADATA_FILE_NAME, ReadSettings().adjustBufferSize(file_size), file_size, std::nullopt); + size_t file_size = getDataPartStorage().getFileSize(TXN_VERSION_METADATA_FILE_NAME); + auto buf = getDataPartStorage().readFile(TXN_VERSION_METADATA_FILE_NAME, ReadSettings().adjustBufferSize(file_size), file_size, std::nullopt); readStringUntilEOF(content, *buf); ReadBufferFromString str_buf{content}; @@ -1363,10 +1365,10 @@ void IMergeTreeDataPart::appendFilesOfColumns(Strings & files) bool IMergeTreeDataPart::shallParticipateInMerges(const 
StoragePolicyPtr & storage_policy) const { - return data_part_storage->shallParticipateInMerges(*storage_policy); + return getDataPartStorage().shallParticipateInMerges(*storage_policy); } -void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) const +void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) try { assertOnDisk(); @@ -1377,20 +1379,21 @@ try if (parent_part) { /// For projections, move is only possible inside parent part dir. - relative_path = parent_part->data_part_storage->getRelativePath(); + relative_path = parent_part->getDataPartStorage().getRelativePath(); } - String from = data_part_storage->getRelativePath(); auto to = fs::path(relative_path) / new_relative_path; metadata_manager->deleteAll(true); metadata_manager->assertAllDeleted(true); - data_part_storage->rename(to.parent_path(), to.filename(), storage.log, remove_new_dir_if_exists, fsync_dir); - data_part_storage->onRename(to.parent_path(), to.filename()); + getDataPartStorage().rename(to.parent_path(), to.filename(), storage.log, remove_new_dir_if_exists, fsync_dir); metadata_manager->updateAll(true); - for (const auto & [p_name, part] : projection_parts) - part->data_part_storage = data_part_storage->getProjection(p_name + ".proj"); + auto old_projection_root_path = getDataPartStorage().getRelativePath(); + auto new_projection_root_path = to.string(); + + for (const auto & [_, part] : projection_parts) + part->getDataPartStorage().changeRootPath(old_projection_root_path, new_projection_root_path); } catch (...) { @@ -1431,14 +1434,14 @@ void IMergeTreeDataPart::initializePartMetadataManager() void IMergeTreeDataPart::initializeIndexGranularityInfo() { - auto mrk_ext = MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(data_part_storage); + auto mrk_ext = MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(getDataPartStorage()); if (mrk_ext) index_granularity_info = MergeTreeIndexGranularityInfo(storage, MarkType{*mrk_ext}); else index_granularity_info = MergeTreeIndexGranularityInfo(storage, part_type); } -void IMergeTreeDataPart::remove() const +void IMergeTreeDataPart::remove() { assert(assertHasValidVersionMetadata()); part_is_probably_removed_from_disk = true; @@ -1455,7 +1458,6 @@ void IMergeTreeDataPart::remove() const return CanRemoveDescription{.can_remove_anything = can_remove, .files_not_to_remove = files_not_to_remove }; }; - if (!isStoredOnDisk()) return; @@ -1474,7 +1476,7 @@ void IMergeTreeDataPart::remove() const projection_checksums.emplace_back(IDataPartStorage::ProjectionChecksums{.name = p_name, .checksums = projection_part->checksums}); } - data_part_storage->remove(std::move(can_remove_callback), checksums, projection_checksums, is_temp, getState(), storage.log); + getDataPartStorage().remove(std::move(can_remove_callback), checksums, projection_checksums, is_temp, getState(), storage.log); } std::optional IMergeTreeDataPart::getRelativePathForPrefix(const String & prefix, bool detached, bool broken) const @@ -1491,7 +1493,7 @@ std::optional IMergeTreeDataPart::getRelativePathForPrefix(const String if (detached && parent_part) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot detach projection"); - return data_part_storage->getRelativePathForPrefix(storage.log, prefix, detached, broken); + return getDataPartStorage().getRelativePathForPrefix(storage.log, prefix, detached, broken); } std::optional IMergeTreeDataPart::getRelativePathForDetachedPart(const String & prefix, bool 
broken) const @@ -1506,7 +1508,7 @@ std::optional IMergeTreeDataPart::getRelativePathForDetachedPart(const S return {}; } -void IMergeTreeDataPart::renameToDetached(const String & prefix) const +void IMergeTreeDataPart::renameToDetached(const String & prefix) { auto path_to_detach = getRelativePathForDetachedPart(prefix, /* broken */ false); assert(path_to_detach); @@ -1529,7 +1531,7 @@ void IMergeTreeDataPart::makeCloneInDetached(const String & prefix, const Storag if (!maybe_path_in_detached) return; - data_part_storage->freeze( + getDataPartStorage().freeze( storage.relative_data_path, *maybe_path_in_detached, /*make_source_readonly*/ true, @@ -1542,13 +1544,13 @@ MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & di { assertOnDisk(); - if (disk->getName() == data_part_storage->getDiskName()) - throw Exception("Can not clone data part " + name + " to same disk " + data_part_storage->getDiskName(), ErrorCodes::LOGICAL_ERROR); + if (disk->getName() == getDataPartStorage().getDiskName()) + throw Exception("Can not clone data part " + name + " to same disk " + getDataPartStorage().getDiskName(), ErrorCodes::LOGICAL_ERROR); if (directory_name.empty()) throw Exception("Can not clone data part " + name + " to empty directory.", ErrorCodes::LOGICAL_ERROR); String path_to_clone = fs::path(storage.relative_data_path) / directory_name / ""; - return data_part_storage->clonePart(path_to_clone, data_part_storage->getPartDirectory(), disk, storage.log); + return getDataPartStorage().clonePart(path_to_clone, getDataPartStorage().getPartDirectory(), disk, storage.log); } void IMergeTreeDataPart::checkConsistencyBase() const @@ -1589,26 +1591,26 @@ void IMergeTreeDataPart::checkConsistencyBase() const } } - data_part_storage->checkConsistency(checksums); + getDataPartStorage().checkConsistency(checksums); } else { auto check_file_not_empty = [this](const String & file_path) { UInt64 file_size; - if (!data_part_storage->exists(file_path) || (file_size = data_part_storage->getFileSize(file_path)) == 0) + if (!getDataPartStorage().exists(file_path) || (file_size = getDataPartStorage().getFileSize(file_path)) == 0) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Part {} is broken: {} is empty", - data_part_storage->getFullPath(), - std::string(fs::path(data_part_storage->getFullPath()) / file_path)); + getDataPartStorage().getFullPath(), + std::string(fs::path(getDataPartStorage().getFullPath()) / file_path)); return file_size; }; /// Check that the primary key index is not empty. 
if (!pk.column_names.empty()) { - String index_name = "primary" + getIndexExtensionFromFilesystem(data_part_storage).value(); + String index_name = "primary" + getIndexExtensionFromFilesystem(getDataPartStorage()).value(); check_file_not_empty(index_name); } @@ -1752,7 +1754,7 @@ bool IMergeTreeDataPart::checkAllTTLCalculated(const StorageMetadataPtr & metada String IMergeTreeDataPart::getUniqueId() const { - return data_part_storage->getUniqueId(); + return getDataPartStorage().getUniqueId(); } String IMergeTreeDataPart::getZeroLevelPartBlockID(std::string_view token) const @@ -1791,11 +1793,11 @@ IMergeTreeDataPart::uint128 IMergeTreeDataPart::getActualChecksumByFile(const St return it->second.file_hash; } - if (!data_part_storage->exists(file_name)) + if (!getDataPartStorage().exists(file_name)) { return {}; } - std::unique_ptr in_file = data_part_storage->readFile(file_name, {}, std::nullopt, std::nullopt); + std::unique_ptr in_file = getDataPartStorage().readFile(file_name, {}, std::nullopt, std::nullopt); HashingReadBuffer in_hash(*in_file); String value; @@ -1823,11 +1825,11 @@ bool isInMemoryPart(const MergeTreeDataPartPtr & data_part) return (data_part && data_part->getType() == MergeTreeDataPartType::InMemory); } -std::optional getIndexExtensionFromFilesystem(const DataPartStoragePtr & data_part_storage) +std::optional getIndexExtensionFromFilesystem(const IDataPartStorage & data_part_storage) { - if (data_part_storage->exists()) + if (data_part_storage.exists()) { - for (auto it = data_part_storage->iterate(); it->isValid(); it->next()) + for (auto it = data_part_storage.iterate(); it->isValid(); it->next()) { const auto & extension = fs::path(it->name()).extension(); if (extension == getIndexExtension(false) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 0fe94b666b6..fbe4f992de4 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -46,7 +46,7 @@ class UncompressedCache; class MergeTreeTransaction; /// Description of the data part. -class IMergeTreeDataPart : public std::enable_shared_from_this +class IMergeTreeDataPart : public std::enable_shared_from_this, public DataPartStorageHolder { public: static constexpr auto DATA_FILE_EXTENSION = ".bin"; @@ -150,7 +150,7 @@ public: /// Throws an exception if part is not stored in on-disk format. void assertOnDisk() const; - void remove() const; + void remove(); /// Initialize columns (from columns.txt if exists, or create from column files if not). /// Load checksums from checksums.txt if exists. Load index if required. @@ -198,10 +198,6 @@ public: /// processed by multiple shards. UUID uuid = UUIDHelpers::Nil; - /// This is an object which encapsulates all the operations with disk. - /// Contains a path to stored data. 
- MutableDataPartStoragePtr data_part_storage; - MergeTreeIndexGranularityInfo index_granularity_info; size_t rows_count = 0; @@ -287,8 +283,8 @@ public: using WrittenFiles = std::vector>; - [[nodiscard]] WrittenFiles store(const MergeTreeData & data, const MutableDataPartStoragePtr & part_storage, Checksums & checksums) const; - [[nodiscard]] WrittenFiles store(const Names & column_names, const DataTypes & data_types, const MutableDataPartStoragePtr & part_storage, Checksums & checksums) const; + [[nodiscard]] WrittenFiles store(const MergeTreeData & data, IDataPartStorage & part_storage, Checksums & checksums) const; + [[nodiscard]] WrittenFiles store(const Names & column_names, const DataTypes & data_types, IDataPartStorage & part_storage, Checksums & checksums) const; void update(const Block & block, const Names & column_names); void merge(const MinMaxIndex & other); @@ -319,11 +315,11 @@ public: size_t getFileSizeOrZero(const String & file_name) const; /// Moves a part to detached/ directory and adds prefix to its name - void renameToDetached(const String & prefix) const; + void renameToDetached(const String & prefix); /// Makes checks and move part to new directory /// Changes only relative_dir_name, you need to update other metadata (name, is_temp) explicitly - virtual void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) const; + virtual void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists); /// Makes clone of a part in detached/ directory via hard links virtual void makeCloneInDetached(const String & prefix, const StorageMetadataPtr & metadata_snapshot) const; @@ -583,7 +579,7 @@ bool isCompactPart(const MergeTreeDataPartPtr & data_part); bool isWidePart(const MergeTreeDataPartPtr & data_part); bool isInMemoryPart(const MergeTreeDataPartPtr & data_part); inline String getIndexExtension(bool is_compressed_primary_key) { return is_compressed_primary_key ? 
".cidx" : ".idx"; } -std::optional getIndexExtensionFromFilesystem(const DataPartStoragePtr & data_part_storage); +std::optional getIndexExtensionFromFilesystem(const IDataPartStorage & data_part_storage); bool isCompressedFromIndexExtension(const String & index_extension); } diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp index 54f393a65a2..37da6014d1b 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp @@ -12,7 +12,7 @@ IMergedBlockOutputStream::IMergedBlockOutputStream( bool reset_columns_) : storage(data_part->storage) , metadata_snapshot(metadata_snapshot_) - , data_part_storage(data_part->data_part_storage) + , data_part_storage(data_part->getDataPartStoragePtr()) , reset_columns(reset_columns_) { if (reset_columns) diff --git a/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h b/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h index ee265ee6fb1..bc786ec0428 100644 --- a/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h +++ b/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h @@ -23,7 +23,7 @@ public: bool isProjectionPart() const override { return data_part->isProjectionPart(); } - DataPartStoragePtr getDataPartStorage() const override { return data_part->data_part_storage; } + DataPartStoragePtr getDataPartStorage() const override { return data_part->getDataPartStoragePtr(); } const NamesAndTypesList & getColumns() const override { return data_part->getColumns(); } diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index 182d5df4960..f3b81a4793e 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -160,7 +160,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() for (auto & part_ptr : parts) { ttl_infos.update(part_ptr->ttl_infos); - max_volume_index = std::max(max_volume_index, part_ptr->data_part_storage->getVolumeIndex(*storage.getStoragePolicy())); + max_volume_index = std::max(max_volume_index, part_ptr->getDataPartStorage().getVolumeIndex(*storage.getStoragePolicy())); } /// It will live until the whole task is being destroyed diff --git a/src/Storages/MergeTree/MergeList.cpp b/src/Storages/MergeTree/MergeList.cpp index ebe826531d2..02e61a70eb6 100644 --- a/src/Storages/MergeTree/MergeList.cpp +++ b/src/Storages/MergeTree/MergeList.cpp @@ -65,7 +65,7 @@ MergeListElement::MergeListElement( for (const auto & source_part : future_part->parts) { source_part_names.emplace_back(source_part->name); - source_part_paths.emplace_back(source_part->data_part_storage->getFullPath()); + source_part_paths.emplace_back(source_part->getDataPartStorage().getFullPath()); total_size_bytes_compressed += source_part->getBytesOnDisk(); total_size_marks += source_part->getMarksCount(); diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 130d156e53c..27a71345a5e 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -130,7 +130,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() if (global_ctx->parent_part) { - data_part_storage = global_ctx->parent_part->data_part_storage->getProjection(local_tmp_part_basename); + data_part_storage = global_ctx->parent_part->getDataPartStorage().getProjection(local_tmp_part_basename); } else { diff --git 
a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 07d46460423..f21d542c7a0 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -59,7 +59,7 @@ public: bool deduplicate_, Names deduplicate_by_columns_, MergeTreeData::MergingParams merging_params_, - const IMergeTreeDataPart * parent_part_, + IMergeTreeDataPart * parent_part_, String suffix_, MergeTreeTransactionPtr txn, MergeTreeData * data_, diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 9e9d90b6b1e..83f3a167fa7 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -943,8 +943,8 @@ Int64 MergeTreeData::getMaxBlockNumber() const } void MergeTreeData::loadDataPartsFromDisk( - DataPartsVector & broken_parts_to_detach, - DataPartsVector & duplicate_parts_to_remove, + MutableDataPartsVector & broken_parts_to_detach, + MutableDataPartsVector & duplicate_parts_to_remove, ThreadPool & pool, size_t num_parts, std::queue>> & parts_queue, @@ -1199,8 +1199,7 @@ void MergeTreeData::loadDataPartsFromDisk( void MergeTreeData::loadDataPartsFromWAL( - DataPartsVector & /* broken_parts_to_detach */, - DataPartsVector & duplicate_parts_to_remove, + MutableDataPartsVector & duplicate_parts_to_remove, MutableDataPartsVector & parts_from_wal) { for (auto & part : parts_from_wal) @@ -1214,7 +1213,7 @@ void MergeTreeData::loadDataPartsFromWAL( { if ((*it)->checksums.getTotalChecksumHex() == part->checksums.getTotalChecksumHex()) { - LOG_ERROR(log, "Remove duplicate part {}", part->data_part_storage->getFullPath()); + LOG_ERROR(log, "Remove duplicate part {}", part->getDataPartStorage().getFullPath()); duplicate_parts_to_remove.push_back(part); } else @@ -1328,8 +1327,8 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) auto part_lock = lockParts(); data_parts_indexes.clear(); - DataPartsVector broken_parts_to_detach; - DataPartsVector duplicate_parts_to_remove; + MutableDataPartsVector broken_parts_to_detach; + MutableDataPartsVector duplicate_parts_to_remove; if (num_parts > 0) loadDataPartsFromDisk( @@ -1383,7 +1382,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) parts_from_wal.insert( parts_from_wal.end(), std::make_move_iterator(disk_wal_parts.begin()), std::make_move_iterator(disk_wal_parts.end())); - loadDataPartsFromWAL(broken_parts_to_detach, duplicate_parts_to_remove, parts_from_wal); + loadDataPartsFromWAL(duplicate_parts_to_remove, parts_from_wal); num_parts += parts_from_wal.size(); } @@ -1684,6 +1683,14 @@ scope_guard MergeTreeData::getTemporaryPartDirectoryHolder(const String & part_d return [this, part_dir_name]() { temporary_parts.remove(part_dir_name); }; } +MergeTreeData::MutableDataPartPtr MergeTreeData::preparePartForRemoval(const DataPartPtr & part) +{ + if (part->getState() != DataPartState::Deleting) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Cannot remove part {}, because it has state: {}", part->name, magic_enum::enum_name(part->getState())); + + return std::const_pointer_cast(part); +} MergeTreeData::DataPartsVector MergeTreeData::grabOldParts(bool force) { @@ -1859,7 +1866,7 @@ void MergeTreeData::flushAllInMemoryPartsIfNeeded() { if (auto part_in_memory = asInMemoryPart(part)) { - part_in_memory->flushToDisk(part_in_memory->data_part_storage->getPartDirectory(), metadata_snapshot); + part_in_memory->flushToDisk(part_in_memory->getDataPartStorage().getPartDirectory(), metadata_snapshot); } } } @@ -1943,7 +1950,7 @@ void 
MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t if (thread_group) CurrentThread::attachToIfDetached(thread_group); - part->remove(); + preparePartForRemoval(part)->remove(); if (part_names_succeed) { std::lock_guard lock(part_names_mutex); @@ -1959,7 +1966,7 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t LOG_DEBUG(log, "Removing {} parts from filesystem: {}", parts_to_remove.size(), fmt::join(parts_to_remove, ", ")); for (const DataPartPtr & part : parts_to_remove) { - part->remove(); + preparePartForRemoval(part)->remove(); if (part_names_succeed) part_names_succeed->insert(part->name); } @@ -2139,11 +2146,14 @@ void MergeTreeData::rename(const String & new_table_path, const StorageID & new_ if (!getStorageID().hasUUID()) getContext()->dropCaches(); + /// TODO: remove const_cast for (const auto & part : data_parts_by_info) - part->data_part_storage->changeRootPath(relative_data_path, new_table_path); + { + auto & part_mutable = const_cast(*part); + part_mutable.getDataPartStorage().changeRootPath(relative_data_path, new_table_path); + } relative_data_path = new_table_path; - renameInMemory(new_table_id); } @@ -2744,7 +2754,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::createPart( const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const { MergeTreeDataPartType type; - auto mrk_ext = MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(data_part_storage); + auto mrk_ext = MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(*data_part_storage); if (mrk_ext) { @@ -2983,7 +2993,7 @@ void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction assert([&]() { - String dir_name = fs::path(part->data_part_storage->getRelativePath()).filename(); + String dir_name = fs::path(part->getDataPartStorage().getRelativePath()).filename(); bool may_be_cleaned_up = dir_name.starts_with("tmp_") || dir_name.starts_with("tmp-fetch_"); return !may_be_cleaned_up || temporary_parts.contains(dir_name); }()); @@ -3000,7 +3010,7 @@ bool MergeTreeData::renameTempPartAndReplaceImpl( DataPartsLock & lock, DataPartsVector * out_covered_parts) { - LOG_TRACE(log, "Renaming temporary part {} to {}.", part->data_part_storage->getPartDirectory(), part->name); + LOG_TRACE(log, "Renaming temporary part {} to {}.", part->getDataPartStorage().getPartDirectory(), part->name); if (&out_transaction.data != this) throw Exception("MergeTreeData::Transaction for one table cannot be used with another. 
It is a bug.", @@ -3231,9 +3241,9 @@ void MergeTreeData::outdateBrokenPartAndCloneToDetached(const DataPartPtr & part { auto metadata_snapshot = getInMemoryMetadataPtr(); if (prefix.empty()) - LOG_INFO(log, "Cloning part {} to {} and making it obsolete.", part_to_detach->data_part_storage->getPartDirectory(), part_to_detach->name); + LOG_INFO(log, "Cloning part {} to {} and making it obsolete.", part_to_detach->getDataPartStorage().getPartDirectory(), part_to_detach->name); else - LOG_INFO(log, "Cloning part {} to {}_{} and making it obsolete.", part_to_detach->data_part_storage->getPartDirectory(), prefix, part_to_detach->name); + LOG_INFO(log, "Cloning part {} to {}_{} and making it obsolete.", part_to_detach->getDataPartStorage().getPartDirectory(), prefix, part_to_detach->name); part_to_detach->makeCloneInDetached(prefix, metadata_snapshot); @@ -3245,9 +3255,9 @@ void MergeTreeData::outdateBrokenPartAndCloneToDetached(const DataPartPtr & part void MergeTreeData::forcefullyMovePartToDetachedAndRemoveFromMemory(const MergeTreeData::DataPartPtr & part_to_detach, const String & prefix, bool restore_covered) { if (prefix.empty()) - LOG_INFO(log, "Renaming {} to {} and forgetting it.", part_to_detach->data_part_storage->getPartDirectory(), part_to_detach->name); + LOG_INFO(log, "Renaming {} to {} and forgetting it.", part_to_detach->getDataPartStorage().getPartDirectory(), part_to_detach->name); else - LOG_INFO(log, "Renaming {} to {}_{} and forgetting it.", part_to_detach->data_part_storage->getPartDirectory(), prefix, part_to_detach->name); + LOG_INFO(log, "Renaming {} to {}_{} and forgetting it.", part_to_detach->getDataPartStorage().getPartDirectory(), prefix, part_to_detach->name); auto lock = lockParts(); bool removed_active_part = false; @@ -3270,9 +3280,7 @@ void MergeTreeData::forcefullyMovePartToDetachedAndRemoveFromMemory(const MergeT } modifyPartState(it_part, DataPartState::Deleting); - - part->renameToDetached(prefix); - + preparePartForRemoval(part)->renameToDetached(prefix); data_parts_indexes.erase(it_part); if (restore_covered && part->info.level == 0) @@ -3426,7 +3434,7 @@ void MergeTreeData::tryRemovePartImmediately(DataPartPtr && part) try { - part_to_delete->remove(); + preparePartForRemoval(part_to_delete)->remove(); } catch (...) 
{ @@ -3636,9 +3644,9 @@ void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy) /// when allow_remote_fs_zero_copy_replication turned on and off again original_active_part->force_keep_shared_data = false; - if (original_active_part->data_part_storage->supportZeroCopyReplication() && - part_copy->data_part_storage->supportZeroCopyReplication() && - original_active_part->data_part_storage->getUniqueId() == part_copy->data_part_storage->getUniqueId()) + if (original_active_part->getDataPartStorage().supportZeroCopyReplication() && + part_copy->getDataPartStorage().supportZeroCopyReplication() && + original_active_part->getDataPartStorage().getUniqueId() == part_copy->getDataPartStorage().getUniqueId()) { /// May be when several volumes use the same S3/HDFS storage original_active_part->force_keep_shared_data = true; @@ -3658,7 +3666,7 @@ void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy) /// All other locks are taken in StorageReplicatedMergeTree lockSharedData(*part_copy); - original_active_part->data_part_storage->writeDeleteOnDestroyMarker(log); + original_active_part->getDataPartStorage().writeDeleteOnDestroyMarker(log); return; } } @@ -3792,9 +3800,9 @@ MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const String & part_na static void loadPartAndFixMetadataImpl(MergeTreeData::MutableDataPartPtr part) { part->loadColumnsChecksumsIndexes(false, true); - part->modification_time = part->data_part_storage->getLastModified().epochTime(); - part->data_part_storage->removeDeleteOnDestroyMarker(); - part->data_part_storage->removeVersionMetadata(); + part->modification_time = part->getDataPartStorage().getLastModified().epochTime(); + part->getDataPartStorage().removeDeleteOnDestroyMarker(); + part->getDataPartStorage().removeVersionMetadata(); } void MergeTreeData::calculateColumnAndSecondaryIndexSizesImpl() @@ -3954,7 +3962,7 @@ void MergeTreeData::movePartitionToDisk(const ASTPtr & partition, const String & auto disk = getStoragePolicy()->getDiskByName(name); std::erase_if(parts, [&](auto part_ptr) { - return part_ptr->data_part_storage->getDiskName() == disk->getName(); + return part_ptr->getDataPartStorage().getDiskName() == disk->getName(); }); if (parts.empty()) @@ -4004,7 +4012,7 @@ void MergeTreeData::movePartitionToVolume(const ASTPtr & partition, const String { for (const auto & disk : volume->getDisks()) { - if (part_ptr->data_part_storage->getDiskName() == disk->getName()) + if (part_ptr->getDataPartStorage().getDiskName() == disk->getName()) { return true; } @@ -4201,7 +4209,7 @@ BackupEntries MergeTreeData::backupParts(const DataPartsVector & data_parts, con make_temporary_hard_links = false; hold_storage_and_part_ptrs = true; } - else if (supportsReplication() && part->data_part_storage->supportZeroCopyReplication() && getSettings()->allow_remote_fs_zero_copy_replication) + else if (supportsReplication() && part->getDataPartStorage().supportZeroCopyReplication() && getSettings()->allow_remote_fs_zero_copy_replication) { /// Hard links don't work correctly with zero copy replication. 
make_temporary_hard_links = false; @@ -4213,7 +4221,7 @@ BackupEntries MergeTreeData::backupParts(const DataPartsVector & data_parts, con table_lock = lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout); BackupEntries backup_entries_from_part; - part->data_part_storage->backup( + part->getDataPartStorage().backup( part->checksums, part->getFileNamesWithoutChecksums(), data_path_in_backup, @@ -4224,7 +4232,7 @@ BackupEntries MergeTreeData::backupParts(const DataPartsVector & data_parts, con auto projection_parts = part->getProjectionParts(); for (const auto & [projection_name, projection_part] : projection_parts) { - projection_part->data_part_storage->backup( + projection_part->getDataPartStorage().backup( projection_part->checksums, projection_part->getFileNamesWithoutChecksums(), fs::path{data_path_in_backup} / part->name, @@ -4900,16 +4908,16 @@ ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, SpacePtr space) return checkAndReturnReservation(expected_size, std::move(reservation)); } -ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, const MutableDataPartStoragePtr & data_part_storage) +ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, const IDataPartStorage & data_part_storage) { expected_size = std::max(RESERVATION_MIN_ESTIMATION_SIZE, expected_size); - return data_part_storage->reserve(expected_size); + return data_part_storage.reserve(expected_size); } -ReservationPtr MergeTreeData::tryReserveSpace(UInt64 expected_size, const MutableDataPartStoragePtr & data_part_storage) +ReservationPtr MergeTreeData::tryReserveSpace(UInt64 expected_size, const IDataPartStorage & data_part_storage) { expected_size = std::max(RESERVATION_MIN_ESTIMATION_SIZE, expected_size); - return data_part_storage->tryReserve(expected_size); + return data_part_storage.tryReserve(expected_size); } ReservationPtr MergeTreeData::tryReserveSpace(UInt64 expected_size, SpacePtr space) @@ -5058,11 +5066,11 @@ bool MergeTreeData::isPartInTTLDestination(const TTLDescription & ttl, const IMe if (ttl.destination_type == DataDestinationType::VOLUME) { for (const auto & disk : policy->getVolumeByName(ttl.destination_name)->getDisks()) - if (disk->getName() == part.data_part_storage->getDiskName()) + if (disk->getName() == part.getDataPartStorage().getDiskName()) return true; } else if (ttl.destination_type == DataDestinationType::DISK) - return policy->getDiskByName(ttl.destination_name)->getName() == part.data_part_storage->getDiskName(); + return policy->getDiskByName(ttl.destination_name)->getName() == part.getDataPartStorage().getDiskName(); return false; } @@ -5134,7 +5142,7 @@ void MergeTreeData::Transaction::rollbackPartsToTemporaryState() WriteBufferFromOwnString buf; buf << " Rollbacking parts state to temporary and removing from working set:"; for (const auto & part : precommitted_parts) - buf << " " << part->data_part_storage->getPartDirectory(); + buf << " " << part->getDataPartStorage().getPartDirectory(); buf << "."; LOG_DEBUG(data.log, "Undoing transaction.{}", buf.str()); @@ -5159,7 +5167,7 @@ void MergeTreeData::Transaction::rollback() WriteBufferFromOwnString buf; buf << " Removing parts:"; for (const auto & part : precommitted_parts) - buf << " " << part->data_part_storage->getPartDirectory(); + buf << " " << part->getDataPartStorage().getPartDirectory(); buf << "."; LOG_DEBUG(data.log, "Undoing transaction.{}", buf.str()); @@ -5188,8 +5196,8 @@ MergeTreeData::DataPartsVector 
MergeTreeData::Transaction::commit(MergeTreeData: auto * owing_parts_lock = acquired_parts_lock ? acquired_parts_lock : &parts_lock; for (const auto & part : precommitted_parts) - if (part->data_part_storage->hasActiveTransaction()) - part->data_part_storage->commitTransaction(); + if (part->getDataPartStorage().hasActiveTransaction()) + part->getDataPartStorage().commitTransaction(); bool commit_to_wal = has_in_memory_parts && settings->in_memory_parts_enable_wal; if (txn || commit_to_wal) @@ -6210,7 +6218,7 @@ std::pair MergeTreeData::cloneAn bool does_storage_policy_allow_same_disk = false; for (const DiskPtr & disk : getStoragePolicy()->getDisks()) { - if (disk->getName() == src_part->data_part_storage->getDiskName()) + if (disk->getName() == src_part->getDataPartStorage().getDiskName()) { does_storage_policy_allow_same_disk = true; break; @@ -6220,7 +6228,7 @@ std::pair MergeTreeData::cloneAn throw Exception( ErrorCodes::BAD_ARGUMENTS, "Could not clone and load part {} because disk does not belong to storage policy", - quoteString(src_part->data_part_storage->getFullPath())); + quoteString(src_part->getDataPartStorage().getFullPath())); String dst_part_name = src_part->getNewName(dst_part_info); assert(!tmp_part_prefix.empty()); @@ -6228,9 +6236,8 @@ std::pair MergeTreeData::cloneAn auto temporary_directory_lock = getTemporaryPartDirectoryHolder(tmp_dst_part_name); /// Why it is needed if we only hardlink files? - auto reservation = src_part->data_part_storage->reserve(src_part->getBytesOnDisk()); - - auto src_part_storage = src_part->data_part_storage; + auto reservation = src_part->getDataPartStorage().reserve(src_part->getBytesOnDisk()); + auto src_part_storage = src_part->getDataPartStoragePtr(); /// If source part is in memory, flush it to disk and clone it already in on-disk format if (auto src_part_in_memory = asInMemoryPart(src_part)) @@ -6257,7 +6264,7 @@ std::pair MergeTreeData::cloneAn hardlinked_files->source_part_name = src_part->name; hardlinked_files->source_table_shared_id = src_part->storage.getTableSharedID(); - for (auto it = src_part->data_part_storage->iterate(); it->isValid(); it->next()) + for (auto it = src_part->getDataPartStorage().iterate(); it->isValid(); it->next()) { if (!files_to_copy_instead_of_hardlinks.contains(it->name()) && it->name() != IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME @@ -6316,14 +6323,14 @@ Strings MergeTreeData::getDataPaths() const void MergeTreeData::reportBrokenPart(MergeTreeData::DataPartPtr & data_part) const { - if (data_part->data_part_storage && data_part->data_part_storage->isBroken()) + if (data_part->getDataPartStorage().isBroken()) { auto parts = getDataPartsForInternalUsage(); - LOG_WARNING(log, "Scanning parts to recover on broken disk {}@{}.", data_part->data_part_storage->getDiskName(), data_part->data_part_storage->getDiskPath()); + LOG_WARNING(log, "Scanning parts to recover on broken disk {}@{}.", data_part->getDataPartStorage().getDiskName(), data_part->getDataPartStorage().getDiskPath()); for (const auto & part : parts) { - if (part->data_part_storage && part->data_part_storage->getDiskName() == data_part->data_part_storage->getDiskName()) + if (part->getDataPartStorage().getDiskName() == data_part->getDataPartStorage().getDiskName()) broken_part_callback(part->name); } } @@ -6414,7 +6421,7 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher( LOG_DEBUG(log, "Freezing part {} snapshot will be placed at {}", part->name, backup_path); - auto data_part_storage = part->data_part_storage; 
+ auto data_part_storage = part->getDataPartStoragePtr(); String src_part_path = data_part_storage->getRelativePath(); String backup_part_path = fs::path(backup_path) / relative_data_path; if (auto part_in_memory = asInMemoryPart(part)) @@ -6428,12 +6435,12 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher( // Store metadata for replicated table. // Do nothing for non-replicated. - createAndStoreFreezeMetadata(disk, part, fs::path(backup_part_path) / part->data_part_storage->getPartDirectory()); + createAndStoreFreezeMetadata(disk, part, fs::path(backup_part_path) / part->getDataPartStorage().getPartDirectory()); }; auto new_storage = data_part_storage->freeze( backup_part_path, - part->data_part_storage->getPartDirectory(), + part->getDataPartStorage().getPartDirectory(), /*make_source_readonly*/ true, callback, /*copy_instead_of_hardlink*/ false, @@ -6555,8 +6562,8 @@ try if (result_part) { - part_log_elem.disk_name = result_part->data_part_storage->getDiskName(); - part_log_elem.path_on_disk = result_part->data_part_storage->getFullPath(); + part_log_elem.disk_name = result_part->getDataPartStorage().getDiskName(); + part_log_elem.path_on_disk = result_part->getDataPartStorage().getFullPath(); part_log_elem.bytes_compressed_on_disk = result_part->getBytesOnDisk(); part_log_elem.rows = result_part->rows_count; part_log_elem.part_type = result_part->getType(); @@ -6712,7 +6719,7 @@ bool MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagge for (const auto & moving_part : moving_tagger->parts_to_move) { Stopwatch stopwatch; - DataPartPtr cloned_part; + MutableDataPartPtr cloned_part; auto write_part_log = [&](const ExecutionStatus & execution_status) { @@ -6975,7 +6982,7 @@ ReservationPtr MergeTreeData::balancedReservation( if (part->isStoredOnDisk() && part->getBytesOnDisk() >= min_bytes_to_rebalance_partition_over_jbod && part_info.partition_id == part->info.partition_id) { - auto name = part->data_part_storage->getDiskName(); + auto name = part->getDataPartStorage().getDiskName(); auto it = disk_occupation.find(name); if (it != disk_occupation.end()) { diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 327718d15ed..2b67face570 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -214,6 +214,7 @@ public: }; using DataParts = std::set; + using MutableDataParts = std::set; using DataPartsVector = std::vector; using DataPartsLock = std::unique_lock; @@ -275,8 +276,8 @@ public: MergeTreeData & data; MergeTreeTransaction * txn; - DataParts precommitted_parts; - DataParts locked_parts; + MutableDataParts precommitted_parts; + MutableDataParts locked_parts; bool has_in_memory_parts = false; void clear(); @@ -413,8 +414,8 @@ public: SelectQueryInfo & info) const override; ReservationPtr reserveSpace(UInt64 expected_size, VolumePtr & volume) const; - static ReservationPtr tryReserveSpace(UInt64 expected_size, const MutableDataPartStoragePtr & data_part_storage); - static ReservationPtr reserveSpace(UInt64 expected_size, const MutableDataPartStoragePtr & data_part_storage); + static ReservationPtr tryReserveSpace(UInt64 expected_size, const IDataPartStorage & data_part_storage); + static ReservationPtr reserveSpace(UInt64 expected_size, const IDataPartStorage & data_part_storage); static bool partsContainSameProjections(const DataPartPtr & left, const DataPartPtr & right); @@ -974,7 +975,7 @@ public: /// Fetch part only if some replica has it on shared storage 
like S3 /// Overridden in StorageReplicatedMergeTree - virtual bool tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) { return false; } + virtual MutableDataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) { return nullptr; } /// Check shared data usage on other replicas for detached/freezed part /// Remove local files and remote files if needed @@ -1259,7 +1260,6 @@ protected: static void incrementMergedPartsProfileEvent(MergeTreeDataPartType type); private: - /// Checking that candidate part doesn't break invariants: correct partition and doesn't exist already void checkPartCanBeAddedToTable(MutableDataPartPtr & part, DataPartsLock & lock) const; @@ -1328,8 +1328,8 @@ private: virtual std::unique_ptr getDefaultSettings() const = 0; void loadDataPartsFromDisk( - DataPartsVector & broken_parts_to_detach, - DataPartsVector & duplicate_parts_to_remove, + MutableDataPartsVector & broken_parts_to_detach, + MutableDataPartsVector & duplicate_parts_to_remove, ThreadPool & pool, size_t num_parts, std::queue>> & parts_queue, @@ -1337,8 +1337,7 @@ private: const MergeTreeSettingsPtr & settings); void loadDataPartsFromWAL( - DataPartsVector & broken_parts_to_detach, - DataPartsVector & duplicate_parts_to_remove, + MutableDataPartsVector & duplicate_parts_to_remove, MutableDataPartsVector & parts_from_wal); /// Create zero-copy exclusive lock for part and disk. Useful for coordination of @@ -1350,6 +1349,8 @@ private: /// Otherwise, in non-parallel case will break and return. void clearPartsFromFilesystemImpl(const DataPartsVector & parts, NameSet * part_names_succeed); + static MutableDataPartPtr preparePartForRemoval(const DataPartPtr & part); + TemporaryParts temporary_parts; }; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index c96003c8938..4d5dea94d44 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -482,7 +482,7 @@ MergeTaskPtr MergeTreeDataMergerMutator::mergePartsToTemporaryPart( const Names & deduplicate_by_columns, const MergeTreeData::MergingParams & merging_params, const MergeTreeTransactionPtr & txn, - const IMergeTreeDataPart * parent_part, + IMergeTreeDataPart * parent_part, const String & suffix) { return std::make_shared( diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index d1b9d3c99e7..5d98f526325 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -113,7 +113,7 @@ public: const Names & deduplicate_by_columns, const MergeTreeData::MergingParams & merging_params, const MergeTreeTransactionPtr & txn, - const IMergeTreeDataPart * parent_part = nullptr, + IMergeTreeDataPart * parent_part = nullptr, const String & suffix = ""); /// Mutate a single data part with the specified commands. Will create and return a temporary part. 
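The hunks above and below all apply one pattern: direct use of the data_part_storage member is replaced with getDataPartStorage() accessors, so const parts expose only a const IDataPartStorage &, while mutation (removal, rename, root-path changes) requires an explicitly mutable part, obtained for doomed parts via preparePartForRemoval. The following is a minimal sketch of that pattern, using pared-down stand-in types (ExampleStorage, ExamplePart, is_deleting) rather than the real IMergeTreeDataPart / IDataPartStorage definitions from this patch.

#include <memory>
#include <stdexcept>
#include <string>

/// Pared-down stand-in for a storage object: read-only queries are const,
/// anything that touches the filesystem layout is non-const.
struct ExampleStorage
{
    std::string getFullPath() const { return root + "/" + name; }
    void changeRootPath(const std::string & new_root) { root = new_root; }

    std::string root;
    std::string name;
};

/// Pared-down stand-in for a data part that owns its storage behind a shared_ptr.
struct ExamplePart
{
    /// const parts hand out read-only storage ...
    const ExampleStorage & getDataPartStorage() const { return *storage; }
    /// ... and only mutable parts hand out mutable storage.
    ExampleStorage & getDataPartStorage() { return *storage; }

    std::shared_ptr<ExampleStorage> storage;
    bool is_deleting = false;
};

/// Same idea as MergeTreeData::preparePartForRemoval in the hunk above: the working set
/// holds shared_ptr<const Part>, and the const is cast away only once a part has been
/// moved to the Deleting state, right before its files are removed.
inline std::shared_ptr<ExamplePart> preparePartForRemoval(const std::shared_ptr<const ExamplePart> & part)
{
    if (!part->is_deleting)
        throw std::logic_error(
            "Cannot remove part " + part->getDataPartStorage().getFullPath()
            + ": it is not in Deleting state");
    return std::const_pointer_cast<ExamplePart>(part);
}

Throwing unless the part is already Deleting keeps the cast localised: the only code paths that can reach a mutable part are the removal and detach paths that already own it exclusively, which is the invariant the signature changes in this patch are meant to enforce.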
diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp index 4fedacee13c..a537b44d9ea 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp @@ -96,21 +96,21 @@ void MergeTreeDataPartCompact::calculateEachColumnSizes(ColumnSizeByName & /*eac void MergeTreeDataPartCompact::loadIndexGranularityImpl( MergeTreeIndexGranularity & index_granularity_, const MergeTreeIndexGranularityInfo & index_granularity_info_, - size_t columns_count, const DataPartStoragePtr & data_part_storage_) + size_t columns_count, const IDataPartStorage & data_part_storage_) { if (!index_granularity_info_.mark_type.adaptive) throw Exception("MergeTreeDataPartCompact cannot be created with non-adaptive granulary.", ErrorCodes::NOT_IMPLEMENTED); auto marks_file_path = index_granularity_info_.getMarksFilePath("data"); - if (!data_part_storage_->exists(marks_file_path)) + if (!data_part_storage_.exists(marks_file_path)) throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "Marks file '{}' doesn't exist", - std::string(fs::path(data_part_storage_->getFullPath()) / marks_file_path)); + std::string(fs::path(data_part_storage_.getFullPath()) / marks_file_path)); - size_t marks_file_size = data_part_storage_->getFileSize(marks_file_path); + size_t marks_file_size = data_part_storage_.getFileSize(marks_file_path); - std::unique_ptr buffer = data_part_storage_->readFile( + std::unique_ptr buffer = data_part_storage_.readFile( marks_file_path, ReadSettings().adjustBufferSize(marks_file_size), marks_file_size, std::nullopt); std::unique_ptr marks_reader; @@ -139,7 +139,7 @@ void MergeTreeDataPartCompact::loadIndexGranularity() if (columns.empty()) throw Exception("No columns in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART); - loadIndexGranularityImpl(index_granularity, index_granularity_info, columns.size(), data_part_storage); + loadIndexGranularityImpl(index_granularity, index_granularity_info, columns.size(), getDataPartStorage()); } bool MergeTreeDataPartCompact::hasColumnFiles(const NameAndTypePair & column) const @@ -170,12 +170,12 @@ void MergeTreeDataPartCompact::checkConsistency(bool require_part_metadata) cons throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "No marks file checksum for column in part {}", - data_part_storage->getFullPath()); + getDataPartStorage().getFullPath()); if (!checksums.files.contains(DATA_FILE_NAME_WITH_EXTENSION)) throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "No data file checksum for in part {}", - data_part_storage->getFullPath()); + getDataPartStorage().getFullPath()); } } else @@ -183,33 +183,33 @@ void MergeTreeDataPartCompact::checkConsistency(bool require_part_metadata) cons { /// count.txt should be present even in non custom-partitioned parts std::string file_path = "count.txt"; - if (!data_part_storage->exists(file_path) || data_part_storage->getFileSize(file_path) == 0) + if (!getDataPartStorage().exists(file_path) || getDataPartStorage().getFileSize(file_path) == 0) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Part {} is broken: {} is empty", - data_part_storage->getRelativePath(), - std::string(fs::path(data_part_storage->getFullPath()) / file_path)); + getDataPartStorage().getRelativePath(), + std::string(fs::path(getDataPartStorage().getFullPath()) / file_path)); } /// Check that marks are nonempty and have the consistent size with columns number. 
- if (data_part_storage->exists(mrk_file_name)) + if (getDataPartStorage().exists(mrk_file_name)) { - UInt64 file_size = data_part_storage->getFileSize(mrk_file_name); + UInt64 file_size = getDataPartStorage().getFileSize(mrk_file_name); if (!file_size) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Part {} is broken: {} is empty.", - data_part_storage->getRelativePath(), - std::string(fs::path(data_part_storage->getFullPath()) / mrk_file_name)); + getDataPartStorage().getRelativePath(), + std::string(fs::path(getDataPartStorage().getFullPath()) / mrk_file_name)); UInt64 expected_file_size = index_granularity_info.getMarkSizeInBytes(columns.size()) * index_granularity.getMarksCount(); if (expected_file_size != file_size) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Part {} is broken: bad size of marks file '{}': {}, must be: {}", - data_part_storage->getRelativePath(), - std::string(fs::path(data_part_storage->getFullPath()) / mrk_file_name), + getDataPartStorage().getRelativePath(), + std::string(fs::path(getDataPartStorage().getFullPath()) / mrk_file_name), std::to_string(file_size), std::to_string(expected_file_size)); } } @@ -217,12 +217,12 @@ void MergeTreeDataPartCompact::checkConsistency(bool require_part_metadata) cons bool MergeTreeDataPartCompact::isStoredOnRemoteDisk() const { - return data_part_storage->isStoredOnRemoteDisk(); + return getDataPartStorage().isStoredOnRemoteDisk(); } bool MergeTreeDataPartCompact::isStoredOnRemoteDiskWithZeroCopySupport() const { - return data_part_storage->supportZeroCopyReplication(); + return getDataPartStorage().supportZeroCopyReplication(); } MergeTreeDataPartCompact::~MergeTreeDataPartCompact() diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.h b/src/Storages/MergeTree/MergeTreeDataPartCompact.h index 7c3fe012616..e275c586cb9 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.h @@ -67,7 +67,7 @@ public: protected: static void loadIndexGranularityImpl( MergeTreeIndexGranularity & index_granularity_, const MergeTreeIndexGranularityInfo & index_granularity_info_, - size_t columns_count, const DataPartStoragePtr & data_part_storage_); + size_t columns_count, const IDataPartStorage & data_part_storage_); private: void checkConsistency(bool require_part_metadata) const override; diff --git a/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp b/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp index c30efbc7969..48b1b6bab60 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -71,12 +72,18 @@ IMergeTreeDataPart::MergeTreeWriterPtr MergeTreeDataPartInMemory::getWriter( MutableDataPartStoragePtr MergeTreeDataPartInMemory::flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const { - auto current_full_path = data_part_storage->getFullPath(); - auto new_data_part_storage = data_part_storage->clone(); + auto reservation = storage.reserveSpace(block.bytes(), getDataPartStorage()); + VolumePtr volume = storage.getStoragePolicy()->getVolume(0); + VolumePtr data_part_volume = createVolumeFromReservation(reservation, volume); + + auto new_data_part_storage = std::make_shared( + data_part_volume, + storage.getRelativeDataPath(), + new_relative_path); - new_data_part_storage->setRelativePath(new_relative_path); new_data_part_storage->beginTransaction(); + 
auto current_full_path = getDataPartStorage().getFullPath(); auto new_type = storage.choosePartTypeOnDisk(block.bytes(), rows_count); auto new_data_part = storage.createPart(name, new_type, info, new_data_part_storage); @@ -148,12 +155,9 @@ void MergeTreeDataPartInMemory::makeCloneInDetached(const String & prefix, const flushToDisk(detached_path, metadata_snapshot); } -void MergeTreeDataPartInMemory::renameTo(const String & new_relative_path, bool /* remove_new_dir_if_exists */) const +void MergeTreeDataPartInMemory::renameTo(const String & new_relative_path, bool /* remove_new_dir_if_exists */) { - data_part_storage->setRelativePath(new_relative_path); - - if (data_part_storage) - data_part_storage->setRelativePath(new_relative_path); + getDataPartStorage().setRelativePath(new_relative_path); } void MergeTreeDataPartInMemory::calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const diff --git a/src/Storages/MergeTree/MergeTreeDataPartInMemory.h b/src/Storages/MergeTree/MergeTreeDataPartInMemory.h index 49bc5eff1ea..e58701b04a1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartInMemory.h +++ b/src/Storages/MergeTree/MergeTreeDataPartInMemory.h @@ -46,7 +46,7 @@ public: bool isStoredOnRemoteDiskWithZeroCopySupport() const override { return false; } bool hasColumnFiles(const NameAndTypePair & column) const override { return !!getColumnPosition(column.getNameInStorage()); } String getFileNameForColumn(const NameAndTypePair & /* column */) const override { return ""; } - void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) const override; + void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) override; void makeCloneInDetached(const String & prefix, const StorageMetadataPtr & metadata_snapshot) const override; MutableDataPartStoragePtr flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index 18467f2cef7..2418960f992 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -104,18 +104,18 @@ ColumnSize MergeTreeDataPartWide::getColumnSizeImpl( void MergeTreeDataPartWide::loadIndexGranularityImpl( MergeTreeIndexGranularity & index_granularity_, MergeTreeIndexGranularityInfo & index_granularity_info_, - const DataPartStoragePtr & data_part_storage_, const std::string & any_column_file_name) + const IDataPartStorage & data_part_storage_, const std::string & any_column_file_name) { index_granularity_info_.changeGranularityIfRequired(data_part_storage_); /// We can use any column, it doesn't matter std::string marks_file_path = index_granularity_info_.getMarksFilePath(any_column_file_name); - if (!data_part_storage_->exists(marks_file_path)) + if (!data_part_storage_.exists(marks_file_path)) throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "Marks file '{}' doesn't exist", - std::string(fs::path(data_part_storage_->getFullPath()) / marks_file_path)); + std::string(fs::path(data_part_storage_.getFullPath()) / marks_file_path)); - size_t marks_file_size = data_part_storage_->getFileSize(marks_file_path); + size_t marks_file_size = data_part_storage_.getFileSize(marks_file_path); if (!index_granularity_info_.mark_type.adaptive && !index_granularity_info_.mark_type.compressed) { @@ -125,7 +125,7 @@ void MergeTreeDataPartWide::loadIndexGranularityImpl( } else { - auto marks_file = 
data_part_storage_->readFile(marks_file_path, ReadSettings().adjustBufferSize(marks_file_size), marks_file_size, std::nullopt); + auto marks_file = data_part_storage_.readFile(marks_file_path, ReadSettings().adjustBufferSize(marks_file_size), marks_file_size, std::nullopt); std::unique_ptr marks_reader; if (!index_granularity_info_.mark_type.compressed) @@ -162,18 +162,18 @@ void MergeTreeDataPartWide::loadIndexGranularity() if (columns.empty()) throw Exception("No columns in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART); - loadIndexGranularityImpl(index_granularity, index_granularity_info, data_part_storage, getFileNameForColumn(columns.front())); + loadIndexGranularityImpl(index_granularity, index_granularity_info, getDataPartStorage(), getFileNameForColumn(columns.front())); } bool MergeTreeDataPartWide::isStoredOnRemoteDisk() const { - return data_part_storage->isStoredOnRemoteDisk(); + return getDataPartStorage().isStoredOnRemoteDisk(); } bool MergeTreeDataPartWide::isStoredOnRemoteDiskWithZeroCopySupport() const { - return data_part_storage->supportZeroCopyReplication(); + return getDataPartStorage().supportZeroCopyReplication(); } MergeTreeDataPartWide::~MergeTreeDataPartWide() @@ -202,13 +202,13 @@ void MergeTreeDataPartWide::checkConsistency(bool require_part_metadata) const throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "No {} file checksum for column {} in part {} ", - mrk_file_name, name_type.name, data_part_storage->getFullPath()); + mrk_file_name, name_type.name, getDataPartStorage().getFullPath()); if (!checksums.files.contains(bin_file_name)) throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "No {} file checksum for column {} in part ", - bin_file_name, name_type.name, data_part_storage->getFullPath()); + bin_file_name, name_type.name, getDataPartStorage().getFullPath()); }); } } @@ -224,23 +224,23 @@ void MergeTreeDataPartWide::checkConsistency(bool require_part_metadata) const auto file_path = ISerialization::getFileNameForStream(name_type, substream_path) + marks_file_extension; /// Missing file is Ok for case when new column was added. 
- if (data_part_storage->exists(file_path)) + if (getDataPartStorage().exists(file_path)) { - UInt64 file_size = data_part_storage->getFileSize(file_path); + UInt64 file_size = getDataPartStorage().getFileSize(file_path); if (!file_size) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Part {} is broken: {} is empty.", - data_part_storage->getFullPath(), - std::string(fs::path(data_part_storage->getFullPath()) / file_path)); + getDataPartStorage().getFullPath(), + std::string(fs::path(getDataPartStorage().getFullPath()) / file_path)); if (!marks_size) marks_size = file_size; else if (file_size != *marks_size) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, - "Part {} is broken: marks have different sizes.", data_part_storage->getFullPath()); + "Part {} is broken: marks have different sizes.", getDataPartStorage().getFullPath()); } }); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.h b/src/Storages/MergeTree/MergeTreeDataPartWide.h index 4343148b175..601bdff51a1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.h @@ -1,5 +1,6 @@ #pragma once +#include "Storages/MergeTree/IDataPartStorage.h" #include namespace DB @@ -63,7 +64,7 @@ public: protected: static void loadIndexGranularityImpl( MergeTreeIndexGranularity & index_granularity_, MergeTreeIndexGranularityInfo & index_granularity_info_, - const DataPartStoragePtr & data_part_storage_, const std::string & any_column_file_name); + const IDataPartStorage & data_part_storage_, const std::string & any_column_file_name); private: void checkConsistency(bool require_part_metadata) const override; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 6821c52f0d2..c9f3c3b5101 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -21,13 +21,13 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( : MergeTreeDataPartWriterOnDisk(data_part_, columns_list_, metadata_snapshot_, indices_to_recalc_, marks_file_extension_, default_codec_, settings_, index_granularity_) - , plain_file(data_part_->data_part_storage->writeFile( + , plain_file(data_part_->getDataPartStorage().writeFile( MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION, settings.max_compress_block_size, settings_.query_write_settings)) , plain_hashing(*plain_file) { - marks_file = data_part_->data_part_storage->writeFile( + marks_file = data_part_->getDataPartStorage().writeFile( MergeTreeDataPartCompact::DATA_FILE_NAME + marks_file_extension_, 4096, settings_.query_write_settings); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 383cd79734f..d085bb29b20 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -114,8 +114,8 @@ MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk( if (settings.blocks_are_granules_size && !index_granularity.empty()) throw Exception("Can't take information about index granularity from blocks, when non empty index_granularity array specified", ErrorCodes::LOGICAL_ERROR); - if (!data_part->data_part_storage->exists()) - data_part->data_part_storage->createDirectories(); + if (!data_part->getDataPartStorage().exists()) + data_part->getDataPartStorage().createDirectories(); if (settings.rewrite_primary_key) 
initPrimaryIndex(); @@ -176,7 +176,7 @@ void MergeTreeDataPartWriterOnDisk::initPrimaryIndex() if (metadata_snapshot->hasPrimaryKey()) { String index_name = "primary" + getIndexExtension(compress_primary_key); - index_file_stream = data_part->data_part_storage->writeFile(index_name, DBMS_DEFAULT_BUFFER_SIZE, settings.query_write_settings); + index_file_stream = data_part->getDataPartStorage().writeFile(index_name, DBMS_DEFAULT_BUFFER_SIZE, settings.query_write_settings); index_file_hashing_stream = std::make_unique(*index_file_stream); if (compress_primary_key) @@ -202,7 +202,7 @@ void MergeTreeDataPartWriterOnDisk::initSkipIndices() skip_indices_streams.emplace_back( std::make_unique( stream_name, - data_part->data_part_storage, + data_part->getDataPartStoragePtr(), stream_name, index_helper->getSerializedFileExtension(), stream_name, marks_file_extension, default_codec, settings.max_compress_block_size, diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 5c6e69abe4d..f48b350a981 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -116,7 +116,7 @@ void MergeTreeDataPartWriterWide::addStreams( column_streams[stream_name] = std::make_unique( stream_name, - data_part->data_part_storage, + data_part->getDataPartStoragePtr(), stream_name, DATA_FILE_EXTENSION, stream_name, marks_file_extension, compression_codec, @@ -421,17 +421,17 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai String bin_path = escaped_name + DATA_FILE_EXTENSION; /// Some columns may be removed because of ttl. Skip them. - if (!data_part->data_part_storage->exists(mrk_path)) + if (!data_part->getDataPartStorage().exists(mrk_path)) return; - auto mrk_file_in = data_part->data_part_storage->readFile(mrk_path, {}, std::nullopt, std::nullopt); + auto mrk_file_in = data_part->getDataPartStorage().readFile(mrk_path, {}, std::nullopt, std::nullopt); std::unique_ptr mrk_in; if (data_part->index_granularity_info.mark_type.compressed) mrk_in = std::make_unique(std::move(mrk_file_in)); else mrk_in = std::move(mrk_file_in); - DB::CompressedReadBufferFromFile bin_in(data_part->data_part_storage->readFile(bin_path, {}, std::nullopt, std::nullopt)); + DB::CompressedReadBufferFromFile bin_in(data_part->getDataPartStorage().readFile(bin_path, {}, std::nullopt, std::nullopt)); bool must_be_last = false; UInt64 offset_in_compressed_file = 0; UInt64 offset_in_decompressed_block = 0; @@ -482,7 +482,7 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai if (index_granularity_rows != index_granularity.getMarkRows(mark_num)) throw Exception( ErrorCodes::LOGICAL_ERROR, "Incorrect mark rows for part {} for mark #{} (compressed offset {}, decompressed offset {}), in-memory {}, on disk {}, total marks {}", - data_part->data_part_storage->getFullPath(), mark_num, offset_in_compressed_file, offset_in_decompressed_block, index_granularity.getMarkRows(mark_num), index_granularity_rows, index_granularity.getMarksCount()); + data_part->getDataPartStorage().getFullPath(), mark_num, offset_in_compressed_file, offset_in_decompressed_block, index_granularity.getMarkRows(mark_num), index_granularity_rows, index_granularity.getMarksCount()); auto column = type->createColumn(); diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 91ecb3a37a0..b99b9047308 100644 --- 
a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -1618,10 +1618,10 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex( UncompressedCache * uncompressed_cache, Poco::Logger * log) { - if (!index_helper->getDeserializedFormat(part->data_part_storage, index_helper->getFileName())) + if (!index_helper->getDeserializedFormat(part->getDataPartStorage(), index_helper->getFileName())) { LOG_DEBUG(log, "File for index {} does not exist ({}.*). Skipping it.", backQuote(index_helper->index.name), - (fs::path(part->data_part_storage->getFullPath()) / index_helper->getFileName()).string()); + (fs::path(part->getDataPartStorage().getFullPath()) / index_helper->getFileName()).string()); return ranges; } @@ -1736,7 +1736,7 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingMergedIndex( { for (const auto & index_helper : indices) { - if (!part->data_part_storage->exists(index_helper->getFileName() + ".idx")) + if (!part->getDataPartStorage().exists(index_helper->getFileName() + ".idx")) { LOG_DEBUG(log, "File for index {} does not exist. Skipping it.", backQuote(index_helper->index.name)); return ranges; diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 62d01a2d555..856a684d18d 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -405,9 +405,9 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( if (new_data_part->isStoredOnDisk()) { /// The name could be non-unique in case of stale files from previous runs. - String full_path = new_data_part->data_part_storage->getFullPath(); + String full_path = new_data_part->getDataPartStorage().getFullPath(); - if (new_data_part->data_part_storage->exists()) + if (new_data_part->getDataPartStorage().exists()) { LOG_WARNING(log, "Removing old temporary directory {}", full_path); data_part_storage->removeRecursive(); @@ -493,7 +493,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( const StorageMetadataPtr & metadata_snapshot = projection.metadata; MergeTreePartInfo new_part_info("all", 0, 0, 0); - auto projection_part_storage = parent_part->data_part_storage->getProjection(relative_path); + auto projection_part_storage = parent_part->getDataPartStorage().getProjection(relative_path); auto new_data_part = data.createPart( part_name, part_type, @@ -600,7 +600,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPart( /// Size of part would not be greater than block.bytes() + epsilon size_t expected_size = block.bytes(); // just check if there is enough space on parent volume - data.reserveSpace(expected_size, parent_part->data_part_storage); + data.reserveSpace(expected_size, parent_part->getDataPartStorage()); part_type = data.choosePartTypeOnDisk(expected_size, block.rows()); } @@ -637,7 +637,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempProjectionPart( /// Size of part would not be greater than block.bytes() + epsilon size_t expected_size = block.bytes(); // just check if there is enough space on parent volume - data.reserveSpace(expected_size, parent_part->data_part_storage); + data.reserveSpace(expected_size, parent_part->getDataPartStorage()); part_type = data.choosePartTypeOnDisk(expected_size, block.rows()); } diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp index 
9c154f786f7..11e1f9efcc2 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp @@ -89,10 +89,10 @@ std::string MarkType::getFileExtension() const } -std::optional MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(const DataPartStoragePtr & data_part_storage) +std::optional MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(const IDataPartStorage & data_part_storage) { - if (data_part_storage->exists()) - for (auto it = data_part_storage->iterate(); it->isValid(); it->next()) + if (data_part_storage.exists()) + for (auto it = data_part_storage.iterate(); it->isValid(); it->next()) if (it->isFile()) if (std::string ext = fs::path(it->name()).extension(); MarkType::isMarkFileExtension(ext)) return ext; @@ -110,7 +110,7 @@ MergeTreeIndexGranularityInfo::MergeTreeIndexGranularityInfo(const MergeTreeData fixed_index_granularity = storage.getSettings()->index_granularity; } -void MergeTreeIndexGranularityInfo::changeGranularityIfRequired(const DataPartStoragePtr & data_part_storage) +void MergeTreeIndexGranularityInfo::changeGranularityIfRequired(const IDataPartStorage & data_part_storage) { auto mrk_ext = getMarksExtensionFromFilesystem(data_part_storage); if (mrk_ext && !MarkType(*mrk_ext).adaptive) diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h index 883fe3c899e..aed3081d3d0 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h @@ -48,7 +48,7 @@ public: MergeTreeIndexGranularityInfo(MergeTreeDataPartType type_, bool is_adaptive_, size_t index_granularity_, size_t index_granularity_bytes_); - void changeGranularityIfRequired(const DataPartStoragePtr & data_part_storage); + void changeGranularityIfRequired(const IDataPartStorage & data_part_storage); String getMarksFilePath(const String & path_prefix) const { @@ -57,7 +57,7 @@ public: size_t getMarkSizeInBytes(size_t columns_num = 1) const; - static std::optional getMarksExtensionFromFilesystem(const DataPartStoragePtr & data_part_storage); + static std::optional getMarksExtensionFromFilesystem(const IDataPartStorage & data_part_storage); }; constexpr inline auto getNonAdaptiveMrkSizeWide() { return sizeof(UInt64) * 2; } diff --git a/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp index b190ac2b2fd..43e655a4ee5 100644 --- a/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp @@ -211,11 +211,11 @@ bool MergeTreeIndexMinMax::mayBenefitFromIndexForIn(const ASTPtr & node) const return false; } -MergeTreeIndexFormat MergeTreeIndexMinMax::getDeserializedFormat(const DataPartStoragePtr & data_part_storage, const std::string & relative_path_prefix) const +MergeTreeIndexFormat MergeTreeIndexMinMax::getDeserializedFormat(const IDataPartStorage & data_part_storage, const std::string & relative_path_prefix) const { - if (data_part_storage->exists(relative_path_prefix + ".idx2")) + if (data_part_storage.exists(relative_path_prefix + ".idx2")) return {2, ".idx2"}; - else if (data_part_storage->exists(relative_path_prefix + ".idx")) + else if (data_part_storage.exists(relative_path_prefix + ".idx")) return {1, ".idx"}; return {0 /* unknown */, ""}; } diff --git a/src/Storages/MergeTree/MergeTreeIndexMinMax.h b/src/Storages/MergeTree/MergeTreeIndexMinMax.h index 0566a15d535..af420613855 100644 --- 
a/src/Storages/MergeTree/MergeTreeIndexMinMax.h +++ b/src/Storages/MergeTree/MergeTreeIndexMinMax.h @@ -83,7 +83,7 @@ public: bool mayBenefitFromIndexForIn(const ASTPtr & node) const override; const char* getSerializedFileExtension() const override { return ".idx2"; } - MergeTreeIndexFormat getDeserializedFormat(const DataPartStoragePtr & data_part_storage, const std::string & path_prefix) const override; /// NOLINT + MergeTreeIndexFormat getDeserializedFormat(const IDataPartStorage & data_part_storage, const std::string & path_prefix) const override; /// NOLINT }; } diff --git a/src/Storages/MergeTree/MergeTreeIndexReader.cpp b/src/Storages/MergeTree/MergeTreeIndexReader.cpp index 33106f7ab64..7d7024a8ac2 100644 --- a/src/Storages/MergeTree/MergeTreeIndexReader.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexReader.cpp @@ -20,7 +20,7 @@ std::unique_ptr makeIndexReader( auto * load_marks_threadpool = settings.read_settings.load_marks_asynchronously ? &context->getLoadMarksThreadpool() : nullptr; return std::make_unique( - part->data_part_storage, + part->getDataPartStoragePtr(), index->getFileName(), extension, marks_count, all_mark_ranges, std::move(settings), mark_cache, uncompressed_cache, @@ -44,7 +44,7 @@ MergeTreeIndexReader::MergeTreeIndexReader( MergeTreeReaderSettings settings) : index(index_) { - auto index_format = index->getDeserializedFormat(part_->data_part_storage, index->getFileName()); + auto index_format = index->getDeserializedFormat(part_->getDataPartStorage(), index->getFileName()); stream = makeIndexReader( index_format.extension, diff --git a/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h index 14002534c94..6a671c31944 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -148,9 +148,9 @@ struct IMergeTreeIndex /// Returns extension for deserialization. /// /// Return pair. 
- virtual MergeTreeIndexFormat getDeserializedFormat(const DataPartStoragePtr & data_part_storage, const std::string & relative_path_prefix) const + virtual MergeTreeIndexFormat getDeserializedFormat(const IDataPartStorage & data_part_storage, const std::string & relative_path_prefix) const { - if (data_part_storage->exists(relative_path_prefix + ".idx")) + if (data_part_storage.exists(relative_path_prefix + ".idx")) return {1, ".idx"}; return {0 /*unknown*/, ""}; } diff --git a/src/Storages/MergeTree/MergeTreePartition.cpp b/src/Storages/MergeTree/MergeTreePartition.cpp index 47f07e7bb08..10f5cc95baf 100644 --- a/src/Storages/MergeTree/MergeTreePartition.cpp +++ b/src/Storages/MergeTree/MergeTreePartition.cpp @@ -382,7 +382,7 @@ void MergeTreePartition::load(const MergeTreeData & storage, const PartMetadataM partition_key_sample.getByPosition(i).type->getDefaultSerialization()->deserializeBinary(value[i], *file); } -std::unique_ptr MergeTreePartition::store(const MergeTreeData & storage, const MutableDataPartStoragePtr & data_part_storage, MergeTreeDataPartChecksums & checksums) const +std::unique_ptr MergeTreePartition::store(const MergeTreeData & storage, IDataPartStorage & data_part_storage, MergeTreeDataPartChecksums & checksums) const { auto metadata_snapshot = storage.getInMemoryMetadataPtr(); const auto & context = storage.getContext(); @@ -390,12 +390,12 @@ std::unique_ptr MergeTreePartition::store(const MergeTr return store(partition_key_sample, data_part_storage, checksums, context->getWriteSettings()); } -std::unique_ptr MergeTreePartition::store(const Block & partition_key_sample, const MutableDataPartStoragePtr & data_part_storage, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const +std::unique_ptr MergeTreePartition::store(const Block & partition_key_sample, IDataPartStorage & data_part_storage, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const { if (!partition_key_sample) return nullptr; - auto out = data_part_storage->writeFile("partition.dat", DBMS_DEFAULT_BUFFER_SIZE, settings); + auto out = data_part_storage.writeFile("partition.dat", DBMS_DEFAULT_BUFFER_SIZE, settings); HashingWriteBuffer out_hashing(*out); for (size_t i = 0; i < value.size(); ++i) { diff --git a/src/Storages/MergeTree/MergeTreePartition.h b/src/Storages/MergeTree/MergeTreePartition.h index c9c6723df5a..78b141f26ec 100644 --- a/src/Storages/MergeTree/MergeTreePartition.h +++ b/src/Storages/MergeTree/MergeTreePartition.h @@ -44,8 +44,8 @@ public: /// Store functions return write buffer with written but not finalized data. /// User must call finish() for returned object. 
- [[nodiscard]] std::unique_ptr store(const MergeTreeData & storage, const MutableDataPartStoragePtr & data_part_storage, MergeTreeDataPartChecksums & checksums) const; - [[nodiscard]] std::unique_ptr store(const Block & partition_key_sample, const MutableDataPartStoragePtr & data_part_storage, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const; + [[nodiscard]] std::unique_ptr store(const MergeTreeData & storage, IDataPartStorage & data_part_storage, MergeTreeDataPartChecksums & checksums) const; + [[nodiscard]] std::unique_ptr store(const Block & partition_key_sample, IDataPartStorage & data_part_storage, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const; void assign(const MergeTreePartition & other) { value = other.value; } diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index 5a3f138d727..bd277ca4374 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -140,7 +140,7 @@ bool MergeTreePartsMover::selectPartsForMove( auto ttl_entry = selectTTLDescriptionForTTLInfos(metadata_snapshot->getMoveTTLs(), part->ttl_infos.moves_ttl, time_of_move, true); auto to_insert = need_to_move.end(); - if (auto disk_it = part->data_part_storage->isStoredOnDisk(need_to_move_disks); disk_it != need_to_move_disks.end()) + if (auto disk_it = part->getDataPartStorage().isStoredOnDisk(need_to_move_disks); disk_it != need_to_move_disks.end()) to_insert = need_to_move.find(*disk_it); ReservationPtr reservation; @@ -199,7 +199,7 @@ bool MergeTreePartsMover::selectPartsForMove( return false; } -MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEntry & moving_part) const +MergeTreeMutableDataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEntry & moving_part) const { if (moves_blocker.isCancelled()) throw Exception("Cancelled moving parts.", ErrorCodes::ABORTED); @@ -207,7 +207,7 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt auto settings = data->getSettings(); auto part = moving_part.part; auto disk = moving_part.reserved_space->getDisk(); - LOG_DEBUG(log, "Cloning part {} from '{}' to '{}'", part->name, part->data_part_storage->getDiskName(), disk->getName()); + LOG_DEBUG(log, "Cloning part {} from '{}' to '{}'", part->name, part->getDataPartStorage().getDiskName(), disk->getName()); MutableDataPartStoragePtr cloned_part_storage; if (disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication) @@ -215,7 +215,7 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt /// Try zero-copy replication and fallback to default copy if it's not possible moving_part.part->assertOnDisk(); String path_to_clone = fs::path(data->getRelativeDataPath()) / MergeTreeData::MOVING_DIR_NAME / ""; - String relative_path = part->data_part_storage->getPartDirectory(); + String relative_path = part->getDataPartStorage().getPartDirectory(); if (disk->exists(path_to_clone + relative_path)) { LOG_WARNING(log, "Path {} already exists. 
Will remove it and clone again.", fullPath(disk, path_to_clone + relative_path)); @@ -224,16 +224,12 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt disk->createDirectories(path_to_clone); - bool is_fetched = data->tryToFetchIfShared(*part, disk, fs::path(path_to_clone) / part->name); + cloned_part_storage = data->tryToFetchIfShared(*part, disk, fs::path(path_to_clone) / part->name); - if (!is_fetched) + if (!cloned_part_storage) { LOG_INFO(log, "Part {} was not fetched, we are the first who move it to another disk, so we will copy it", part->name); - cloned_part_storage = part->data_part_storage->clonePart(path_to_clone, part->data_part_storage->getPartDirectory(), disk, log); - } - else - { - cloned_part_storage = part->data_part_storage->clone(); + cloned_part_storage = part->getDataPartStorage().clonePart(path_to_clone, part->getDataPartStorage().getPartDirectory(), disk, log); } } else @@ -242,16 +238,16 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt } auto cloned_part = data->createPart(part->name, cloned_part_storage); - LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part->data_part_storage->getFullPath()); + LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part->getDataPartStorage().getFullPath()); cloned_part->loadColumnsChecksumsIndexes(true, true); cloned_part->loadVersionMetadata(); - cloned_part->modification_time = cloned_part->data_part_storage->getLastModified().epochTime(); + cloned_part->modification_time = cloned_part->getDataPartStorage().getLastModified().epochTime(); return cloned_part; } -void MergeTreePartsMover::swapClonedPart(const MergeTreeData::DataPartPtr & cloned_part) const +void MergeTreePartsMover::swapClonedPart(const MergeTreeMutableDataPartPtr & cloned_part) const { if (moves_blocker.isCancelled()) throw Exception("Cancelled moving parts.", ErrorCodes::ABORTED); @@ -261,7 +257,7 @@ void MergeTreePartsMover::swapClonedPart(const MergeTreeData::DataPartPtr & clon /// It's ok, because we don't block moving parts for merges or mutations if (!active_part || active_part->name != cloned_part->name) { - LOG_INFO(log, "Failed to swap {}. Active part doesn't exist. Possible it was merged or mutated. Will remove copy on path '{}'.", cloned_part->name, cloned_part->data_part_storage->getFullPath()); + LOG_INFO(log, "Failed to swap {}. Active part doesn't exist. Possible it was merged or mutated. Will remove copy on path '{}'.", cloned_part->name, cloned_part->getDataPartStorage().getFullPath()); return; } @@ -271,7 +267,7 @@ void MergeTreePartsMover::swapClonedPart(const MergeTreeData::DataPartPtr & clon /// TODO what happen if server goes down here? data->swapActivePart(cloned_part); - LOG_TRACE(log, "Part {} was moved to {}", cloned_part->name, cloned_part->data_part_storage->getFullPath()); + LOG_TRACE(log, "Part {} was moved to {}", cloned_part->name, cloned_part->getDataPartStorage().getFullPath()); } } diff --git a/src/Storages/MergeTree/MergeTreePartsMover.h b/src/Storages/MergeTree/MergeTreePartsMover.h index 6ad658c2cb3..0266b2daa46 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.h +++ b/src/Storages/MergeTree/MergeTreePartsMover.h @@ -50,14 +50,14 @@ public: const std::lock_guard & moving_parts_lock); /// Copies part to selected reservation in detached folder. Throws exception if part already exists. 
- MergeTreeDataPartPtr clonePart(const MergeTreeMoveEntry & moving_part) const; + MergeTreeMutableDataPartPtr clonePart(const MergeTreeMoveEntry & moving_part) const; /// Replaces cloned part from detached directory into active data parts set. /// Replacing part changes state to DeleteOnDestroy and will be removed from disk after destructor of ///IMergeTreeDataPart called. If replacing part doesn't exist or is not active (committed), then /// cloned part will be removed and a log message will be reported. It may happen in case of concurrent /// merge or mutation. - void swapClonedPart(const MergeTreeDataPartPtr & cloned_parts) const; + void swapClonedPart(const MergeTreeMutableDataPartPtr & cloned_parts) const; /// Can stop background moves and moves from queries ActionBlocker moves_blocker; diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index 3f51673a6b1..ca9cde0ae61 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -263,7 +263,7 @@ void MergeTreeReadPool::fillPerThreadInfo( { PartInfo part_info{parts[i], per_part_sum_marks[i], i}; if (parts[i].data_part->isStoredOnDisk()) - parts_per_disk[parts[i].data_part->data_part_storage->getDiskName()].push_back(std::move(part_info)); + parts_per_disk[parts[i].data_part->getDataPartStorage().getDiskName()].push_back(std::move(part_info)); else parts_per_disk[""].push_back(std::move(part_info)); } diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index 4801c9a4058..b0488d29f8e 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -59,13 +59,15 @@ MergeTreeReaderCompact::MergeTreeReaderCompact( throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Cannot read to empty buffer."); const String path = MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION; + auto data_part_storage = data_part_info_for_read->getDataPartStorage(); + if (uncompressed_cache) { auto buffer = std::make_unique( - std::string(fs::path(data_part_info_for_read->getDataPartStorage()->getFullPath()) / path), - [this, path]() + std::string(fs::path(data_part_storage->getFullPath()) / path), + [this, path, data_part_storage]() { - return data_part_info_for_read->getDataPartStorage()->readFile( + return data_part_storage->readFile( path, settings.read_settings, std::nullopt, std::nullopt); @@ -87,7 +89,7 @@ MergeTreeReaderCompact::MergeTreeReaderCompact( { auto buffer = std::make_unique( - data_part_info_for_read->getDataPartStorage()->readFile( + data_part_storage->readFile( path, settings.read_settings, std::nullopt, std::nullopt), diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index 6178af83913..a7ddb9d9e1d 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -81,7 +81,7 @@ void MergeTreeSink::consume(Chunk chunk) if (!temp_part.part) continue; - if (!support_parallel_write && temp_part.part->data_part_storage->supportParallelWrite()) + if (!support_parallel_write && temp_part.part->getDataPartStorage().supportParallelWrite()) support_parallel_write = true; if (storage.getDeduplicationLog()) diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 1d0743a0429..991a8d359a8 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ 
b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -94,7 +94,7 @@ void MergedBlockOutputStream::Finalizer::Impl::finish() writer.finish(sync); for (const auto & file_name : files_to_remove_after_finish) - part->data_part_storage->removeFile(file_name); + part->getDataPartStorage().removeFile(file_name); for (auto & file : written_files) { @@ -121,19 +121,19 @@ MergedBlockOutputStream::Finalizer & MergedBlockOutputStream::Finalizer::operato MergedBlockOutputStream::Finalizer::Finalizer(std::unique_ptr impl_) : impl(std::move(impl_)) {} void MergedBlockOutputStream::finalizePart( - MergeTreeData::MutableDataPartPtr & new_part, - bool sync, - const NamesAndTypesList * total_columns_list, - MergeTreeData::DataPart::Checksums * additional_column_checksums) + const MergeTreeMutableDataPartPtr & new_part, + bool sync, + const NamesAndTypesList * total_columns_list, + MergeTreeData::DataPart::Checksums * additional_column_checksums) { finalizePartAsync(new_part, sync, total_columns_list, additional_column_checksums).finish(); } MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( - MergeTreeData::MutableDataPartPtr & new_part, - bool sync, - const NamesAndTypesList * total_columns_list, - MergeTreeData::DataPart::Checksums * additional_column_checksums) + const MergeTreeMutableDataPartPtr & new_part, + bool sync, + const NamesAndTypesList * total_columns_list, + MergeTreeData::DataPart::Checksums * additional_column_checksums) { /// Finish write and get checksums. MergeTreeData::DataPart::Checksums checksums; @@ -183,7 +183,7 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( } MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDisk( - const MergeTreeData::DataPartPtr & new_part, + const MergeTreeMutableDataPartPtr & new_part, MergeTreeData::DataPart::Checksums & checksums) { WrittenFiles written_files; @@ -191,7 +191,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING || isCompactPart(new_part)) { - auto count_out = new_part->data_part_storage->writeFile("count.txt", 4096, write_settings); + auto count_out = new_part->getDataPartStorage().writeFile("count.txt", 4096, write_settings); HashingWriteBuffer count_out_hashing(*count_out); writeIntText(rows_count, count_out_hashing); count_out_hashing.next(); @@ -205,7 +205,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { if (new_part->uuid != UUIDHelpers::Nil) { - auto out = new_part->data_part_storage->writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, write_settings); HashingWriteBuffer out_hashing(*out); writeUUIDText(new_part->uuid, out_hashing); checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_size = out_hashing.count(); @@ -216,12 +216,12 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING) { - if (auto file = new_part->partition.store(storage, new_part->data_part_storage, checksums)) + if (auto file = new_part->partition.store(storage, new_part->getDataPartStorage(), checksums)) written_files.emplace_back(std::move(file)); if (new_part->minmax_idx->initialized) { - auto files = new_part->minmax_idx->store(storage, new_part->data_part_storage, checksums); + 
auto files = new_part->minmax_idx->store(storage, new_part->getDataPartStorage(), checksums); for (auto & file : files) written_files.emplace_back(std::move(file)); } @@ -231,7 +231,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis } { - auto count_out = new_part->data_part_storage->writeFile("count.txt", 4096, write_settings); + auto count_out = new_part->getDataPartStorage().writeFile("count.txt", 4096, write_settings); HashingWriteBuffer count_out_hashing(*count_out); writeIntText(rows_count, count_out_hashing); count_out_hashing.next(); @@ -245,7 +245,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (!new_part->ttl_infos.empty()) { /// Write a file with ttl infos in json format. - auto out = new_part->data_part_storage->writeFile("ttl.txt", 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile("ttl.txt", 4096, write_settings); HashingWriteBuffer out_hashing(*out); new_part->ttl_infos.write(out_hashing); checksums.files["ttl.txt"].file_size = out_hashing.count(); @@ -256,7 +256,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (!new_part->getSerializationInfos().empty()) { - auto out = new_part->data_part_storage->writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, write_settings); HashingWriteBuffer out_hashing(*out); new_part->getSerializationInfos().writeJSON(out_hashing); checksums.files[IMergeTreeDataPart::SERIALIZATION_FILE_NAME].file_size = out_hashing.count(); @@ -267,7 +267,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { /// Write a file with a description of columns. - auto out = new_part->data_part_storage->writeFile("columns.txt", 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile("columns.txt", 4096, write_settings); new_part->getColumns().writeText(*out); out->preFinalize(); written_files.emplace_back(std::move(out)); @@ -275,7 +275,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (default_codec != nullptr) { - auto out = new_part->data_part_storage->writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, write_settings); DB::writeText(queryToString(default_codec->getFullCodecDesc()), *out); out->preFinalize(); written_files.emplace_back(std::move(out)); @@ -288,7 +288,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { /// Write file with checksums. - auto out = new_part->data_part_storage->writeFile("checksums.txt", 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile("checksums.txt", 4096, write_settings); checksums.write(*out); out->preFinalize(); written_files.emplace_back(std::move(out)); diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.h b/src/Storages/MergeTree/MergedBlockOutputStream.h index 799bae8e94b..ad1bb584788 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.h +++ b/src/Storages/MergeTree/MergedBlockOutputStream.h @@ -54,16 +54,16 @@ public: /// Finalize writing part and fill inner structures /// If part is new and contains projections, they should be added before invoking this method. 
Finalizer finalizePartAsync( - MergeTreeData::MutableDataPartPtr & new_part, - bool sync, - const NamesAndTypesList * total_columns_list = nullptr, - MergeTreeData::DataPart::Checksums * additional_column_checksums = nullptr); + const MergeTreeMutableDataPartPtr & new_part, + bool sync, + const NamesAndTypesList * total_columns_list = nullptr, + MergeTreeData::DataPart::Checksums * additional_column_checksums = nullptr); void finalizePart( - MergeTreeData::MutableDataPartPtr & new_part, - bool sync, - const NamesAndTypesList * total_columns_list = nullptr, - MergeTreeData::DataPart::Checksums * additional_column_checksums = nullptr); + const MergeTreeMutableDataPartPtr & new_part, + bool sync, + const NamesAndTypesList * total_columns_list = nullptr, + MergeTreeData::DataPart::Checksums * additional_column_checksums = nullptr); private: /** If `permutation` is given, it rearranges the values in the columns when writing. @@ -73,8 +73,8 @@ private: using WrittenFiles = std::vector>; WrittenFiles finalizePartOnDisk( - const MergeTreeData::DataPartPtr & new_part, - MergeTreeData::DataPart::Checksums & checksums); + const MergeTreeMutableDataPartPtr & new_part, + MergeTreeData::DataPart::Checksums & checksums); NamesAndTypesList columns_list; IMergeTreeDataPart::MinMaxIndex minmax_idx; diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index 310bc849ffe..e4a5a0bc3ba 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -79,7 +79,7 @@ MergedColumnOnlyOutputStream::fillChecksums( for (const String & removed_file : removed_files) { - new_part->data_part_storage->removeFileIfExists(removed_file); + new_part->getDataPartStorage().removeFileIfExists(removed_file); if (all_checksums.files.contains(removed_file)) all_checksums.files.erase(removed_file); diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index a4a75a637a4..9e3cbb0640b 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -92,7 +92,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare() /// Once we mutate part, we must reserve space on the same disk, because mutations can possibly create hardlinks. /// Can throw an exception. 
- reserved_space = storage.reserveSpace(estimated_space_for_result, source_part->data_part_storage); + reserved_space = storage.reserveSpace(estimated_space_for_result, source_part->getDataPartStorage()); table_lock_holder = storage.lockForShare( RWLockImpl::NO_QUERY, storage_settings_ptr->lock_acquire_timeout_for_background_operations); diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 80bf7e3c7d9..e5ba771a198 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -626,7 +626,7 @@ void finalizeMutatedPart( { if (new_data_part->uuid != UUIDHelpers::Nil) { - auto out = new_data_part->data_part_storage->writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, context->getWriteSettings()); + auto out = new_data_part->getDataPartStorage().writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out); writeUUIDText(new_data_part->uuid, out_hashing); new_data_part->checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_size = out_hashing.count(); @@ -636,7 +636,7 @@ void finalizeMutatedPart( if (execute_ttl_type != ExecuteTTLType::NONE) { /// Write a file with ttl infos in json format. - auto out_ttl = new_data_part->data_part_storage->writeFile("ttl.txt", 4096, context->getWriteSettings()); + auto out_ttl = new_data_part->getDataPartStorage().writeFile("ttl.txt", 4096, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out_ttl); new_data_part->ttl_infos.write(out_hashing); new_data_part->checksums.files["ttl.txt"].file_size = out_hashing.count(); @@ -645,7 +645,7 @@ void finalizeMutatedPart( if (!new_data_part->getSerializationInfos().empty()) { - auto out = new_data_part->data_part_storage->writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, context->getWriteSettings()); + auto out = new_data_part->getDataPartStorage().writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out); new_data_part->getSerializationInfos().writeJSON(out_hashing); new_data_part->checksums.files[IMergeTreeDataPart::SERIALIZATION_FILE_NAME].file_size = out_hashing.count(); @@ -654,18 +654,18 @@ void finalizeMutatedPart( { /// Write file with checksums. - auto out_checksums = new_data_part->data_part_storage->writeFile("checksums.txt", 4096, context->getWriteSettings()); + auto out_checksums = new_data_part->getDataPartStorage().writeFile("checksums.txt", 4096, context->getWriteSettings()); new_data_part->checksums.write(*out_checksums); } /// close fd { - auto out = new_data_part->data_part_storage->writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, context->getWriteSettings()); + auto out = new_data_part->getDataPartStorage().writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, context->getWriteSettings()); DB::writeText(queryToString(codec->getFullCodecDesc()), *out); } /// close fd { /// Write a file with a description of columns. 
- auto out_columns = new_data_part->data_part_storage->writeFile("columns.txt", 4096, context->getWriteSettings()); + auto out_columns = new_data_part->getDataPartStorage().writeFile("columns.txt", 4096, context->getWriteSettings()); new_data_part->getColumns().writeText(*out_columns); } /// close fd @@ -1141,7 +1141,7 @@ private: void prepare() { - ctx->new_data_part->data_part_storage->createDirectories(); + ctx->new_data_part->getDataPartStorage().createDirectories(); /// Note: this is done before creating input streams, because otherwise data.data_parts_mutex /// (which is locked in data.getTotalActiveSizeInBytes()) @@ -1271,7 +1271,7 @@ private: if (ctx->execute_ttl_type != ExecuteTTLType::NONE) ctx->files_to_skip.insert("ttl.txt"); - ctx->new_data_part->data_part_storage->createDirectories(); + ctx->new_data_part->getDataPartStorage().createDirectories(); /// We should write version metadata on part creation to distinguish it from parts that were created without transaction. TransactionID tid = ctx->txn ? ctx->txn->tid : Tx::PrehistoricTID; @@ -1282,7 +1282,7 @@ private: NameSet hardlinked_files; /// Create hardlinks for unchanged files - for (auto it = ctx->source_part->data_part_storage->iterate(); it->isValid(); it->next()) + for (auto it = ctx->source_part->getDataPartStorage().iterate(); it->isValid(); it->next()) { if (ctx->files_to_skip.contains(it->name())) continue; @@ -1308,17 +1308,17 @@ private: if (it->isFile()) { - ctx->new_data_part->data_part_storage->createHardLinkFrom( - *ctx->source_part->data_part_storage, it->name(), destination); + ctx->new_data_part->getDataPartStorage().createHardLinkFrom( + ctx->source_part->getDataPartStorage(), it->name(), destination); hardlinked_files.insert(it->name()); } else if (!endsWith(it->name(), ".tmp_proj")) // ignore projection tmp merge dir { // it's a projection part directory - ctx->new_data_part->data_part_storage->createProjection(destination); + ctx->new_data_part->getDataPartStorage().createProjection(destination); - auto projection_data_part_storage_src = ctx->source_part->data_part_storage->getProjection(destination); - auto projection_data_part_storage_dst = ctx->new_data_part->data_part_storage->getProjection(destination); + auto projection_data_part_storage_src = ctx->source_part->getDataPartStorage().getProjection(destination); + auto projection_data_part_storage_dst = ctx->new_data_part->getDataPartStorage().getProjection(destination); for (auto p_it = projection_data_part_storage_src->iterate(); p_it->isValid(); p_it->next()) { diff --git a/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp b/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp index 7eb868f7754..30823d593a2 100644 --- a/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp +++ b/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp @@ -8,20 +8,10 @@ namespace DB { -static std::unique_ptr openForReading(const DataPartStoragePtr & data_part_storage, const String & path) -{ - size_t file_size = data_part_storage->getFileSize(path); - return data_part_storage->readFile(path, ReadSettings().adjustBufferSize(file_size), file_size, std::nullopt); -} - -PartMetadataManagerOrdinary::PartMetadataManagerOrdinary(const IMergeTreeDataPart * part_) : IPartMetadataManager(part_) -{ -} - - std::unique_ptr PartMetadataManagerOrdinary::read(const String & file_name) const { - auto res = openForReading(part->data_part_storage, file_name); + size_t file_size = part->getDataPartStorage().getFileSize(file_name); + auto res = 
part->getDataPartStorage().readFile(file_name, ReadSettings().adjustBufferSize(file_size), file_size, std::nullopt); if (isCompressedFromFileName(file_name)) return std::make_unique(std::move(res)); @@ -31,7 +21,7 @@ std::unique_ptr PartMetadataManagerOrdinary::read(const String & fil bool PartMetadataManagerOrdinary::exists(const String & file_name) const { - return part->data_part_storage->exists(file_name); + return part->getDataPartStorage().exists(file_name); } diff --git a/src/Storages/MergeTree/PartMetadataManagerOrdinary.h b/src/Storages/MergeTree/PartMetadataManagerOrdinary.h index d86d5c54c00..428b6d4710a 100644 --- a/src/Storages/MergeTree/PartMetadataManagerOrdinary.h +++ b/src/Storages/MergeTree/PartMetadataManagerOrdinary.h @@ -8,7 +8,7 @@ namespace DB class PartMetadataManagerOrdinary : public IPartMetadataManager { public: - explicit PartMetadataManagerOrdinary(const IMergeTreeDataPart * part_); + explicit PartMetadataManagerOrdinary(const IMergeTreeDataPart * part_) : IPartMetadataManager(part_) {} ~PartMetadataManagerOrdinary() override = default; diff --git a/src/Storages/MergeTree/PartMetadataManagerWithCache.cpp b/src/Storages/MergeTree/PartMetadataManagerWithCache.cpp index ee0970984f9..90fd25bc4e7 100644 --- a/src/Storages/MergeTree/PartMetadataManagerWithCache.cpp +++ b/src/Storages/MergeTree/PartMetadataManagerWithCache.cpp @@ -31,24 +31,24 @@ PartMetadataManagerWithCache::PartMetadataManagerWithCache(const IMergeTreeDataP String PartMetadataManagerWithCache::getKeyFromFilePath(const String & file_path) const { - return part->data_part_storage->getDiskName() + ":" + file_path; + return part->getDataPartStorage().getDiskName() + ":" + file_path; } String PartMetadataManagerWithCache::getFilePathFromKey(const String & key) const { - return key.substr(part->data_part_storage->getDiskName().size() + 1); + return key.substr(part->getDataPartStorage().getDiskName().size() + 1); } std::unique_ptr PartMetadataManagerWithCache::read(const String & file_name) const { - String file_path = fs::path(part->data_part_storage->getRelativePath()) / file_name; + String file_path = fs::path(part->getDataPartStorage().getRelativePath()) / file_name; String key = getKeyFromFilePath(file_path); String value; auto status = cache->get(key, value); if (!status.ok()) { ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheMiss); - auto in = part->data_part_storage->readFile(file_name, {}, std::nullopt, std::nullopt); + auto in = part->getDataPartStorage().readFile(file_name, {}, std::nullopt, std::nullopt); std::unique_ptr reader; if (!isCompressedFromFileName(file_name)) reader = std::move(in); @@ -67,7 +67,7 @@ std::unique_ptr PartMetadataManagerWithCache::read(const String & fi bool PartMetadataManagerWithCache::exists(const String & file_name) const { - String file_path = fs::path(part->data_part_storage->getRelativePath()) / file_name; + String file_path = fs::path(part->getDataPartStorage().getRelativePath()) / file_name; String key = getKeyFromFilePath(file_path); String value; auto status = cache->get(key, value); @@ -79,7 +79,7 @@ bool PartMetadataManagerWithCache::exists(const String & file_name) const else { ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheMiss); - return part->data_part_storage->exists(file_name); + return part->getDataPartStorage().exists(file_name); } } @@ -91,7 +91,7 @@ void PartMetadataManagerWithCache::deleteAll(bool include_projection) String value; for (const auto & file_name : file_names) { - String file_path = 
fs::path(part->data_part_storage->getRelativePath()) / file_name; + String file_path = fs::path(part->getDataPartStorage().getRelativePath()) / file_name; String key = getKeyFromFilePath(file_path); auto status = cache->del(key); if (!status.ok()) @@ -119,10 +119,10 @@ void PartMetadataManagerWithCache::updateAll(bool include_projection) String read_value; for (const auto & file_name : file_names) { - String file_path = fs::path(part->data_part_storage->getRelativePath()) / file_name; - if (!part->data_part_storage->exists(file_name)) + String file_path = fs::path(part->getDataPartStorage().getRelativePath()) / file_name; + if (!part->getDataPartStorage().exists(file_name)) continue; - auto in = part->data_part_storage->readFile(file_name, {}, std::nullopt, std::nullopt); + auto in = part->getDataPartStorage().readFile(file_name, {}, std::nullopt, std::nullopt); readStringUntilEOF(value, *in); String key = getKeyFromFilePath(file_path); @@ -159,7 +159,7 @@ void PartMetadataManagerWithCache::assertAllDeleted(bool include_projection) con file_name = fs::path(file_path).filename(); /// Metadata file belongs to current part - if (fs::path(part->data_part_storage->getRelativePath()) / file_name == file_path) + if (fs::path(part->getDataPartStorage().getRelativePath()) / file_name == file_path) throw Exception( ErrorCodes::LOGICAL_ERROR, "Data part {} with type {} with meta file {} still in cache", @@ -173,7 +173,7 @@ void PartMetadataManagerWithCache::assertAllDeleted(bool include_projection) con const auto & projection_parts = part->getProjectionParts(); for (const auto & [projection_name, projection_part] : projection_parts) { - if (fs::path(part->data_part_storage->getRelativePath()) / (projection_name + ".proj") / file_name == file_path) + if (fs::path(part->getDataPartStorage().getRelativePath()) / (projection_name + ".proj") / file_name == file_path) { throw Exception( ErrorCodes::LOGICAL_ERROR, @@ -190,7 +190,7 @@ void PartMetadataManagerWithCache::assertAllDeleted(bool include_projection) con void PartMetadataManagerWithCache::getKeysAndCheckSums(Strings & keys, std::vector & checksums) const { - String prefix = getKeyFromFilePath(fs::path(part->data_part_storage->getRelativePath()) / ""); + String prefix = getKeyFromFilePath(fs::path(part->getDataPartStorage().getRelativePath()) / ""); Strings values; cache->getByPrefix(prefix, keys, values); size_t size = keys.size(); @@ -225,7 +225,7 @@ std::unordered_map PartMetadataManagerWit results.emplace(file_name, cache_checksums[i]); /// File belongs to normal part - if (fs::path(part->data_part_storage->getRelativePath()) / file_name == file_path) + if (fs::path(part->getDataPartStorage().getRelativePath()) / file_name == file_path) { auto disk_checksum = part->getActualChecksumByFile(file_name); if (disk_checksum != cache_checksums[i]) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 293c679dcd0..082228d7ebf 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -325,7 +325,7 @@ void ReplicatedMergeTreeSink::commitPart( assertSessionIsNotExpired(zookeeper); - String temporary_part_relative_path = part->data_part_storage->getPartDirectory(); + String temporary_part_relative_path = part->getDataPartStorage().getPartDirectory(); /// There is one case when we need to retry transaction in a loop. /// But don't do it too many times - just as defensive measure. 
diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index d5a838668d2..4758ccb201a 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -1,3 +1,4 @@ +#include "Storages/MergeTree/IDataPartStorage.h" #include #include @@ -46,7 +47,7 @@ bool isNotEnoughMemoryErrorCode(int code) IMergeTreeDataPart::Checksums checkDataPart( MergeTreeData::DataPartPtr data_part, - const DataPartStoragePtr & data_part_storage, + const IDataPartStorage & data_part_storage, const NamesAndTypesList & columns_list, const MergeTreeDataPartType & part_type, const NameSet & files_without_checksums, @@ -64,13 +65,13 @@ IMergeTreeDataPart::Checksums checkDataPart( NamesAndTypesList columns_txt; { - auto buf = data_part_storage->readFile("columns.txt", {}, std::nullopt, std::nullopt); + auto buf = data_part_storage.readFile("columns.txt", {}, std::nullopt, std::nullopt); columns_txt.readText(*buf); assertEOF(*buf); } if (columns_txt != columns_list) - throw Exception("Columns doesn't match in part " + data_part_storage->getFullPath() + throw Exception("Columns doesn't match in part " + data_part_storage.getFullPath() + ". Expected: " + columns_list.toString() + ". Found: " + columns_txt.toString(), ErrorCodes::CORRUPTED_DATA); @@ -78,9 +79,9 @@ IMergeTreeDataPart::Checksums checkDataPart( IMergeTreeDataPart::Checksums checksums_data; /// This function calculates checksum for both compressed and decompressed contents of compressed file. - auto checksum_compressed_file = [](const DataPartStoragePtr & data_part_storage_, const String & file_path) + auto checksum_compressed_file = [](const IDataPartStorage & data_part_storage_, const String & file_path) { - auto file_buf = data_part_storage_->readFile(file_path, {}, std::nullopt, std::nullopt); + auto file_buf = data_part_storage_.readFile(file_path, {}, std::nullopt, std::nullopt); HashingReadBuffer compressed_hashing_buf(*file_buf); CompressedReadBuffer uncompressing_buf(compressed_hashing_buf); HashingReadBuffer uncompressed_hashing_buf(uncompressing_buf); @@ -96,9 +97,9 @@ IMergeTreeDataPart::Checksums checkDataPart( auto ratio_of_defaults = data_part->storage.getSettings()->ratio_of_defaults_for_sparse_serialization; SerializationInfoByName serialization_infos(columns_txt, SerializationInfo::Settings{ratio_of_defaults, false}); - if (data_part_storage->exists(IMergeTreeDataPart::SERIALIZATION_FILE_NAME)) + if (data_part_storage.exists(IMergeTreeDataPart::SERIALIZATION_FILE_NAME)) { - auto serialization_file = data_part_storage->readFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, {}, std::nullopt, std::nullopt); + auto serialization_file = data_part_storage.readFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, {}, std::nullopt, std::nullopt); serialization_infos.readJSON(*serialization_file); } @@ -114,7 +115,7 @@ IMergeTreeDataPart::Checksums checkDataPart( /// It also calculates checksum of projections. 
auto checksum_file = [&](const String & file_name) { - if (data_part_storage->isDirectory(file_name) && endsWith(file_name, ".proj")) + if (data_part_storage.isDirectory(file_name) && endsWith(file_name, ".proj")) { auto projection_name = file_name.substr(0, file_name.size() - sizeof(".proj") + 1); auto pit = data_part->getProjectionParts().find(projection_name); @@ -129,7 +130,7 @@ IMergeTreeDataPart::Checksums checkDataPart( const auto & projection = pit->second; IMergeTreeDataPart::Checksums projection_checksums_data; - auto projection_part_storage = data_part_storage->getProjection(file_name); + auto projection_part_storage = data_part_storage.getProjection(file_name); if (projection->getType() == MergeTreeDataPartType::Compact) { @@ -148,7 +149,7 @@ IMergeTreeDataPart::Checksums checkDataPart( [&](const ISerialization::SubstreamPath & substream_path) { String projection_file_name = ISerialization::getFileNameForStream(projection_column, substream_path) + ".bin"; - projection_checksums_data.files[projection_file_name] = checksum_compressed_file(projection_part_storage, projection_file_name); + projection_checksums_data.files[projection_file_name] = checksum_compressed_file(*projection_part_storage, projection_file_name); }); } } @@ -183,7 +184,7 @@ IMergeTreeDataPart::Checksums checkDataPart( } else { - projection_checksums_data.files[projection_file_name] = checksum_compressed_file(projection_part_storage, projection_file_name); + projection_checksums_data.files[projection_file_name] = checksum_compressed_file(*projection_part_storage, projection_file_name); } } } @@ -195,7 +196,7 @@ IMergeTreeDataPart::Checksums checkDataPart( } else { - auto file_buf = data_part_storage->readFile(file_name, {}, std::nullopt, std::nullopt); + auto file_buf = data_part_storage.readFile(file_name, {}, std::nullopt, std::nullopt); HashingReadBuffer hashing_buf(*file_buf); hashing_buf.ignoreAll(); checksums_data.files[file_name] = IMergeTreeDataPart::Checksums::Checksum(hashing_buf.count(), hashing_buf.getHash()); @@ -224,21 +225,21 @@ IMergeTreeDataPart::Checksums checkDataPart( } else { - throw Exception("Unknown type in part " + data_part_storage->getFullPath(), ErrorCodes::UNKNOWN_PART_TYPE); + throw Exception("Unknown type in part " + data_part_storage.getFullPath(), ErrorCodes::UNKNOWN_PART_TYPE); } /// Checksums from the rest files listed in checksums.txt. May be absent. If present, they are subsequently compared with the actual data checksums. 
IMergeTreeDataPart::Checksums checksums_txt; - if (require_checksums || data_part_storage->exists("checksums.txt")) + if (require_checksums || data_part_storage.exists("checksums.txt")) { - auto buf = data_part_storage->readFile("checksums.txt", {}, std::nullopt, std::nullopt); + auto buf = data_part_storage.readFile("checksums.txt", {}, std::nullopt, std::nullopt); checksums_txt.read(*buf); assertEOF(*buf); } const auto & checksum_files_txt = checksums_txt.files; - for (auto it = data_part_storage->iterate(); it->isValid(); it->next()) + for (auto it = data_part_storage.iterate(); it->isValid(); it->next()) { const String & file_name = it->name(); auto checksum_it = checksums_data.files.find(file_name); @@ -285,7 +286,7 @@ IMergeTreeDataPart::Checksums checkDataPart( return checkDataPart( data_part, - data_part->data_part_storage, + data_part->getDataPartStorage(), data_part->getColumns(), data_part->getType(), data_part->getFileNamesWithoutChecksums(), diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 1aeca1343c2..3b456ab5360 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -378,7 +378,9 @@ CurrentlyMergingPartsTagger::CurrentlyMergingPartsTagger( /// if we mutate part, then we should reserve space on the same disk, because mutations can possibly create hardlinks if (is_mutation) - reserved_space = storage.tryReserveSpace(total_size, future_part->parts[0]->data_part_storage); + { + reserved_space = storage.tryReserveSpace(total_size, future_part->parts[0]->getDataPartStorage()); + } else { IMergeTreeDataPart::TTLInfos ttl_infos; @@ -386,7 +388,7 @@ CurrentlyMergingPartsTagger::CurrentlyMergingPartsTagger( for (auto & part_ptr : future_part->parts) { ttl_infos.update(part_ptr->ttl_infos); - max_volume_index = std::max(max_volume_index, part_ptr->data_part_storage->getVolumeIndex(*storage.getStoragePolicy())); + max_volume_index = std::max(max_volume_index, part_ptr->getDataPartStorage().getVolumeIndex(*storage.getStoragePolicy())); } reserved_space = storage.balancedReservation( @@ -1474,7 +1476,7 @@ void StorageMergeTree::dropPartsImpl(DataPartsVector && parts_to_remove, bool de /// NOTE: no race with background cleanup until we hold pointers to parts for (const auto & part : parts_to_remove) { - LOG_INFO(log, "Detaching {}", part->data_part_storage->getPartDirectory()); + LOG_INFO(log, "Detaching {}", part->getDataPartStorage().getPartDirectory()); part->makeCloneInDetached("", metadata_snapshot); } } @@ -1739,14 +1741,14 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, ContextPtr local_ /// If the checksums file is not present, calculate the checksums and write them to disk. 
String checksums_path = "checksums.txt"; String tmp_checksums_path = "checksums.txt.tmp"; - if (part->isStoredOnDisk() && !part->data_part_storage->exists(checksums_path)) + if (part->isStoredOnDisk() && !part->getDataPartStorage().exists(checksums_path)) { try { auto calculated_checksums = checkDataPart(part, false); calculated_checksums.checkEqual(part->checksums, true); - part->data_part_storage->writeChecksums(part->checksums, local_context->getWriteSettings()); + part->getDataPartStorage().writeChecksums(part->checksums, local_context->getWriteSettings()); part->checkMetadata(); results.emplace_back(part->name, true, "Checksums recounted and written to disk."); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 52a30dd37b3..286a9d6bdd8 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1,5 +1,6 @@ #include +#include #include #include "Common/hex.h" #include @@ -1781,7 +1782,7 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry, bool need_to_che } -bool StorageReplicatedMergeTree::executeFetchShared( +MutableDataPartStoragePtr StorageReplicatedMergeTree::executeFetchShared( const String & source_replica, const String & new_part_name, const DiskPtr & disk, @@ -1790,7 +1791,7 @@ bool StorageReplicatedMergeTree::executeFetchShared( if (source_replica.empty()) { LOG_INFO(log, "No active replica has part {} on shared storage.", new_part_name); - return false; + return nullptr; } const auto storage_settings_ptr = getSettings(); @@ -1847,7 +1848,7 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) /// If DETACH clone parts to detached/ directory for (const auto & part : parts_to_remove) { - LOG_INFO(log, "Detaching {}", part->data_part_storage->getPartDirectory()); + LOG_INFO(log, "Detaching {}", part->getDataPartStorage().getPartDirectory()); part->makeCloneInDetached("", metadata_snapshot); } } @@ -2538,7 +2539,7 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo for (const auto & part : parts_to_remove_from_working_set) { - LOG_INFO(log, "Detaching {}", part->data_part_storage->getPartDirectory()); + LOG_INFO(log, "Detaching {}", part->getDataPartStorage().getPartDirectory()); part->makeCloneInDetached("clone", metadata_snapshot); } } @@ -3890,7 +3891,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora auto source_part = getActiveContainingPart(covered_part_info); /// Fetch for zero-copy replication is cheap and straightforward, so we don't use local clone here - if (source_part && (!settings_ptr->allow_remote_fs_zero_copy_replication || !source_part->data_part_storage->supportZeroCopyReplication())) + if (source_part && (!settings_ptr->allow_remote_fs_zero_copy_replication || !source_part->getDataPartStorage().supportZeroCopyReplication())) { auto source_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksums( source_part->getColumns(), source_part->checksums); @@ -4067,7 +4068,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora } -bool StorageReplicatedMergeTree::fetchExistsPart( +MutableDataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( const String & part_name, const StorageMetadataPtr & metadata_snapshot, const String & source_replica_path, @@ -4082,7 +4083,7 @@ bool StorageReplicatedMergeTree::fetchExistsPart( LOG_DEBUG(log, "Part {} should be deleted after previous attempt before fetch", part->name); 
/// Force immediate parts cleanup to delete the part that was left from the previous fetch attempt. cleanup_thread.wakeup(); - return false; + return nullptr; } { @@ -4090,7 +4091,7 @@ bool StorageReplicatedMergeTree::fetchExistsPart( if (!currently_fetching_parts.insert(part_name).second) { LOG_DEBUG(log, "Part {} is already fetching right now", part_name); - return false; + return nullptr; } } @@ -4142,11 +4143,11 @@ bool StorageReplicatedMergeTree::fetchExistsPart( { part = get_part(); - if (part->data_part_storage->getDiskName() != replaced_disk->getName()) - throw Exception("Part " + part->name + " fetched on wrong disk " + part->data_part_storage->getDiskName(), ErrorCodes::LOGICAL_ERROR); + if (part->getDataPartStorage().getDiskName() != replaced_disk->getName()) + throw Exception("Part " + part->name + " fetched on wrong disk " + part->getDataPartStorage().getDiskName(), ErrorCodes::LOGICAL_ERROR); auto replaced_path = fs::path(replaced_part_path); - part->data_part_storage->rename(replaced_path.parent_path(), replaced_path.filename(), nullptr, true, false); + part->getDataPartStorage().rename(replaced_path.parent_path(), replaced_path.filename(), nullptr, true, false); } catch (const Exception & e) { @@ -4155,7 +4156,7 @@ bool StorageReplicatedMergeTree::fetchExistsPart( if (e.code() == ErrorCodes::DIRECTORY_ALREADY_EXISTS) { LOG_TRACE(log, "Not fetching part: {}", e.message()); - return false; + return nullptr; } throw; @@ -4169,7 +4170,7 @@ bool StorageReplicatedMergeTree::fetchExistsPart( ProfileEvents::increment(ProfileEvents::ReplicatedPartFetches); LOG_DEBUG(log, "Fetched part {} from {}", part_name, source_replica_path); - return true; + return part->getDataPartStoragePtr(); } void StorageReplicatedMergeTree::startup() @@ -7409,7 +7410,7 @@ void StorageReplicatedMergeTree::checkBrokenDisks() for (auto & part : *parts) { - if (part->data_part_storage && part->data_part_storage->getDiskName() == disk_ptr->getName()) + if (part->getDataPartStorage().getDiskName() == disk_ptr->getName()) broken_part_callback(part->name); } continue; @@ -7572,10 +7573,10 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part, { auto settings = getSettings(); - if (!part.data_part_storage || !part.isStoredOnDisk() || !settings->allow_remote_fs_zero_copy_replication) + if (!part.isStoredOnDisk() || !settings->allow_remote_fs_zero_copy_replication) return; - if (!part.data_part_storage->supportZeroCopyReplication()) + if (!part.getDataPartStorage().supportZeroCopyReplication()) return; zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper(); @@ -7586,7 +7587,7 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part, boost::replace_all(id, "/", "_"); Strings zc_zookeeper_paths = getZeroCopyPartPath( - *getSettings(), part.data_part_storage->getDiskType(), getTableSharedID(), + *getSettings(), part.getDataPartStorage().getDiskType(), getTableSharedID(), part.name, zookeeper_path); String path_to_set_hardlinked_files; @@ -7595,7 +7596,7 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part, if (hardlinked_files.has_value() && !hardlinked_files->hardlinks_from_source_part.empty()) { path_to_set_hardlinked_files = getZeroCopyPartPath( - *getSettings(), part.data_part_storage->getDiskType(), hardlinked_files->source_table_shared_id, + *getSettings(), part.getDataPartStorage().getDiskType(), hardlinked_files->source_table_shared_id, hardlinked_files->source_part_name, zookeeper_path)[0]; hardlinks = 
hardlinked_files->hardlinks_from_source_part; @@ -7619,25 +7620,22 @@ std::pair StorageReplicatedMergeTree::unlockSharedData(const IMer if (!settings->allow_remote_fs_zero_copy_replication) return std::make_pair(true, NameSet{}); - if (!part.data_part_storage) - LOG_WARNING(log, "Datapart storage for part {} (temp: {}) is not initialzied", part.name, part.is_temp); - - if (!part.data_part_storage || !part.isStoredOnDisk()) + if (!part.isStoredOnDisk()) { LOG_TRACE(log, "Part {} is not stored on disk, blobs can be removed", part.name); return std::make_pair(true, NameSet{}); } - if (!part.data_part_storage || !part.data_part_storage->supportZeroCopyReplication()) + if (!part.getDataPartStorage().supportZeroCopyReplication()) { LOG_TRACE(log, "Part {} is not stored on zero-copy replicated disk, blobs can be removed", part.name); return std::make_pair(true, NameSet{}); } /// If part is temporary refcount file may be absent - if (part.data_part_storage->exists(IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK)) + if (part.getDataPartStorage().exists(IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK)) { - auto ref_count = part.data_part_storage->getRefCount(IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK); + auto ref_count = part.getDataPartStorage().getRefCount(IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK); if (ref_count > 0) /// Keep part shard info for frozen backups { LOG_TRACE(log, "Part {} has more than zero local references ({}), blobs cannot be removed", part.name, ref_count); @@ -7675,7 +7673,7 @@ std::pair StorageReplicatedMergeTree::unlockSharedData(const IMer return unlockSharedDataByID( part.getUniqueId(), getTableSharedID(), part.name, replica_name, - part.data_part_storage->getDiskType(), zookeeper, *getSettings(), log, zookeeper_path, format_version); + part.getDataPartStorage().getDiskType(), zookeeper, *getSettings(), log, zookeeper_path, format_version); } namespace @@ -7874,7 +7872,7 @@ std::pair StorageReplicatedMergeTree::unlockSharedDataByID( } -bool StorageReplicatedMergeTree::tryToFetchIfShared( +MutableDataPartStoragePtr StorageReplicatedMergeTree::tryToFetchIfShared( const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) @@ -7882,13 +7880,13 @@ bool StorageReplicatedMergeTree::tryToFetchIfShared( const auto settings = getSettings(); auto data_source_description = disk->getDataSourceDescription(); if (!(disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication)) - return false; + return nullptr; String replica = getSharedDataReplica(part, data_source_description.type); /// We can't fetch part when none replicas have this part on a same type remote disk if (replica.empty()) - return false; + return nullptr; return executeFetchShared(replica, part.name, disk, path); } @@ -8160,7 +8158,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP /// The name could be non-unique in case of stale files from previous runs. 
if (data_part_storage->exists()) { - LOG_WARNING(log, "Removing old temporary directory {}", new_data_part->data_part_storage->getFullPath()); + LOG_WARNING(log, "Removing old temporary directory {}", new_data_part->getDataPartStorage().getFullPath()); data_part_storage->removeRecursive(); } diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index c9af1ab5f93..c7399d46ce3 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -263,7 +263,7 @@ public: bool canExecuteFetch(const ReplicatedMergeTreeLogEntry & entry, String & disable_reason) const; /// Fetch part only when it stored on shared storage like S3 - bool executeFetchShared(const String & source_replica, const String & new_part_name, const DiskPtr & disk, const String & path); + MutableDataPartStoragePtr executeFetchShared(const String & source_replica, const String & new_part_name, const DiskPtr & disk, const String & path); /// Lock part in zookeeper for use shared data in several nodes void lockSharedData(const IMergeTreeDataPart & part, bool replace_existing_lock, std::optional hardlinked_files) const override; @@ -283,7 +283,7 @@ public: const String & zookeeper_path_old, MergeTreeDataFormatVersion data_format_version); /// Fetch part only if some replica has it on shared storage like S3 - bool tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override; + MutableDataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override; /// Get best replica having this partition on a same type remote disk String getSharedDataReplica(const IMergeTreeDataPart & part, DataSourceType data_source_type) const; @@ -682,7 +682,7 @@ private: * Used for replace local part on the same s3-shared part in hybrid storage. * Returns false if part is already fetching right now. 
*/ - bool fetchExistsPart( + MutableDataPartStoragePtr fetchExistsPart( const String & part_name, const StorageMetadataPtr & metadata_snapshot, const String & replica_path, diff --git a/src/Storages/System/StorageSystemParts.cpp b/src/Storages/System/StorageSystemParts.cpp index d788efd8860..fa1c26b623d 100644 --- a/src/Storages/System/StorageSystemParts.cpp +++ b/src/Storages/System/StorageSystemParts.cpp @@ -198,9 +198,9 @@ void StorageSystemParts::processNextStorage( if (part->isStoredOnDisk()) { if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getDiskName()); + columns[res_index++]->insert(part->getDataPartStorage().getDiskName()); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getFullPath()); + columns[res_index++]->insert(part->getDataPartStorage().getFullPath()); } else { diff --git a/src/Storages/System/StorageSystemPartsColumns.cpp b/src/Storages/System/StorageSystemPartsColumns.cpp index cc6e69b160f..cd51c767eae 100644 --- a/src/Storages/System/StorageSystemPartsColumns.cpp +++ b/src/Storages/System/StorageSystemPartsColumns.cpp @@ -190,9 +190,9 @@ void StorageSystemPartsColumns::processNextStorage( if (columns_mask[src_index++]) columns[res_index++]->insert(info.engine); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getDiskName()); + columns[res_index++]->insert(part->getDataPartStorage().getDiskName()); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getFullPath()); + columns[res_index++]->insert(part->getDataPartStorage().getFullPath()); if (columns_mask[src_index++]) columns[res_index++]->insert(column.name); diff --git a/src/Storages/System/StorageSystemProjectionParts.cpp b/src/Storages/System/StorageSystemProjectionParts.cpp index 3934e7c9623..37c62ba5eb0 100644 --- a/src/Storages/System/StorageSystemProjectionParts.cpp +++ b/src/Storages/System/StorageSystemProjectionParts.cpp @@ -200,9 +200,9 @@ void StorageSystemProjectionParts::processNextStorage( if (part->isStoredOnDisk()) { if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getDiskName()); + columns[res_index++]->insert(part->getDataPartStorage().getDiskName()); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getFullPath()); + columns[res_index++]->insert(part->getDataPartStorage().getFullPath()); } else { diff --git a/src/Storages/System/StorageSystemProjectionPartsColumns.cpp b/src/Storages/System/StorageSystemProjectionPartsColumns.cpp index 0847010faaa..a5968597885 100644 --- a/src/Storages/System/StorageSystemProjectionPartsColumns.cpp +++ b/src/Storages/System/StorageSystemProjectionPartsColumns.cpp @@ -211,9 +211,9 @@ void StorageSystemProjectionPartsColumns::processNextStorage( if (columns_mask[src_index++]) columns[res_index++]->insert(info.engine); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getDiskName()); + columns[res_index++]->insert(part->getDataPartStorage().getDiskName()); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getFullPath()); + columns[res_index++]->insert(part->getDataPartStorage().getFullPath()); if (columns_mask[src_index++]) columns[res_index++]->insert(column.name); From 4a8326ff0c33758570aee25702ed99143e43d853 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Sun, 23 Oct 2022 23:22:52 +0200 Subject: [PATCH 040/112] fix typo --- src/Storages/SelectQueryInfo.h | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 94a8c1143f3..565594569ce 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -220,7 +220,7 @@ struct SelectQueryInfo Block minmax_count_projection_block; MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr; - // If limit is not 0, that means it's a trival limit query. + // If limit is not 0, that means it's a trivial limit query. UInt64 limit = 0; InputOrderInfoPtr getInputOrderInfo() const From cf375c9732d4380a203a4094574b098ebe9a885b Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Sun, 23 Oct 2022 22:29:24 +0000 Subject: [PATCH 041/112] better interface --- .../MergeTree/DataPartStorageOnDisk.cpp | 176 +++--------------- .../MergeTree/DataPartStorageOnDisk.h | 15 +- src/Storages/MergeTree/IDataPartStorage.h | 24 +-- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 139 +++++++++++++- src/Storages/MergeTree/IMergeTreeDataPart.h | 13 ++ src/Storages/MergeTree/MergeTreeData.cpp | 9 +- src/Storages/StorageMergeTree.cpp | 7 +- 7 files changed, 200 insertions(+), 183 deletions(-) diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp index e28aa359c99..cdbd01efab4 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp @@ -558,154 +558,6 @@ size_t DataPartStorageOnDisk::getVolumeIndex(const IStoragePolicy & storage_poli return storage_policy.getVolumeIndexByDisk(volume->getDisk()); } -void DataPartStorageOnDisk::writeChecksums(const MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const -{ - std::string path = fs::path(root_path) / part_dir / "checksums.txt"; - - try - { - { - auto out = volume->getDisk()->writeFile(path + ".tmp", 4096, WriteMode::Rewrite, settings); - checksums.write(*out); - } - - volume->getDisk()->moveFile(path + ".tmp", path); - } - catch (...) - { - try - { - if (volume->getDisk()->exists(path + ".tmp")) - volume->getDisk()->removeFile(path + ".tmp"); - } - catch (...) - { - tryLogCurrentException("DataPartStorageOnDisk"); - } - - throw; - } -} - -void DataPartStorageOnDisk::writeColumns(const NamesAndTypesList & columns, const WriteSettings & settings) const -{ - std::string path = fs::path(root_path) / part_dir / "columns.txt"; - - try - { - auto buf = volume->getDisk()->writeFile(path + ".tmp", 4096, WriteMode::Rewrite, settings); - columns.writeText(*buf); - buf->finalize(); - - volume->getDisk()->moveFile(path + ".tmp", path); - } - catch (...) - { - try - { - if (volume->getDisk()->exists(path + ".tmp")) - volume->getDisk()->removeFile(path + ".tmp"); - } - catch (...) - { - tryLogCurrentException("DataPartStorageOnDisk"); - } - - throw; - } -} - -void DataPartStorageOnDisk::writeVersionMetadata(const VersionMetadata & version, bool fsync_part_dir) const -{ - std::string path = fs::path(root_path) / part_dir / "txn_version.txt"; - try - { - { - /// TODO IDisk interface does not allow to open file with O_EXCL flag (for DiskLocal), - /// so we create empty file at first (expecting that createFile throws if file already exists) - /// and then overwrite it. 
- volume->getDisk()->createFile(path + ".tmp"); - auto buf = volume->getDisk()->writeFile(path + ".tmp", 256); - version.write(*buf); - buf->finalize(); - buf->sync(); - } - - SyncGuardPtr sync_guard; - if (fsync_part_dir) - sync_guard = volume->getDisk()->getDirectorySyncGuard(getRelativePath()); - volume->getDisk()->replaceFile(path + ".tmp", path); - - } - catch (...) - { - try - { - if (volume->getDisk()->exists(path + ".tmp")) - volume->getDisk()->removeFile(path + ".tmp"); - } - catch (...) - { - tryLogCurrentException("DataPartStorageOnDisk"); - } - - throw; - } -} - -void DataPartStorageOnDisk::appendCSNToVersionMetadata(const VersionMetadata & version, VersionMetadata::WhichCSN which_csn) const -{ - /// Small enough appends to file are usually atomic, - /// so we append new metadata instead of rewriting file to reduce number of fsyncs. - /// We don't need to do fsync when writing CSN, because in case of hard restart - /// we will be able to restore CSN from transaction log in Keeper. - - std::string version_file_name = fs::path(root_path) / part_dir / "txn_version.txt"; - DiskPtr disk = volume->getDisk(); - auto out = disk->writeFile(version_file_name, 256, WriteMode::Append); - version.writeCSN(*out, which_csn); - out->finalize(); -} - -void DataPartStorageOnDisk::appendRemovalTIDToVersionMetadata(const VersionMetadata & version, bool clear) const -{ - String version_file_name = fs::path(root_path) / part_dir / "txn_version.txt"; - DiskPtr disk = volume->getDisk(); - auto out = disk->writeFile(version_file_name, 256, WriteMode::Append); - version.writeRemovalTID(*out, clear); - out->finalize(); - - /// fsync is not required when we clearing removal TID, because after hard restart we will fix metadata - if (!clear) - out->sync(); -} - -void DataPartStorageOnDisk::writeDeleteOnDestroyMarker(Poco::Logger * log) const -{ - String marker_path = fs::path(root_path) / part_dir / "delete-on-destroy.txt"; - auto disk = volume->getDisk(); - try - { - volume->getDisk()->createFile(marker_path); - } - catch (Poco::Exception & e) - { - LOG_ERROR(log, "{} (while creating DeleteOnDestroy marker: {})", e.what(), backQuote(fullPath(disk, marker_path))); - } -} - -void DataPartStorageOnDisk::removeDeleteOnDestroyMarker() const -{ - std::string delete_on_destroy_file_name = fs::path(root_path) / part_dir / "delete-on-destroy.txt"; - volume->getDisk()->removeFileIfExists(delete_on_destroy_file_name); -} - -void DataPartStorageOnDisk::removeVersionMetadata() const -{ - std::string version_file_name = fs::path(root_path) / part_dir / "txn_version.txt"; - volume->getDisk()->removeFileIfExists(version_file_name); -} - String DataPartStorageOnDisk::getUniqueId() const { auto disk = volume->getDisk(); @@ -935,6 +787,34 @@ std::unique_ptr DataPartStorageOnDisk::writeFile( return volume->getDisk()->writeFile(fs::path(root_path) / part_dir / name, buf_size, WriteMode::Rewrite, settings); } +std::unique_ptr DataPartStorageOnDisk::writeTransactionFile(WriteMode mode) const +{ + return volume->getDisk()->writeFile(fs::path(root_path) / part_dir / "txn_version.txt", 256, mode); +} + +void DataPartStorageOnDisk::createFile(const String & name) +{ + executeOperation([&](auto & disk) { disk.createFile(fs::path(root_path) / part_dir / name); }); +} + +void DataPartStorageOnDisk::moveFile(const String & from_name, const String & to_name) +{ + executeOperation([&](auto & disk) + { + auto relative_path = fs::path(root_path) / part_dir; + disk.moveFile(relative_path / from_name, relative_path / to_name); + }); +} + 
+void DataPartStorageOnDisk::replaceFile(const String & from_name, const String & to_name) +{ + executeOperation([&](auto & disk) + { + auto relative_path = fs::path(root_path) / part_dir; + disk.replaceFile(relative_path / from_name, relative_path / to_name); + }); +} + void DataPartStorageOnDisk::removeFile(const String & name) { executeOperation([&](auto & disk) { disk.removeFile(fs::path(root_path) / part_dir / name); }); diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.h b/src/Storages/MergeTree/DataPartStorageOnDisk.h index 80946c37f79..fda901d0204 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.h +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.h @@ -78,15 +78,6 @@ public: ReservationPtr tryReserve(UInt64 bytes) const override; size_t getVolumeIndex(const IStoragePolicy &) const override; - void writeChecksums(const MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const override; - void writeColumns(const NamesAndTypesList & columns, const WriteSettings & settings) const override; - void writeVersionMetadata(const VersionMetadata & version, bool fsync_part_dir) const override; - void appendCSNToVersionMetadata(const VersionMetadata & version, VersionMetadata::WhichCSN which_csn) const override; - void appendRemovalTIDToVersionMetadata(const VersionMetadata & version, bool clear) const override; - void writeDeleteOnDestroyMarker(Poco::Logger * log) const override; - void removeDeleteOnDestroyMarker() const override; - void removeVersionMetadata() const override; - String getUniqueId() const override; bool shallParticipateInMerges(const IStoragePolicy &) const override; @@ -123,6 +114,12 @@ public: size_t buf_size, const WriteSettings & settings) override; + std::unique_ptr writeTransactionFile(WriteMode mode) const override; + + void createFile(const String & name) override; + void moveFile(const String & from_name, const String & to_name) override; + void replaceFile(const String & from_name, const String & to_name) override; + void removeFile(const String & name) override; void removeFileIfExists(const String & name) override; void removeRecursive() override; diff --git a/src/Storages/MergeTree/IDataPartStorage.h b/src/Storages/MergeTree/IDataPartStorage.h index 9e3fbe7d13b..3f73199305d 100644 --- a/src/Storages/MergeTree/IDataPartStorage.h +++ b/src/Storages/MergeTree/IDataPartStorage.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -142,6 +143,7 @@ public: virtual bool supportZeroCopyReplication() const { return false; } virtual bool supportParallelWrite() const = 0; virtual bool isBroken() const = 0; + /// TODO: remove or at least remove const. virtual void syncRevision(UInt64 revision) const = 0; virtual UInt64 getRevision() const = 0; @@ -160,17 +162,6 @@ public: virtual ReservationPtr tryReserve(UInt64 /*bytes*/) const { return nullptr; } virtual size_t getVolumeIndex(const IStoragePolicy &) const { return 0; } - /// Some methods which change data part internals possibly after creation. - /// Probably we should try to remove it later. 
- virtual void writeChecksums(const MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const = 0; - virtual void writeColumns(const NamesAndTypesList & columns, const WriteSettings & settings) const = 0; - virtual void writeVersionMetadata(const VersionMetadata & version, bool fsync_part_dir) const = 0; - virtual void appendCSNToVersionMetadata(const VersionMetadata & version, VersionMetadata::WhichCSN which_csn) const = 0; - virtual void appendRemovalTIDToVersionMetadata(const VersionMetadata & version, bool clear) const = 0; - virtual void writeDeleteOnDestroyMarker(Poco::Logger * log) const = 0; - virtual void removeDeleteOnDestroyMarker() const = 0; - virtual void removeVersionMetadata() const = 0; - /// A leak of abstraction. /// Return some uniq string for file. /// Required for distinguish different copies of the same part on remote FS. @@ -219,7 +210,16 @@ public: virtual void createDirectories() = 0; virtual void createProjection(const std::string & name) = 0; - virtual std::unique_ptr writeFile(const String & name, size_t buf_size, const WriteSettings & settings) = 0; + virtual std::unique_ptr writeFile( + const String & name, + size_t buf_size, + const WriteSettings & settings) = 0; + + virtual std::unique_ptr writeTransactionFile(WriteMode mode) const = 0; + + virtual void createFile(const String & name) = 0; + virtual void moveFile(const String & from_name, const String & to_name) = 0; + virtual void replaceFile(const String & from_name, const String & to_name) = 0; virtual void removeFile(const String & name) = 0; virtual void removeFileIfExists(const String & name) = 0; diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index fdaa4231e9c..f5348311468 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -854,6 +854,120 @@ void IMergeTreeDataPart::loadDefaultCompressionCodec() } } +template +void IMergeTreeDataPart::writeMetadata(const String & filename, const WriteSettings & settings, Writer && writer) +{ + auto & data_part_storage = getDataPartStorage(); + auto tmp_filename = filename + ".tmp"; + + try + { + { + auto out = data_part_storage.writeFile(tmp_filename, 4096, settings); + writer(*out); + out->finalize(); + } + + data_part_storage.moveFile(tmp_filename, filename); + } + catch (...) + { + try + { + if (data_part_storage.exists(tmp_filename)) + data_part_storage.removeFile(tmp_filename); + } + catch (...) 
+ { + tryLogCurrentException("DataPartStorageOnDisk"); + } + + throw; + } +} + +void IMergeTreeDataPart::writeChecksums(const MergeTreeDataPartChecksums & checksums_, const WriteSettings & settings) +{ + writeMetadata("checksums.txt", settings, [&checksums_](auto & buffer) + { + checksums_.write(buffer); + }); +} + +void IMergeTreeDataPart::writeColumns(const NamesAndTypesList & columns_, const WriteSettings & settings) +{ + writeMetadata("columns.txt", settings, [&columns_](auto & buffer) + { + columns_.writeText(buffer); + }); +} + +void IMergeTreeDataPart::writeVersionMetadata(const VersionMetadata & version_, bool fsync_part_dir) const +{ + auto & data_part_storage = const_cast(getDataPartStorage()); + static constexpr auto filename = "txn_version.txt"; + static constexpr auto tmp_filename = "txn_version.txt.tmp"; + + try + { + { + /// TODO IDisk interface does not allow to open file with O_EXCL flag (for DiskLocal), + /// so we create empty file at first (expecting that createFile throws if file already exists) + /// and then overwrite it. + data_part_storage.createFile(tmp_filename); + auto write_settings = storage.getContext()->getWriteSettings(); + auto buf = data_part_storage.writeFile(tmp_filename, 256, write_settings); + version_.write(*buf); + buf->finalize(); + buf->sync(); + } + + SyncGuardPtr sync_guard; + if (fsync_part_dir) + sync_guard = data_part_storage.getDirectorySyncGuard(); + data_part_storage.replaceFile(tmp_filename, filename); + } + catch (...) + { + try + { + if (data_part_storage.exists(tmp_filename)) + data_part_storage.removeFile(tmp_filename); + } + catch (...) + { + tryLogCurrentException("DataPartStorageOnDisk"); + } + + throw; + } +} + +void IMergeTreeDataPart::writeDeleteOnDestroyMarker() +{ + static constexpr auto marker_path = "delete-on-destroy.txt"; + + try + { + getDataPartStorage().createFile(marker_path); + } + catch (Poco::Exception & e) + { + LOG_ERROR(storage.log, "{} (while creating DeleteOnDestroy marker: {})", + e.what(), (fs::path(getDataPartStorage().getFullPath()) / marker_path).string()); + } +} + +void IMergeTreeDataPart::removeDeleteOnDestroyMarker() +{ + getDataPartStorage().removeFileIfExists("delete-on-destroy.txt"); +} + +void IMergeTreeDataPart::removeVersionMetadata() +{ + getDataPartStorage().removeFileIfExists("txn_version.txt"); +} + void IMergeTreeDataPart::appendFilesOfDefaultCompressionCodec(Strings & files) { files.push_back(DEFAULT_COMPRESSION_CODEC_FILE_NAME); @@ -980,7 +1094,7 @@ void IMergeTreeDataPart::loadChecksums(bool require) LOG_WARNING(storage.log, "Checksums for part {} not found. 
Will calculate them from data on disk.", name); checksums = checkDataPart(shared_from_this(), false); - getDataPartStorage().writeChecksums(checksums, {}); + writeChecksums(checksums, {}); bytes_on_disk = checksums.getTotalSizeOnDisk(); } @@ -993,8 +1107,6 @@ void IMergeTreeDataPart::appendFilesOfChecksums(Strings & files) void IMergeTreeDataPart::loadRowsCount() { - //String path = fs::path(getRelativePath()) / "count.txt"; - auto read_rows_count = [&]() { auto buf = metadata_manager->read("count.txt"); @@ -1186,7 +1298,7 @@ void IMergeTreeDataPart::loadColumns(bool require) if (columns.empty()) throw Exception("No columns in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART); - getDataPartStorage().writeColumns(loaded_columns, {}); + writeColumns(loaded_columns, {}); } else { @@ -1245,7 +1357,7 @@ void IMergeTreeDataPart::storeVersionMetadata(bool force) const throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Transactions are not supported for in-memory parts (table: {}, part: {})", storage.getStorageID().getNameForLogs(), name); - getDataPartStorage().writeVersionMetadata(version, storage.getSettings()->fsync_part_directory); + writeVersionMetadata(version, storage.getSettings()->fsync_part_directory); } void IMergeTreeDataPart::appendCSNToVersionMetadata(VersionMetadata::WhichCSN which_csn) const @@ -1257,7 +1369,14 @@ void IMergeTreeDataPart::appendCSNToVersionMetadata(VersionMetadata::WhichCSN wh chassert(!(which_csn == VersionMetadata::WhichCSN::REMOVAL && version.removal_csn == 0)); chassert(isStoredOnDisk()); - getDataPartStorage().appendCSNToVersionMetadata(version, which_csn); + /// Small enough appends to file are usually atomic, + /// so we append new metadata instead of rewriting file to reduce number of fsyncs. + /// We don't need to do fsync when writing CSN, because in case of hard restart + /// we will be able to restore CSN from transaction log in Keeper. + + auto out = getDataPartStorage().writeTransactionFile(WriteMode::Append); + version.writeCSN(*out, which_csn); + out->finalize(); } void IMergeTreeDataPart::appendRemovalTIDToVersionMetadata(bool clear) const @@ -1280,7 +1399,13 @@ void IMergeTreeDataPart::appendRemovalTIDToVersionMetadata(bool clear) const else LOG_TEST(storage.log, "Appending removal TID for {} (creation: {}, removal {})", name, version.creation_tid, version.removal_tid); - getDataPartStorage().appendRemovalTIDToVersionMetadata(version, clear); + auto out = getDataPartStorage().writeTransactionFile(WriteMode::Append); + version.writeRemovalTID(*out, clear); + out->finalize(); + + /// fsync is not required when we clearing removal TID, because after hard restart we will fix metadata + if (!clear) + out->sync(); } void IMergeTreeDataPart::loadVersionMetadata() const diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index fbe4f992de4..6515eb1a65c 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -1,5 +1,6 @@ #pragma once +#include "IO/WriteSettings.h" #include #include #include @@ -439,6 +440,12 @@ public: /// True if here is lightweight deleted mask file in part. 
bool hasLightweightDelete() const { return columns.contains(LightweightDeleteDescription::FILTER_COLUMN.name); } + void writeChecksums(const MergeTreeDataPartChecksums & checksums_, const WriteSettings & settings); + + void writeDeleteOnDestroyMarker(); + void removeDeleteOnDestroyMarker(); + void removeVersionMetadata(); + protected: /// Total size of all columns, calculated once in calcuateColumnSizesOnDisk @@ -560,6 +567,12 @@ private: /// any specifial compression. void loadDefaultCompressionCodec(); + void writeColumns(const NamesAndTypesList & columns_, const WriteSettings & settings); + void writeVersionMetadata(const VersionMetadata & version_, bool fsync_part_dir) const; + + template + void writeMetadata(const String & filename, const WriteSettings & settings, Writer && writer); + static void appendFilesOfDefaultCompressionCodec(Strings & files); /// Found column without specific compression and return codec diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 83f3a167fa7..04e5432f239 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1685,7 +1685,8 @@ scope_guard MergeTreeData::getTemporaryPartDirectoryHolder(const String & part_d MergeTreeData::MutableDataPartPtr MergeTreeData::preparePartForRemoval(const DataPartPtr & part) { - if (part->getState() != DataPartState::Deleting) + auto state = part->getState(); + if (state != DataPartState::Deleting && state != DataPartState::DeleteOnDestroy) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot remove part {}, because it has state: {}", part->name, magic_enum::enum_name(part->getState())); @@ -3666,7 +3667,7 @@ void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy) /// All other locks are taken in StorageReplicatedMergeTree lockSharedData(*part_copy); - original_active_part->getDataPartStorage().writeDeleteOnDestroyMarker(log); + preparePartForRemoval(original_active_part)->writeDeleteOnDestroyMarker(); return; } } @@ -3801,8 +3802,8 @@ static void loadPartAndFixMetadataImpl(MergeTreeData::MutableDataPartPtr part) { part->loadColumnsChecksumsIndexes(false, true); part->modification_time = part->getDataPartStorage().getLastModified().epochTime(); - part->getDataPartStorage().removeDeleteOnDestroyMarker(); - part->getDataPartStorage().removeVersionMetadata(); + part->removeDeleteOnDestroyMarker(); + part->removeVersionMetadata(); } void MergeTreeData::calculateColumnAndSecondaryIndexSizesImpl() diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 3b456ab5360..90340bb01a7 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1,4 +1,5 @@ #include "StorageMergeTree.h" +#include "Storages/MergeTree/IMergeTreeDataPart.h" #include @@ -1739,8 +1740,7 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, ContextPtr local_ for (auto & part : data_parts) { /// If the checksums file is not present, calculate the checksums and write them to disk. 
- String checksums_path = "checksums.txt"; - String tmp_checksums_path = "checksums.txt.tmp"; + static constexpr auto checksums_path = "checksums.txt"; if (part->isStoredOnDisk() && !part->getDataPartStorage().exists(checksums_path)) { try @@ -1748,7 +1748,8 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, ContextPtr local_ auto calculated_checksums = checkDataPart(part, false); calculated_checksums.checkEqual(part->checksums, true); - part->getDataPartStorage().writeChecksums(part->checksums, local_context->getWriteSettings()); + auto & part_mutable = const_cast(*part); + part_mutable.writeChecksums(part->checksums, local_context->getWriteSettings()); part->checkMetadata(); results.emplace_back(part->name, true, "Checksums recounted and written to disk."); From 7243e12ef88b6c762561e523ce8bdd8f1599988e Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 24 Oct 2022 07:41:12 +0000 Subject: [PATCH 042/112] Fix build --- contrib/libcxxabi-cmake/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libcxxabi-cmake/CMakeLists.txt b/contrib/libcxxabi-cmake/CMakeLists.txt index 221a18de6e5..a59452eee9a 100644 --- a/contrib/libcxxabi-cmake/CMakeLists.txt +++ b/contrib/libcxxabi-cmake/CMakeLists.txt @@ -9,7 +9,7 @@ set(SRCS "${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/cxa_noexception.cpp" +# "${LIBCXXABI_SOURCE_DIR}/src/cxa_noexception.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp" From 5579d139eba0cb533a2722eaa11371b4212e12c0 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 24 Oct 2022 08:37:52 +0000 Subject: [PATCH 043/112] Avoid crash with big int in prewhere --- .../MergeTree/MergeTreeBaseSelectProcessor.cpp | 4 ++-- .../02473_prewhere_with_bigint.reference | 0 .../0_stateless/02473_prewhere_with_bigint.sql | 15 +++++++++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/02473_prewhere_with_bigint.reference create mode 100644 tests/queries/0_stateless/02473_prewhere_with_bigint.sql diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index 475407a402b..710b8aef50c 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -628,12 +628,12 @@ Block MergeTreeBaseSelectProcessor::transformHeader( else { WhichDataType which(removeNullable(recursiveRemoveLowCardinality(prewhere_column.type))); - if (which.isInt() || which.isUInt()) + if (which.isNativeInt() || which.isNativeUInt()) prewhere_column.column = prewhere_column.type->createColumnConst(block.rows(), 1u)->convertToFullColumnIfConst(); else if (which.isFloat()) prewhere_column.column = prewhere_column.type->createColumnConst(block.rows(), 1.0f)->convertToFullColumnIfConst(); else - throw Exception("Illegal type " + prewhere_column.type->getName() + " of column for filter.", + throw Exception("Illegal type " + prewhere_column.type->getName() + " of column for filter", ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER); } } diff --git a/tests/queries/0_stateless/02473_prewhere_with_bigint.reference b/tests/queries/0_stateless/02473_prewhere_with_bigint.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/tests/queries/0_stateless/02473_prewhere_with_bigint.sql b/tests/queries/0_stateless/02473_prewhere_with_bigint.sql new file mode 100644 index 00000000000..852da729648 --- /dev/null +++ b/tests/queries/0_stateless/02473_prewhere_with_bigint.sql @@ -0,0 +1,15 @@ +CREATE TABLE prewhere_int128 (a Int128) ENGINE=MergeTree ORDER BY a; +SELECT a FROM prewhere_int128 WHERE a; -- { serverError 59 } +DROP TABLE prewhere_int128; + +CREATE TABLE prewhere_int256 (a Int256) ENGINE=MergeTree ORDER BY a; +SELECT a FROM prewhere_int256 WHERE a; -- { serverError 59 } +DROP TABLE prewhere_int256; + +CREATE TABLE prewhere_uint128 (a UInt128) ENGINE=MergeTree ORDER BY a; +SELECT a FROM prewhere_uint128 WHERE a; -- { serverError 59 } +DROP TABLE prewhere_uint128; + +CREATE TABLE prewhere_uint256 (a UInt256) ENGINE=MergeTree ORDER BY a; +SELECT a FROM prewhere_uint256 WHERE a; -- { serverError 59 } +DROP TABLE prewhere_uint256; From 8f00d0d1cc709733b2571b23cba6bead445fed72 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Mon, 24 Oct 2022 12:56:36 +0200 Subject: [PATCH 044/112] fix tidy --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 164ec8777de..ba7b0e963eb 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -253,17 +253,19 @@ ProcessorPtr ReadFromMergeTree::createSource( if (query_info.limit > 0 && query_info.limit < total_rows) total_rows = query_info.limit; - auto source = std::make_shared( - data, storage_snapshot, part.data_part, max_block_size, preferred_block_size_bytes, - preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache, prewhere_info, - actions_settings, reader_settings, virt_column_names, part.part_index_in_query, has_limit_below_one_block, std::move(extension)); - /// Actually it means that parallel reading from replicas enabled /// and we have to collaborate with initiator. /// In this case we won't set approximate rows, because it will be accounted multiple times. /// Also do not count amount of read rows if we read in order of sorting key, /// because we don't know actual amount of read rows in case when limit is set. - if (!extension.has_value() && !reader_settings.read_in_order) + bool set_rows_approx = !extension.has_value() && !reader_settings.read_in_order; + + auto source = std::make_shared( + data, storage_snapshot, part.data_part, max_block_size, preferred_block_size_bytes, + preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache, prewhere_info, + actions_settings, reader_settings, virt_column_names, part.part_index_in_query, has_limit_below_one_block, std::move(extension)); + + if (set_rows_approx) source -> addTotalRowsApprox(total_rows); return source; From 14e3bb6354f4acf4341731b67ba6a626752c69e3 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 24 Oct 2022 13:58:26 +0000 Subject: [PATCH 045/112] Another case. 
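The earlier fix stripped Nullable/LowCardinality only from the argument types before constant-folding the monotonicity transform; the return type handed to Base::executeImpl could still be wrapped in Nullable or LowCardinality, leaving the same kind of inconsistency for other key types. This change removes the wrappers from the return type as well and extends the regression test to UInt128 and DateTime primary-key columns.

A minimal illustration of the affected query shape, adapted from the test cases added below (the table and inserted data are just the test fixture, nothing beyond what the test itself sets up):

    create table tab (x DateTime) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2;
    insert into tab select number from numbers(4);
    set max_rows_to_read = 2; -- succeeds only if monotonicity analysis narrows the primary-key range
    SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x;

With the row limit in place, a regression in the monotonicity check surfaces as a read-limit failure rather than a wrong result.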
--- src/Functions/FunctionBinaryArithmetic.h | 6 ++-- ...461_mullable_pk_monotonicity_bug.reference | 32 +++++++++++++++++++ .../02461_mullable_pk_monotonicity_bug.sql | 32 ++++++++++++++++--- 3 files changed, 63 insertions(+), 7 deletions(-) diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index 3ffe054a439..1c1d0453230 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -1781,6 +1781,7 @@ public: { auto left_type = removeNullable(removeLowCardinality(left.type)); auto right_type = removeNullable(removeLowCardinality(right.type)); + auto ret_type = removeNullable(removeLowCardinality(return_type)); auto transform = [&](const Field & point) { @@ -1791,7 +1792,7 @@ public: /// This is a bit dangerous to call Base::executeImpl cause it ignores `use Default Implementation For XXX` flags. /// It was possible to check monotonicity for nullable right type which result to exception. /// Adding removeNullable above fixes the issue, but some other inconsistency may left. - auto col = Base::executeImpl(columns_with_constant, return_type, 1); + auto col = Base::executeImpl(columns_with_constant, ret_type, 1); Field point_transformed; col->get(0, point_transformed); return point_transformed; @@ -1822,6 +1823,7 @@ public: { auto left_type = removeNullable(removeLowCardinality(left.type)); auto right_type = removeNullable(removeLowCardinality(right.type)); + auto ret_type = removeNullable(removeLowCardinality(return_type)); auto transform = [&](const Field & point) { @@ -1829,7 +1831,7 @@ public: = {{left_type->createColumnConst(1, point), left_type, left.name}, {right_type->createColumnConst(1, (*right.column)[0]), right_type, right.name}}; - auto col = Base::executeImpl(columns_with_constant, return_type, 1); + auto col = Base::executeImpl(columns_with_constant, ret_type, 1); Field point_transformed; col->get(0, point_transformed); return point_transformed; diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference index 5ee6e6c67c4..29da9bc651b 100644 --- a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference @@ -30,3 +30,35 @@ 2 1 2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1970-01-01 00:00:01 +1970-01-01 00:00:02 +1970-01-01 00:00:01 +1970-01-01 00:00:02 +1970-01-01 00:00:01 +1970-01-01 00:00:02 +1970-01-01 00:00:01 +1970-01-01 00:00:02 +1970-01-01 00:00:01 +1970-01-01 00:00:02 +1970-01-01 00:00:01 +1970-01-01 00:00:02 +1970-01-01 00:00:01 +1970-01-01 00:00:02 +1970-01-01 00:00:01 +1970-01-01 00:00:02 diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql index ce190694514..af2d5b859b3 100644 --- a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql @@ -1,6 +1,5 @@ create table tab (x Nullable(UInt8)) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; insert into tab select number from numbers(4); - set allow_suspicious_low_cardinality_types=1; set max_rows_to_read = 2; @@ -14,9 +13,7 @@ SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; drop table tab; - set max_rows_to_read = 100; - 
create table tab (x LowCardinality(UInt8)) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; insert into tab select number from numbers(4); @@ -31,10 +28,35 @@ SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; drop table tab; - set max_rows_to_read = 100; - create table tab (x UInt128) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; insert into tab select number from numbers(4); +set max_rows_to_read = 2; +SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; + +set max_rows_to_read = 100; SELECT x + 1 FROM tab WHERE (x + 1::LowCardinality(UInt8)) <= -9223372036854775808 order by x; + +drop table tab; +create table tab (x DateTime) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select number from numbers(4); + +set max_rows_to_read = 2; +SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; + +SELECT x + 1 FROM tab WHERE (x + CAST('1', 'Nullable(UInt8)')) <= -2147483647 ORDER BY x ASC NULLS FIRST; From 128fdd785e772bcdd1c8b7b3c558363cf58a44d8 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 24 Oct 2022 14:44:22 +0000 Subject: [PATCH 046/112] better interface --- .../MergeTree/DataPartStorageOnDisk.cpp | 67 ++----------------- .../MergeTree/DataPartStorageOnDisk.h | 4 +- src/Storages/MergeTree/IDataPartStorage.h | 9 ++- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 59 +++++++++++++++- src/Storages/MergeTree/MergeTask.h | 2 +- .../MergeTree/MergeTreeDataWriter.cpp | 8 +-- src/Storages/MergeTree/MergeTreeDataWriter.h | 8 +-- 7 files changed, 81 insertions(+), 76 deletions(-) diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp index cdbd01efab4..a3df17c57de 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp @@ -1,3 +1,4 @@ +#include "Storages/MergeTree/IDataPartStorage.h" #include #include #include @@ -50,7 +51,12 @@ std::string DataPartStorageOnDisk::getFullRootPath() const return fs::path(volume->getDisk()->getPath()) / root_path / ""; } -MutableDataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) const +MutableDataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) +{ + return 
std::make_shared(volume, std::string(fs::path(root_path) / part_dir), name); +} + +DataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) const { return std::make_shared(volume, std::string(fs::path(root_path) / part_dir), name); } @@ -136,65 +142,6 @@ std::unique_ptr DataPartStorageOnDisk::readFile( return volume->getDisk()->readFile(fs::path(root_path) / part_dir / name, settings, read_hint, file_size); } -static std::unique_ptr openForReading(const DiskPtr & disk, const String & path) -{ - size_t file_size = disk->getFileSize(path); - return disk->readFile(path, ReadSettings().adjustBufferSize(file_size), file_size); -} - -void DataPartStorageOnDisk::loadVersionMetadata(VersionMetadata & version, Poco::Logger * log) const -{ - std::string version_file_name = fs::path(root_path) / part_dir / "txn_version.txt"; - String tmp_version_file_name = version_file_name + ".tmp"; - DiskPtr disk = volume->getDisk(); - - auto remove_tmp_file = [&]() - { - auto last_modified = disk->getLastModified(tmp_version_file_name); - auto buf = openForReading(disk, tmp_version_file_name); - String content; - readStringUntilEOF(content, *buf); - LOG_WARNING(log, "Found file {} that was last modified on {}, has size {} and the following content: {}", - tmp_version_file_name, last_modified.epochTime(), content.size(), content); - disk->removeFile(tmp_version_file_name); - }; - - if (disk->exists(version_file_name)) - { - auto buf = openForReading(disk, version_file_name); - version.read(*buf); - if (disk->exists(tmp_version_file_name)) - remove_tmp_file(); - return; - } - - /// Four (?) cases are possible: - /// 1. Part was created without transactions. - /// 2. Version metadata file was not renamed from *.tmp on part creation. - /// 3. Version metadata were written to *.tmp file, but hard restart happened before fsync. - /// 4. Fsyncs in storeVersionMetadata() work incorrectly. - - if (!disk->exists(tmp_version_file_name)) - { - /// Case 1. - /// We do not have version metadata and transactions history for old parts, - /// so let's consider that such parts were created by some ancient transaction - /// and were committed with some prehistoric CSN. - /// NOTE It might be Case 3, but version metadata file is written on part creation before other files, - /// so it's not Case 3 if part is not broken. - version.setCreationTID(Tx::PrehistoricTID, nullptr); - version.creation_csn = Tx::PrehistoricCSN; - return; - } - - /// Case 2. - /// Content of *.tmp file may be broken, just use fake TID. - /// Transaction was not committed if *.tmp file was not renamed, so we should complete rollback by removing part. 
- version.setCreationTID(Tx::DummyTID, nullptr); - version.creation_csn = Tx::RolledBackCSN; - remove_tmp_file(); -} - void DataPartStorageOnDisk::checkConsistency(const MergeTreeDataPartChecksums & checksums) const { checksums.checkSizes(volume->getDisk(), getRelativePath()); diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.h b/src/Storages/MergeTree/DataPartStorageOnDisk.h index fda901d0204..f93c6a235d6 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.h +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.h @@ -21,7 +21,8 @@ public: std::string getPartDirectory() const override { return part_dir; } std::string getFullRootPath() const override; - MutableDataPartStoragePtr getProjection(const std::string & name) const override; + MutableDataPartStoragePtr getProjection(const std::string & name) override; + DataPartStoragePtr getProjection(const std::string & name) const override; bool exists() const override; bool exists(const std::string & name) const override; @@ -41,7 +42,6 @@ public: std::optional read_hint, std::optional file_size) const override; - void loadVersionMetadata(VersionMetadata & version, Poco::Logger * log) const override; void checkConsistency(const MergeTreeDataPartChecksums & checksums) const override; void remove( diff --git a/src/Storages/MergeTree/IDataPartStorage.h b/src/Storages/MergeTree/IDataPartStorage.h index 3f73199305d..8f478b3c688 100644 --- a/src/Storages/MergeTree/IDataPartStorage.h +++ b/src/Storages/MergeTree/IDataPartStorage.h @@ -82,16 +82,19 @@ public: /// virtual std::string getRelativeRootPath() const = 0; /// Get a storage for projection. - virtual std::shared_ptr getProjection(const std::string & name) const = 0; + virtual std::shared_ptr getProjection(const std::string & name) = 0; + virtual std::shared_ptr getProjection(const std::string & name) const = 0; /// Part directory exists. virtual bool exists() const = 0; + /// File inside part directory exists. Specified path is relative to the part path. virtual bool exists(const std::string & name) const = 0; virtual bool isDirectory(const std::string & name) const = 0; /// Modification time for part directory. virtual Poco::Timestamp getLastModified() const = 0; + /// Iterate part directory. Iteration in subdirectory is not needed yet. virtual DataPartStorageIteratorPtr iterate() const = 0; @@ -108,7 +111,6 @@ public: std::optional read_hint, std::optional file_size) const = 0; - virtual void loadVersionMetadata(VersionMetadata & version, Poco::Logger * log) const = 0; virtual void checkConsistency(const MergeTreeDataPartChecksums & checksums) const = 0; struct ProjectionChecksums @@ -130,7 +132,8 @@ public: /// Get a name like 'prefix_partdir_tryN' which does not exist in a root dir. /// TODO: remove it. - virtual std::optional getRelativePathForPrefix(Poco::Logger * log, const String & prefix, bool detached, bool broken) const = 0; + virtual std::optional getRelativePathForPrefix( + Poco::Logger * log, const String & prefix, bool detached, bool broken) const = 0; /// Reset part directory, used for in-memory parts. /// TODO: remove it. 
diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index f5348311468..24fabc7cff1 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -904,9 +904,9 @@ void IMergeTreeDataPart::writeColumns(const NamesAndTypesList & columns_, const void IMergeTreeDataPart::writeVersionMetadata(const VersionMetadata & version_, bool fsync_part_dir) const { - auto & data_part_storage = const_cast(getDataPartStorage()); static constexpr auto filename = "txn_version.txt"; static constexpr auto tmp_filename = "txn_version.txt.tmp"; + auto & data_part_storage = const_cast(getDataPartStorage()); try { @@ -1408,10 +1408,65 @@ void IMergeTreeDataPart::appendRemovalTIDToVersionMetadata(bool clear) const out->sync(); } +static std::unique_ptr openForReading(const IDataPartStorage & part_storage, const String & filename) +{ + size_t file_size = part_storage.getFileSize(filename); + return part_storage.readFile(filename, ReadSettings().adjustBufferSize(file_size), file_size, file_size); +} + void IMergeTreeDataPart::loadVersionMetadata() const try { - getDataPartStorage().loadVersionMetadata(version, storage.log); + static constexpr auto version_file_name = "txn_version.txt"; + static constexpr auto tmp_version_file_name = "txn_version.txt.tmp"; + auto & data_part_storage = const_cast(getDataPartStorage()); + + auto remove_tmp_file = [&]() + { + auto last_modified = data_part_storage.getLastModified(); + auto buf = openForReading(data_part_storage, tmp_version_file_name); + + String content; + readStringUntilEOF(content, *buf); + LOG_WARNING(storage.log, "Found file {} that was last modified on {}, has size {} and the following content: {}", + tmp_version_file_name, last_modified.epochTime(), content.size(), content); + data_part_storage.removeFile(tmp_version_file_name); + }; + + if (data_part_storage.exists(version_file_name)) + { + auto buf = openForReading(data_part_storage, version_file_name); + version.read(*buf); + if (data_part_storage.exists(tmp_version_file_name)) + remove_tmp_file(); + return; + } + + /// Four (?) cases are possible: + /// 1. Part was created without transactions. + /// 2. Version metadata file was not renamed from *.tmp on part creation. + /// 3. Version metadata were written to *.tmp file, but hard restart happened before fsync. + /// 4. Fsyncs in storeVersionMetadata() work incorrectly. + + if (!data_part_storage.exists(tmp_version_file_name)) + { + /// Case 1. + /// We do not have version metadata and transactions history for old parts, + /// so let's consider that such parts were created by some ancient transaction + /// and were committed with some prehistoric CSN. + /// NOTE It might be Case 3, but version metadata file is written on part creation before other files, + /// so it's not Case 3 if part is not broken. + version.setCreationTID(Tx::PrehistoricTID, nullptr); + version.creation_csn = Tx::PrehistoricCSN; + return; + } + + /// Case 2. + /// Content of *.tmp file may be broken, just use fake TID. + /// Transaction was not committed if *.tmp file was not renamed, so we should complete rollback by removing part. 
+ version.setCreationTID(Tx::DummyTID, nullptr); + version.creation_csn = Tx::RolledBackCSN; + remove_tmp_file(); } catch (Exception & e) { diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index f21d542c7a0..6a29cdbb5ca 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -134,7 +134,7 @@ private: StorageMetadataPtr metadata_snapshot{nullptr}; FutureMergedMutatedPartPtr future_part{nullptr}; /// This will be either nullptr or new_data_part, so raw pointer is ok. - const IMergeTreeDataPart * parent_part{nullptr}; + IMergeTreeDataPart * parent_part{nullptr}; ContextPtr context{nullptr}; time_t time_of_merge{0}; ReservationSharedPtr space_reservation{nullptr}; diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 856a684d18d..55404324b75 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -483,7 +483,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( MergeTreeDataPartType part_type, const String & relative_path, bool is_temp, - const IMergeTreeDataPart * parent_part, + IMergeTreeDataPart * parent_part, const MergeTreeData & data, Poco::Logger * log, Block block, @@ -587,7 +587,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPart( Poco::Logger * log, Block block, const ProjectionDescription & projection, - const IMergeTreeDataPart * parent_part) + IMergeTreeDataPart * parent_part) { String part_name = projection.name; MergeTreeDataPartType part_type; @@ -623,7 +623,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempProjectionPart( Poco::Logger * log, Block block, const ProjectionDescription & projection, - const IMergeTreeDataPart * parent_part, + IMergeTreeDataPart * parent_part, size_t block_num) { String part_name = fmt::format("{}_{}", projection.name, block_num); @@ -658,7 +658,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeInMemoryProjectionP Poco::Logger * log, Block block, const ProjectionDescription & projection, - const IMergeTreeDataPart * parent_part) + IMergeTreeDataPart * parent_part) { return writeProjectionPartImpl( projection.name, diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.h b/src/Storages/MergeTree/MergeTreeDataWriter.h index 72ceb8b38e3..8e405016cde 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.h +++ b/src/Storages/MergeTree/MergeTreeDataWriter.h @@ -77,7 +77,7 @@ public: Poco::Logger * log, Block block, const ProjectionDescription & projection, - const IMergeTreeDataPart * parent_part); + IMergeTreeDataPart * parent_part); /// For mutation: MATERIALIZE PROJECTION. static TemporaryPart writeTempProjectionPart( @@ -85,7 +85,7 @@ public: Poco::Logger * log, Block block, const ProjectionDescription & projection, - const IMergeTreeDataPart * parent_part, + IMergeTreeDataPart * parent_part, size_t block_num); /// For WriteAheadLog AddPart. 
@@ -94,7 +94,7 @@ public: Poco::Logger * log, Block block, const ProjectionDescription & projection, - const IMergeTreeDataPart * parent_part); + IMergeTreeDataPart * parent_part); static Block mergeBlock( const Block & block, @@ -109,7 +109,7 @@ private: MergeTreeDataPartType part_type, const String & relative_path, bool is_temp, - const IMergeTreeDataPart * parent_part, + IMergeTreeDataPart * parent_part, const MergeTreeData & data, Poco::Logger * log, Block block, From 724f8335294603ed5fd1724f2f01d299c57f1175 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 24 Oct 2022 15:08:41 +0000 Subject: [PATCH 047/112] Use PREWHERE in tests --- tests/queries/0_stateless/02473_prewhere_with_bigint.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02473_prewhere_with_bigint.sql b/tests/queries/0_stateless/02473_prewhere_with_bigint.sql index 852da729648..f11a1a279de 100644 --- a/tests/queries/0_stateless/02473_prewhere_with_bigint.sql +++ b/tests/queries/0_stateless/02473_prewhere_with_bigint.sql @@ -1,15 +1,15 @@ CREATE TABLE prewhere_int128 (a Int128) ENGINE=MergeTree ORDER BY a; -SELECT a FROM prewhere_int128 WHERE a; -- { serverError 59 } +SELECT a FROM prewhere_int128 PREWHERE a; -- { serverError 59 } DROP TABLE prewhere_int128; CREATE TABLE prewhere_int256 (a Int256) ENGINE=MergeTree ORDER BY a; -SELECT a FROM prewhere_int256 WHERE a; -- { serverError 59 } +SELECT a FROM prewhere_int256 PREWHERE a; -- { serverError 59 } DROP TABLE prewhere_int256; CREATE TABLE prewhere_uint128 (a UInt128) ENGINE=MergeTree ORDER BY a; -SELECT a FROM prewhere_uint128 WHERE a; -- { serverError 59 } +SELECT a FROM prewhere_uint128 PREWHERE a; -- { serverError 59 } DROP TABLE prewhere_uint128; CREATE TABLE prewhere_uint256 (a UInt256) ENGINE=MergeTree ORDER BY a; -SELECT a FROM prewhere_uint256 WHERE a; -- { serverError 59 } +SELECT a FROM prewhere_uint256 PREWHERE a; -- { serverError 59 } DROP TABLE prewhere_uint256; From dcb7667808373cf760c2eed6ddba58e119e9d846 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 24 Oct 2022 15:08:51 +0000 Subject: [PATCH 048/112] share transaction for projections --- src/Storages/MergeTree/DataPartStorageOnDisk.cpp | 16 ++++++++++++++-- src/Storages/MergeTree/DataPartStorageOnDisk.h | 3 +++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp index a3df17c57de..f1690a2cc59 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp @@ -1,4 +1,3 @@ -#include "Storages/MergeTree/IDataPartStorage.h" #include #include #include @@ -31,6 +30,16 @@ DataPartStorageOnDisk::DataPartStorageOnDisk(VolumePtr volume_, std::string root { } +DataPartStorageOnDisk::DataPartStorageOnDisk( + VolumePtr volume_, std::string root_path_, std::string part_dir_, DiskTransactionPtr transaction_) + : volume(std::move(volume_)) + , root_path(std::move(root_path_)) + , part_dir(std::move(part_dir_)) + , transaction(std::move(transaction_)) + , has_shared_transaction(transaction != nullptr) +{ +} + std::string DataPartStorageOnDisk::getFullPath() const { return fs::path(volume->getDisk()->getPath()) / root_path / part_dir / ""; @@ -53,7 +62,7 @@ std::string DataPartStorageOnDisk::getFullRootPath() const MutableDataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) { - return std::make_shared(volume, std::string(fs::path(root_path) / part_dir), name); + 
return std::shared_ptr(new DataPartStorageOnDisk(volume, std::string(fs::path(root_path) / part_dir), name, transaction)); } DataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) const @@ -822,6 +831,9 @@ void DataPartStorageOnDisk::commitTransaction() if (!transaction) throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no uncommitted transaction"); + if (has_shared_transaction) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot commit shared transaction"); + transaction->commit(); transaction.reset(); } diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.h b/src/Storages/MergeTree/DataPartStorageOnDisk.h index f93c6a235d6..b5030f484ae 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.h +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.h @@ -145,6 +145,9 @@ private: std::string root_path; std::string part_dir; DiskTransactionPtr transaction; + bool has_shared_transaction = false; + + DataPartStorageOnDisk(VolumePtr volume_, std::string root_path_, std::string part_dir_, DiskTransactionPtr transaction_); template void executeOperation(Op && op); From f865b28da496efd8d85c77812b2eacb1c17056ee Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 24 Oct 2022 22:38:53 +0000 Subject: [PATCH 049/112] fix drop of MergeTree tables --- src/Storages/MergeTree/DataPartStorageOnDisk.cpp | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 2 +- src/Storages/MergeTree/MergeTreeData.cpp | 8 ++++++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp index f1690a2cc59..250ee4792fc 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp @@ -705,7 +705,7 @@ void DataPartStorageOnDisk::changeRootPath(const std::string & from_root, const --prefix_size; if (prefix_size > root_path.size() - || std::string_view(from_root).substr(0, prefix_size) != std::string_view(root_path).substr(0, prefix_size)) + || std::string_view(from_root).substr(0, prefix_size) != std::string_view(root_path).substr(0, prefix_size)) throw Exception( ErrorCodes::LOGICAL_ERROR, "Cannot change part root to {} because it is not a prefix of current root {}", diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 24fabc7cff1..e9b6a28d8a1 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1562,6 +1562,7 @@ try relative_path = parent_part->getDataPartStorage().getRelativePath(); } + auto old_projection_root_path = getDataPartStorage().getRelativePath(); auto to = fs::path(relative_path) / new_relative_path; metadata_manager->deleteAll(true); @@ -1569,7 +1570,6 @@ try getDataPartStorage().rename(to.parent_path(), to.filename(), storage.log, remove_new_dir_if_exists, fsync_dir); metadata_manager->updateAll(true); - auto old_projection_root_path = getDataPartStorage().getRelativePath(); auto new_projection_root_path = to.string(); for (const auto & [_, part] : projection_parts) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 04e5432f239..47fcdfa6907 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2172,7 +2172,12 @@ void MergeTreeData::dropAllData() auto lock = lockParts(); - DataPartsVector all_parts(data_parts_by_info.begin(), data_parts_by_info.end()); + DataPartsVector all_parts; + for (auto it = 
data_parts_by_info.begin(); it != data_parts_by_info.end(); ++it) + { + modifyPartState(it, DataPartState::Deleting); + all_parts.push_back(*it); + } { std::lock_guard wal_lock(write_ahead_log_mutex); @@ -2185,7 +2190,6 @@ void MergeTreeData::dropAllData() if (!getStorageID().hasUUID()) getContext()->dropCaches(); - /// Removing of each data part before recursive removal of directory is to speed-up removal, because there will be less number of syscalls. NameSet part_names_failed; try From cbfa887e52546a94a852c71d760a2293bba26282 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 24 Oct 2022 23:51:57 +0000 Subject: [PATCH 050/112] fix merge of projections --- src/Storages/MergeTree/MergeTask.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 27a71345a5e..98f05ade18d 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -140,9 +140,9 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() local_single_disk_volume, global_ctx->data->relative_data_path, local_tmp_part_basename); - } - data_part_storage->beginTransaction(); + data_part_storage->beginTransaction(); + } if (data_part_storage->exists()) throw Exception("Directory " + data_part_storage->getFullPath() + " already exists", ErrorCodes::DIRECTORY_ALREADY_EXISTS); From af0c54e0557ff0867c01806b4999d038d6b6e9a4 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 25 Oct 2022 11:13:41 +0000 Subject: [PATCH 051/112] Small fix --- src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp | 4 ++-- tests/queries/0_stateless/02473_prewhere_with_bigint.sql | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index 710b8aef50c..93b314cfd3f 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -633,8 +633,8 @@ Block MergeTreeBaseSelectProcessor::transformHeader( else if (which.isFloat()) prewhere_column.column = prewhere_column.type->createColumnConst(block.rows(), 1.0f)->convertToFullColumnIfConst(); else - throw Exception("Illegal type " + prewhere_column.type->getName() + " of column for filter", - ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER); + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER, "Illegal type {} of column for filter", prewhere_column.type->getName()); } } diff --git a/tests/queries/0_stateless/02473_prewhere_with_bigint.sql b/tests/queries/0_stateless/02473_prewhere_with_bigint.sql index f11a1a279de..29c6f0da2a1 100644 --- a/tests/queries/0_stateless/02473_prewhere_with_bigint.sql +++ b/tests/queries/0_stateless/02473_prewhere_with_bigint.sql @@ -1,15 +1,24 @@ +DROP TABLE IF EXISTS prewhere_int128; +DROP TABLE IF EXISTS prewhere_int256; +DROP TABLE IF EXISTS prewhere_uint128; +DROP TABLE IF EXISTS prewhere_uint256; + CREATE TABLE prewhere_int128 (a Int128) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_int128 VALUES (1); SELECT a FROM prewhere_int128 PREWHERE a; -- { serverError 59 } DROP TABLE prewhere_int128; CREATE TABLE prewhere_int256 (a Int256) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_int256 VALUES (1); SELECT a FROM prewhere_int256 PREWHERE a; -- { serverError 59 } DROP TABLE prewhere_int256; CREATE TABLE prewhere_uint128 (a UInt128) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_uint128 VALUES (1); SELECT a FROM 
prewhere_uint128 PREWHERE a; -- { serverError 59 } DROP TABLE prewhere_uint128; CREATE TABLE prewhere_uint256 (a UInt256) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_uint256 VALUES (1); SELECT a FROM prewhere_uint256 PREWHERE a; -- { serverError 59 } DROP TABLE prewhere_uint256; From 40ac6fda7420073c1ec717e0ebab199195c98c78 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 25 Oct 2022 13:43:14 +0000 Subject: [PATCH 052/112] Fixing test. --- ...461_mullable_pk_monotonicity_bug.reference | 32 +++++++++---------- .../02461_mullable_pk_monotonicity_bug.sql | 18 +++++------ 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference index 29da9bc651b..c0d3de1806a 100644 --- a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference @@ -46,19 +46,19 @@ 2 1 2 -1970-01-01 00:00:01 -1970-01-01 00:00:02 -1970-01-01 00:00:01 -1970-01-01 00:00:02 -1970-01-01 00:00:01 -1970-01-01 00:00:02 -1970-01-01 00:00:01 -1970-01-01 00:00:02 -1970-01-01 00:00:01 -1970-01-01 00:00:02 -1970-01-01 00:00:01 -1970-01-01 00:00:02 -1970-01-01 00:00:01 -1970-01-01 00:00:02 -1970-01-01 00:00:01 -1970-01-01 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql index af2d5b859b3..75c8cb2b7e7 100644 --- a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql @@ -47,16 +47,16 @@ SELECT x + 1 FROM tab WHERE (x + 1::LowCardinality(UInt8)) <= -92233720368547758 drop table tab; create table tab (x DateTime) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; -insert into tab select number from numbers(4); +insert into tab select toDateTime('2022-02-02') + number from numbers(4); set max_rows_to_read = 2; -SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; -SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; -SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; -SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; -SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; -SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; -SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; -SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1) <= toDateTime('2022-02-02') + 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= toDateTime('2022-02-02') + 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= toDateTime('2022-02-02') + 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 
toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= toDateTime('2022-02-02') + 2 order by x; SELECT x + 1 FROM tab WHERE (x + CAST('1', 'Nullable(UInt8)')) <= -2147483647 ORDER BY x ASC NULLS FIRST; From bfb5e6eae4c9a97fa7bffdb5445c33bad97e611d Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 25 Oct 2022 13:58:29 +0000 Subject: [PATCH 053/112] Fix logical error for invalid prewhere type in StorageMerge. --- src/Storages/StorageMerge.cpp | 10 ++++++++-- .../0_stateless/01902_table_function_merge_db_repr.sql | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 9891340a0d0..c68e9103704 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -520,6 +520,8 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( modified_select.setFinal(); } + modified_select.replaceDatabaseAndTable(database_name, table_name); + auto storage_stage = storage->getQueryProcessingStage(modified_context, QueryProcessingStage::Complete, storage_snapshot, modified_query_info); if (processed_stage <= storage_stage) @@ -545,6 +547,12 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( } else { + { + /// Analyze query to check that types are valid (e.g. in PREWHERE). + InterpreterSelectQuery interpreter + (modified_query_info.query, modified_context, SelectQueryOptions(processed_stage).ignoreProjections()); + } + storage->read( plan, real_column_names, @@ -569,8 +577,6 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( } else if (processed_stage > storage_stage) { - modified_select.replaceDatabaseAndTable(database_name, table_name); - /// Maximum permissible parallelism is streams_num modified_context->setSetting("max_threads", streams_num); modified_context->setSetting("max_streams_to_max_threads_ratio", 1); diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql index 460ce16ccad..ee6f052d694 100644 --- a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql +++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql @@ -65,7 +65,7 @@ SELECT _database, _table, n FROM merge(currentDatabase(), '^t') ORDER BY _databa --fuzzed LOGICAL_ERROR CREATE TABLE 01902_db.t4 (n Date) ENGINE=MergeTree ORDER BY n; INSERT INTO 01902_db.t4 SELECT * FROM numbers(10); -SELECT NULL FROM 01902_db.t_merge WHERE n ORDER BY _table DESC; +SELECT NULL FROM 01902_db.t_merge WHERE n ORDER BY _table DESC; -- {serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER} DROP DATABASE 01902_db; DROP DATABASE 01902_db1; From 728342ec7c57b8567aa4b93cce8bda7f03edd32a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 25 Oct 2022 21:41:08 +0000 Subject: [PATCH 054/112] Un-inline stuff --- .../Algorithms/AggregatingSortedAlgorithm.cpp | 33 +++++++++++++++++++ .../Algorithms/AggregatingSortedAlgorithm.h | 31 +++-------------- 2 files changed, 38 insertions(+), 26 deletions(-) diff --git a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp index 7a0847bcbc5..db08f3ffbd3 100644 --- a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp @@ -125,6 +125,39 @@ static void postprocessChunk(Chunk & chunk, 
const AggregatingSortedAlgorithm::Co } +AggregatingSortedAlgorithm::SimpleAggregateDescription::SimpleAggregateDescription( + AggregateFunctionPtr function_, const size_t column_number_, + DataTypePtr nested_type_, DataTypePtr real_type_) + : function(std::move(function_)), column_number(column_number_) + , nested_type(std::move(nested_type_)), real_type(std::move(real_type_)) +{ + add_function = function->getAddressOfAddFunction(); + state.reset(function->sizeOfData(), function->alignOfData()); +} + +void AggregatingSortedAlgorithm::SimpleAggregateDescription::createState() +{ + if (created) + return; + function->create(state.data()); + created = true; +} + +void AggregatingSortedAlgorithm::SimpleAggregateDescription::destroyState() +{ + if (!created) + return; + function->destroy(state.data()); + created = false; +} + +/// Explicitly destroy aggregation state if the stream is terminated +AggregatingSortedAlgorithm::SimpleAggregateDescription::~SimpleAggregateDescription() +{ + destroyState(); +} + + AggregatingSortedAlgorithm::AggregatingMergedData::AggregatingMergedData( MutableColumns columns_, UInt64 max_block_size_, ColumnsDefinition & def_) : MergedData(std::move(columns_), false, max_block_size_), def(def_) diff --git a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h index 0ffbd1262d3..d670242ed81 100644 --- a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h @@ -43,36 +43,15 @@ public: bool created = false; SimpleAggregateDescription( - AggregateFunctionPtr function_, const size_t column_number_, - DataTypePtr nested_type_, DataTypePtr real_type_) - : function(std::move(function_)), column_number(column_number_) - , nested_type(std::move(nested_type_)), real_type(std::move(real_type_)) - { - add_function = function->getAddressOfAddFunction(); - state.reset(function->sizeOfData(), function->alignOfData()); - } + AggregateFunctionPtr function_, const size_t column_number_, + DataTypePtr nested_type_, DataTypePtr real_type_); - void createState() - { - if (created) - return; - function->create(state.data()); - created = true; - } + void createState(); - void destroyState() - { - if (!created) - return; - function->destroy(state.data()); - created = false; - } + void destroyState(); /// Explicitly destroy aggregation state if the stream is terminated - ~SimpleAggregateDescription() - { - destroyState(); - } + ~SimpleAggregateDescription(); SimpleAggregateDescription() = default; SimpleAggregateDescription(SimpleAggregateDescription &&) = default; From c4d4f2dbedde1802a880fa1d2783ffd8fca20f2c Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 25 Oct 2022 22:14:06 +0000 Subject: [PATCH 055/112] better interface --- src/Disks/IStoragePolicy.cpp | 29 +++++++++++++++++++ src/Disks/IStoragePolicy.h | 10 +++++-- src/Disks/StoragePolicy.cpp | 16 ++-------- src/Disks/StoragePolicy.h | 5 +--- .../MergeTree/DataPartStorageOnDisk.cpp | 25 ++-------------- .../MergeTree/DataPartStorageOnDisk.h | 7 ----- src/Storages/MergeTree/DataPartsExchange.cpp | 1 + src/Storages/MergeTree/IDataPartStorage.h | 9 +----- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 3 +- .../MergeTree/MergeFromLogEntryTask.cpp | 4 ++- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- .../MergeTree/MergeTreePartsMover.cpp | 21 ++++++++------ src/Storages/StorageMergeTree.cpp | 4 ++- 13 files changed, 66 insertions(+), 70 deletions(-) diff --git 
a/src/Disks/IStoragePolicy.cpp b/src/Disks/IStoragePolicy.cpp index 2ba6df4be8f..c843ee11563 100644 --- a/src/Disks/IStoragePolicy.cpp +++ b/src/Disks/IStoragePolicy.cpp @@ -31,4 +31,33 @@ VolumePtr IStoragePolicy::getVolumeByName(const String & volume_name) const return volume; } +size_t IStoragePolicy::getVolumeIndexByDiskName(const String & disk_name) const +{ + auto index = tryGetVolumeIndexByDiskName(disk_name); + if (!index) + throw Exception(ErrorCodes::UNKNOWN_DISK, + "No disk {} in policy {}", backQuote(disk_name), backQuote(getName())); + + return *index; +} + +VolumePtr IStoragePolicy::tryGetVolumeByDiskName(const String & disk_name) const +{ + auto index = tryGetVolumeIndexByDiskName(disk_name); + if (!index) + return nullptr; + + return getVolume(*index); +} + +VolumePtr IStoragePolicy::getVolumeByDiskName(const String & disk_name) const +{ + auto volume = tryGetVolumeByDiskName(disk_name); + if (!volume) + throw Exception(ErrorCodes::UNKNOWN_DISK, + "No disk {} in policy {}", backQuote(disk_name), backQuote(getName())); + + return volume; +} + } diff --git a/src/Disks/IStoragePolicy.h b/src/Disks/IStoragePolicy.h index 8d14a26691b..a6a5fe5f692 100644 --- a/src/Disks/IStoragePolicy.h +++ b/src/Disks/IStoragePolicy.h @@ -4,6 +4,7 @@ #include #include +#include #include namespace DB @@ -55,12 +56,15 @@ public: /// Get volume by index. virtual VolumePtr getVolume(size_t index) const = 0; virtual VolumePtr tryGetVolumeByName(const String & volume_name) const = 0; - virtual VolumePtr tryGetVolumeByDisk(const DiskPtr & disk_ptr) const = 0; VolumePtr getVolumeByName(const String & volume_name) const; /// Checks if storage policy can be replaced by another one. virtual void checkCompatibleWith(const StoragePolicyPtr & new_storage_policy) const = 0; - /// Find volume index, which contains disk - virtual size_t getVolumeIndexByDisk(const DiskPtr & disk_ptr) const = 0; + /// Finds a volume index, which contains disk + virtual std::optional tryGetVolumeIndexByDiskName(const String & disk_name) const = 0; + size_t getVolumeIndexByDiskName(const String & disk_name) const; + /// Finds a volume which contains a specified disk. 
+ VolumePtr tryGetVolumeByDiskName(const String & disk_name) const; + VolumePtr getVolumeByDiskName(const String & disk_name) const; /// Check if we have any volume with stopped merges virtual bool hasAnyVolumeWithDisabledMerges() const = 0; virtual bool containsVolume(const String & volume_name) const = 0; diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index 10513c6beae..1e5b5825b8c 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -311,22 +311,12 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol } -size_t StoragePolicy::getVolumeIndexByDisk(const DiskPtr & disk_ptr) const +std::optional StoragePolicy::tryGetVolumeIndexByDiskName(const String & disk_name) const { - auto it = volume_index_by_disk_name.find(disk_ptr->getName()); + auto it = volume_index_by_disk_name.find(disk_name); if (it != volume_index_by_disk_name.end()) return it->second; - else - throw Exception("No disk " + backQuote(disk_ptr->getName()) + " in policy " + backQuote(name), ErrorCodes::UNKNOWN_DISK); -} - - -VolumePtr StoragePolicy::tryGetVolumeByDisk(const DiskPtr & disk_ptr) const -{ - auto it = volume_index_by_disk_name.find(disk_ptr->getName()); - if (it == volume_index_by_disk_name.end()) - return nullptr; - return getVolume(it->second); + return {}; } diff --git a/src/Disks/StoragePolicy.h b/src/Disks/StoragePolicy.h index fd0169a6ebe..9631f1c2e52 100644 --- a/src/Disks/StoragePolicy.h +++ b/src/Disks/StoragePolicy.h @@ -68,7 +68,7 @@ public: ReservationPtr reserve(UInt64 bytes, size_t min_volume_index) const override; /// Find volume index, which contains disk - size_t getVolumeIndexByDisk(const DiskPtr & disk_ptr) const override; + std::optional tryGetVolumeIndexByDiskName(const String & disk_name) const override; /// Reserves 0 bytes on disk with max available space /// Do not use this function when it is possible to predict size. @@ -85,9 +85,6 @@ public: VolumePtr tryGetVolumeByName(const String & volume_name) const override; - /// Finds a volume which contains a specified disk. - VolumePtr tryGetVolumeByDisk(const DiskPtr & disk_ptr) const override; - /// Checks if storage policy can be replaced by another one. 
void checkCompatibleWith(const StoragePolicyPtr & new_storage_policy) const override; diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp index 250ee4792fc..7b36a9873e4 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -129,6 +128,7 @@ static UInt64 calculateTotalSizeOnDiskImpl(const DiskPtr & disk, const String & { if (disk->isFile(from)) return disk->getFileSize(from); + std::vector files; disk->listFiles(from, files); UInt64 res = 0; @@ -490,11 +490,6 @@ std::string DataPartStorageOnDisk::getDiskPath() const return volume->getDisk()->getPath(); } -DataPartStorageOnDisk::DisksSet::const_iterator DataPartStorageOnDisk::isStoredOnDisk(const DisksSet & disks) const -{ - return disks.find(volume->getDisk()); -} - ReservationPtr DataPartStorageOnDisk::reserve(UInt64 bytes) const { auto res = volume->reserve(bytes); @@ -509,11 +504,6 @@ ReservationPtr DataPartStorageOnDisk::tryReserve(UInt64 bytes) const return volume->reserve(bytes); } -size_t DataPartStorageOnDisk::getVolumeIndex(const IStoragePolicy & storage_policy) const -{ - return storage_policy.getVolumeIndexByDisk(volume->getDisk()); -} - String DataPartStorageOnDisk::getUniqueId() const { auto disk = volume->getDisk(); @@ -523,16 +513,6 @@ String DataPartStorageOnDisk::getUniqueId() const return disk->getUniqueId(fs::path(getRelativePath()) / "checksums.txt"); } -bool DataPartStorageOnDisk::shallParticipateInMerges(const IStoragePolicy & storage_policy) const -{ - /// `IMergeTreeDataPart::volume` describes space where current part belongs, and holds - /// `SingleDiskVolume` object which does not contain up-to-date settings of corresponding volume. - /// Therefore we shall obtain volume from storage policy. - auto volume_ptr = storage_policy.getVolume(storage_policy.getVolumeIndexByDisk(volume->getDisk())); - - return !volume_ptr->areMergesAvoided(); -} - void DataPartStorageOnDisk::backup( const MergeTreeDataPartChecksums & checksums, const NameSet & files_without_checksums, @@ -821,7 +801,8 @@ void DataPartStorageOnDisk::createProjection(const std::string & name) void DataPartStorageOnDisk::beginTransaction() { if (transaction) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Uncommitted transaction already exists"); + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Uncommitted {}transaction already exists", has_shared_transaction ? 
"shared " : ""); transaction = volume->getDisk()->createTransaction(); } diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.h b/src/Storages/MergeTree/DataPartStorageOnDisk.h index b5030f484ae..bea1596e1f7 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.h +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.h @@ -71,17 +71,10 @@ public: UInt64 getRevision() const override; std::unordered_map getSerializedMetadata(const std::vector & paths) const override; std::string getDiskPath() const override; - - DisksSet::const_iterator isStoredOnDisk(const DisksSet & disks) const override; - ReservationPtr reserve(UInt64 bytes) const override; ReservationPtr tryReserve(UInt64 bytes) const override; - size_t getVolumeIndex(const IStoragePolicy &) const override; - String getUniqueId() const override; - bool shallParticipateInMerges(const IStoragePolicy &) const override; - void backup( const MergeTreeDataPartChecksums & checksums, const NameSet & files_without_checksums, diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 88a745820e9..afef354a2e6 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -152,6 +152,7 @@ void Service::processQuery(const HTMLForm & params, ReadBuffer & /*body*/, Write UInt64 revision = parse(params.get("disk_revision", "0")); if (revision) part->getDataPartStorage().syncRevision(revision); + revision = part->getDataPartStorage().getRevision(); if (revision) response.addCookie({"disk_revision", toString(revision)}); diff --git a/src/Storages/MergeTree/IDataPartStorage.h b/src/Storages/MergeTree/IDataPartStorage.h index 8f478b3c688..0e4ebe68157 100644 --- a/src/Storages/MergeTree/IDataPartStorage.h +++ b/src/Storages/MergeTree/IDataPartStorage.h @@ -150,29 +150,22 @@ public: /// TODO: remove or at least remove const. virtual void syncRevision(UInt64 revision) const = 0; virtual UInt64 getRevision() const = 0; + virtual std::unordered_map getSerializedMetadata(const std::vector & paths) const = 0; /// Get a path for internal disk if relevant. It is used mainly for logging. virtual std::string getDiskPath() const = 0; - /// Check if data part is stored on one of the specified disk in set. - using DisksSet = std::unordered_set; - virtual DisksSet::const_iterator isStoredOnDisk(const DisksSet & disks) const { return disks.end(); } - /// Reserve space on the same disk. /// Probably we should try to remove it later. /// TODO: remove constness virtual ReservationPtr reserve(UInt64 /*bytes*/) const { return nullptr; } virtual ReservationPtr tryReserve(UInt64 /*bytes*/) const { return nullptr; } - virtual size_t getVolumeIndex(const IStoragePolicy &) const { return 0; } /// A leak of abstraction. /// Return some uniq string for file. /// Required for distinguish different copies of the same part on remote FS. virtual String getUniqueId() const = 0; - /// A leak of abstraction - virtual bool shallParticipateInMerges(const IStoragePolicy &) const { return true; } - /// Create a backup of a data part. /// This method adds a new entry to backup_entries. /// Also creates a new tmp_dir for internal disk (if disk is mentioned the first time). 
diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index e9b6a28d8a1..368af55aa15 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1545,7 +1545,8 @@ void IMergeTreeDataPart::appendFilesOfColumns(Strings & files) bool IMergeTreeDataPart::shallParticipateInMerges(const StoragePolicyPtr & storage_policy) const { - return getDataPartStorage().shallParticipateInMerges(*storage_policy); + auto disk_name = getDataPartStorage().getDiskName(); + return !storage_policy->getVolumeByDiskName(disk_name)->areMergesAvoided(); } void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index f3b81a4793e..9a9b8a4a6bb 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -160,7 +160,9 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() for (auto & part_ptr : parts) { ttl_infos.update(part_ptr->ttl_infos); - max_volume_index = std::max(max_volume_index, part_ptr->getDataPartStorage().getVolumeIndex(*storage.getStoragePolicy())); + auto disk_name = part_ptr->getDataPartStorage().getDiskName(); + size_t volume_index = storage.getStoragePolicy()->getVolumeIndexByDiskName(disk_name); + max_volume_index = std::max(max_volume_index, volume_index); } /// It will live until the whole task is being destroyed diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 9dc8e147e76..bb589161b57 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -5059,7 +5059,7 @@ bool MergeTreeData::shouldPerformTTLMoveOnInsert(const SpacePtr & move_destinati if (move_destination->isDisk()) { auto disk = std::static_pointer_cast(move_destination); - if (auto volume = getStoragePolicy()->tryGetVolumeByDisk(disk)) + if (auto volume = getStoragePolicy()->tryGetVolumeByDiskName(disk->getName())) return volume->perform_ttl_move_on_insert; } return false; diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index bd277ca4374..b618b068769 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -100,7 +100,6 @@ bool MergeTreePartsMover::selectPartsForMove( return false; std::unordered_map need_to_move; - std::unordered_set need_to_move_disks; const auto policy = data->getStoragePolicy(); const auto & volumes = policy->getVolumes(); @@ -115,10 +114,7 @@ bool MergeTreePartsMover::selectPartsForMove( UInt64 unreserved_space = disk->getUnreservedSpace(); if (unreserved_space < required_maximum_available_space && !disk->isBroken()) - { need_to_move.emplace(disk, required_maximum_available_space - unreserved_space); - need_to_move_disks.emplace(disk); - } } } } @@ -140,8 +136,16 @@ bool MergeTreePartsMover::selectPartsForMove( auto ttl_entry = selectTTLDescriptionForTTLInfos(metadata_snapshot->getMoveTTLs(), part->ttl_infos.moves_ttl, time_of_move, true); auto to_insert = need_to_move.end(); - if (auto disk_it = part->getDataPartStorage().isStoredOnDisk(need_to_move_disks); disk_it != need_to_move_disks.end()) - to_insert = need_to_move.find(*disk_it); + auto part_disk_name = part->getDataPartStorage().getDiskName(); + + for (auto it = need_to_move.begin(); it != 
need_to_move.end(); ++it) + { + if (it->first->getName() == part_disk_name) + { + to_insert = it; + break; + } + } ReservationPtr reservation; if (ttl_entry) @@ -158,9 +162,8 @@ bool MergeTreePartsMover::selectPartsForMove( /// In order to not over-move, we need to "release" required space on this disk, /// possibly to zero. if (to_insert != need_to_move.end()) - { to_insert->second.decreaseRequiredSizeAndRemoveRedundantParts(part->getBytesOnDisk()); - } + ++parts_to_move_by_ttl_rules; parts_to_move_total_size_bytes += part->getBytesOnDisk(); } @@ -173,7 +176,7 @@ bool MergeTreePartsMover::selectPartsForMove( for (auto && move : need_to_move) { - auto min_volume_index = policy->getVolumeIndexByDisk(move.first) + 1; + auto min_volume_index = policy->getVolumeIndexByDiskName(move.first->getName()) + 1; for (auto && part : move.second.getAccumulatedParts()) { auto reservation = policy->reserve(part->getBytesOnDisk(), min_volume_index); diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index ee17dca567f..a450a9ef3a9 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -389,7 +389,9 @@ CurrentlyMergingPartsTagger::CurrentlyMergingPartsTagger( for (auto & part_ptr : future_part->parts) { ttl_infos.update(part_ptr->ttl_infos); - max_volume_index = std::max(max_volume_index, part_ptr->getDataPartStorage().getVolumeIndex(*storage.getStoragePolicy())); + auto disk_name = part_ptr->getDataPartStorage().getDiskName(); + size_t volume_index = storage.getStoragePolicy()->getVolumeIndexByDiskName(disk_name); + max_volume_index = std::max(max_volume_index, volume_index); } reserved_space = storage.balancedReservation( From d8b09b430f1e956d05d6a60b659b221c45dbbd06 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 25 Oct 2022 22:27:11 +0000 Subject: [PATCH 056/112] add comments --- src/Storages/MergeTree/IDataPartStorage.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/Storages/MergeTree/IDataPartStorage.h b/src/Storages/MergeTree/IDataPartStorage.h index 0e4ebe68157..baad9748b10 100644 --- a/src/Storages/MergeTree/IDataPartStorage.h +++ b/src/Storages/MergeTree/IDataPartStorage.h @@ -211,6 +211,9 @@ public: size_t buf_size, const WriteSettings & settings) = 0; + /// A special const method to write transaction file. + /// It's const, because file with transaction metadata + /// can be modifed after part creation. virtual std::unique_ptr writeTransactionFile(WriteMode mode) const = 0; virtual void createFile(const String & name) = 0; @@ -237,7 +240,9 @@ public: bool remove_new_dir_if_exists, bool fsync_part_dir) = 0; + /// Starts a transaction of mutable operations. virtual void beginTransaction() = 0; + /// Commits a transaction of mutable operations. virtual void commitTransaction() = 0; virtual bool hasActiveTransaction() const = 0; }; @@ -245,6 +250,9 @@ public: using DataPartStoragePtr = std::shared_ptr; using MutableDataPartStoragePtr = std::shared_ptr; +/// A holder that encapsulates data part storage and +/// gives access to const storage from const methods +/// and to mutable storage from non-const methods. 
class DataPartStorageHolder : public boost::noncopyable { public: From 79889198b1356a9d85fe9ca7dd76d2b95894f03a Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 25 Oct 2022 23:03:37 +0000 Subject: [PATCH 057/112] fix style check --- src/Disks/StoragePolicy.cpp | 1 - src/Storages/MergeTree/IDataPartStorage.h | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index 1e5b5825b8c..10524ffcc0f 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -26,7 +26,6 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int EXCESSIVE_ELEMENT_IN_CONFIG; extern const int NO_ELEMENTS_IN_CONFIG; - extern const int UNKNOWN_DISK; extern const int UNKNOWN_POLICY; extern const int UNKNOWN_VOLUME; extern const int LOGICAL_ERROR; diff --git a/src/Storages/MergeTree/IDataPartStorage.h b/src/Storages/MergeTree/IDataPartStorage.h index baad9748b10..c6669908db4 100644 --- a/src/Storages/MergeTree/IDataPartStorage.h +++ b/src/Storages/MergeTree/IDataPartStorage.h @@ -213,7 +213,7 @@ public: /// A special const method to write transaction file. /// It's const, because file with transaction metadata - /// can be modifed after part creation. + /// can be modified after part creation. virtual std::unique_ptr writeTransactionFile(WriteMode mode) const = 0; virtual void createFile(const String & name) = 0; From 91c3744cfe9e9d7aaf03bbdda2e45a960dc56e73 Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Tue, 25 Oct 2022 21:33:09 -0400 Subject: [PATCH 058/112] add backup to S3 endpoint --- docs/en/operations/{backup.md => _backup.md} | 140 ++++++++++++++++-- .../statements/alter/partition.md | 2 +- 2 files changed, 128 insertions(+), 14 deletions(-) rename docs/en/operations/{backup.md => _backup.md} (68%) diff --git a/docs/en/operations/backup.md b/docs/en/operations/_backup.md similarity index 68% rename from docs/en/operations/backup.md rename to docs/en/operations/_backup.md index a755e3ef9a6..c543c49a083 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/_backup.md @@ -5,6 +5,13 @@ sidebar_label: Data backup and restore title: Data backup and restore --- +- [Backup to a local disk](#backup-to-a-local-disk) +- [Configuring backup/restore to use an S3 endpoint](#configuring-backuprestore-to-use-an-s3-endpoint) +- [Backup/restore using an S3 disk](#backuprestore-using-an-s3-disk) +- [Alternatives](#alternatives) + +## Background + While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented. In order to effectively mitigate possible human errors, you should carefully prepare a strategy for backing up and restoring your data **in advance**. 
@@ -15,7 +22,9 @@ Each company has different resources available and business requirements, so the Keep in mind that if you backed something up and never tried to restore it, chances are that restore will not work properly when you actually need it (or at least it will take longer than business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly. ::: -## Configure a backup destination +## Backup to a local disk + +### Configure a backup destination In the examples below you will see the backup destination specified like `Disk('backups', '1.zip')`. To prepare the destination add a file to `/etc/clickhouse-server/config.d/backup_disk.xml` specifying the backup destination. For example, this file defines disk named `backups` and then adds that disk to the **backups > allowed_disk** list: @@ -39,7 +48,7 @@ In the examples below you will see the backup destination specified like `Disk(' ``` -## Parameters +### Parameters Backups can be either full or incremental, and can include tables (including materialized views, projections, and dictionaries), and databases. Backups can be synchronous (default) or asynchronous. They can be compressed. Backups can be password protected. @@ -52,7 +61,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des - `password` for the file on disk - `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')` -## Usage examples +### Usage examples Backup and then restore a table: ``` @@ -81,7 +90,7 @@ RESTORE TABLE test.table AS test.table2 FROM Disk('backups', '1.zip') BACKUP TABLE test.table3 AS test.table4 TO Disk('backups', '2.zip') ``` -## Incremental backups +### Incremental backups Incremental backups can be taken by specifying the `base_backup`. :::note @@ -100,7 +109,7 @@ RESTORE TABLE test.table AS test.table2 FROM Disk('backups', 'incremental-a.zip'); ``` -## Assign a password to the backup +### Assign a password to the backup Backups written to disk can have a password applied to the file: ``` @@ -116,7 +125,7 @@ RESTORE TABLE test.table SETTINGS password='qwerty' ``` -## Compression settings +### Compression settings If you would like to specify the compression method or level: ``` @@ -125,14 +134,14 @@ BACKUP TABLE test.table SETTINGS compression_method='lzma', compression_level=3 ``` -## Restore specific partitions +### Restore specific partitions If specific partitions associated with a table need to be restored these can be specified. To restore partitions 1 and 4 from backup: ``` RESTORE TABLE test.table PARTITIONS '2', '3' FROM Disk('backups', 'filename.zip') ``` -## Check the status of backups +### Check the status of backups The backup command returns an `id` and `status`, and that `id` can be used to get the status of the backup. This is very useful to check the progress of long ASYNC backups. The example below shows a failure that happened when trying to overwrite an existing backup file: ```sql @@ -171,13 +180,118 @@ end_time: 2022-08-30 09:21:46 1 row in set. Elapsed: 0.002 sec. ``` -## Backup to S3 +## Configuring BACKUP/RESTORE to use an S3 Endpoint -It is possible to `BACKUP`/`RESTORE` to S3, but this disk should be configured -in a proper way, since by default you will need to backup metadata from local -disk to make backup full. 
+To write backups to an S3 bucket you need three pieces of information:
+- S3 endpoint,
+  for example `https://mars-doc-test.s3.amazonaws.com/backup-S3/`
+- Access key ID,
+  for example `ABC123`
+- Secret access key,
+  for example `Abc+123`
+
+:::note
+Creating an S3 bucket is covered in [Use S3 Object Storage as a ClickHouse disk](/docs/en/integrations/data-ingestion/s3/configuring-s3-for-clickhouse-use.md), just come back to this doc after saving the policy, there is no need to configure ClickHouse to use the S3 bucket.
+:::
+
+The destination for a backup will be specified like this:
+```
+S3('<S3 endpoint>/<directory>', '<Access key ID>', '<Secret access key>')
+```
+
+```sql
+CREATE TABLE data
+(
+    `key` Int,
+    `value` String,
+    `array` Array(String)
+)
+ENGINE = MergeTree
+ORDER BY tuple()
+```
+
+```sql
+INSERT INTO data SELECT *
+FROM generateRandom('key Int, value String, array Array(String)')
+LIMIT 1000
+```
+
+### Create a base (initial) backup
+
+Incremental backups require a _base_ backup to start from, this example will be used
+later as the base backup. The first parameter of the S3 destination is the S3 endpoint followed by the directory within the bucket to use for this backup. In this example the directory is named `my_backup`.
+
+```sql
+BACKUP TABLE data TO S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_backup', 'ABC123', 'Abc+123')
+```
+
+```response
+┌─id───────────────────────────────────┬─status─────────┐
+│ de442b75-a66c-4a3c-a193-f76f278c70f3 │ BACKUP_CREATED │
+└──────────────────────────────────────┴────────────────┘
+```
+
+### Add more data
+
+Incremental backups are populated with the difference between the base backup and the current content of the table being backed up. Add more data before taking the incremental backup:
+
+```sql
+INSERT INTO data SELECT *
+FROM generateRandom('key Int, value String, array Array(String)')
+LIMIT 100
+```
+### Take an incremental backup
+
+This backup command is similar to the base backup, but adds `SETTINGS base_backup` and the location of the base backup. Note that the destination for the incremental backup is not the same directory as the base, it is the same endpoint with a different target directory within the bucket. The base backup is in `my_backup`, and the incremental will be written to `my_incremental`:
+```sql
+BACKUP TABLE data TO S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_incremental', 'ABC123', 'Abc+123') SETTINGS base_backup = S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_backup', 'ABC123', 'Abc+123')
+```
+
+```response
+┌─id───────────────────────────────────┬─status─────────┐
+│ f6cd3900-850f-41c9-94f1-0c4df33ea528 │ BACKUP_CREATED │
+└──────────────────────────────────────┴────────────────┘
+```
+### Restore from the incremental backup
+
+This command restores the incremental backup into a new table, `data3`. Note that when an incremental backup is restored, the base backup is also included. Specify only the incremental backup when restoring:
+```sql
+RESTORE TABLE data AS data3 FROM S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_incremental', 'ABC123', 'Abc+123')
+```
+
+```response
+┌─id───────────────────────────────────┬─status───┐
+│ ff0c8c39-7dff-4324-a241-000796de11ca │ RESTORED │
+└──────────────────────────────────────┴──────────┘
+```
+
+### Verify the count
+
+There were two inserts into the original table `data`, one with 1,000 rows and one with 100 rows, for a total of 1,100.
Verify that the restored table has 1,100 rows: +```sql +SELECT count() +FROM data3 +``` +```response +┌─count()─┐ +│ 1100 │ +└─────────┘ +``` + +### Verify the content +This compares the content of the original table, `data` with the restored table `data3`: +```sql +SELECT throwIf(( + SELECT groupArray(tuple(*)) + FROM data + ) != ( + SELECT groupArray(tuple(*)) + FROM data3 + ), 'Data does not match after BACKUP/RESTORE') +``` +## BACKUP/RESTORE Using an S3 Disk + +It is also possible to `BACKUP`/`RESTORE` to S3 by configuring an S3 disk in the ClickHouse storage configuration. Configure the disk like this by adding a file to `/etc/clickhouse-server/config.d`: ```xml diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md index a216de85cfc..da99c52538f 100644 --- a/docs/en/sql-reference/statements/alter/partition.md +++ b/docs/en/sql-reference/statements/alter/partition.md @@ -194,7 +194,7 @@ To restore data from a backup, do the following: Restoring from a backup does not require stopping the server. -For more information about backups and restoring data, see the [Data Backup](../../../operations/backup.md) section. +For more information about backups and restoring data, see the [Data Backup](/docs/en/manage/backups.mdx) section. ## UNFREEZE PARTITION From e996e2ff1560fecaf801c7cb42c3870839d6d4bc Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 26 Oct 2022 07:48:37 +0000 Subject: [PATCH 059/112] Fix clang-tidy --- programs/copier/TaskCluster.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/copier/TaskCluster.cpp b/programs/copier/TaskCluster.cpp index 6b7911f56f2..957c7d2120d 100644 --- a/programs/copier/TaskCluster.cpp +++ b/programs/copier/TaskCluster.cpp @@ -40,7 +40,7 @@ void DB::TaskCluster::reloadSettings(const Poco::Util::AbstractConfiguration & c if (config.has(prefix + "settings")) settings_common.loadSettingsFromConfig(prefix + "settings", config); - settings_common.prefer_localhost_replica = 0; + settings_common.prefer_localhost_replica = false; settings_pull = settings_common; if (config.has(prefix + "settings_pull")) From 3eca9ada3b25ba789c049753138b799280c71fb8 Mon Sep 17 00:00:00 2001 From: Roman Vasin Date: Wed, 26 Oct 2022 08:00:12 +0000 Subject: [PATCH 060/112] Add Date32 to formatDateTIme --- src/Functions/formatDateTime.cpp | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp index a10c059b342..4f28dae7a66 100644 --- a/src/Functions/formatDateTime.cpp +++ b/src/Functions/formatDateTime.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -45,6 +46,7 @@ template <> struct ActionValueTypeMap { using ActionValueTyp template <> struct ActionValueTypeMap { using ActionValueType = UInt32; }; template <> struct ActionValueTypeMap { using ActionValueType = UInt32; }; template <> struct ActionValueTypeMap { using ActionValueType = UInt16; }; +template <> struct ActionValueTypeMap { using ActionValueType = Int32; }; template <> struct ActionValueTypeMap { using ActionValueType = UInt32; }; // TODO(vnemkov): to add sub-second format instruction, make that DateTime64 and do some math in Action. template <> struct ActionValueTypeMap { using ActionValueType = Int64; }; @@ -324,7 +326,7 @@ public: "Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName() + " when arguments size is 1. 
Should be integer", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - if (arguments.size() > 1 && !(isInteger(arguments[0].type) || isDate(arguments[0].type) || isDateTime(arguments[0].type) || isDateTime64(arguments[0].type))) + if (arguments.size() > 1 && !(isInteger(arguments[0].type) || isDate(arguments[0].type) || isDateTime(arguments[0].type) || isDate32(arguments[0].type) || isDateTime64(arguments[0].type))) throw Exception( "Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName() + " when arguments size is 2 or 3. Should be a integer or a date with time", @@ -337,7 +339,7 @@ public: "Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size()) + ", should be 2 or 3", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - if (!isDate(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type)) + if (!isDate(arguments[0].type) && !isDateTime(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime64(arguments[0].type)) throw Exception( "Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName() + ". Should be a date or a date with time", @@ -393,6 +395,7 @@ public: })) { if (!((res = executeType(arguments, result_type)) + || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)))) throw Exception( @@ -405,6 +408,7 @@ public: else { if (!((res = executeType(arguments, result_type)) + || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)))) throw Exception( @@ -496,6 +500,13 @@ public: instruction.perform(pos, static_cast(c.whole), time_zone); } } + else if constexpr (std::is_same_v) + { + for (auto & instruction : instructions) + { + instruction.perform(pos, static_cast(vec[i]), time_zone); + } + } else { for (auto & instruction : instructions) From 117674c0cef594c125b472020a9e8339538d92ca Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 26 Oct 2022 08:48:12 +0000 Subject: [PATCH 061/112] Update version_date.tsv and changelogs after v22.10.1.1875-stable --- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v22.10.1.1875-stable.md | 351 ++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 4 files changed, 354 insertions(+), 2 deletions(-) create mode 100644 docs/changelogs/v22.10.1.1875-stable.md diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 5b597f927a2..9b633b66188 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="22.9.3.18" +ARG VERSION="22.10.1.1875" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. 
diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index c6254b898ed..35c78763a31 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -21,7 +21,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="22.9.3.18" +ARG VERSION="22.10.1.1875" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docs/changelogs/v22.10.1.1875-stable.md b/docs/changelogs/v22.10.1.1875-stable.md new file mode 100644 index 00000000000..49f93168a00 --- /dev/null +++ b/docs/changelogs/v22.10.1.1875-stable.md @@ -0,0 +1,351 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.10.1.1875-stable (011ae8675a2) FIXME as compared to v22.9.1.2603-stable (3030d4c7ff0) + +#### Backward Incompatible Change +* Rename cache commands: `show caches` -> `show filesystem caches`, `describe cache` -> `describe filesystem cache`. [#41508](https://github.com/ClickHouse/ClickHouse/pull/41508) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Remove support for the `WITH TIMEOUT` section for `LIVE VIEW`. This closes [#40557](https://github.com/ClickHouse/ClickHouse/issues/40557). [#42173](https://github.com/ClickHouse/ClickHouse/pull/42173) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### New Feature +* Add Rust code support into ClickHouse with BLAKE3 hash-function library as an example. [#33435](https://github.com/ClickHouse/ClickHouse/pull/33435) ([BoloniniD](https://github.com/BoloniniD)). +* This is the initial implement of Kusto Query Language. (MVP). [#37961](https://github.com/ClickHouse/ClickHouse/pull/37961) ([Yong Wang](https://github.com/kashwy)). +* * Support limiting of temporary data stored on disk using settings `max_temporary_data_on_disk_size_for_user`/`max_temporary_data_on_disk_size_for_query` . [#40893](https://github.com/ClickHouse/ClickHouse/pull/40893) ([Vladimir C](https://github.com/vdimir)). +* Support Java integers hashing in `javaHash`. [#41131](https://github.com/ClickHouse/ClickHouse/pull/41131) ([JackyWoo](https://github.com/JackyWoo)). +* This PR is to support the OpenSSL in-house build like the BoringSSL submodule. Build flag i.e. ENABLE_CH_BUNDLE_BORINGSSL is used to choose between BoringSSL and OpenSSL. By default, the BoringSSL in-house build will be used. [#41142](https://github.com/ClickHouse/ClickHouse/pull/41142) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)). +* Composable protocol configuration is added. [#41198](https://github.com/ClickHouse/ClickHouse/pull/41198) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Add OpenTelemetry support to ON CLUSTER DDL(require `distributed_ddl_entry_format_version` to be set to 4). [#41484](https://github.com/ClickHouse/ClickHouse/pull/41484) ([Frank Chen](https://github.com/FrankChen021)). +* Add setting `format_json_object_each_row_column_for_object_name` to write/parse object name as column value in JSONObjectEachRow format. [#41703](https://github.com/ClickHouse/ClickHouse/pull/41703) ([Kruglov Pavel](https://github.com/Avogar)). +* adds Morton Coding (ZCurve) encode/decode functions. [#41753](https://github.com/ClickHouse/ClickHouse/pull/41753) ([Constantine Peresypkin](https://github.com/pkit)). 
+* Implement support for different UUID binary formats with support for the two most prevalent ones: the default big-endian and Microsoft's mixed-endian as specified in [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.1). [#42108](https://github.com/ClickHouse/ClickHouse/pull/42108) ([ltrk2](https://github.com/ltrk2)). +* Added an aggregate function `analysisOfVariance` (`anova`) to perform a statistical test over several groups of normally distributed observations to find out whether all groups have the same mean or not. Original PR [#37872](https://github.com/ClickHouse/ClickHouse/issues/37872). [#42131](https://github.com/ClickHouse/ClickHouse/pull/42131) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add support for `SET setting_name = DEFAULT`. [#42187](https://github.com/ClickHouse/ClickHouse/pull/42187) ([Filatenkov Artur](https://github.com/FArthur-cmd)). +* * Add `URL` Functions which conform rfc. Functions include: `cutToFirstSignificantSubdomainCustomRFC`, `cutToFirstSignificantSubdomainCustomWithWWWRFC`, `cutToFirstSignificantSubdomainRFC`, `cutToFirstSignificantSubdomainWithWWWRFC`, `domainRFC`, `domainWithoutWWWRFC`, `firstSignificantSubdomainCustomRFC`, `firstSignificantSubdomainRFC`, `portRFC`, `topLevelDomainRFC`. [#42274](https://github.com/ClickHouse/ClickHouse/pull/42274) ([Quanfa Fu](https://github.com/dentiscalprum)). +* Added functions (`randUniform`, `randNormal`, `randLogNormal`, `randExponential`, `randChiSquared`, `randStudentT`, `randFisherF`, `randBernoulli`, `randBinomial`, `randNegativeBinomial`, `randPoisson` ) to generate random values according to the specified distributions. This closes [#21834](https://github.com/ClickHouse/ClickHouse/issues/21834). [#42411](https://github.com/ClickHouse/ClickHouse/pull/42411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). + +#### Performance Improvement +* Implement operator precedence element parser to resolve stack overflow issues and make the required stack size smaller. [#34892](https://github.com/ClickHouse/ClickHouse/pull/34892) ([Nikolay Degterinsky](https://github.com/evillique)). +* DISTINCT in order optimization leverage sorting properties of data streams. This improvement will enable reading in order for DISTINCT if applicable (before it was necessary to provide ORDER BY for columns in DISTINCT). [#41014](https://github.com/ClickHouse/ClickHouse/pull/41014) ([Igor Nikonov](https://github.com/devcrafter)). +* ColumnVector: optimize UInt8 index with AVX512VBMI. [#41247](https://github.com/ClickHouse/ClickHouse/pull/41247) ([Guo Wangyang](https://github.com/guowangy)). +* The performance experiments of **SSB** (Star Schema Benchmark) on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) shows that this change could bring a **2.95x** improvement of the geomean of all subcases' QPS. [#41675](https://github.com/ClickHouse/ClickHouse/pull/41675) ([Zhiguo Zhou](https://github.com/ZhiguoZh)). +* Fixed slowness in JSONExtract with LowCardinality(String) tuples. [#41726](https://github.com/ClickHouse/ClickHouse/pull/41726) ([AlfVII](https://github.com/AlfVII)). +* Add ldapr capabilities to AArch64 builds. This is supported from Graviton 2+, Azure and GCP instances. Only appeared in clang-15 [not so long ago](https://github.com/llvm/llvm-project/commit/9609b5daffe9fd28d83d83da895abc5113f76c24). [#41778](https://github.com/ClickHouse/ClickHouse/pull/41778) ([Daniel Kutenin](https://github.com/danlark1)). 
+* Improve performance when comparing strings and one argument is empty constant string. [#41870](https://github.com/ClickHouse/ClickHouse/pull/41870) ([Jiebin Sun](https://github.com/jiebinn)). +* optimize insertFrom of ColumnAggregateFunction to share Aggregate State in some cases. [#41960](https://github.com/ClickHouse/ClickHouse/pull/41960) ([flynn](https://github.com/ucasfl)). +* Relax the "Too many parts" threshold. This closes [#6551](https://github.com/ClickHouse/ClickHouse/issues/6551). Now ClickHouse will allow more parts in a partition if the average part size is large enough (at least 10 GiB). This allows to have up to petabytes of data in a single partition of a single table on a single server, which is possible using disk shelves or object storage. [#42002](https://github.com/ClickHouse/ClickHouse/pull/42002) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Make writing to AzureBlobStorage more efficient (respect `max_single_part_upload_size` instead of writing a block per each buffer size). Inefficiency mentioned in [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42041](https://github.com/ClickHouse/ClickHouse/pull/42041) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Make thread ids in the process list and query_log unique to avoid waste. [#42180](https://github.com/ClickHouse/ClickHouse/pull/42180) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Improvement +* Added new infrastructure for query analysis and planning under `allow_experimental_analyzer` setting. [#31796](https://github.com/ClickHouse/ClickHouse/pull/31796) ([Maksim Kita](https://github.com/kitaisreal)). +* * Support expression `(EXPLAIN SELECT ...)` in a subquery. Queries like `SELECT * FROM (EXPLAIN PIPELINE SELECT col FROM TABLE ORDER BY col)` became valid. [#40630](https://github.com/ClickHouse/ClickHouse/pull/40630) ([Vladimir C](https://github.com/vdimir)). +* Currently changing `async_insert_max_data_size` or `async_insert_busy_timeout_ms` in scope of query makes no sense and this leads to bad user experience. E.g. user wants to insert data rarely and he doesn't have an access to server config to tune default settings. [#40668](https://github.com/ClickHouse/ClickHouse/pull/40668) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Embedded Keeper will always start in the background allowing ClickHouse to start without achieving quorum. [#40991](https://github.com/ClickHouse/ClickHouse/pull/40991) ([Antonio Andelic](https://github.com/antonio2368)). +* Improvements for reading from remote filesystems, made threadpool size for reads/writes configurable. Closes [#41070](https://github.com/ClickHouse/ClickHouse/issues/41070). [#41011](https://github.com/ClickHouse/ClickHouse/pull/41011) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Made reestablishing a new connection more reactive in case of expiration of the previous one. Previously there was a task which spawns every minute by default and thus a table could be in readonly state for about this time. [#41092](https://github.com/ClickHouse/ClickHouse/pull/41092) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Support all combinators combination in WindowTransform/arratReduce*/initializeAggregation/aggregate functions versioning. Previously combinators like `ForEach/Resample/Map` didn't work in these places, using them led to exception like`State function ... inserts results into non-state column`. 
[#41107](https://github.com/ClickHouse/ClickHouse/pull/41107) ([Kruglov Pavel](https://github.com/Avogar)). +* Now projections can be used with zero copy replication. [#41147](https://github.com/ClickHouse/ClickHouse/pull/41147) ([alesapin](https://github.com/alesapin)). +* - Add function tryDecrypt that returns NULL when decrypt fail (e.g. decrypt with incorrect key) instead of throwing exception. [#41206](https://github.com/ClickHouse/ClickHouse/pull/41206) ([Duc Canh Le](https://github.com/canhld94)). +* Add the `unreserved_space` column to the `system.disks` table to check how much space is not taken by reservations per disk. [#41254](https://github.com/ClickHouse/ClickHouse/pull/41254) ([filimonov](https://github.com/filimonov)). +* Support s3 authorisation headers from ast arguments. [#41261](https://github.com/ClickHouse/ClickHouse/pull/41261) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add setting 'allow_implicit_no_password' that forbids creating a user with no password unless 'IDENTIFIED WITH no_password' is explicitly specified. [#41341](https://github.com/ClickHouse/ClickHouse/pull/41341) ([Nikolay Degterinsky](https://github.com/evillique)). +* keeper-improvement: add support for uploading snapshots to S3. S3 information can be defined inside `keeper_server.s3_snapshot`. [#41342](https://github.com/ClickHouse/ClickHouse/pull/41342) ([Antonio Andelic](https://github.com/antonio2368)). +* Add support for MultiRead in Keeper and internal ZooKeeper client. [#41410](https://github.com/ClickHouse/ClickHouse/pull/41410) ([Antonio Andelic](https://github.com/antonio2368)). +* add a support for decimal type comparing with floating point literal in IN operator. [#41544](https://github.com/ClickHouse/ClickHouse/pull/41544) ([liang.huang](https://github.com/lhuang09287750)). +* Allow readable size values in cache config. [#41688](https://github.com/ClickHouse/ClickHouse/pull/41688) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Check file path for path traversal attacks in errors logger for input formats. [#41694](https://github.com/ClickHouse/ClickHouse/pull/41694) ([Kruglov Pavel](https://github.com/Avogar)). +* ClickHouse could cache stale DNS entries for some period of time (15 seconds by default) until the cache won't be updated asynchronously. During these period ClickHouse can nevertheless try to establish a connection and produce errors. This behaviour is fixed. [#41707](https://github.com/ClickHouse/ClickHouse/pull/41707) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add interactive history search with fzf-like utility (fzf/sk) for `clickhouse-client`/`clickhouse-local` (note you can use `FZF_DEFAULT_OPTS`/`SKIM_DEFAULT_OPTIONS` to additionally configure the behavior). [#41730](https://github.com/ClickHouse/ClickHouse/pull/41730) ([Azat Khuzhin](https://github.com/azat)). +* For client when connecting to a secure server with invalid certificate only allow to proceed with '--accept-certificate' flag. [#41743](https://github.com/ClickHouse/ClickHouse/pull/41743) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Add function "tryBase58Decode()", similar to the existing function "tryBase64Decode()". [#41824](https://github.com/ClickHouse/ClickHouse/pull/41824) ([Robert Schulze](https://github.com/rschu1ze)). +* Improve feedback when replacing partition with different primary key. Fixes [#34798](https://github.com/ClickHouse/ClickHouse/issues/34798). 
[#41838](https://github.com/ClickHouse/ClickHouse/pull/41838) ([Salvatore](https://github.com/tbsal)). +* Replace back `clickhouse su` command with `sudo -u` in start in order to respect limits in `/etc/security/limits.conf`. [#41847](https://github.com/ClickHouse/ClickHouse/pull/41847) ([Eugene Konkov](https://github.com/ekonkov)). +* Fix parallel parsing: segmentator now checks max_block_size. [#41852](https://github.com/ClickHouse/ClickHouse/pull/41852) ([Vitaly Baranov](https://github.com/vitlibar)). +* Don't report TABLE_IS_DROPPED exception in order to skip table in case is was just dropped. [#41908](https://github.com/ClickHouse/ClickHouse/pull/41908) ([AlfVII](https://github.com/AlfVII)). +* Improve option enable_extended_results_for_datetime_functions to return results of type DateTime64 for functions toStartOfDay, toStartOfHour, toStartOfFifteenMinutes, toStartOfTenMinutes, toStartOfFiveMinutes, toStartOfMinute and timeSlot. [#41910](https://github.com/ClickHouse/ClickHouse/pull/41910) ([Roman Vasin](https://github.com/rvasin)). +* Improve DateTime type inference for text formats. Now it respect setting `date_time_input_format` and doesn't try to infer datetimes from numbers as timestamps. Closes [#41389](https://github.com/ClickHouse/ClickHouse/issues/41389) Closes [#42206](https://github.com/ClickHouse/ClickHouse/issues/42206). [#41912](https://github.com/ClickHouse/ClickHouse/pull/41912) ([Kruglov Pavel](https://github.com/Avogar)). +* Remove confusing warning when inserting with `perform_ttl_move_on_insert`=false. [#41980](https://github.com/ClickHouse/ClickHouse/pull/41980) ([Vitaly Baranov](https://github.com/vitlibar)). +* Allow user to write `countState(*)` similar to `count(*)`. This closes [#9338](https://github.com/ClickHouse/ClickHouse/issues/9338). [#41983](https://github.com/ClickHouse/ClickHouse/pull/41983) ([Amos Bird](https://github.com/amosbird)). +* - Fix rankCorr size overflow. [#42020](https://github.com/ClickHouse/ClickHouse/pull/42020) ([Duc Canh Le](https://github.com/canhld94)). +* Added an option to specify an arbitrary string as an environment name in the Sentry's config for more handy reports. [#42037](https://github.com/ClickHouse/ClickHouse/pull/42037) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Added system table `asynchronous_insert_log `. It contains information about asynchronous inserts (including results of queries in fire-and-forget mode (with `wait_for_async_insert=0`)) for better introspection. [#42040](https://github.com/ClickHouse/ClickHouse/pull/42040) ([Anton Popov](https://github.com/CurtizJ)). +* Fix parsing out-of-range Date from CSV:. [#42044](https://github.com/ClickHouse/ClickHouse/pull/42044) ([Andrey Zvonov](https://github.com/zvonand)). +* parseDataTimeBestEffort support comma between date and time. Closes [#42038](https://github.com/ClickHouse/ClickHouse/issues/42038). [#42049](https://github.com/ClickHouse/ClickHouse/pull/42049) ([flynn](https://github.com/ucasfl)). +* Add support for methods lz4, bz2, snappy in 'Accept-Encoding'. [#42071](https://github.com/ClickHouse/ClickHouse/pull/42071) ([Nikolay Degterinsky](https://github.com/evillique)). +* Various minor fixes for BLAKE3 function. [#42073](https://github.com/ClickHouse/ClickHouse/pull/42073) ([BoloniniD](https://github.com/BoloniniD)). +* Improved stale replica recovery process for `ReplicatedMergeTree`. 
If lost replica have some parts which absent on a healthy replica, but these parts should appear in future according to replication queue of the healthy replica, then lost replica will keep such parts instead of detaching them. [#42134](https://github.com/ClickHouse/ClickHouse/pull/42134) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Support BACKUP to S3 with as-is path/data structure. [#42232](https://github.com/ClickHouse/ClickHouse/pull/42232) ([Azat Khuzhin](https://github.com/azat)). +* Add a possibility to use Date32 arguments for date_diff function. Fix issue in date_diff function when using DateTime64 arguments with start date before Unix epoch and end date after Unix epoch. [#42308](https://github.com/ClickHouse/ClickHouse/pull/42308) ([Roman Vasin](https://github.com/rvasin)). +* When uploading big parts to minio, 'Complete Multipart Upload' can take a long time. Minio sends heartbeats every 10 seconds (see https://github.com/minio/minio/pull/7198). But clickhouse times out earlier, because the default send/receive timeout is [set](https://github.com/ClickHouse/ClickHouse/blob/cc24fcd6d5dfb67f5f66f5483e986bd1010ad9cf/src/IO/S3/PocoHTTPClient.cpp#L123) to 5 seconds. [#42321](https://github.com/ClickHouse/ClickHouse/pull/42321) ([filimonov](https://github.com/filimonov)). +* Add `S3` as a new type of the destination of backups. [#42333](https://github.com/ClickHouse/ClickHouse/pull/42333) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix rarely invalid cast of aggregate state types with complex types such as Decimal. This fixes [#42408](https://github.com/ClickHouse/ClickHouse/issues/42408). [#42417](https://github.com/ClickHouse/ClickHouse/pull/42417) ([Amos Bird](https://github.com/amosbird)). +* Support skipping cache completely (both download to cache and reading cached data) in case the requested read range exceeds the threshold defined by cache setting `bypass_cache_threashold`, requires to be enabled with `enable_bypass_cache_with_threshold`). [#42418](https://github.com/ClickHouse/ClickHouse/pull/42418) ([Han Shukai](https://github.com/KinderRiven)). +* Merge parts if every part in the range is older than a certain threshold. The threshold can be set by using `min_age_to_force_merge_seconds`. This closes [#35836](https://github.com/ClickHouse/ClickHouse/issues/35836). [#42423](https://github.com/ClickHouse/ClickHouse/pull/42423) ([Antonio Andelic](https://github.com/antonio2368)). +* Enabled CompiledExpressionCache in clickhouse-local. [#42477](https://github.com/ClickHouse/ClickHouse/pull/42477) ([AlfVII](https://github.com/AlfVII)). +* Remove support for the `{database}` macro from the client's prompt. It was displayed incorrectly if the database was unspecified and it was not updated on `USE` statements. This closes [#25891](https://github.com/ClickHouse/ClickHouse/issues/25891). [#42508](https://github.com/ClickHouse/ClickHouse/pull/42508) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* - Improve the time to recover lost keeper connections. [#42541](https://github.com/ClickHouse/ClickHouse/pull/42541) ([Raúl Marín](https://github.com/Algunenano)). +* Allow to use Date32 arguments for dateName function. [#42554](https://github.com/ClickHouse/ClickHouse/pull/42554) ([Roman Vasin](https://github.com/rvasin)). + +#### Bug Fix +* Now filters with NULL literals will be used during index analysis. This closes https://github.com/ClickHouse/ClickHouse/pull/41814 [#34063](https://github.com/ClickHouse/ClickHouse/issues/34063). 
[#41842](https://github.com/ClickHouse/ClickHouse/pull/41842) ([Amos Bird](https://github.com/amosbird)).
+* Choose the correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).
+* Fix using subqueries in row policy filters. This PR fixes [#32463](https://github.com/ClickHouse/ClickHouse/issues/32463). [#42562](https://github.com/ClickHouse/ClickHouse/pull/42562) ([Vitaly Baranov](https://github.com/vitlibar)).
+
+#### Build/Testing/Packaging Improvement
+* Added support for WHERE clause generation to the AST Fuzzer and the possibility to add or remove ORDER BY and WHERE clauses. [#38519](https://github.com/ClickHouse/ClickHouse/pull/38519) ([Ilya Yatsishin](https://github.com/qoega)).
+* Aarch64 binaries now require at least ARMv8.2, released in 2016. Most notably, this enables use of ARM LSE, i.e. native atomic operations. Also, the CMake build option "NO_ARMV81_OR_HIGHER" has been added to allow compilation of binaries for older ARMv8.0 hardware, e.g. Raspberry Pi 4. [#41610](https://github.com/ClickHouse/ClickHouse/pull/41610) ([Robert Schulze](https://github.com/rschu1ze)).
+* After updating runners to 22.04, cgroups stopped working in privileged mode; see the issue https://github.com/moby/moby/issues/42275#issuecomment-1115055846. [#41857](https://github.com/ClickHouse/ClickHouse/pull/41857) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Allow building ClickHouse with Musl (small changes after it was already supported but broken). [#41987](https://github.com/ClickHouse/ClickHouse/pull/41987) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Add a check for the `$CLICKHOUSE_CRONFILE` file to avoid running the `sed` command on a missing file and getting a "file not found" error. [#42081](https://github.com/ClickHouse/ClickHouse/pull/42081) ([Chun-Sheng, Li](https://github.com/peter279k)).
+* Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
+* Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix power8 support. [#42462](https://github.com/ClickHouse/ClickHouse/pull/42462) ([Boris Kuschel](https://github.com/bkuschel)).
+
+#### Bug Fix (user-visible misbehavior in official stable or prestable release)
+
+* Several fixes for DiskWeb. [#41652](https://github.com/ClickHouse/ClickHouse/pull/41652) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix an issue where `docker run` would fail if "https_port" is not present in the config. [#41693](https://github.com/ClickHouse/ClickHouse/pull/41693) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Mutations were not cancelled properly on server shutdown or on a `SYSTEM STOP MERGES` query, and cancellation could take a long time; this is fixed. [#41699](https://github.com/ClickHouse/ClickHouse/pull/41699) ([Alexander Tokmakov](https://github.com/tavplubix)).
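+
+  A rough illustration of the scenario the entry above refers to. This is a minimal sketch: the MergeTree table `t` and its `value` column are hypothetical, while the statements themselves are ordinary ClickHouse SQL.
+
+  ```sql
+  -- Pause merges for the table (mutations are applied by the same background
+  -- machinery), then start a mutation that will typically stay queued.
+  SYSTEM STOP MERGES t;
+  ALTER TABLE t UPDATE value = value + 1 WHERE 1;
+
+  -- Inspect the queued mutation and cancel it explicitly.
+  SELECT mutation_id, is_done FROM system.mutations WHERE table = 't' AND NOT is_done;
+  KILL MUTATION WHERE table = 't';
+
+  -- Resume merges afterwards.
+  SYSTEM START MERGES t;
+  ```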
+* Fix wrong results of queries with `ORDER BY` or `GROUP BY` by columns from a prefix of the sorting key wrapped into monotonic functions, with the "read in order" optimization enabled (settings `optimize_read_in_order` and `optimize_aggregation_in_order`). [#41701](https://github.com/ClickHouse/ClickHouse/pull/41701) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix a possible crash in `SELECT` from a `Merge` table with the `optimize_monotonous_functions_in_order_by` setting enabled. Fixes [#41269](https://github.com/ClickHouse/ClickHouse/issues/41269). [#41740](https://github.com/ClickHouse/ClickHouse/pull/41740) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fixed a "Part ... intersects part ..." error that might happen in extremely rare cases if a replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Don't allow creating or altering MergeTree tables with the virtual column name _row_exists, which is reserved for lightweight delete. Fixed [#41716](https://github.com/ClickHouse/ClickHouse/issues/41716). [#41763](https://github.com/ClickHouse/ClickHouse/pull/41763) ([Jianmei Zhang](https://github.com/zhangjmruc)).
+* Fix a bug where CORS headers were missing in some HTTP responses. [#41792](https://github.com/ClickHouse/ClickHouse/pull/41792) ([Frank Chen](https://github.com/FrankChen021)).
+* 22.9 might fail to start up a `ReplicatedMergeTree` table if that table was created by version 20.3 or older and was never altered; it's fixed. Fixes [#41742](https://github.com/ClickHouse/ClickHouse/issues/41742). [#41796](https://github.com/ClickHouse/ClickHouse/pull/41796) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* When batch sending failed for some reason, it could not be recovered automatically; if not processed in time, batches accumulated, the printed error message grew longer and longer, and the HTTP thread could block. [#41813](https://github.com/ClickHouse/ClickHouse/pull/41813) ([zhongyuankai](https://github.com/zhongyuankai)).
+* Fix compact parts with the compressed marks setting. Fixes [#41783](https://github.com/ClickHouse/ClickHouse/issues/41783) and [#41746](https://github.com/ClickHouse/ClickHouse/issues/41746). [#41823](https://github.com/ClickHouse/ClickHouse/pull/41823) ([alesapin](https://github.com/alesapin)).
+* Old versions of the Replicated database engine don't have a special marker in [Zoo]Keeper, so we need to check only whether the node contains some obscure data instead of the special marker. [#41875](https://github.com/ClickHouse/ClickHouse/pull/41875) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix a possible exception in the fs cache. [#41884](https://github.com/ClickHouse/ClickHouse/pull/41884) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix use_environment_credentials for the s3 table function. [#41970](https://github.com/ClickHouse/ClickHouse/pull/41970) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fixed a "Directory already exists and is not empty" error on detaching a broken part that might prevent a `ReplicatedMergeTree` table from starting replication. Fixes [#40957](https://github.com/ClickHouse/ClickHouse/issues/40957). [#41981](https://github.com/ClickHouse/ClickHouse/pull/41981) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* toDateTime64() now returns the same output with negative integer and float arguments.
[#42025](https://github.com/ClickHouse/ClickHouse/pull/42025) ([Robert Schulze](https://github.com/rschu1ze)).
+* Fix write into AzureBlobStorage. Partially closes [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42034](https://github.com/ClickHouse/ClickHouse/pull/42034) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix the bzip2 decoding issue for specific bzip2 files. [#42046](https://github.com/ClickHouse/ClickHouse/pull/42046) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix SQL function "toLastDayOfMonth()" with setting "enable_extended_results_for_datetime_functions = 1" at the beginning of the extended range (January 1900). Fix SQL function "toRelativeWeekNum()" with the same setting at the end of the extended range (December 2299). Improve the performance of SQL functions "toISOYear()", "toFirstDayNumOfISOYearIndex()" and "toYearWeekOfNewyearMode()" by avoiding unnecessary index arithmetic. [#42084](https://github.com/ClickHouse/ClickHouse/pull/42084) ([Roman Vasin](https://github.com/rvasin)).
+* The maximum size of fetches for each table was accidentally set to 8 while the pool size could be bigger. Now the maximum size of fetches for a table is equal to the pool size. [#42090](https://github.com/ClickHouse/ClickHouse/pull/42090) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* A table might be shut down and a dictionary might be detached before checking whether it can be dropped without breaking dependencies between tables; it's fixed. Fixes [#41982](https://github.com/ClickHouse/ClickHouse/issues/41982). [#42106](https://github.com/ClickHouse/ClickHouse/pull/42106) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix bad inefficiency of `remote_filesystem_read_method=read` with the filesystem cache. Closes [#42125](https://github.com/ClickHouse/ClickHouse/issues/42125). [#42129](https://github.com/ClickHouse/ClickHouse/pull/42129) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix a possible timeout exception for distributed queries with use_hedged_requests=0. [#42130](https://github.com/ClickHouse/ClickHouse/pull/42130) ([Azat Khuzhin](https://github.com/azat)).
+* Fixed a minor bug inside the function `runningDifference` when used with the `Date32` type. Previously `Date` was used, which could cause logical errors like `Bad cast from type DB::ColumnVector to DB::ColumnVector`. [#42143](https://github.com/ClickHouse/ClickHouse/pull/42143) ([Alfred Xu](https://github.com/sperlingxx)).
+* Fix reusing of files > 4GB from a base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
+* DISTINCT in order failed with LOGICAL_ERROR if the first column in the sorting key contained a function; it's fixed (see the sketch below). [#42186](https://github.com/ClickHouse/ClickHouse/pull/42186) ([Igor Nikonov](https://github.com/devcrafter)).
+* Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix read from buffer with read in order desc. [#42236](https://github.com/ClickHouse/ClickHouse/pull/42236) ([Duc Canh Le](https://github.com/canhld94)).
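+
+A minimal sketch of the DISTINCT-in-order case mentioned a few entries above. The table and column names are hypothetical, and the sketch assumes the optimization is controlled by the `optimize_distinct_in_order` setting:
+
+```sql
+CREATE TABLE events
+(
+    ts DateTime,
+    id UInt64
+)
+ENGINE = MergeTree
+-- The first sorting-key column is a function of another column, the case this fix is about.
+ORDER BY (toDate(ts), id);
+
+INSERT INTO events SELECT now() - number, number % 5 FROM numbers(1000);
+
+SELECT DISTINCT toDate(ts), id FROM events SETTINGS optimize_distinct_in_order = 1;
+```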
+* Fix a bug that prevented ClickHouse from starting when the background_pool_size setting is set in the default profile but background_merges_mutations_concurrency_ratio is not. [#42315](https://github.com/ClickHouse/ClickHouse/pull/42315) ([nvartolomei](https://github.com/nvartolomei)).
+* `ALTER UPDATE` of an attached part (with columns different from the table schema) could create invalid `columns.txt` metadata on disk. Reading from such a part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* The setting `additional_table_filters` was not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix a data race in query finish/cancel. This closes [#42346](https://github.com/ClickHouse/ClickHouse/issues/42346). [#42362](https://github.com/ClickHouse/ClickHouse/pull/42362) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* This reverts [#40217](https://github.com/ClickHouse/ClickHouse/issues/40217) which introduced a regression in date/time functions. [#42367](https://github.com/ClickHouse/ClickHouse/pull/42367) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix assert cast in join on falsy condition. Closes [#42380](https://github.com/ClickHouse/ClickHouse/issues/42380). [#42407](https://github.com/ClickHouse/ClickHouse/pull/42407) ([Vladimir C](https://github.com/vdimir)).
+* Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* `AggregateFunctionQuantile` now works correctly with UInt128 columns. Previously, the quantile state interpreted `UInt128` columns as `Int128`, which could have led to incorrect results. [#42473](https://github.com/ClickHouse/ClickHouse/pull/42473) ([Antonio Andelic](https://github.com/antonio2368)).
+* Fix bad_assert during INSERT into Annoy indexes over non-Float32 columns. [#42485](https://github.com/ClickHouse/ClickHouse/pull/42485) ([Robert Schulze](https://github.com/rschu1ze)).
+* This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix the function `arrayElement` with type `Map` with `Nullable` values and a `Nullable` index. [#42623](https://github.com/ClickHouse/ClickHouse/pull/42623) ([Anton Popov](https://github.com/CurtizJ)).
+
+#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
+
+* Fix unexpected table loading error when the partition key contains alias function names during server upgrade. [#36379](https://github.com/ClickHouse/ClickHouse/pull/36379) ([Amos Bird](https://github.com/amosbird)).
+
+#### Build Improvement
+
+* Fixed SipHash Endian issue for the s390x platform. [#41372](https://github.com/ClickHouse/ClickHouse/pull/41372) ([Harry Lee](https://github.com/HarryLeeIBM)).
+* Enable lib base64 for the ppc64le platform. [#41974](https://github.com/ClickHouse/ClickHouse/pull/41974) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
+* Fixed Endian issue in T64 compression codec on s390x.
[#42314](https://github.com/ClickHouse/ClickHouse/pull/42314) ([Harry Lee](https://github.com/HarryLeeIBM)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "Disable parallel s3 multipart upload for part moves."'. [#41681](https://github.com/ClickHouse/ClickHouse/pull/41681) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "Attempt to fix abort from parallel parsing"'. [#42545](https://github.com/ClickHouse/ClickHouse/pull/42545) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* NO CL ENTRY: 'Revert "Low cardinality cases moved to the function for its corresponding type"'. [#42633](https://github.com/ClickHouse/ClickHouse/pull/42633) ([Anton Popov](https://github.com/CurtizJ)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Test for ignore function in PARTITION KEY [#39875](https://github.com/ClickHouse/ClickHouse/pull/39875) ([UnamedRus](https://github.com/UnamedRus)). +* Add fuzzer for table definitions [#40096](https://github.com/ClickHouse/ClickHouse/pull/40096) ([Anton Popov](https://github.com/CurtizJ)). +* Add missing tests for legacy geobase [#40684](https://github.com/ClickHouse/ClickHouse/pull/40684) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove obsolete comment from the config.xml [#41518](https://github.com/ClickHouse/ClickHouse/pull/41518) ([filimonov](https://github.com/filimonov)). +* Resurrect parallel distributed insert select with s3Cluster [#41535](https://github.com/ClickHouse/ClickHouse/pull/41535) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Update runners to a recent version to install on 22.04 [#41556](https://github.com/ClickHouse/ClickHouse/pull/41556) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Refactor wiping sensitive information from logs. [#41562](https://github.com/ClickHouse/ClickHouse/pull/41562) ([Vitaly Baranov](https://github.com/vitlibar)). +* Better S3 logs [#41587](https://github.com/ClickHouse/ClickHouse/pull/41587) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix typos in JSON formats after [#40910](https://github.com/ClickHouse/ClickHouse/issues/40910) [#41614](https://github.com/ClickHouse/ClickHouse/pull/41614) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix drop for KeeperMap [#41616](https://github.com/ClickHouse/ClickHouse/pull/41616) ([Antonio Andelic](https://github.com/antonio2368)). +* increase default max_suspicious_broken_parts to 100 [#41619](https://github.com/ClickHouse/ClickHouse/pull/41619) ([Denny Crane](https://github.com/den-crane)). +* Release AWS SDK log level + replace one exception [#41649](https://github.com/ClickHouse/ClickHouse/pull/41649) ([alesapin](https://github.com/alesapin)). +* Fix a destruction order for views ThreadStatus [#41650](https://github.com/ClickHouse/ClickHouse/pull/41650) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Add very explicit logging on disk choice for fetch [#41653](https://github.com/ClickHouse/ClickHouse/pull/41653) ([alesapin](https://github.com/alesapin)). +* Fix race between ~BackgroundSchedulePool and ~DNSCacheUpdater [#41654](https://github.com/ClickHouse/ClickHouse/pull/41654) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add changelog for 22.9 [#41668](https://github.com/ClickHouse/ClickHouse/pull/41668) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update version after release [#41670](https://github.com/ClickHouse/ClickHouse/pull/41670) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
+* Fix error message [#41680](https://github.com/ClickHouse/ClickHouse/pull/41680) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add test for setting output_format_json_validate_utf8 [#41691](https://github.com/ClickHouse/ClickHouse/pull/41691) ([Kruglov Pavel](https://github.com/Avogar)). +* Resolve findings from clang-tidy [#41702](https://github.com/ClickHouse/ClickHouse/pull/41702) ([ltrk2](https://github.com/ltrk2)). +* Ignore Keeper errors from ReplicatedMergeTreeAttachThread in stress tests [#41717](https://github.com/ClickHouse/ClickHouse/pull/41717) ([Antonio Andelic](https://github.com/antonio2368)). +* Collect logs in Stress test using clickhouse-local [#41721](https://github.com/ClickHouse/ClickHouse/pull/41721) ([Antonio Andelic](https://github.com/antonio2368)). +* Disable flaky `test_merge_tree_azure_blob_storage` [#41722](https://github.com/ClickHouse/ClickHouse/pull/41722) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Update version_date.tsv and changelogs after v22.9.2.7-stable [#41724](https://github.com/ClickHouse/ClickHouse/pull/41724) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Fix part removal retries [#41728](https://github.com/ClickHouse/ClickHouse/pull/41728) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Try fix azure tests [#41731](https://github.com/ClickHouse/ClickHouse/pull/41731) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix test build [#41732](https://github.com/ClickHouse/ClickHouse/pull/41732) ([Robert Schulze](https://github.com/rschu1ze)). +* Change logging levels in cache [#41733](https://github.com/ClickHouse/ClickHouse/pull/41733) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Revert of "Revert the revert of "ColumnVector: optimize filter with AVX512 VBMI2 compress store" [#40033](https://github.com/ClickHouse/ClickHouse/issues/40033)" [#41752](https://github.com/ClickHouse/ClickHouse/pull/41752) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix SET query parameters formatting [#41755](https://github.com/ClickHouse/ClickHouse/pull/41755) ([Nikolay Degterinsky](https://github.com/evillique)). +* Support to run testcases on macOS [#41760](https://github.com/ClickHouse/ClickHouse/pull/41760) ([Frank Chen](https://github.com/FrankChen021)). +* Bump LLVM from 12 to 13 [#41762](https://github.com/ClickHouse/ClickHouse/pull/41762) ([Robert Schulze](https://github.com/rschu1ze)). +* ColumnVector: re-enable AVX512_VBMI/AVX512_VBMI2 optimized filter and index [#41765](https://github.com/ClickHouse/ClickHouse/pull/41765) ([Guo Wangyang](https://github.com/guowangy)). +* Update 02354_annoy.sql [#41767](https://github.com/ClickHouse/ClickHouse/pull/41767) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix the typo preventing building latest images [#41769](https://github.com/ClickHouse/ClickHouse/pull/41769) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Make automatic download script choose between ARMv8.0 or ARMv8.2 builds [#41775](https://github.com/ClickHouse/ClickHouse/pull/41775) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix tests for docker-ci [#41777](https://github.com/ClickHouse/ClickHouse/pull/41777) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Possible fix for KeeperMap drop [#41784](https://github.com/ClickHouse/ClickHouse/pull/41784) ([Antonio Andelic](https://github.com/antonio2368)). 
+* Fix drop of completely dropped table [#41789](https://github.com/ClickHouse/ClickHouse/pull/41789) ([alesapin](https://github.com/alesapin)). +* Log git hash during startup [#41790](https://github.com/ClickHouse/ClickHouse/pull/41790) ([Robert Schulze](https://github.com/rschu1ze)). +* Revert "ColumnVector: optimize UInt8 index with AVX512VBMI ([#41247](https://github.com/ClickHouse/ClickHouse/issues/41247))" [#41797](https://github.com/ClickHouse/ClickHouse/pull/41797) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Small fix in dashboard [#41798](https://github.com/ClickHouse/ClickHouse/pull/41798) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Keep the most important log in stress tests [#41821](https://github.com/ClickHouse/ClickHouse/pull/41821) ([alesapin](https://github.com/alesapin)). +* Use copy for some operations instead of hardlinks [#41832](https://github.com/ClickHouse/ClickHouse/pull/41832) ([alesapin](https://github.com/alesapin)). +* Remove unused variable in registerStorageMergeTree.cpp [#41839](https://github.com/ClickHouse/ClickHouse/pull/41839) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix Jepsen [#41845](https://github.com/ClickHouse/ClickHouse/pull/41845) ([Antonio Andelic](https://github.com/antonio2368)). +* Increase `request_timeout_ms` for s3 tests in CI [#41853](https://github.com/ClickHouse/ClickHouse/pull/41853) ([Kseniia Sumarokova](https://github.com/kssenii)). +* tests: fix debug symbols (and possible crashes) for backward compatiblity check [#41854](https://github.com/ClickHouse/ClickHouse/pull/41854) ([Azat Khuzhin](https://github.com/azat)). +* Remove two redundant lines [#41856](https://github.com/ClickHouse/ClickHouse/pull/41856) ([alesapin](https://github.com/alesapin)). +* Infer Object type only when allow_experimental_object_type is enabled [#41858](https://github.com/ClickHouse/ClickHouse/pull/41858) ([Kruglov Pavel](https://github.com/Avogar)). +* Add default UNION/EXCEPT/INTERSECT to the echo query text [#41862](https://github.com/ClickHouse/ClickHouse/pull/41862) ([Nikolay Degterinsky](https://github.com/evillique)). +* Consolidate CMake-generated config headers [#41873](https://github.com/ClickHouse/ClickHouse/pull/41873) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix 02267_file_globs_schema_inference.sql flakiness [#41877](https://github.com/ClickHouse/ClickHouse/pull/41877) ([Kruglov Pavel](https://github.com/Avogar)). +* Docs: Remove obsolete modelEvaluate() mention [#41878](https://github.com/ClickHouse/ClickHouse/pull/41878) ([Robert Schulze](https://github.com/rschu1ze)). +* Better exception message for duplicate column names in schema inference [#41885](https://github.com/ClickHouse/ClickHouse/pull/41885) ([Kruglov Pavel](https://github.com/Avogar)). +* Docs: Reference external papers as DOIs [#41886](https://github.com/ClickHouse/ClickHouse/pull/41886) ([Robert Schulze](https://github.com/rschu1ze)). +* Make LDAPR a prerequisite for downloading the ARMv8.2 build [#41897](https://github.com/ClickHouse/ClickHouse/pull/41897) ([Robert Schulze](https://github.com/rschu1ze)). +* Another sync replicas in test_recovery_replica [#41898](https://github.com/ClickHouse/ClickHouse/pull/41898) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* remove unused code [#41921](https://github.com/ClickHouse/ClickHouse/pull/41921) ([flynn](https://github.com/ucasfl)). 
+* Move all queries for MV creation to the end of queue during recovering [#41932](https://github.com/ClickHouse/ClickHouse/pull/41932) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix broken test_disks_app_func [#41933](https://github.com/ClickHouse/ClickHouse/pull/41933) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Temporarily disable ThreadFuzzer with TSan [#41943](https://github.com/ClickHouse/ClickHouse/pull/41943) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Enable some disabled S3 tests [#41945](https://github.com/ClickHouse/ClickHouse/pull/41945) ([alesapin](https://github.com/alesapin)). +* QOL log improvements [#41947](https://github.com/ClickHouse/ClickHouse/pull/41947) ([Raúl Marín](https://github.com/Algunenano)). +* Fix non-deterministic test results [#41948](https://github.com/ClickHouse/ClickHouse/pull/41948) ([Robert Schulze](https://github.com/rschu1ze)). +* Earlier throw exception in PullingAsyncPipelineExecutor. [#41949](https://github.com/ClickHouse/ClickHouse/pull/41949) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix linker error [#41950](https://github.com/ClickHouse/ClickHouse/pull/41950) ([ltrk2](https://github.com/ltrk2)). +* Bump LLVM from 13 to 14 [#41951](https://github.com/ClickHouse/ClickHouse/pull/41951) ([Robert Schulze](https://github.com/rschu1ze)). +* Update version_date.tsv and changelogs after v22.3.13.80-lts [#41953](https://github.com/ClickHouse/ClickHouse/pull/41953) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Update version_date.tsv and changelogs after v22.7.6.74-stable [#41954](https://github.com/ClickHouse/ClickHouse/pull/41954) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Update version_date.tsv and changelogs after v22.8.6.71-lts [#41955](https://github.com/ClickHouse/ClickHouse/pull/41955) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Update version_date.tsv and changelogs after v22.9.3.18-stable [#41956](https://github.com/ClickHouse/ClickHouse/pull/41956) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Rename max_temp_data_on_disk -> max_temporary_data_on_disk [#41984](https://github.com/ClickHouse/ClickHouse/pull/41984) ([Vladimir C](https://github.com/vdimir)). +* Add more checkStackSize calls [#41991](https://github.com/ClickHouse/ClickHouse/pull/41991) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix test 02403_big_http_chunk_size [#41996](https://github.com/ClickHouse/ClickHouse/pull/41996) ([Vitaly Baranov](https://github.com/vitlibar)). +* More sane behavior of part number thresholds override in query level settings [#42001](https://github.com/ClickHouse/ClickHouse/pull/42001) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove useless code [#42004](https://github.com/ClickHouse/ClickHouse/pull/42004) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Refactoring: Uninline some error handling methods [#42010](https://github.com/ClickHouse/ClickHouse/pull/42010) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix warning that ENABLE_REPLXX is unused [#42013](https://github.com/ClickHouse/ClickHouse/pull/42013) ([Robert Schulze](https://github.com/rschu1ze)). 
+* Drop leftovers of libexecinfo [#42014](https://github.com/ClickHouse/ClickHouse/pull/42014) ([Robert Schulze](https://github.com/rschu1ze)). +* More detailed exception message [#42022](https://github.com/ClickHouse/ClickHouse/pull/42022) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Build against an LLVM version which has clang[-extra-tools], lldb and lld removed [#42023](https://github.com/ClickHouse/ClickHouse/pull/42023) ([Robert Schulze](https://github.com/rschu1ze)). +* Add log message and lower the retry timeout in MergeTreeRestartingThread [#42026](https://github.com/ClickHouse/ClickHouse/pull/42026) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Update amqp-cpp [#42031](https://github.com/ClickHouse/ClickHouse/pull/42031) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix No such key during table drop [#42036](https://github.com/ClickHouse/ClickHouse/pull/42036) ([alesapin](https://github.com/alesapin)). +* Temporarily disable too aggressive tests [#42050](https://github.com/ClickHouse/ClickHouse/pull/42050) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix style check [#42055](https://github.com/ClickHouse/ClickHouse/pull/42055) ([Anton Popov](https://github.com/CurtizJ)). +* Function name normalization fix functions header [#42063](https://github.com/ClickHouse/ClickHouse/pull/42063) ([Maksim Kita](https://github.com/kitaisreal)). +* remove unused virtual keyword [#42065](https://github.com/ClickHouse/ClickHouse/pull/42065) ([flynn](https://github.com/ucasfl)). +* Fix crash in `SummingMergeTree` with `LowCardinality` [#42066](https://github.com/ClickHouse/ClickHouse/pull/42066) ([Anton Popov](https://github.com/CurtizJ)). +* Fix drop of completely dropped table [#42067](https://github.com/ClickHouse/ClickHouse/pull/42067) ([alesapin](https://github.com/alesapin)). +* Fix assertion in bloom filter index [#42072](https://github.com/ClickHouse/ClickHouse/pull/42072) ([Anton Popov](https://github.com/CurtizJ)). +* Ignore core.autocrlf for tests references [#42076](https://github.com/ClickHouse/ClickHouse/pull/42076) ([Azat Khuzhin](https://github.com/azat)). +* Fix progress for INSERT SELECT [#42078](https://github.com/ClickHouse/ClickHouse/pull/42078) ([Azat Khuzhin](https://github.com/azat)). +* Avoid adding extra new line after using fuzzy history search [#42080](https://github.com/ClickHouse/ClickHouse/pull/42080) ([Azat Khuzhin](https://github.com/azat)). +* Add `at` to runner AMI, bump gh runner version [#42082](https://github.com/ClickHouse/ClickHouse/pull/42082) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Use send_metadata instead of send_object_metadata [#42085](https://github.com/ClickHouse/ClickHouse/pull/42085) ([Elena Torró](https://github.com/elenatorro)). +* Docs: Preparations to remove misc statements page [#42086](https://github.com/ClickHouse/ClickHouse/pull/42086) ([Robert Schulze](https://github.com/rschu1ze)). +* Followup for TemporaryDataOnDisk [#42103](https://github.com/ClickHouse/ClickHouse/pull/42103) ([Vladimir C](https://github.com/vdimir)). +* Disable 02122_join_group_by_timeout for debug [#42104](https://github.com/ClickHouse/ClickHouse/pull/42104) ([Vladimir C](https://github.com/vdimir)). +* Update version_date.tsv and changelogs after v22.6.9.11-stable [#42114](https://github.com/ClickHouse/ClickHouse/pull/42114) ([robot-clickhouse](https://github.com/robot-clickhouse)). 
+* JIT compilation migration to LLVM 15 [#42123](https://github.com/ClickHouse/ClickHouse/pull/42123) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix build without TSA [#42128](https://github.com/ClickHouse/ClickHouse/pull/42128) ([Raúl Marín](https://github.com/Algunenano)). +* Update codespell-ignore-words.list [#42132](https://github.com/ClickHouse/ClickHouse/pull/42132) ([Dan Roscigno](https://github.com/DanRoscigno)). +* Add null pointer checks [#42135](https://github.com/ClickHouse/ClickHouse/pull/42135) ([ltrk2](https://github.com/ltrk2)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Follow up for [#42129](https://github.com/ClickHouse/ClickHouse/issues/42129) [#42144](https://github.com/ClickHouse/ClickHouse/pull/42144) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix checking parent for old-format parts [#42147](https://github.com/ClickHouse/ClickHouse/pull/42147) ([alesapin](https://github.com/alesapin)). +* Revert "Resurrect parallel distributed insert select with s3Cluster [#42150](https://github.com/ClickHouse/ClickHouse/pull/42150) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Docs: Add "TABLE" to CHECK/DESCRIBE statements in sidebar [#42152](https://github.com/ClickHouse/ClickHouse/pull/42152) ([Robert Schulze](https://github.com/rschu1ze)). +* Add logging during merge tree startup [#42163](https://github.com/ClickHouse/ClickHouse/pull/42163) ([alesapin](https://github.com/alesapin)). +* Abort instead of `__builtin_unreachable` in debug builds [#42168](https://github.com/ClickHouse/ClickHouse/pull/42168) ([Alexander Tokmakov](https://github.com/tavplubix)). +* [RFC] Enable -Wshorten-64-to-32 [#42190](https://github.com/ClickHouse/ClickHouse/pull/42190) ([Azat Khuzhin](https://github.com/azat)). +* Fix dialect setting description [#42196](https://github.com/ClickHouse/ClickHouse/pull/42196) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Add a test for #658 [#42197](https://github.com/ClickHouse/ClickHouse/pull/42197) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* use alias for MergeMutateSelectedEntry share ptr [#42211](https://github.com/ClickHouse/ClickHouse/pull/42211) ([Tian Xinhui](https://github.com/xinhuitian)). +* Fix LLVM build [#42216](https://github.com/ClickHouse/ClickHouse/pull/42216) ([Raúl Marín](https://github.com/Algunenano)). +* Exclude comments from style-check defined extern [#42217](https://github.com/ClickHouse/ClickHouse/pull/42217) ([Vladimir C](https://github.com/vdimir)). +* Update safeExit.cpp [#42220](https://github.com/ClickHouse/ClickHouse/pull/42220) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Disable concurrent parts removal [#42222](https://github.com/ClickHouse/ClickHouse/pull/42222) ([alesapin](https://github.com/alesapin)). +* Fail fast on empty URL in HDFS [#42223](https://github.com/ClickHouse/ClickHouse/pull/42223) ([Ilya Yatsishin](https://github.com/qoega)). +* Add a test for [#2389](https://github.com/ClickHouse/ClickHouse/issues/2389) [#42235](https://github.com/ClickHouse/ClickHouse/pull/42235) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Use MultiRead where possible [#42243](https://github.com/ClickHouse/ClickHouse/pull/42243) ([Antonio Andelic](https://github.com/antonio2368)). 
+* Minor cleanups of LLVM integration [#42249](https://github.com/ClickHouse/ClickHouse/pull/42249) ([Robert Schulze](https://github.com/rschu1ze)). +* remove useless code [#42253](https://github.com/ClickHouse/ClickHouse/pull/42253) ([flynn](https://github.com/ucasfl)). +* Early return of corner cases in selectPartsToMutate function [#42254](https://github.com/ClickHouse/ClickHouse/pull/42254) ([Tian Xinhui](https://github.com/xinhuitian)). +* Refactor the implementation of user-defined functions [#42263](https://github.com/ClickHouse/ClickHouse/pull/42263) ([Vitaly Baranov](https://github.com/vitlibar)). +* assert unused value in test_replicated_merge_tree_compatibility [#42266](https://github.com/ClickHouse/ClickHouse/pull/42266) ([nvartolomei](https://github.com/nvartolomei)). +* Fix Date Interval add/minus over DataTypeDate32 [#42279](https://github.com/ClickHouse/ClickHouse/pull/42279) ([Alfred Xu](https://github.com/sperlingxx)). +* Fix log-level in `clickhouse-disks` [#42302](https://github.com/ClickHouse/ClickHouse/pull/42302) ([Nikolay Degterinsky](https://github.com/evillique)). +* Remove forgotten debug logging [#42313](https://github.com/ClickHouse/ClickHouse/pull/42313) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix another trash in zero-copy replication [#42317](https://github.com/ClickHouse/ClickHouse/pull/42317) ([alesapin](https://github.com/alesapin)). +* go update for diagnostics tool [#42325](https://github.com/ClickHouse/ClickHouse/pull/42325) ([Dale McDiarmid](https://github.com/gingerwizard)). +* Better logging for asynchronous inserts [#42345](https://github.com/ClickHouse/ClickHouse/pull/42345) ([Anton Popov](https://github.com/CurtizJ)). +* Use nfpm packager for archlinux packages [#42349](https://github.com/ClickHouse/ClickHouse/pull/42349) ([Azat Khuzhin](https://github.com/azat)). +* Bump llvm/clang to 15.0.2 [#42351](https://github.com/ClickHouse/ClickHouse/pull/42351) ([Azat Khuzhin](https://github.com/azat)). +* Make getResource() independent from the order of the sections [#42353](https://github.com/ClickHouse/ClickHouse/pull/42353) ([Azat Khuzhin](https://github.com/azat)). +* Smaller threshold for multipart upload part size increase [#42392](https://github.com/ClickHouse/ClickHouse/pull/42392) ([alesapin](https://github.com/alesapin)). +* Better error message for unsupported delimiters in custom formats [#42406](https://github.com/ClickHouse/ClickHouse/pull/42406) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix formatting of `ALTER FREEZE` [#42409](https://github.com/ClickHouse/ClickHouse/pull/42409) ([Anton Popov](https://github.com/CurtizJ)). +* Replace table name in ast fuzzer more often [#42413](https://github.com/ClickHouse/ClickHouse/pull/42413) ([Anton Popov](https://github.com/CurtizJ)). +* Add *-15 tools to cmake.tools for GCC build [#42430](https://github.com/ClickHouse/ClickHouse/pull/42430) ([Ilya Yatsishin](https://github.com/qoega)). +* Deactivate tasks in ReplicatedMergeTree until startup [#42441](https://github.com/ClickHouse/ClickHouse/pull/42441) ([alesapin](https://github.com/alesapin)). +* Revert "Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787)" [#42442](https://github.com/ClickHouse/ClickHouse/pull/42442) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Update woboq_codebrowser location [#42448](https://github.com/ClickHouse/ClickHouse/pull/42448) ([Azat Khuzhin](https://github.com/azat)). 
+* add mdx and jsx to list of doc files [#42454](https://github.com/ClickHouse/ClickHouse/pull/42454) ([Dan Roscigno](https://github.com/DanRoscigno)). +* Remove code browser docs [#42455](https://github.com/ClickHouse/ClickHouse/pull/42455) ([Dan Roscigno](https://github.com/DanRoscigno)). +* Better workaround for emitting .debug_aranges section [#42457](https://github.com/ClickHouse/ClickHouse/pull/42457) ([Azat Khuzhin](https://github.com/azat)). +* Fix flaky test [#42459](https://github.com/ClickHouse/ClickHouse/pull/42459) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix UBSan report in Julian Day functions [#42464](https://github.com/ClickHouse/ClickHouse/pull/42464) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* rename filesystem_query_cache [#42472](https://github.com/ClickHouse/ClickHouse/pull/42472) ([Han Shukai](https://github.com/KinderRiven)). +* Add convenience typedefs for Date/Date32/DateTime/DateTime64 columns [#42476](https://github.com/ClickHouse/ClickHouse/pull/42476) ([Robert Schulze](https://github.com/rschu1ze)). +* Add error "Destination table is myself" to exception list in BC check [#42479](https://github.com/ClickHouse/ClickHouse/pull/42479) ([Kruglov Pavel](https://github.com/Avogar)). +* Get current clickhouse version without sending query in BC check [#42483](https://github.com/ClickHouse/ClickHouse/pull/42483) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix logical error from welchTTest [#42487](https://github.com/ClickHouse/ClickHouse/pull/42487) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Attempt to fix abort from parallel parsing [#42496](https://github.com/ClickHouse/ClickHouse/pull/42496) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Increase threshold for using physical cores for `max_threads` [#42503](https://github.com/ClickHouse/ClickHouse/pull/42503) ([Nikita Taranov](https://github.com/nickitat)). +* Add a test for [#16827](https://github.com/ClickHouse/ClickHouse/issues/16827) [#42511](https://github.com/ClickHouse/ClickHouse/pull/42511) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#13653](https://github.com/ClickHouse/ClickHouse/issues/13653) [#42512](https://github.com/ClickHouse/ClickHouse/pull/42512) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix aliases [#42514](https://github.com/ClickHouse/ClickHouse/pull/42514) ([Nikolay Degterinsky](https://github.com/evillique)). +* tests: fix 00705_drop_create_merge_tree flakiness [#42522](https://github.com/ClickHouse/ClickHouse/pull/42522) ([Azat Khuzhin](https://github.com/azat)). +* Fix sanitizer reports in integration tests [#42529](https://github.com/ClickHouse/ClickHouse/pull/42529) ([Azat Khuzhin](https://github.com/azat)). +* Fix `KeeperTCPHandler` data race [#42532](https://github.com/ClickHouse/ClickHouse/pull/42532) ([Antonio Andelic](https://github.com/antonio2368)). +* Disable `test_storage_nats`, because it's permanently broken [#42535](https://github.com/ClickHouse/ClickHouse/pull/42535) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Better logs in clickhouse-disks [#42549](https://github.com/ClickHouse/ClickHouse/pull/42549) ([Nikolay Degterinsky](https://github.com/evillique)). +* add lib_fuzzer and lib_fuzzer_no_main to llvm-project build [#42550](https://github.com/ClickHouse/ClickHouse/pull/42550) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). 
+* Some polishing: replicated merge tree [#42560](https://github.com/ClickHouse/ClickHouse/pull/42560) ([Igor Nikonov](https://github.com/devcrafter)). +* Temporarily disable flaky `test_replicated_merge_tree_hdfs_zero_copy` [#42563](https://github.com/ClickHouse/ClickHouse/pull/42563) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Adapt internal data structures to 512-bit era [#42564](https://github.com/ClickHouse/ClickHouse/pull/42564) ([Nikita Taranov](https://github.com/nickitat)). +* Fix strange code in date monotonicity [#42574](https://github.com/ClickHouse/ClickHouse/pull/42574) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Clear thread::id when ThreadFromGlobalPool exits. [#42577](https://github.com/ClickHouse/ClickHouse/pull/42577) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* ci/stress: fix memory limits overrides for hung check [#42585](https://github.com/ClickHouse/ClickHouse/pull/42585) ([Azat Khuzhin](https://github.com/azat)). +* tests: avoid model overlap for obfuscator [#42586](https://github.com/ClickHouse/ClickHouse/pull/42586) ([Azat Khuzhin](https://github.com/azat)). +* Fix possible segfault in expression parser [#42598](https://github.com/ClickHouse/ClickHouse/pull/42598) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix incorrect trace log line on dict reload [#42609](https://github.com/ClickHouse/ClickHouse/pull/42609) ([filimonov](https://github.com/filimonov)). +* Fix flaky 02458_datediff_date32 test [#42611](https://github.com/ClickHouse/ClickHouse/pull/42611) ([Roman Vasin](https://github.com/rvasin)). +* Revert revert 41268 disable s3 parallel write for part moves to disk s3 [#42617](https://github.com/ClickHouse/ClickHouse/pull/42617) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Try to fix data race on zookeeper vs DDLWorker at server shutdown. [#42620](https://github.com/ClickHouse/ClickHouse/pull/42620) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Add a template for installation issues [#42626](https://github.com/ClickHouse/ClickHouse/pull/42626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix typo in cmake code related to fuzzing [#42627](https://github.com/ClickHouse/ClickHouse/pull/42627) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix build [#42635](https://github.com/ClickHouse/ClickHouse/pull/42635) ([Anton Popov](https://github.com/CurtizJ)). +* Add .rgignore for test data [#42639](https://github.com/ClickHouse/ClickHouse/pull/42639) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 65ec5ddec01..7bbd8547506 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v22.10.1.1875-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 From 7973b4e5e50d31e0529574a3ce34a72b6844c147 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Wed, 26 Oct 2022 11:09:01 +0200 Subject: [PATCH 062/112] Fix error handling --- tests/ci/clickhouse_helper.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py index a81334860d1..c82d9da05e9 100644 --- a/tests/ci/clickhouse_helper.py +++ b/tests/ci/clickhouse_helper.py @@ -37,12 +37,8 @@ class ClickHouseHelper: url, params=params, data=json_str, headers=auth ) except Exception as e: - logging.warning( - "Received exception while sending data to %s on %s attempt: %s", - url, - i, - e, - ) + error = f"Received exception while sending data to {url} on {i} attempt: {e}" + logging.warning(error) continue logging.info("Response content '%s'", response.content) From a8f3b39dcc23ef389cf68cf7c5cfccbe98c3c6ee Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Wed, 26 Oct 2022 12:07:42 +0200 Subject: [PATCH 063/112] Fix execution of version_helper.py to use git tweaks --- tests/ci/version_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py index 966858c0747..162bab6a50a 100755 --- a/tests/ci/version_helper.py +++ b/tests/ci/version_helper.py @@ -344,7 +344,7 @@ def main(): update_contributors() return - version = get_version_from_repo(args.version_path) + version = get_version_from_repo(args.version_path, Git(True)) if args.update: version = version.update(args.update) From 14d0f6457b864db108b3834302f4d72a74f3d71f Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 25 Oct 2022 17:18:32 +0000 Subject: [PATCH 064/112] Add tests and doc for some url-related functions --- .../URL/cutToFirstSignificantSubdomain.cpp | 32 +++++- .../cutToFirstSignificantSubdomainCustom.cpp | 39 +++++++- src/Functions/URL/domain.cpp | 20 +++- src/Functions/URL/domainWithoutWWW.cpp | 19 +++- .../URL/firstSignificantSubdomain.cpp | 24 ++++- src/Functions/URL/port.cpp | 14 ++- src/Functions/URL/topLevelDomain.cpp | 19 +++- tests/performance/url_hits.xml | 4 + .../0_stateless/00398_url_functions.reference | 17 ++++ ...nctions.sql => 00398_url_functions.sql.j2} | 98 +++++++++---------- .../queries/0_stateless/01284_port.reference | 24 +++++ tests/queries/0_stateless/01284_port.sql | 34 ------- tests/queries/0_stateless/01284_port.sql.j2 | 39 ++++++++ .../0_stateless/01601_custom_tld.reference | 89 +++++++++++++++++ .../queries/0_stateless/01601_custom_tld.sql | 57 ----------- .../0_stateless/01601_custom_tld.sql.j2 | 61 ++++++++++++ ...new_functions_must_be_documented.reference | 18 ---- 17 files changed, 428 insertions(+), 180 deletions(-) rename tests/queries/0_stateless/{00398_url_functions.sql => 00398_url_functions.sql.j2} (69%) delete mode 100644 tests/queries/0_stateless/01284_port.sql create mode 100644 tests/queries/0_stateless/01284_port.sql.j2 delete mode 100644 tests/queries/0_stateless/01601_custom_tld.sql create mode 100644 tests/queries/0_stateless/01601_custom_tld.sql.j2 diff --git a/src/Functions/URL/cutToFirstSignificantSubdomain.cpp 
b/src/Functions/URL/cutToFirstSignificantSubdomain.cpp index 10c41b6a4c3..7bf09d1eb00 100644 --- a/src/Functions/URL/cutToFirstSignificantSubdomain.cpp +++ b/src/Functions/URL/cutToFirstSignificantSubdomain.cpp @@ -43,10 +43,34 @@ using FunctionCutToFirstSignificantSubdomainWithWWWRFC = FunctionStringToString< REGISTER_FUNCTION(CutToFirstSignificantSubdomain) { - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction( + { + R"(Returns the part of the domain that includes top-level subdomains up to the "first significant subdomain" (see documentation of the `firstSignificantSubdomain`).)", + Documentation::Examples{ + {"cutToFirstSignificantSubdomain1", "SELECT cutToFirstSignificantSubdomain('https://news.clickhouse.com.tr/')"}, + {"cutToFirstSignificantSubdomain2", "SELECT cutToFirstSignificantSubdomain('www.tr')"}, + {"cutToFirstSignificantSubdomain3", "SELECT cutToFirstSignificantSubdomain('tr')"}, + }, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Returns the part of the domain that includes top-level subdomains up to the "first significant subdomain", without stripping "www".)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `cutToFirstSignificantSubdomain` but follows stricter rules to be compatible with RFC 3986 and less performant.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `cutToFirstSignificantSubdomainWithWWW` but follows stricter rules to be compatible with RFC 3986 and less performant.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp b/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp index 521216c84a7..e81921d69ff 100644 --- a/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp +++ b/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp @@ -42,10 +42,41 @@ using FunctionCutToFirstSignificantSubdomainCustomWithWWWRFC = FunctionCutToFirs REGISTER_FUNCTION(CutToFirstSignificantSubdomainCustom) { - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction( + { + R"( +Returns the part of the domain that includes top-level subdomains up to the first significant subdomain. Accepts custom TLD list name. + +Can be useful if you need fresh TLD list or you have custom. + )", + Documentation::Examples{ + {"cutToFirstSignificantSubdomainCustom", "SELECT cutToFirstSignificantSubdomainCustom('bar.foo.there-is-no-such-domain', 'public_suffix_list');"}, + }, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"( +Returns the part of the domain that includes top-level subdomains up to the first significant subdomain without stripping `www`. +Accepts custom TLD list name from config. + +Can be useful if you need fresh TLD list or you have custom. 
+ )", + Documentation::Examples{{"cutToFirstSignificantSubdomainCustomWithWWW", "SELECT cutToFirstSignificantSubdomainCustomWithWWW('www.foo', 'public_suffix_list')"}}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `cutToFirstSignificantSubdomainCustom` but follows stricter rules according to RFC 3986.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `cutToFirstSignificantSubdomainCustomWithWWW` but follows stricter rules according to RFC 3986.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/domain.cpp b/src/Functions/URL/domain.cpp index e7fead24dc9..fce7cea4693 100644 --- a/src/Functions/URL/domain.cpp +++ b/src/Functions/URL/domain.cpp @@ -14,8 +14,24 @@ using FunctionDomainRFC = FunctionStringToString(); - factory.registerFunction(); + factory.registerFunction( + { + R"( +Extracts the hostname from a URL. + +The URL can be specified with or without a scheme. +If the argument can't be parsed as URL, the function returns an empty string. + )", + Documentation::Examples{{"domain", "SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk')"}}, + Documentation::Categories{"URL"} + }); + + factory.registerFunction( + { + R"(Similar to `domain` but follows stricter rules to be compatible with RFC 3986 and less performant.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/domainWithoutWWW.cpp b/src/Functions/URL/domainWithoutWWW.cpp index 2fa9159d7af..48401e5e6e5 100644 --- a/src/Functions/URL/domainWithoutWWW.cpp +++ b/src/Functions/URL/domainWithoutWWW.cpp @@ -14,8 +14,23 @@ using FunctionDomainWithoutWWWRFC = FunctionStringToString(); - factory.registerFunction(); + factory.registerFunction( + { + R"( +Extracts the hostname from a URL, removing the leading "www." if present. + +The URL can be specified with or without a scheme. +If the argument can't be parsed as URL, the function returns an empty string. + )", + Documentation::Examples{{"domainWithoutWWW", "SELECT domainWithoutWWW('https://www.clickhouse.com')"}}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `domainWithoutWWW` but follows stricter rules to be compatible with RFC 3986 and less performant.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/firstSignificantSubdomain.cpp b/src/Functions/URL/firstSignificantSubdomain.cpp index 902a4f43fba..62307ef816c 100644 --- a/src/Functions/URL/firstSignificantSubdomain.cpp +++ b/src/Functions/URL/firstSignificantSubdomain.cpp @@ -14,8 +14,28 @@ using FunctionFirstSignificantSubdomainRFC = FunctionStringToString(); - factory.registerFunction(); + factory.registerFunction( + { + R"( +Returns the "first significant subdomain". + +The first significant subdomain is a second-level domain if it is 'com', 'net', 'org', or 'co'. +Otherwise, it is a third-level domain. + +For example, firstSignificantSubdomain('https://news.clickhouse.com/') = 'clickhouse', firstSignificantSubdomain ('https://news.clickhouse.com.tr/') = 'clickhouse'. + +The list of "insignificant" second-level domains and other implementation details may change in the future. 
+ )", + Documentation::Examples{{"firstSignificantSubdomain", "SELECT firstSignificantSubdomain('https://news.clickhouse.com/')"}}, + Documentation::Categories{"URL"} + }); + + factory.registerFunction( + { + R"(Returns the "first significant subdomain" according to RFC 1034.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/port.cpp b/src/Functions/URL/port.cpp index f716f3e454b..52fa4077c18 100644 --- a/src/Functions/URL/port.cpp +++ b/src/Functions/URL/port.cpp @@ -139,8 +139,18 @@ struct FunctionPortRFC : public FunctionPortImpl REGISTER_FUNCTION(Port) { - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction( + { + R"(Returns the port or `default_port` if there is no port in the URL (or in case of validation error).)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `port`, but conforms to RFC 3986.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/topLevelDomain.cpp b/src/Functions/URL/topLevelDomain.cpp index f5610ed93b7..ed9b40d4b73 100644 --- a/src/Functions/URL/topLevelDomain.cpp +++ b/src/Functions/URL/topLevelDomain.cpp @@ -53,8 +53,23 @@ using FunctionTopLevelDomainRFC = FunctionStringToString(); - factory.registerFunction(); + factory.registerFunction( + { + R"( +Extracts the the top-level domain from a URL. + +Returns an empty string if the argument cannot be parsed as a URL or does not contain a top-level domain. + )", + Documentation::Examples{{"topLevelDomain", "SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk')"}}, + Documentation::Categories{"URL"} + }); + + factory.registerFunction( + { + R"(Similar to topLevelDomain, but conforms to RFC 3986.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/tests/performance/url_hits.xml b/tests/performance/url_hits.xml index 4a07c38b83f..46b39f3a6e9 100644 --- a/tests/performance/url_hits.xml +++ b/tests/performance/url_hits.xml @@ -13,10 +13,14 @@ protocol domain + domainRFC domainWithoutWWW + domainWithoutWWWRFC topLevelDomain firstSignificantSubdomain + firstSignificantSubdomainRFC cutToFirstSignificantSubdomain + cutToFirstSignificantSubdomainRFC path pathFull queryString diff --git a/tests/queries/0_stateless/00398_url_functions.reference b/tests/queries/0_stateless/00398_url_functions.reference index 2e5a97b380e..39d740e55cd 100644 --- a/tests/queries/0_stateless/00398_url_functions.reference +++ b/tests/queries/0_stateless/00398_url_functions.reference @@ -124,8 +124,25 @@ example.com example.com com +example.com +example.com +example.com +example.com +example.com +example.com +example.com +example.com +example.com +com + ====CUT TO FIRST SIGNIFICANT SUBDOMAIN WITH WWW==== +www.com +example.com +example.com +example.com +example.com + www.com example.com example.com diff --git a/tests/queries/0_stateless/00398_url_functions.sql b/tests/queries/0_stateless/00398_url_functions.sql.j2 similarity index 69% rename from tests/queries/0_stateless/00398_url_functions.sql rename to tests/queries/0_stateless/00398_url_functions.sql.j2 index cbefde7515a..dd7da2ce6ad 100644 --- a/tests/queries/0_stateless/00398_url_functions.sql +++ b/tests/queries/0_stateless/00398_url_functions.sql.j2 @@ -7,42 +7,28 @@ SELECT protocol('http://127.0.0.1:443/') AS Scheme; SELECT protocol('//127.0.0.1:443/') AS Scheme; SELECT '====HOST===='; -SELECT 
domain('http://paul@www.example.com:80/') AS Host; -SELECT domain('user:password@example.com:8080') AS Host; -SELECT domain('http://user:password@example.com:8080') AS Host; -SELECT domain('http://user:password@example.com:8080/path?query=value#fragment') AS Host; -SELECT domain('newuser:@example.com') AS Host; -SELECT domain('http://:pass@example.com') AS Host; -SELECT domain(':newpass@example.com') AS Host; -SELECT domain('http://user:pass@example@.com') AS Host; -SELECT domain('http://user:pass:example.com') AS Host; -SELECT domain('http:/paul/example/com') AS Host; -SELECT domain('http://www.example.com?q=4') AS Host; -SELECT domain('http://127.0.0.1:443/') AS Host; -SELECT domain('//www.example.com') AS Host; -SELECT domain('//paul@www.example.com') AS Host; -SELECT domain('www.example.com') as Host; -SELECT domain('example.com') as Host; -SELECT domainWithoutWWW('//paul@www.example.com') AS Host; -SELECT domainWithoutWWW('http://paul@www.example.com:80/') AS Host; -SELECT domainRFC('http://paul@www.example.com:80/') AS Host; -SELECT domainRFC('user:password@example.com:8080') AS Host; -SELECT domainRFC('http://user:password@example.com:8080') AS Host; -SELECT domainRFC('http://user:password@example.com:8080/path?query=value#fragment') AS Host; -SELECT domainRFC('newuser:@example.com') AS Host; -SELECT domainRFC('http://:pass@example.com') AS Host; -SELECT domainRFC(':newpass@example.com') AS Host; -SELECT domainRFC('http://user:pass@example@.com') AS Host; -SELECT domainRFC('http://user:pass:example.com') AS Host; -SELECT domainRFC('http:/paul/example/com') AS Host; -SELECT domainRFC('http://www.example.com?q=4') AS Host; -SELECT domainRFC('http://127.0.0.1:443/') AS Host; -SELECT domainRFC('//www.example.com') AS Host; -SELECT domainRFC('//paul@www.example.com') AS Host; -SELECT domainRFC('www.example.com') as Host; -SELECT domainRFC('example.com') as Host; -SELECT domainWithoutWWWRFC('//paul@www.example.com') AS Host; -SELECT domainWithoutWWWRFC('http://paul@www.example.com:80/') AS Host; +{% for suffix in ['', 'RFC'] -%} + +SELECT domain{{ suffix }}('http://paul@www.example.com:80/') AS Host; +SELECT domain{{ suffix }}('user:password@example.com:8080') AS Host; +SELECT domain{{ suffix }}('http://user:password@example.com:8080') AS Host; +SELECT domain{{ suffix }}('http://user:password@example.com:8080/path?query=value#fragment') AS Host; +SELECT domain{{ suffix }}('newuser:@example.com') AS Host; +SELECT domain{{ suffix }}('http://:pass@example.com') AS Host; +SELECT domain{{ suffix }}(':newpass@example.com') AS Host; +SELECT domain{{ suffix }}('http://user:pass@example@.com') AS Host; +SELECT domain{{ suffix }}('http://user:pass:example.com') AS Host; +SELECT domain{{ suffix }}('http:/paul/example/com') AS Host; +SELECT domain{{ suffix }}('http://www.example.com?q=4') AS Host; +SELECT domain{{ suffix }}('http://127.0.0.1:443/') AS Host; +SELECT domain{{ suffix }}('//www.example.com') AS Host; +SELECT domain{{ suffix }}('//paul@www.example.com') AS Host; +SELECT domain{{ suffix }}('www.example.com') as Host; +SELECT domain{{ suffix }}('example.com') as Host; +SELECT domainWithoutWWW{{ suffix }}('//paul@www.example.com') AS Host; +SELECT domainWithoutWWW{{ suffix }}('http://paul@www.example.com:80/') AS Host; + +{% endfor %} SELECT '====NETLOC===='; SELECT netloc('http://paul@www.example.com:80/') AS Netloc; @@ -121,25 +107,31 @@ SELECT decodeURLComponent(encodeURLComponent('http://paul@127.0.0.1/?query=hello SELECT 
decodeURLFormComponent(encodeURLFormComponent('http://paul@127.0.0.1/?query=hello world foo+bar#a=b')); SELECT '====CUT TO FIRST SIGNIFICANT SUBDOMAIN===='; -SELECT cutToFirstSignificantSubdomain('http://www.example.com'); -SELECT cutToFirstSignificantSubdomain('http://www.example.com:1234'); -SELECT cutToFirstSignificantSubdomain('http://www.example.com/a/b/c'); -SELECT cutToFirstSignificantSubdomain('http://www.example.com/a/b/c?a=b'); -SELECT cutToFirstSignificantSubdomain('http://www.example.com/a/b/c?a=b#d=f'); -SELECT cutToFirstSignificantSubdomain('http://paul@www.example.com/a/b/c?a=b#d=f'); -SELECT cutToFirstSignificantSubdomain('//paul@www.example.com/a/b/c?a=b#d=f'); -SELECT cutToFirstSignificantSubdomain('www.example.com'); -SELECT cutToFirstSignificantSubdomain('example.com'); -SELECT cutToFirstSignificantSubdomain('www.com'); -SELECT cutToFirstSignificantSubdomain('com'); + +{% for suffix in ['', 'RFC'] -%} +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://www.example.com'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://www.example.com:1234'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://www.example.com/a/b/c'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://www.example.com/a/b/c?a=b'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://www.example.com/a/b/c?a=b#d=f'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://paul@www.example.com/a/b/c?a=b#d=f'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('//paul@www.example.com/a/b/c?a=b#d=f'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('www.example.com'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('example.com'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('www.com'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('com'); +{% endfor %} SELECT '====CUT TO FIRST SIGNIFICANT SUBDOMAIN WITH WWW===='; -SELECT cutToFirstSignificantSubdomainWithWWW('http://com'); -SELECT cutToFirstSignificantSubdomainWithWWW('http://www.com'); -SELECT cutToFirstSignificantSubdomainWithWWW('http://www.example.com'); -SELECT cutToFirstSignificantSubdomainWithWWW('http://www.foo.example.com'); -SELECT cutToFirstSignificantSubdomainWithWWW('http://www.example.com:1'); -SELECT cutToFirstSignificantSubdomainWithWWW('http://www.example.com/'); + +{% for suffix in ['', 'RFC'] -%} +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://com'); +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://www.com'); +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://www.example.com'); +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://www.foo.example.com'); +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://www.example.com:1'); +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://www.example.com/'); +{% endfor %} SELECT '====CUT WWW===='; SELECT cutWWW('http://www.example.com'); diff --git a/tests/queries/0_stateless/01284_port.reference b/tests/queries/0_stateless/01284_port.reference index 7e776595065..5b7b58bc7e4 100644 --- a/tests/queries/0_stateless/01284_port.reference +++ b/tests/queries/0_stateless/01284_port.reference @@ -22,3 +22,27 @@ ipv6 0 host-no-dot 0 +ipv4 +0 +80 +80 +80 +80 +hostname +0 +80 +80 +80 +80 +default-port +80 +80 +ipv6 +0 +0 +0 +0 +0 +0 +host-no-dot +0 diff --git a/tests/queries/0_stateless/01284_port.sql b/tests/queries/0_stateless/01284_port.sql deleted file mode 100644 index 9c31a5d42ad..00000000000 --- a/tests/queries/0_stateless/01284_port.sql +++ /dev/null @@ 
-1,34 +0,0 @@ -select 'ipv4'; -select port('http://127.0.0.1/'); -select port('http://127.0.0.1:80'); -select port('http://127.0.0.1:80/'); -select port('//127.0.0.1:80/'); -select port('127.0.0.1:80'); -select 'hostname'; -select port('http://foobar.com/'); -select port('http://foobar.com:80'); -select port('http://foobar.com:80/'); -select port('//foobar.com:80/'); -select port('foobar.com:80'); - -select 'default-port'; -select port('http://127.0.0.1/', toUInt16(80)); -select port('http://foobar.com/', toUInt16(80)); - --- unsupported -/* ILLEGAL_TYPE_OF_ARGUMENT */ select port(toFixedString('', 1)); -- { serverError 43; } -/* ILLEGAL_TYPE_OF_ARGUMENT */ select port('', 1); -- { serverError 43; } -/* NUMBER_OF_ARGUMENTS_DOESNT_MATCH */ select port('', 1, 1); -- { serverError 42; } - --- --- Known limitations of domain() (getURLHost()) --- -select 'ipv6'; -select port('http://[2001:db8::8a2e:370:7334]/'); -select port('http://[2001:db8::8a2e:370:7334]:80'); -select port('http://[2001:db8::8a2e:370:7334]:80/'); -select port('//[2001:db8::8a2e:370:7334]:80/'); -select port('[2001:db8::8a2e:370:7334]:80'); -select port('2001:db8::8a2e:370:7334:80'); -select 'host-no-dot'; -select port('//foobar:80/'); diff --git a/tests/queries/0_stateless/01284_port.sql.j2 b/tests/queries/0_stateless/01284_port.sql.j2 new file mode 100644 index 00000000000..6f78b3b8e3b --- /dev/null +++ b/tests/queries/0_stateless/01284_port.sql.j2 @@ -0,0 +1,39 @@ +{% for suffix in ['', 'RFC'] -%} + +select 'ipv4'; +select port{{ suffix }}('http://127.0.0.1/'); +select port{{ suffix }}('http://127.0.0.1:80'); +select port{{ suffix }}('http://127.0.0.1:80/'); +select port{{ suffix }}('//127.0.0.1:80/'); +select port{{ suffix }}('127.0.0.1:80'); + +select 'hostname'; +select port{{ suffix }}('http://foobar.com/'); +select port{{ suffix }}('http://foobar.com:80'); +select port{{ suffix }}('http://foobar.com:80/'); +select port{{ suffix }}('//foobar.com:80/'); +select port{{ suffix }}('foobar.com:80'); + +select 'default-port'; +select port{{ suffix }}('http://127.0.0.1/', toUInt16(80)); +select port{{ suffix }}('http://foobar.com/', toUInt16(80)); + +-- unsupported +/* ILLEGAL_TYPE_OF_ARGUMENT */ select port(toFixedString('', 1)); -- { serverError 43; } +/* ILLEGAL_TYPE_OF_ARGUMENT */ select port{{ suffix }}('', 1); -- { serverError 43; } +/* NUMBER_OF_ARGUMENTS_DOESNT_MATCH */ select port{{ suffix }}('', 1, 1); -- { serverError 42; } + +-- +-- Known limitations of domain() (getURLHost()) +-- +select 'ipv6'; +select port{{ suffix }}('http://[2001:db8::8a2e:370:7334]/'); +select port{{ suffix }}('http://[2001:db8::8a2e:370:7334]:80'); +select port{{ suffix }}('http://[2001:db8::8a2e:370:7334]:80/'); +select port{{ suffix }}('//[2001:db8::8a2e:370:7334]:80/'); +select port{{ suffix }}('[2001:db8::8a2e:370:7334]:80'); +select port{{ suffix }}('2001:db8::8a2e:370:7334:80'); +select 'host-no-dot'; +select port{{ suffix }}('//foobar:80/'); + +{%- endfor %} diff --git a/tests/queries/0_stateless/01601_custom_tld.reference b/tests/queries/0_stateless/01601_custom_tld.reference index 981067606a2..7ef6eb7d3a2 100644 --- a/tests/queries/0_stateless/01601_custom_tld.reference +++ b/tests/queries/0_stateless/01601_custom_tld.reference @@ -89,3 +89,92 @@ select cutToFirstSignificantSubdomainCustom('city.kawasaki.jp', 'public_suffix_l city.kawasaki.jp select cutToFirstSignificantSubdomainCustom('some.city.kawasaki.jp', 'public_suffix_list'); city.kawasaki.jp +select '-- no-tld'; +-- no-tld +-- even if there is no TLD, 2-nd level by default 
anyway +-- FIXME: make this behavior optional (so that TLD for host never changed, either empty or something real) +select cutToFirstSignificantSubdomainRFC('there-is-no-such-domain'); + +select cutToFirstSignificantSubdomainRFC('foo.there-is-no-such-domain'); +foo.there-is-no-such-domain +select cutToFirstSignificantSubdomainRFC('bar.foo.there-is-no-such-domain'); +foo.there-is-no-such-domain +select cutToFirstSignificantSubdomainCustomRFC('there-is-no-such-domain', 'public_suffix_list'); + +select cutToFirstSignificantSubdomainCustomRFC('foo.there-is-no-such-domain', 'public_suffix_list'); +foo.there-is-no-such-domain +select cutToFirstSignificantSubdomainCustomRFC('bar.foo.there-is-no-such-domain', 'public_suffix_list'); +foo.there-is-no-such-domain +select firstSignificantSubdomainCustomRFC('bar.foo.there-is-no-such-domain', 'public_suffix_list'); +foo +select '-- generic'; +-- generic +select firstSignificantSubdomainCustomRFC('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel +kernel +select cutToFirstSignificantSubdomainCustomRFC('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss +kernel.biz.ss +select '-- difference'; +-- difference +-- biz.ss is not in the default TLD list, hence: +select cutToFirstSignificantSubdomainRFC('foo.kernel.biz.ss'); -- biz.ss +biz.ss +select cutToFirstSignificantSubdomainCustomRFC('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss +kernel.biz.ss +select '-- 3+level'; +-- 3+level +select cutToFirstSignificantSubdomainCustomRFC('xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at +xx.blogspot.co.at +select firstSignificantSubdomainCustomRFC('xx.blogspot.co.at', 'public_suffix_list'); -- blogspot +blogspot +select cutToFirstSignificantSubdomainCustomRFC('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at +xx.blogspot.co.at +select firstSignificantSubdomainCustomRFC('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- blogspot +blogspot +select '-- url'; +-- url +select cutToFirstSignificantSubdomainCustomRFC('http://foobar.com', 'public_suffix_list'); +foobar.com +select cutToFirstSignificantSubdomainCustomRFC('http://foobar.com/foo', 'public_suffix_list'); +foobar.com +select cutToFirstSignificantSubdomainCustomRFC('http://bar.foobar.com/foo', 'public_suffix_list'); +foobar.com +select cutToFirstSignificantSubdomainCustomRFC('http://xx.blogspot.co.at', 'public_suffix_list'); +xx.blogspot.co.at +select '-- www'; +-- www +select cutToFirstSignificantSubdomainCustomWithWWWRFC('http://www.foo', 'public_suffix_list'); +www.foo +select cutToFirstSignificantSubdomainCustomRFC('http://www.foo', 'public_suffix_list'); +foo +select '-- vector'; +-- vector +select cutToFirstSignificantSubdomainCustomRFC('http://xx.blogspot.co.at/' || toString(number), 'public_suffix_list') from numbers(1); +xx.blogspot.co.at +select cutToFirstSignificantSubdomainCustomRFC('there-is-no-such-domain' || toString(number), 'public_suffix_list') from numbers(1); + +select '-- no new line'; +-- no new line +select cutToFirstSignificantSubdomainCustomRFC('foo.bar', 'no_new_line_list'); +foo.bar +select cutToFirstSignificantSubdomainCustomRFC('a.foo.bar', 'no_new_line_list'); +a.foo.bar +select cutToFirstSignificantSubdomainCustomRFC('a.foo.baz', 'no_new_line_list'); +foo.baz +select '-- asterisk'; +-- asterisk +select cutToFirstSignificantSubdomainCustomRFC('foo.something.sheffield.sch.uk', 'public_suffix_list'); +something.sheffield.sch.uk +select cutToFirstSignificantSubdomainCustomRFC('something.sheffield.sch.uk', 
'public_suffix_list'); +something.sheffield.sch.uk +select cutToFirstSignificantSubdomainCustomRFC('sheffield.sch.uk', 'public_suffix_list'); +sheffield.sch.uk +select '-- exclamation mark'; +-- exclamation mark +select cutToFirstSignificantSubdomainCustomRFC('foo.kawasaki.jp', 'public_suffix_list'); +foo.kawasaki.jp +select cutToFirstSignificantSubdomainCustomRFC('foo.foo.kawasaki.jp', 'public_suffix_list'); +foo.foo.kawasaki.jp +select cutToFirstSignificantSubdomainCustomRFC('city.kawasaki.jp', 'public_suffix_list'); +city.kawasaki.jp +select cutToFirstSignificantSubdomainCustomRFC('some.city.kawasaki.jp', 'public_suffix_list'); +city.kawasaki.jp diff --git a/tests/queries/0_stateless/01601_custom_tld.sql b/tests/queries/0_stateless/01601_custom_tld.sql deleted file mode 100644 index 69ae209af2c..00000000000 --- a/tests/queries/0_stateless/01601_custom_tld.sql +++ /dev/null @@ -1,57 +0,0 @@ --- { echo } - -select '-- no-tld'; --- even if there is no TLD, 2-nd level by default anyway --- FIXME: make this behavior optional (so that TLD for host never changed, either empty or something real) -select cutToFirstSignificantSubdomain('there-is-no-such-domain'); -select cutToFirstSignificantSubdomain('foo.there-is-no-such-domain'); -select cutToFirstSignificantSubdomain('bar.foo.there-is-no-such-domain'); -select cutToFirstSignificantSubdomainCustom('there-is-no-such-domain', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('foo.there-is-no-such-domain', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('bar.foo.there-is-no-such-domain', 'public_suffix_list'); -select firstSignificantSubdomainCustom('bar.foo.there-is-no-such-domain', 'public_suffix_list'); - -select '-- generic'; -select firstSignificantSubdomainCustom('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel -select cutToFirstSignificantSubdomainCustom('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss - -select '-- difference'; --- biz.ss is not in the default TLD list, hence: -select cutToFirstSignificantSubdomain('foo.kernel.biz.ss'); -- biz.ss -select cutToFirstSignificantSubdomainCustom('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss - -select '-- 3+level'; -select cutToFirstSignificantSubdomainCustom('xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at -select firstSignificantSubdomainCustom('xx.blogspot.co.at', 'public_suffix_list'); -- blogspot -select cutToFirstSignificantSubdomainCustom('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at -select firstSignificantSubdomainCustom('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- blogspot - -select '-- url'; -select cutToFirstSignificantSubdomainCustom('http://foobar.com', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('http://foobar.com/foo', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('http://bar.foobar.com/foo', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('http://xx.blogspot.co.at', 'public_suffix_list'); - -select '-- www'; -select cutToFirstSignificantSubdomainCustomWithWWW('http://www.foo', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('http://www.foo', 'public_suffix_list'); - -select '-- vector'; -select cutToFirstSignificantSubdomainCustom('http://xx.blogspot.co.at/' || toString(number), 'public_suffix_list') from numbers(1); -select cutToFirstSignificantSubdomainCustom('there-is-no-such-domain' || toString(number), 'public_suffix_list') from numbers(1); - -select '-- no new line'; 
-select cutToFirstSignificantSubdomainCustom('foo.bar', 'no_new_line_list'); -select cutToFirstSignificantSubdomainCustom('a.foo.bar', 'no_new_line_list'); -select cutToFirstSignificantSubdomainCustom('a.foo.baz', 'no_new_line_list'); - -select '-- asterisk'; -select cutToFirstSignificantSubdomainCustom('foo.something.sheffield.sch.uk', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('something.sheffield.sch.uk', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('sheffield.sch.uk', 'public_suffix_list'); - -select '-- exclamation mark'; -select cutToFirstSignificantSubdomainCustom('foo.kawasaki.jp', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('foo.foo.kawasaki.jp', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('city.kawasaki.jp', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('some.city.kawasaki.jp', 'public_suffix_list'); diff --git a/tests/queries/0_stateless/01601_custom_tld.sql.j2 b/tests/queries/0_stateless/01601_custom_tld.sql.j2 new file mode 100644 index 00000000000..1e0982ea1b7 --- /dev/null +++ b/tests/queries/0_stateless/01601_custom_tld.sql.j2 @@ -0,0 +1,61 @@ +-- { echo } + +{% for suffix in ['', 'RFC'] -%} + +select '-- no-tld'; +-- even if there is no TLD, 2-nd level by default anyway +-- FIXME: make this behavior optional (so that TLD for host never changed, either empty or something real) +select cutToFirstSignificantSubdomain{{ suffix }}('there-is-no-such-domain'); +select cutToFirstSignificantSubdomain{{ suffix }}('foo.there-is-no-such-domain'); +select cutToFirstSignificantSubdomain{{ suffix }}('bar.foo.there-is-no-such-domain'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('there-is-no-such-domain', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.there-is-no-such-domain', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('bar.foo.there-is-no-such-domain', 'public_suffix_list'); +select firstSignificantSubdomainCustom{{ suffix }}('bar.foo.there-is-no-such-domain', 'public_suffix_list'); + +select '-- generic'; +select firstSignificantSubdomainCustom{{ suffix }}('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss + +select '-- difference'; +-- biz.ss is not in the default TLD list, hence: +select cutToFirstSignificantSubdomain{{ suffix }}('foo.kernel.biz.ss'); -- biz.ss +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss + +select '-- 3+level'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at +select firstSignificantSubdomainCustom{{ suffix }}('xx.blogspot.co.at', 'public_suffix_list'); -- blogspot +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at +select firstSignificantSubdomainCustom{{ suffix }}('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- blogspot + +select '-- url'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://foobar.com', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://foobar.com/foo', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://bar.foobar.com/foo', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix 
}}('http://xx.blogspot.co.at', 'public_suffix_list'); + +select '-- www'; +select cutToFirstSignificantSubdomainCustomWithWWW{{ suffix }}('http://www.foo', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://www.foo', 'public_suffix_list'); + +select '-- vector'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://xx.blogspot.co.at/' || toString(number), 'public_suffix_list') from numbers(1); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('there-is-no-such-domain' || toString(number), 'public_suffix_list') from numbers(1); + +select '-- no new line'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.bar', 'no_new_line_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('a.foo.bar', 'no_new_line_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('a.foo.baz', 'no_new_line_list'); + +select '-- asterisk'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.something.sheffield.sch.uk', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('something.sheffield.sch.uk', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('sheffield.sch.uk', 'public_suffix_list'); + +select '-- exclamation mark'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.kawasaki.jp', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.foo.kawasaki.jp', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('city.kawasaki.jp', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('some.city.kawasaki.jp', 'public_suffix_list'); + +{% endfor %} diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference index 3fd12051f4a..040a8c8d317 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -219,14 +219,6 @@ cutFragment cutIPv6 cutQueryString cutQueryStringAndFragment -cutToFirstSignificantSubdomain -cutToFirstSignificantSubdomainCustom -cutToFirstSignificantSubdomainCustomRFC -cutToFirstSignificantSubdomainCustomWithWWW -cutToFirstSignificantSubdomainCustomWithWWWRFC -cutToFirstSignificantSubdomainRFC -cutToFirstSignificantSubdomainWithWWW -cutToFirstSignificantSubdomainWithWWWRFC cutURLParameter cutWWW dateDiff @@ -284,10 +276,6 @@ dictGetUUIDOrDefault dictHas dictIsIn divide -domain -domainRFC -domainWithoutWWW -domainWithoutWWWRFC dotProduct dumpColumnStructure e @@ -336,10 +324,8 @@ filesystemAvailable filesystemCapacity filesystemFree finalizeAggregation -firstSignificantSubdomain firstSignificantSubdomainCustom firstSignificantSubdomainCustomRFC -firstSignificantSubdomainRFC flattenTuple floor format @@ -600,8 +586,6 @@ polygonsUnionCartesian polygonsUnionSpherical polygonsWithinCartesian polygonsWithinSpherical -port -portRFC position positionCaseInsensitive positionCaseInsensitiveUTF8 @@ -906,8 +890,6 @@ toYear toYearWeek today tokens -topLevelDomain -topLevelDomainRFC transactionID transactionLatestSnapshot transactionOldestSnapshot From e0852ba028b59db7e538cf0265c768b66fa04513 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Wed, 26 Oct 2022 13:15:32 +0200 Subject: [PATCH 065/112] Always run BuilderReport in all CI types --- .github/workflows/backport_branches.yml | 2 ++ .github/workflows/master.yml | 2 ++ .github/workflows/release_branches.yml | 2 ++ 3 files changed, 6 insertions(+) diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 4c8d023f2ec..30a77a9b27f 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -466,6 +466,7 @@ jobs: - BuilderDebTsan - BuilderDebDebug runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | @@ -504,6 +505,7 @@ jobs: - BuilderBinDarwin - BuilderBinDarwinAarch64 runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 3d22cb984dd..fba8a975ca6 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -974,6 +974,7 @@ jobs: - BuilderDebTsan - BuilderDebUBsan runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | @@ -1021,6 +1022,7 @@ jobs: - BuilderBinClangTidy - BuilderDebShared runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index 8f42ca92646..abe85d3e72d 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -541,6 +541,7 @@ jobs: - BuilderDebMsan - BuilderDebDebug runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | @@ -580,6 +581,7 @@ jobs: - BuilderBinDarwin - BuilderBinDarwinAarch64 runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | From 4e2f56ac34e6a92da3db446be4fe12d3ada63296 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Wed, 26 Oct 2022 13:24:51 +0200 Subject: [PATCH 066/112] Try to force StyleCheck running --- .github/workflows/pull_request.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 2795dc62d6d..3951f99b16b 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -112,7 +112,7 @@ jobs: StyleCheck: needs: DockerHubPush runs-on: [self-hosted, style-checker] - if: ${{ success() || failure() }} + if: ${{ success() || failure() || always() }} steps: - name: Set envs run: | From 0f93c75dd92f0dd4dc8ded906f39828af570b441 Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Wed, 26 Oct 2022 09:12:42 -0400 Subject: [PATCH 067/112] add recent merges to CHANGELOG --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 22f6afc4901..27b3358b26e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,8 @@ * Add OpenTelemetry support to ON CLUSTER DDL (require `distributed_ddl_entry_format_version` to be set to 4). [#41484](https://github.com/ClickHouse/ClickHouse/pull/41484) ([Frank Chen](https://github.com/FrankChen021)). * Added system table `asynchronous_insert_log`. It contains information about asynchronous inserts (including results of queries in fire-and-forget mode (with `wait_for_async_insert=0`)) for better introspection. [#42040](https://github.com/ClickHouse/ClickHouse/pull/42040) ([Anton Popov](https://github.com/CurtizJ)). 
* Add support for methods `lz4`, `bz2`, `snappy` in HTTP's `Accept-Encoding` which is a non-standard extension to HTTP protocol. [#42071](https://github.com/ClickHouse/ClickHouse/pull/42071) ([Nikolay Degterinsky](https://github.com/evillique)). +* Adds Morton Coding (ZCurve) encode/decode functions. [#41753](https://github.com/ClickHouse/ClickHouse/pull/41753) ([Constantine Peresypkin](https://github.com/pkit)). +* Add support for `SET setting_name = DEFAULT`. [#42187](https://github.com/ClickHouse/ClickHouse/pull/42187) ([Filatenkov Artur](https://github.com/FArthur-cmd)). #### Experimental Feature * Added new infrastructure for query analysis and planning under the `allow_experimental_analyzer` setting. [#31796](https://github.com/ClickHouse/ClickHouse/pull/31796) ([Maksim Kita](https://github.com/kitaisreal)). @@ -86,6 +88,9 @@ Only allow clients connecting to a secure server with an invalid certificate onl * Fix rarely invalid cast of aggregate state types with complex types such as Decimal. This fixes [#42408](https://github.com/ClickHouse/ClickHouse/issues/42408). [#42417](https://github.com/ClickHouse/ClickHouse/pull/42417) ([Amos Bird](https://github.com/amosbird)). * Allow to use `Date32` arguments for `dateName` function. [#42554](https://github.com/ClickHouse/ClickHouse/pull/42554) ([Roman Vasin](https://github.com/rvasin)). * Now filters with NULL literals will be used during index analysis. [#34063](https://github.com/ClickHouse/ClickHouse/issues/34063). [#41842](https://github.com/ClickHouse/ClickHouse/pull/41842) ([Amos Bird](https://github.com/amosbird)). +* Merge parts if every part in the range is older than a certain threshold. The threshold can be set by using `min_age_to_force_merge_seconds`. This closes [#35836](https://github.com/ClickHouse/ClickHouse/issues/35836). [#42423](https://github.com/ClickHouse/ClickHouse/pull/42423) ([Antonio Andelic](https://github.com/antonio2368)). This is continuation of [#39550i](https://github.com/ClickHouse/ClickHouse/pull/39550) by [@fastio](https://github.com/fastio) who implemented most of the logic. +* Added new infrastructure for query analysis and planning under `allow_experimental_analyzer` setting. [#31796](https://github.com/ClickHouse/ClickHouse/pull/31796) ([Maksim Kita](https://github.com/kitaisreal)). +* Improve the time to recover lost keeper connections. [#42541](https://github.com/ClickHouse/ClickHouse/pull/42541) ([Raúl Marín](https://github.com/Algunenano)). #### Build/Testing/Packaging Improvement * Add fuzzer for table definitions [#40096](https://github.com/ClickHouse/ClickHouse/pull/40096) ([Anton Popov](https://github.com/CurtizJ)). This represents the biggest advancement for ClickHouse testing in this year so far. @@ -139,6 +144,7 @@ Only allow clients connecting to a secure server with an invalid certificate onl * Fix bad_cast assert during INSERT into `Annoy` indexes over non-Float32 columns. `Annoy` indices is an experimental feature. [#42485](https://github.com/ClickHouse/ClickHouse/pull/42485) ([Robert Schulze](https://github.com/rschu1ze)). * Arithmetic operator with Date or DateTime and 128 or 256-bit integer was referencing uninitialized memory. [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Fix unexpected table loading error when partition key contains alias function names during server upgrade. 
[#36379](https://github.com/ClickHouse/ClickHouse/pull/36379) ([Amos Bird](https://github.com/amosbird)). +* Fixes a crash in `JSONExtract` with `LowCardinality`. [#42633](https://github.com/ClickHouse/ClickHouse/pull/42633) ([Anton Popov](https://github.com/CurtizJ)). ### ClickHouse release 22.9, 2022-09-22 From 5bc8b267b5f00e160963f7cdc0e96439010198db Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Wed, 26 Oct 2022 09:16:03 -0400 Subject: [PATCH 068/112] remove newline --- CHANGELOG.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27b3358b26e..3948a24d52a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,8 +68,7 @@ * Allow readable size values (like `1TB`) in cache config. [#41688](https://github.com/ClickHouse/ClickHouse/pull/41688) ([Kseniia Sumarokova](https://github.com/kssenii)). * ClickHouse could cache stale DNS entries for some period of time (15 seconds by default) until the cache won't be updated asynchronously. During these periods ClickHouse can nevertheless try to establish a connection and produce errors. This behavior is fixed. [#41707](https://github.com/ClickHouse/ClickHouse/pull/41707) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). * Add interactive history search with fzf-like utility (fzf/sk) for `clickhouse-client`/`clickhouse-local` (note you can use `FZF_DEFAULT_OPTS`/`SKIM_DEFAULT_OPTIONS` to additionally configure the behavior). [#41730](https://github.com/ClickHouse/ClickHouse/pull/41730) ([Azat Khuzhin](https://github.com/azat)). -* -Only allow clients connecting to a secure server with an invalid certificate only to proceed with the '--accept-certificate' flag. [#41743](https://github.com/ClickHouse/ClickHouse/pull/41743) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Only allow clients connecting to a secure server with an invalid certificate only to proceed with the '--accept-certificate' flag. [#41743](https://github.com/ClickHouse/ClickHouse/pull/41743) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). * Add function `tryBase58Decode`, similar to the existing function `tryBase64Decode`. [#41824](https://github.com/ClickHouse/ClickHouse/pull/41824) ([Robert Schulze](https://github.com/rschu1ze)). * Improve feedback when replacing partition with different primary key. Fixes [#34798](https://github.com/ClickHouse/ClickHouse/issues/34798). [#41838](https://github.com/ClickHouse/ClickHouse/pull/41838) ([Salvatore](https://github.com/tbsal)). * Fix parallel parsing: segmentator now checks `max_block_size`. This fixed memory overallocation in case of parallel parsing and small LIMIT. [#41852](https://github.com/ClickHouse/ClickHouse/pull/41852) ([Vitaly Baranov](https://github.com/vitlibar)). From d3461abd1f0c43055daaff0def45c32de2b203f1 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Wed, 26 Oct 2022 15:27:09 +0200 Subject: [PATCH 069/112] Update version to 22.11.1.1 --- cmake/autogenerated_versions.txt | 10 ++++---- .../StorageSystemContributors.generated.cpp | 24 +++++++++++++++++++ 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 9728451f38a..11b37f5a7c8 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. 
-SET(VERSION_REVISION 54467) +SET(VERSION_REVISION 54468) SET(VERSION_MAJOR 22) -SET(VERSION_MINOR 10) +SET(VERSION_MINOR 11) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH 3030d4c7ff09ec44ab07d0a8069ea923227288a1) -SET(VERSION_DESCRIBE v22.10.1.1-testing) -SET(VERSION_STRING 22.10.1.1) +SET(VERSION_GITHASH 98ab5a3c189232ea2a3dddb9d2be7196ae8b3434) +SET(VERSION_DESCRIBE v22.11.1.1-testing) +SET(VERSION_STRING 22.11.1.1) # end of autochange diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index 25eae3b83b6..e1f4f7b82bf 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -12,6 +12,7 @@ const char * auto_contributors[] { "821008736@qq.com", "ANDREI STAROVEROV", "Aaron Katz", + "Adam Rutkowski", "Adri Fernandez", "Ahmed Dardery", "Aimiyoo", @@ -76,11 +77,15 @@ const char * auto_contributors[] { "Alexey Elymanov", "Alexey Gusev", "Alexey Ilyukhov", + "Alexey Ivanov", "Alexey Milovidov", "Alexey Tronov", "Alexey Vasiliev", "Alexey Zatelepin", "Alexsey Shestakov", + "AlfVII", + "Alfonso Martinez", + "Alfred Xu", "Ali Demirci", "Aliaksandr Pliutau", "Aliaksandr Shylau", @@ -196,6 +201,7 @@ const char * auto_contributors[] { "Brian Hunter", "Bulat Gaifullin", "Carbyn", + "Carlos Rodríguez Hernández", "Caspian", "Chao Ma", "Chao Wang", @@ -222,6 +228,7 @@ const char * auto_contributors[] { "DIAOZHAFENG", "Dale McDiarmid", "Dale Mcdiarmid", + "Dalitso Banda", "Dan Roscigno", "DanRoscigno", "Daniel Bershatsky", @@ -267,6 +274,7 @@ const char * auto_contributors[] { "Dmitry S..ky / skype: dvska-at-skype", "Dmitry Ukolov", "Doge", + "Dom Del Nano", "Dongdong Yang", "DoomzD", "Dr. Strange Looker", @@ -276,6 +284,7 @@ const char * auto_contributors[] { "Egor Savin", "Ekaterina", "Eldar Zaitov", + "Elena", "Elena Baskakova", "Elghazal Ahmed", "Elizaveta Mironyuk", @@ -342,6 +351,7 @@ const char * auto_contributors[] { "Grigory Pervakov", "GruffGemini", "Guillaume Tassery", + "Guo Wangyang", "Guo Wei (William)", "Haavard Kvaalen", "Habibullah Oladepo", @@ -349,6 +359,7 @@ const char * auto_contributors[] { "Hakob Saghatelyan", "Hamoon", "Han Fei", + "Han Shukai", "Harry Lee", "Harry-Lee", "HarryLeeIBM", @@ -404,6 +415,7 @@ const char * auto_contributors[] { "Jack Song", "JackyWoo", "Jacob Hayes", + "Jacob Herrington", "Jake Liu", "Jakub Kuklis", "James Maidment", @@ -419,6 +431,7 @@ const char * auto_contributors[] { "Jiading Guo", "Jiang Tao", "Jianmei Zhang", + "Jiebin Sun", "Jochen Schalanda", "John", "John Hummel", @@ -432,6 +445,7 @@ const char * auto_contributors[] { "Julian Gilyadov", "Julian Zhou", "Julio Jimenez", + "Jus", "Justin Hilliard", "Kang Liu", "Karl Pietrzak", @@ -652,6 +666,7 @@ const char * auto_contributors[] { "OuO", "PHO", "Pablo Alegre", + "Pablo Marcos", "Paramtamtam", "Patrick Zippenfenig", "Paul Loyd", @@ -681,6 +696,7 @@ const char * auto_contributors[] { "Prashant Shahi", "Pxl", "Pysaoke", + "Quanfa Fu", "Quid37", "Rafael Acevedo", "Rafael David Tinoco", @@ -693,6 +709,7 @@ const char * auto_contributors[] { "RedClusive", "RegulusZ", "Reilee", + "Reinaldy Rafli", "Reto Kromer", "Ri", "Rich Raposa", @@ -726,6 +743,7 @@ const char * auto_contributors[] { "Sachin", "Safronov Michail", "SaltTan", + "Salvatore Mesoraca", "Sami Kerola", "Samuel Chou", "San", @@ -927,6 +945,7 @@ const char * auto_contributors[] { "ZhiYong Wang", "Zhichang Yu", "Zhichun Wu", + "Zhiguo Zhou", "Zhipeng", "Zijie Lu", "Zoran Pandovski", 
@@ -950,6 +969,7 @@ const char * auto_contributors[] { "alexander goryanets", "alexander kozhikhov", "alexey-milovidov", + "alexeyerm", "alexeypavlenko", "alfredlu", "amesaru", @@ -1131,6 +1151,7 @@ const char * auto_contributors[] { "jennyma", "jetgm", "jewisliu", + "jferroal", "jiahui-97", "jianmei zhang", "jinjunzh", @@ -1236,6 +1257,7 @@ const char * auto_contributors[] { "mo-avatar", "morty", "moscas", + "mosinnik", "mreddy017", "msaf1980", "msirm", @@ -1321,6 +1343,7 @@ const char * auto_contributors[] { "simon-says", "snyk-bot", "songenjie", + "sperlingxx", "spff", "spongedc", "spume", @@ -1422,6 +1445,7 @@ const char * auto_contributors[] { "zhongyuankai", "zhoubintao", "zhukai", + "zimv", "zkun", "zlx19950903", "zombee0", From 42f5a3b2f886711459771526e012710d068c2fd0 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 26 Oct 2022 13:34:29 +0000 Subject: [PATCH 070/112] Update version_date.tsv and changelogs after v22.10.1.1877-stable --- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- .../{v22.10.1.1875-stable.md => v22.10.1.1877-stable.md} | 3 ++- utils/list-versions/version_date.tsv | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) rename docs/changelogs/{v22.10.1.1875-stable.md => v22.10.1.1877-stable.md} (99%) diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 9b633b66188..cf4eb3fe645 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="22.10.1.1875" +ARG VERSION="22.10.1.1877" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 35c78763a31..d26657a7979 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -21,7 +21,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="22.10.1.1875" +ARG VERSION="22.10.1.1877" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docs/changelogs/v22.10.1.1875-stable.md b/docs/changelogs/v22.10.1.1877-stable.md similarity index 99% rename from docs/changelogs/v22.10.1.1875-stable.md rename to docs/changelogs/v22.10.1.1877-stable.md index 49f93168a00..77e540ce928 100644 --- a/docs/changelogs/v22.10.1.1875-stable.md +++ b/docs/changelogs/v22.10.1.1877-stable.md @@ -5,7 +5,7 @@ sidebar_label: 2022 # 2022 Changelog -### ClickHouse release v22.10.1.1875-stable (011ae8675a2) FIXME as compared to v22.9.1.2603-stable (3030d4c7ff0) +### ClickHouse release v22.10.1.1877-stable (98ab5a3c189) FIXME as compared to v22.9.1.2603-stable (3030d4c7ff0) #### Backward Incompatible Change * Rename cache commands: `show caches` -> `show filesystem caches`, `describe cache` -> `describe filesystem cache`. [#41508](https://github.com/ClickHouse/ClickHouse/pull/41508) ([Kseniia Sumarokova](https://github.com/kssenii)). @@ -348,4 +348,5 @@ sidebar_label: 2022 * Fix typo in cmake code related to fuzzing [#42627](https://github.com/ClickHouse/ClickHouse/pull/42627) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). 
* Fix build [#42635](https://github.com/ClickHouse/ClickHouse/pull/42635) ([Anton Popov](https://github.com/CurtizJ)). * Add .rgignore for test data [#42639](https://github.com/ClickHouse/ClickHouse/pull/42639) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix flaky 02457_datediff_via_unix_epoch test [#42655](https://github.com/ClickHouse/ClickHouse/pull/42655) ([Roman Vasin](https://github.com/rvasin)). diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 7bbd8547506..e72fce63fda 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,4 +1,4 @@ -v22.10.1.1875-stable 2022-10-26 +v22.10.1.1877-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 From 9b4da8c89ae79f0f6e4da4c8c20d862ef52a48e9 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Wed, 26 Oct 2022 15:41:12 +0200 Subject: [PATCH 071/112] Update SECURITY.md --- SECURITY.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index fb6caa92cb8..0fb333c8ea3 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -10,9 +10,11 @@ The following versions of ClickHouse server are currently being supported with s | Version | Supported | |:-|:-| +| 22.10 | ✔️ | +| 22.9 | ✔️ | | 22.8 | ✔️ | -| 22.7 | ✔️ | -| 22.6 | ✔️ | +| 22.7 | ❌ | +| 22.6 | ❌ | | 22.5 | ❌ | | 22.4 | ❌ | | 22.3 | ✔️ | From 1520bcd53c27c2c24a3e85c313d593cc1871a136 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 26 Oct 2022 15:50:10 +0000 Subject: [PATCH 072/112] Fix error code. --- src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp | 4 ++-- src/Storages/StorageMerge.cpp | 6 ------ 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index 475407a402b..d8daec3b88e 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -607,7 +607,7 @@ Block MergeTreeBaseSelectProcessor::transformHeader( if (!row_level_column.type->canBeUsedInBooleanContext()) { throw Exception("Invalid type for filter in PREWHERE: " + row_level_column.type->getName(), - ErrorCodes::LOGICAL_ERROR); + ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER); } block.erase(prewhere_info->row_level_column_name); @@ -620,7 +620,7 @@ Block MergeTreeBaseSelectProcessor::transformHeader( if (!prewhere_column.type->canBeUsedInBooleanContext()) { throw Exception("Invalid type for filter in PREWHERE: " + prewhere_column.type->getName(), - ErrorCodes::LOGICAL_ERROR); + ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER); } if (prewhere_info->remove_prewhere_column) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index c68e9103704..6d81b424f51 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -547,12 +547,6 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( } else { - { - /// Analyze query to check that types are valid (e.g. in PREWHERE). 
- InterpreterSelectQuery interpreter - (modified_query_info.query, modified_context, SelectQueryOptions(processed_stage).ignoreProjections()); - } - storage->read( plan, real_column_names, From 09fe9c3ed130cbf2f70bcf5018063d0593a17a6b Mon Sep 17 00:00:00 2001 From: Roman Vasin Date: Wed, 26 Oct 2022 16:07:56 +0000 Subject: [PATCH 073/112] Use {} in exceptions --- src/Functions/formatDateTime.cpp | 99 ++++++++++++++------------------ 1 file changed, 42 insertions(+), 57 deletions(-) diff --git a/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp index 4f28dae7a66..c5240abf7a1 100644 --- a/src/Functions/formatDateTime.cpp +++ b/src/Functions/formatDateTime.cpp @@ -317,44 +317,39 @@ public: if constexpr (support_integer) { if (arguments.size() != 1 && arguments.size() != 2 && arguments.size() != 3) - throw Exception( - "Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size()) - + ", should be 1, 2 or 3", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 1, 2 or 3", + getName(), arguments.size()); if (arguments.size() == 1 && !isInteger(arguments[0].type)) - throw Exception( - "Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName() - + " when arguments size is 1. Should be integer", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of 1 argument of function {} when arguments size is 1. Should be integer", + arguments[0].type->getName(), getName()); if (arguments.size() > 1 && !(isInteger(arguments[0].type) || isDate(arguments[0].type) || isDateTime(arguments[0].type) || isDate32(arguments[0].type) || isDateTime64(arguments[0].type))) - throw Exception( - "Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName() - + " when arguments size is 2 or 3. Should be a integer or a date with time", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of 1 argument of function {} when arguments size is 2 or 3. Should be a integer or a date with time", + arguments[0].type->getName(), getName()); } else { if (arguments.size() != 2 && arguments.size() != 3) - throw Exception( - "Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size()) - + ", should be 2 or 3", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 2 or 3", + getName(), arguments.size()); if (!isDate(arguments[0].type) && !isDateTime(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime64(arguments[0].type)) - throw Exception( - "Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName() - + ". Should be a date or a date with time", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of 1 argument of function {}. Should be a date or a date with time", + arguments[0].type->getName(), getName()); } if (arguments.size() == 2 && !WhichDataType(arguments[1].type).isString()) - throw Exception( - "Illegal type " + arguments[1].type->getName() + " of 2 argument of function " + getName() + ". 
Must be String.", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of 2 argument of function {}. Must be String.", + arguments[1].type->getName(), getName()); if (arguments.size() == 3 && !WhichDataType(arguments[2].type).isString()) - throw Exception( - "Illegal type " + arguments[2].type->getName() + " of 3 argument of function " + getName() + ". Must be String.", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of 3 argument of function {}. Must be String.", + arguments[2].type->getName(), getName()); if (arguments.size() == 1) return std::make_shared(); @@ -375,10 +370,9 @@ public: return true; })) { - throw Exception( - "Illegal column " + arguments[0].column->getName() + " of function " + getName() - + ", must be Integer or DateTime when arguments size is 1.", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of function {}, must be Integer or DateTime when arguments size is 1.", + arguments[0].column->getName(), getName()); } } else @@ -387,10 +381,9 @@ public: { using FromDataType = std::decay_t; if (!(res = executeType(arguments, result_type))) - throw Exception( - "Illegal column " + arguments[0].column->getName() + " of function " + getName() - + ", must be Integer or DateTime.", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of function {}, must be Integer or DateTime.", + arguments[0].column->getName(), getName()); return true; })) { @@ -398,10 +391,9 @@ public: || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)))) - throw Exception( - "Illegal column " + arguments[0].column->getName() + " of function " + getName() - + ", must be Integer or DateTime.", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of function {}, must be Integer or DateTime.", + arguments[0].column->getName(), getName()); } } } @@ -411,10 +403,9 @@ public: || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)))) - throw Exception( - "Illegal column " + arguments[0].column->getName() + " of function " + getName() - + ", must be Date or DateTime.", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of function {}, must be Date or DateTime.", + arguments[0].column->getName(), getName()); } return res; @@ -429,10 +420,9 @@ public: const ColumnConst * pattern_column = checkAndGetColumnConst(arguments[1].column.get()); if (!pattern_column) - throw Exception("Illegal column " + arguments[1].column->getName() - + " of second ('format') argument of function " + getName() - + ". Must be constant string.", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of second ('format') argument of function {}. 
Must be constant string.", + arguments[1].column->getName(), getName()); String pattern = pattern_column->getValue(); @@ -500,13 +490,6 @@ public: instruction.perform(pos, static_cast(c.whole), time_zone); } } - else if constexpr (std::is_same_v) - { - for (auto & instruction : instructions) - { - instruction.perform(pos, static_cast(vec[i]), time_zone); - } - } else { for (auto & instruction : instructions) @@ -723,12 +706,14 @@ public: // Unimplemented case 'U': [[fallthrough]]; case 'W': - throw Exception("Wrong pattern '" + pattern + "', symbol '" + *pos + " is not implemented ' for function " + getName(), - ErrorCodes::NOT_IMPLEMENTED); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "Wrong pattern '{}', symbol '{}' is not implemented for function {}", + pattern, *pos, getName()); default: - throw Exception( - "Wrong pattern '" + pattern + "', unexpected symbol '" + *pos + "' for function " + getName(), ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Wrong pattern '{}', unexpected symbol '{}' for function {}", + pattern, *pos, getName()); } ++pos; From 22011aeaec07b9b14f4a6bc6855860ab0be10eda Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Wed, 26 Oct 2022 12:39:17 -0400 Subject: [PATCH 074/112] remove frontmatter --- docs/en/operations/_backup.md | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/en/operations/_backup.md b/docs/en/operations/_backup.md index c543c49a083..d694c51cee6 100644 --- a/docs/en/operations/_backup.md +++ b/docs/en/operations/_backup.md @@ -1,9 +1,5 @@ ---- -slug: /en/operations/backup -sidebar_position: 49 -sidebar_label: Data backup and restore -title: Data backup and restore ---- + +[//]: # (This file is included in Manage > Backups) - [Backup to a local disk](#backup-to-a-local-disk) - [Configuring backup/restore to use an S3 endpoint](#configuring-backuprestore-to-use-an-s3-endpoint) From 31a0044981e7da291487a48a8871dc8fed9e12e9 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 26 Oct 2022 17:14:38 +0000 Subject: [PATCH 075/112] Update version_date.tsv and changelogs after v22.9.4.32-stable --- docs/changelogs/v22.9.4.32-stable.md | 33 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 34 insertions(+) create mode 100644 docs/changelogs/v22.9.4.32-stable.md diff --git a/docs/changelogs/v22.9.4.32-stable.md b/docs/changelogs/v22.9.4.32-stable.md new file mode 100644 index 00000000000..d6c3f4ba498 --- /dev/null +++ b/docs/changelogs/v22.9.4.32-stable.md @@ -0,0 +1,33 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.9.4.32-stable (3db8bcf1a70) FIXME as compared to v22.9.3.18-stable (0cb4b15d2fa) + +#### Bug Fix +* Backported in [#42435](https://github.com/ClickHouse/ClickHouse/issues/42435): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Backported in [#42297](https://github.com/ClickHouse/ClickHouse/issues/42297): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Backported in [#42361](https://github.com/ClickHouse/ClickHouse/issues/42361): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. 
Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42122](https://github.com/ClickHouse/ClickHouse/issues/42122): Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#41938](https://github.com/ClickHouse/ClickHouse/issues/41938): Don't allow to create or alter merge tree tables with virtual column name _row_exists, which is reserved for lightweight delete. Fixed [#41716](https://github.com/ClickHouse/ClickHouse/issues/41716). [#41763](https://github.com/ClickHouse/ClickHouse/pull/41763) ([Jianmei Zhang](https://github.com/zhangjmruc)). +* Backported in [#42179](https://github.com/ClickHouse/ClickHouse/issues/42179): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#42301](https://github.com/ClickHouse/ClickHouse/issues/42301): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42388](https://github.com/ClickHouse/ClickHouse/issues/42388): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42500](https://github.com/ClickHouse/ClickHouse/issues/42500): Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42581](https://github.com/ClickHouse/ClickHouse/issues/42581): This reverts [#40217](https://github.com/ClickHouse/ClickHouse/issues/40217) which introduced a regression in date/time functions. [#42367](https://github.com/ClickHouse/ClickHouse/pull/42367) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42572](https://github.com/ClickHouse/ClickHouse/issues/42572): Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42595](https://github.com/ClickHouse/ClickHouse/issues/42595): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). 
[#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e72fce63fda..7807fa32cbc 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,4 +1,5 @@ v22.10.1.1877-stable 2022-10-26 +v22.9.4.32-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 From 18c0e7b2285061187aa3daf6fdc19b89539df57e Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 26 Oct 2022 17:19:01 +0000 Subject: [PATCH 076/112] Update version_date.tsv and changelogs after v22.8.7.34-lts --- docs/changelogs/v22.8.7.34-lts.md | 37 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 3 +++ 2 files changed, 40 insertions(+) create mode 100644 docs/changelogs/v22.8.7.34-lts.md diff --git a/docs/changelogs/v22.8.7.34-lts.md b/docs/changelogs/v22.8.7.34-lts.md new file mode 100644 index 00000000000..0dc899f4717 --- /dev/null +++ b/docs/changelogs/v22.8.7.34-lts.md @@ -0,0 +1,37 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.8.7.34-lts (3c38e5e8ab9) FIXME as compared to v22.8.6.71-lts (7bf38a43e30) + +#### Improvement +* Backported in [#42096](https://github.com/ClickHouse/ClickHouse/issues/42096): Replace back `clickhouse su` command with `sudo -u` in start in order to respect limits in `/etc/security/limits.conf`. [#41847](https://github.com/ClickHouse/ClickHouse/pull/41847) ([Eugene Konkov](https://github.com/ekonkov)). + +#### Bug Fix +* Backported in [#42434](https://github.com/ClickHouse/ClickHouse/issues/42434): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Backported in [#42296](https://github.com/ClickHouse/ClickHouse/issues/42296): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Backported in [#42360](https://github.com/ClickHouse/ClickHouse/issues/42360): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42489](https://github.com/ClickHouse/ClickHouse/issues/42489): Removed skipping of mutations in unaffected partitions of `MergeTree` tables, because this feature never worked correctly and might cause resurrection of finished mutations. 
[#40589](https://github.com/ClickHouse/ClickHouse/pull/40589) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#42121](https://github.com/ClickHouse/ClickHouse/issues/42121): Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)). +* - Prevent crash when passing wrong aggregation states to groupBitmap*. [#41972](https://github.com/ClickHouse/ClickHouse/pull/41972) ([Raúl Marín](https://github.com/Algunenano)). +* - Fix read bytes/rows in X-ClickHouse-Summary with materialized views. [#41973](https://github.com/ClickHouse/ClickHouse/pull/41973) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#42269](https://github.com/ClickHouse/ClickHouse/issues/42269): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#42300](https://github.com/ClickHouse/ClickHouse/issues/42300): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42387](https://github.com/ClickHouse/ClickHouse/issues/42387): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42499](https://github.com/ClickHouse/ClickHouse/issues/42499): Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42571](https://github.com/ClickHouse/ClickHouse/issues/42571): Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42594](https://github.com/ClickHouse/ClickHouse/issues/42594): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e72fce63fda..0470152ecff 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,13 +1,16 @@ v22.10.1.1877-stable 2022-10-26 +v22.9.4.32-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 +v22.8.7.34-lts 2022-10-26 v22.8.6.71-lts 2022-09-30 v22.8.5.29-lts 2022-09-13 v22.8.4.7-lts 2022-08-31 v22.8.3.13-lts 2022-08-29 v22.8.2.11-lts 2022-08-23 v22.8.1.2097-lts 2022-08-18 +v22.7.7.24-stable 2022-10-26 v22.7.6.74-stable 2022-09-30 v22.7.5.13-stable 2022-08-29 v22.7.4.16-stable 2022-08-23 From 4c8f0e7591f4248248984df84ecd545e90b20455 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 26 Oct 2022 17:28:41 +0000 Subject: [PATCH 077/112] Update version_date.tsv and changelogs after v22.7.7.24-stable --- docs/changelogs/v22.7.7.24-stable.md | 29 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 3 +++ 2 files changed, 32 insertions(+) create mode 100644 docs/changelogs/v22.7.7.24-stable.md diff --git a/docs/changelogs/v22.7.7.24-stable.md b/docs/changelogs/v22.7.7.24-stable.md new file mode 100644 index 00000000000..d7b83775502 --- /dev/null +++ b/docs/changelogs/v22.7.7.24-stable.md @@ -0,0 +1,29 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.7.7.24-stable (02ad1f979a8) FIXME as compared to v22.7.6.74-stable (c00ffb3c11a) + +#### Bug Fix +* Backported in [#42433](https://github.com/ClickHouse/ClickHouse/issues/42433): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Backported in [#42329](https://github.com/ClickHouse/ClickHouse/issues/42329): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Backported in [#42359](https://github.com/ClickHouse/ClickHouse/issues/42359): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42268](https://github.com/ClickHouse/ClickHouse/issues/42268): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#42299](https://github.com/ClickHouse/ClickHouse/issues/42299): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
+* Backported in [#42386](https://github.com/ClickHouse/ClickHouse/issues/42386): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42498](https://github.com/ClickHouse/ClickHouse/issues/42498): Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42593](https://github.com/ClickHouse/ClickHouse/issues/42593): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e72fce63fda..0470152ecff 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,13 +1,16 @@ v22.10.1.1877-stable 2022-10-26 +v22.9.4.32-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 +v22.8.7.34-lts 2022-10-26 v22.8.6.71-lts 2022-09-30 v22.8.5.29-lts 2022-09-13 v22.8.4.7-lts 2022-08-31 v22.8.3.13-lts 2022-08-29 v22.8.2.11-lts 2022-08-23 v22.8.1.2097-lts 2022-08-18 +v22.7.7.24-stable 2022-10-26 v22.7.6.74-stable 2022-09-30 v22.7.5.13-stable 2022-08-29 v22.7.4.16-stable 2022-08-23 From acc03cf52e7079110cc99f2c17854672697e4f80 Mon Sep 17 00:00:00 2001 From: Julio Jimenez Date: Wed, 26 Oct 2022 13:44:46 -0400 Subject: [PATCH 078/112] Sonar Cloud Workflow (#42534) * Sonar Cloud Workflow Signed-off-by: Julio Jimenez * build command Signed-off-by: Julio Jimenez * try builder Signed-off-by: Julio Jimenez * build Signed-off-by: Julio Jimenez * ok Signed-off-by: Julio Jimenez * ok Signed-off-by: Julio Jimenez * not sure why python was there Signed-off-by: Julio Jimenez * ccache Signed-off-by: Julio Jimenez * install python3 Signed-off-by: Julio Jimenez * env vars and simpler cmake Signed-off-by: Julio Jimenez * ninjaless Signed-off-by: Julio Jimenez * mandatory properties Signed-off-by: Julio Jimenez * typo Signed-off-by: Julio Jimenez Signed-off-by: Julio Jimenez --- .github/workflows/sonar.yml | 64 +++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 .github/workflows/sonar.yml diff --git a/.github/workflows/sonar.yml b/.github/workflows/sonar.yml new file mode 100644 index 00000000000..316f1e90d40 --- /dev/null +++ b/.github/workflows/sonar.yml @@ -0,0 +1,64 @@ +name: Sonar Cloud +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened] +env: + CC: clang-15 + CXX: clang++-15 +jobs: + sonar_cloud: + name: Sonar Cloud + runs-on: 
[self-hosted, builder] + env: + SONAR_SCANNER_VERSION: 4.7.0.2747 + SONAR_SERVER_URL: "https://sonarcloud.io" + BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + submodules: true + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: 11 + - name: Download and set up sonar-scanner + env: + SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip + run: | + mkdir -p $HOME/.sonar + curl -sSLo $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }} + unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ + echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> $GITHUB_PATH + - name: Download and set up build-wrapper + env: + BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip + run: | + curl -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }} + unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ + echo "$HOME/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH + - name: Set Up Build Tools + run: | + sudo apt-get update + sudo apt-get install -yq git cmake ccache python3 ninja-build + sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" + - name: Run build-wrapper + run: | + mkdir build + cd build + cmake .. + cd .. + build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/ + - name: Run sonar-scanner + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} + run: | + sonar-scanner \ + --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \ + --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \ + --define sonar.projectKey="clickhouse-java" \ + --define sonar.organization="ClickHouse" From 313cbb1058a13abb03cc0c839c7ace6b6e8ba68a Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 26 Oct 2022 17:49:36 +0000 Subject: [PATCH 079/112] Fix test. 
--- src/Storages/StorageMerge.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 6d81b424f51..9891340a0d0 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -520,8 +520,6 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( modified_select.setFinal(); } - modified_select.replaceDatabaseAndTable(database_name, table_name); - auto storage_stage = storage->getQueryProcessingStage(modified_context, QueryProcessingStage::Complete, storage_snapshot, modified_query_info); if (processed_stage <= storage_stage) @@ -571,6 +569,8 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( } else if (processed_stage > storage_stage) { + modified_select.replaceDatabaseAndTable(database_name, table_name); + /// Maximum permissible parallelism is streams_num modified_context->setSetting("max_threads", streams_num); modified_context->setSetting("max_streams_to_max_threads_ratio", 1); From c8444f751f968ebd3b048d6edcc9d90c1c6ac02d Mon Sep 17 00:00:00 2001 From: Julio Jimenez Date: Wed, 26 Oct 2022 14:07:10 -0400 Subject: [PATCH 080/112] Move SonarCloud Job to nightly Signed-off-by: Julio Jimenez --- .github/workflows/nightly.yml | 53 +++++++++++++++++++++++++++++ .github/workflows/sonar.yml | 64 ----------------------------------- 2 files changed, 53 insertions(+), 64 deletions(-) delete mode 100644 .github/workflows/sonar.yml diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 801f7eda94a..834f74822a1 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -122,3 +122,56 @@ jobs: docker ps --quiet | xargs --no-run-if-empty docker kill ||: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + SonarCloud: + name: Sonar Cloud + runs-on: [self-hosted, builder] + env: + SONAR_SCANNER_VERSION: 4.7.0.2747 + SONAR_SERVER_URL: "https://sonarcloud.io" + BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + submodules: true + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: 11 + - name: Download and set up sonar-scanner + env: + SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip + run: | + mkdir -p $HOME/.sonar + curl -sSLo $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }} + unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ + echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> $GITHUB_PATH + - name: Download and set up build-wrapper + env: + BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip + run: | + curl -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }} + unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ + echo "$HOME/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH + - name: Set Up Build Tools + run: | + sudo apt-get update + sudo apt-get install -yq git cmake ccache python3 ninja-build + sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" + - name: Run build-wrapper + run: | + mkdir build + cd build + cmake .. + cd .. 
+ build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/ + - name: Run sonar-scanner + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} + run: | + sonar-scanner \ + --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \ + --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \ + --define sonar.projectKey="ClickHouse_ClickHouse" \ + --define sonar.organization="clickhouse-java" \ No newline at end of file diff --git a/.github/workflows/sonar.yml b/.github/workflows/sonar.yml deleted file mode 100644 index 316f1e90d40..00000000000 --- a/.github/workflows/sonar.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Sonar Cloud -on: - push: - branches: - - master - pull_request: - types: [opened, synchronize, reopened] -env: - CC: clang-15 - CXX: clang++-15 -jobs: - sonar_cloud: - name: Sonar Cloud - runs-on: [self-hosted, builder] - env: - SONAR_SCANNER_VERSION: 4.7.0.2747 - SONAR_SERVER_URL: "https://sonarcloud.io" - BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed - steps: - - uses: actions/checkout@v2 - with: - fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis - submodules: true - - name: Set up JDK 11 - uses: actions/setup-java@v1 - with: - java-version: 11 - - name: Download and set up sonar-scanner - env: - SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip - run: | - mkdir -p $HOME/.sonar - curl -sSLo $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }} - unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ - echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> $GITHUB_PATH - - name: Download and set up build-wrapper - env: - BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip - run: | - curl -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }} - unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ - echo "$HOME/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH - - name: Set Up Build Tools - run: | - sudo apt-get update - sudo apt-get install -yq git cmake ccache python3 ninja-build - sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" - - name: Run build-wrapper - run: | - mkdir build - cd build - cmake .. - cd .. 
- build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/ - - name: Run sonar-scanner - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} - run: | - sonar-scanner \ - --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \ - --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \ - --define sonar.projectKey="clickhouse-java" \ - --define sonar.organization="ClickHouse" From 488c2200466571ebe97dec9ac98925d6eb4a8f48 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Thu, 27 Oct 2022 01:45:38 +0000 Subject: [PATCH 081/112] Fix bug in ParserFunction --- src/Parsers/ExpressionListParsers.cpp | 10 ++++++++++ .../02474_fix_function_parser_bug.reference | 0 .../0_stateless/02474_fix_function_parser_bug.sql | 1 + 3 files changed, 11 insertions(+) create mode 100644 tests/queries/0_stateless/02474_fix_function_parser_bug.reference create mode 100644 tests/queries/0_stateless/02474_fix_function_parser_bug.sql diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index 4af4dabb12e..2f39162e104 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -830,6 +830,16 @@ public: explicit FunctionLayer(String function_name_, bool allow_function_parameters_ = true) : function_name(function_name_), allow_function_parameters(allow_function_parameters_){} + bool getResult(ASTPtr & node) override + { + // FunctionLayer can be the only layer in our Layers stack, + // so we need to check that we exited the main cycle properly + if (!finished) + return false; + + return Layer::getResult(node); + } + bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { /// | 0 | 1 | 2 | diff --git a/tests/queries/0_stateless/02474_fix_function_parser_bug.reference b/tests/queries/0_stateless/02474_fix_function_parser_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02474_fix_function_parser_bug.sql b/tests/queries/0_stateless/02474_fix_function_parser_bug.sql new file mode 100644 index 00000000000..12e9e03f151 --- /dev/null +++ b/tests/queries/0_stateless/02474_fix_function_parser_bug.sql @@ -0,0 +1 @@ +CREATE DATABASE conv_mian ENGINE QALL(COLUMNS('|T.D'),¸mp} -- { clientError 62 } From f31061c68f0c27e4ce1bbec3ed3dcb0822423417 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 27 Oct 2022 09:04:46 +0300 Subject: [PATCH 082/112] Revert "Sonar Cloud Workflow (#42534)" This reverts commit acc03cf52e7079110cc99f2c17854672697e4f80. 
--- .github/workflows/sonar.yml | 64 ------------------------------------- 1 file changed, 64 deletions(-) delete mode 100644 .github/workflows/sonar.yml diff --git a/.github/workflows/sonar.yml b/.github/workflows/sonar.yml deleted file mode 100644 index 316f1e90d40..00000000000 --- a/.github/workflows/sonar.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Sonar Cloud -on: - push: - branches: - - master - pull_request: - types: [opened, synchronize, reopened] -env: - CC: clang-15 - CXX: clang++-15 -jobs: - sonar_cloud: - name: Sonar Cloud - runs-on: [self-hosted, builder] - env: - SONAR_SCANNER_VERSION: 4.7.0.2747 - SONAR_SERVER_URL: "https://sonarcloud.io" - BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed - steps: - - uses: actions/checkout@v2 - with: - fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis - submodules: true - - name: Set up JDK 11 - uses: actions/setup-java@v1 - with: - java-version: 11 - - name: Download and set up sonar-scanner - env: - SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip - run: | - mkdir -p $HOME/.sonar - curl -sSLo $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }} - unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ - echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> $GITHUB_PATH - - name: Download and set up build-wrapper - env: - BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip - run: | - curl -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }} - unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ - echo "$HOME/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH - - name: Set Up Build Tools - run: | - sudo apt-get update - sudo apt-get install -yq git cmake ccache python3 ninja-build - sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" - - name: Run build-wrapper - run: | - mkdir build - cd build - cmake .. - cd .. - build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/ - - name: Run sonar-scanner - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} - run: | - sonar-scanner \ - --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \ - --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \ - --define sonar.projectKey="clickhouse-java" \ - --define sonar.organization="ClickHouse" From b74cccfa0adf3b4aaf79270fcf5bc72ec17bd3f6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 27 Oct 2022 10:19:50 +0300 Subject: [PATCH 083/112] Update CHANGELOG.md --- CHANGELOG.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3948a24d52a..68767612892 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,7 +88,6 @@ * Allow to use `Date32` arguments for `dateName` function. [#42554](https://github.com/ClickHouse/ClickHouse/pull/42554) ([Roman Vasin](https://github.com/rvasin)). * Now filters with NULL literals will be used during index analysis. [#34063](https://github.com/ClickHouse/ClickHouse/issues/34063). [#41842](https://github.com/ClickHouse/ClickHouse/pull/41842) ([Amos Bird](https://github.com/amosbird)). * Merge parts if every part in the range is older than a certain threshold. The threshold can be set by using `min_age_to_force_merge_seconds`. 
This closes [#35836](https://github.com/ClickHouse/ClickHouse/issues/35836). [#42423](https://github.com/ClickHouse/ClickHouse/pull/42423) ([Antonio Andelic](https://github.com/antonio2368)). This is continuation of [#39550i](https://github.com/ClickHouse/ClickHouse/pull/39550) by [@fastio](https://github.com/fastio) who implemented most of the logic. -* Added new infrastructure for query analysis and planning under `allow_experimental_analyzer` setting. [#31796](https://github.com/ClickHouse/ClickHouse/pull/31796) ([Maksim Kita](https://github.com/kitaisreal)). * Improve the time to recover lost keeper connections. [#42541](https://github.com/ClickHouse/ClickHouse/pull/42541) ([Raúl Marín](https://github.com/Algunenano)). #### Build/Testing/Packaging Improvement @@ -143,7 +142,6 @@ * Fix bad_cast assert during INSERT into `Annoy` indexes over non-Float32 columns. `Annoy` indices is an experimental feature. [#42485](https://github.com/ClickHouse/ClickHouse/pull/42485) ([Robert Schulze](https://github.com/rschu1ze)). * Arithmetic operator with Date or DateTime and 128 or 256-bit integer was referencing uninitialized memory. [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Fix unexpected table loading error when partition key contains alias function names during server upgrade. [#36379](https://github.com/ClickHouse/ClickHouse/pull/36379) ([Amos Bird](https://github.com/amosbird)). -* Fixes a crash in `JSONExtract` with `LowCardinality`. [#42633](https://github.com/ClickHouse/ClickHouse/pull/42633) ([Anton Popov](https://github.com/CurtizJ)). ### ClickHouse release 22.9, 2022-09-22 From 2b08fe69634c080f01af7109dcab4ae57afd4c13 Mon Sep 17 00:00:00 2001 From: flynn Date: Thu, 27 Oct 2022 08:56:18 +0000 Subject: [PATCH 084/112] Fix truncate table does not hold lock correctly --- src/Interpreters/InterpreterDropQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 8be3dce7bf1..28f8e43ee9b 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -213,7 +213,7 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ContextPtr context_, ASTDropQue { /// And for simple MergeTree we can stop merges before acquiring the lock auto merges_blocker = table->getActionLock(ActionLocks::PartsMerge); - auto table_lock = table->lockExclusively(context_->getCurrentQueryId(), context_->getSettingsRef().lock_acquire_timeout); + table_lock = table->lockExclusively(context_->getCurrentQueryId(), context_->getSettingsRef().lock_acquire_timeout); } auto metadata_snapshot = table->getInMemoryMetadataPtr(); From 5fcbc26800d44fb76051265efc007a1640c42f41 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Wed, 26 Oct 2022 16:21:25 +0200 Subject: [PATCH 085/112] OrderByLimitByDuplicateEliminationPass improve performance --- ...OrderByLimitByDuplicateEliminationPass.cpp | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/src/Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.cpp b/src/Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.cpp index 0232d8958ff..e4d6633b6e6 100644 --- a/src/Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.cpp +++ b/src/Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.cpp @@ -10,23 +10,34 @@ namespace DB namespace { -struct QueryTreeNodeHash +struct 
QueryTreeNodeWithHash { - size_t operator()(const IQueryTreeNode * node) const + explicit QueryTreeNodeWithHash(const IQueryTreeNode * node_) + : node(node_) + , hash(node->getTreeHash().first) + {} + + const IQueryTreeNode * node = nullptr; + size_t hash = 0; +}; + +struct QueryTreeNodeWithHashHash +{ + size_t operator()(const QueryTreeNodeWithHash & node_with_hash) const { - return node->getTreeHash().first; + return node_with_hash.hash; } }; -struct QueryTreeNodeEqualTo +struct QueryTreeNodeWithHashEqualTo { - size_t operator()(const IQueryTreeNode * lhs_node, const IQueryTreeNode * rhs_node) const + bool operator()(const QueryTreeNodeWithHash & lhs_node, const QueryTreeNodeWithHash & rhs_node) const { - return lhs_node->isEqual(*rhs_node); + return lhs_node.hash == rhs_node.hash && lhs_node.node->isEqual(*rhs_node.node); } }; -using QueryTreeNodeSet = std::unordered_set; +using QueryTreeNodeWithHashSet = std::unordered_set; class OrderByLimitByDuplicateEliminationVisitor : public InDepthQueryTreeVisitor { @@ -82,7 +93,7 @@ public: } private: - QueryTreeNodeSet unique_expressions_nodes_set; + QueryTreeNodeWithHashSet unique_expressions_nodes_set; }; } From b2ab692d87ec8a8fac20b7d8cc92653504bb6aaf Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Thu, 27 Oct 2022 10:03:03 +0000 Subject: [PATCH 086/112] Safer getResult() --- src/Parsers/ExpressionListParsers.cpp | 252 +++++++++++++------------- 1 file changed, 128 insertions(+), 124 deletions(-) diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index 2f39162e104..c362340d013 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -561,13 +561,10 @@ public: virtual bool getResult(ASTPtr & node) { - if (elements.size() == 1) - { - node = std::move(elements[0]); - return true; - } + if (!finished) + return false; - return false; + return getResultImpl(node); } virtual bool parse(IParser::Pos & /*pos*/, Expected & /*expected*/, Action & /*action*/) = 0; @@ -746,6 +743,17 @@ public: Checkpoint current_checkpoint = Checkpoint::None; protected: + virtual bool getResultImpl(ASTPtr & node) + { + if (elements.size() == 1) + { + node = std::move(elements[0]); + return true; + } + + return false; + } + std::vector operators; ASTs operands; ASTs elements; @@ -766,17 +774,12 @@ public: bool getResult(ASTPtr & node) override { /// We can exit the main cycle outside the parse() function, - /// so we need to merge the element here + /// so we need to merge the element here. + /// Because of this 'finished' flag can also not be set. 
if (!mergeElement()) return false; - if (elements.size() == 1) - { - node = std::move(elements[0]); - return true; - } - - return false; + return Layer::getResultImpl(node); } bool parse(IParser::Pos & pos, Expected & /*expected*/, Action & /*action*/) override @@ -830,16 +833,6 @@ public: explicit FunctionLayer(String function_name_, bool allow_function_parameters_ = true) : function_name(function_name_), allow_function_parameters(allow_function_parameters_){} - bool getResult(ASTPtr & node) override - { - // FunctionLayer can be the only layer in our Layers stack, - // so we need to check that we exited the main cycle properly - if (!finished) - return false; - - return Layer::getResult(node); - } - bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { /// | 0 | 1 | 2 | @@ -1039,17 +1032,6 @@ private: class RoundBracketsLayer : public Layer { public: - bool getResult(ASTPtr & node) override - { - // Round brackets can mean priority operator as well as function tuple() - if (!is_tuple && elements.size() == 1) - node = std::move(elements[0]); - else - node = makeASTFunction("tuple", std::move(elements)); - - return true; - } - bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { if (ParserToken(TokenType::Comma).ignore(pos, expected)) @@ -1079,6 +1061,19 @@ public: return true; } + +protected: + bool getResultImpl(ASTPtr & node) override + { + // Round brackets can mean priority operator as well as function tuple() + if (!is_tuple && elements.size() == 1) + node = std::move(elements[0]); + else + node = makeASTFunction("tuple", std::move(elements)); + + return true; + } + private: bool is_tuple = false; }; @@ -1087,16 +1082,17 @@ private: class ArrayLayer : public LayerWithSeparator { public: - bool getResult(ASTPtr & node) override - { - node = makeASTFunction("array", std::move(elements)); - return true; - } - bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { return LayerWithSeparator::parse(pos, expected, action); } + +protected: + bool getResultImpl(ASTPtr & node) override + { + node = makeASTFunction("array", std::move(elements)); + return true; + } }; /// Layer for arrayElement square brackets operator @@ -1216,23 +1212,6 @@ class ExtractLayer : public LayerWithSeparator(interval_kind.toDateDiffUnit()), elements[0], elements[1]); - else if (elements.size() == 3) - node = makeASTFunction("dateDiff", std::make_shared(interval_kind.toDateDiffUnit()), elements[0], elements[1], elements[2]); - else - return false; - } - else - { - node = makeASTFunction("dateDiff", std::move(elements)); - } - return true; - } - bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { /// 0. 
Try to parse interval_kind (-> 1) @@ -1709,6 +1693,25 @@ public: return true; } +protected: + bool getResultImpl(ASTPtr & node) override + { + if (parsed_interval_kind) + { + if (elements.size() == 2) + node = makeASTFunction("dateDiff", std::make_shared(interval_kind.toDateDiffUnit()), elements[0], elements[1]); + else if (elements.size() == 3) + node = makeASTFunction("dateDiff", std::make_shared(interval_kind.toDateDiffUnit()), elements[0], elements[1], elements[2]); + else + return false; + } + else + { + node = makeASTFunction("dateDiff", std::move(elements)); + } + return true; + } + private: IntervalKind interval_kind; bool parsed_interval_kind = false; @@ -1892,16 +1895,6 @@ class ViewLayer : public Layer public: explicit ViewLayer(bool if_permitted_) : if_permitted(if_permitted_) {} - bool getResult(ASTPtr & node) override - { - if (if_permitted) - node = makeASTFunction("viewIfPermitted", std::move(elements)); - else - node = makeASTFunction("view", std::move(elements)); - - return true; - } - bool parse(IParser::Pos & pos, Expected & expected, Action & /*action*/) override { /// view(SELECT ...) @@ -1958,6 +1951,17 @@ public: return true; } +protected: + bool getResultImpl(ASTPtr & node) override + { + if (if_permitted) + node = makeASTFunction("viewIfPermitted", std::move(elements)); + else + node = makeASTFunction("view", std::move(elements)); + + return true; + } + private: bool if_permitted; }; From a36119247faaf4a394fb56625184ceebe3623c8a Mon Sep 17 00:00:00 2001 From: Radistka-75 <76100894+Radistka-75@users.noreply.github.com> Date: Thu, 27 Oct 2022 13:06:28 +0300 Subject: [PATCH 087/112] Update in.md Change md-formatting for note --- docs/ru/sql-reference/operators/in.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/ru/sql-reference/operators/in.md b/docs/ru/sql-reference/operators/in.md index 2b3d87a877f..fa679b890a7 100644 --- a/docs/ru/sql-reference/operators/in.md +++ b/docs/ru/sql-reference/operators/in.md @@ -122,9 +122,9 @@ FROM t_null Существует два варианта IN-ов с подзапросами (аналогично для JOIN-ов): обычный `IN` / `JOIN` и `GLOBAL IN` / `GLOBAL JOIN`. Они отличаются способом выполнения при распределённой обработке запроса. - :::note "Attention" - Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../../operations/settings/settings.md) `distributed_product_mode`. - ::: +:::note "Attention" +Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../../operations/settings/settings.md) `distributed_product_mode`. +::: При использовании обычного IN-а, запрос отправляется на удалённые серверы, и на каждом из них выполняются подзапросы в секциях `IN` / `JOIN`. При использовании `GLOBAL IN` / `GLOBAL JOIN-а`, сначала выполняются все подзапросы для `GLOBAL IN` / `GLOBAL JOIN-ов`, и результаты складываются во временные таблицы. Затем эти временные таблицы передаются на каждый удалённый сервер, и на них выполняются запросы, с использованием этих переданных временных данных. 
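A minimal sketch of the behaviour described in the note above, using illustrative table names (`distributed_table` as a Distributed table over shards of `local_table`) that are not part of this patch. With plain `IN`, every shard re-evaluates the subquery against its own local data; with `GLOBAL IN`, the subquery runs once on the initiator and its result is shipped to the shards as a temporary table:

``` sql
-- Plain IN: each remote server runs the subquery over its own local_table,
-- so every shard may see a different subquery result.
SELECT uniq(UserID)
FROM distributed_table
WHERE CounterID = 101500
  AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34);

-- GLOBAL IN: the subquery is evaluated once, materialized into a temporary
-- table, and that table is sent to every remote server before the outer query runs.
SELECT uniq(UserID)
FROM distributed_table
WHERE CounterID = 101500
  AND UserID GLOBAL IN (SELECT UserID FROM distributed_table WHERE CounterID = 34);
```

`GLOBAL IN` trades an extra pass and the network transfer of the temporary table for a subquery result that is consistent across all shards, while plain `IN` lets each shard see only its own local rows when evaluating the subquery.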
From d489c46f02b709e01bda01ffe7b3daa26cc63305 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Thu, 27 Oct 2022 10:23:41 +0000 Subject: [PATCH 088/112] Better test --- tests/queries/0_stateless/02474_fix_function_parser_bug.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02474_fix_function_parser_bug.sql b/tests/queries/0_stateless/02474_fix_function_parser_bug.sql index 12e9e03f151..67d97aa1c25 100644 --- a/tests/queries/0_stateless/02474_fix_function_parser_bug.sql +++ b/tests/queries/0_stateless/02474_fix_function_parser_bug.sql @@ -1 +1 @@ -CREATE DATABASE conv_mian ENGINE QALL(COLUMNS('|T.D'),¸mp} -- { clientError 62 } +CREATE DATABASE conv_mian ENGINE QALL(COLUMNS('|T.D'),¸mp} -- { clientError SYNTAX_ERROR } From 0245df6a82f4fff9f45f6a62dfe8a5c9307b0819 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Thu, 27 Oct 2022 10:41:45 +0000 Subject: [PATCH 089/112] Fix docs --- docs/ru/engines/table-engines/integrations/kafka.md | 7 ++++--- .../mergetree-family/aggregatingmergetree.md | 7 ++++--- .../mergetree-family/collapsingmergetree.md | 9 +++++---- .../table-engines/mergetree-family/graphitemergetree.md | 7 ++++--- .../engines/table-engines/mergetree-family/mergetree.md | 7 ++++--- .../table-engines/mergetree-family/summingmergetree.md | 7 ++++--- docs/ru/sql-reference/functions/date-time-functions.md | 4 ++-- 7 files changed, 27 insertions(+), 21 deletions(-) diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index 37fc902e777..a5f091e1b23 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -87,14 +87,15 @@ SETTINGS Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот метод в новых проектах. По возможности переключите старые проекты на метод, описанный выше. +:::note "Attention" +Не используйте этот метод в новых проектах. По возможности переключите старые проекты на метод, описанный выше. +::: ``` sql Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format [, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages]) ``` - ::: + ## Описание {#opisanie} diff --git a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md index aa16113192e..86a275767a0 100644 --- a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md @@ -39,9 +39,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. - ::: +:::note "Attention" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. 
+::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( diff --git a/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md index ecaaa6b8417..72b4725c6ed 100644 --- a/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md @@ -43,9 +43,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. - ::: +:::note "Attention" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. +::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( @@ -59,7 +60,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - `sign` — Имя столбца с типом строки: `1` — строка состояния, `-1` — строка отмены состояния. - Тип данных столбца — `Int8`. + Тип данных столбца — `Int8`. diff --git a/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md b/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md index 818f85f7e37..324a3fd1633 100644 --- a/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md @@ -55,9 +55,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. - ::: +:::note "Attention" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. +::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index e01e0006b87..f024d5f1985 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -115,9 +115,10 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ, описанный выше. - ::: +:::note "Attention" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ, описанный выше. +::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( diff --git a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md index 0d9d268fa46..7b69927e161 100644 --- a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md @@ -42,9 +42,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. - ::: +:::note "Attention" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. 
+::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 80e2561a8d7..d586bbd42e4 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -316,9 +316,9 @@ SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101; Возвращается дата. :::note "Attention" - Возвращаемое значение для некорректных дат зависит от реализации. ClickHouse может вернуть нулевую дату, выбросить исключение, или выполнить «естественное» перетекание дат между месяцами. +Возвращаемое значение для некорректных дат зависит от реализации. ClickHouse может вернуть нулевую дату, выбросить исключение, или выполнить «естественное» перетекание дат между месяцами. ::: - + ## toMonday {#tomonday} Округляет дату или дату-с-временем вниз до ближайшего понедельника. From 8bd94979ebfa9fa2e90544910e78bee6928f4568 Mon Sep 17 00:00:00 2001 From: Roman Vasin Date: Thu, 27 Oct 2022 10:46:41 +0000 Subject: [PATCH 090/112] Add test queries for Date32 --- tests/queries/0_stateless/00718_format_datetime.reference | 2 ++ tests/queries/0_stateless/00718_format_datetime.sql | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/tests/queries/0_stateless/00718_format_datetime.reference b/tests/queries/0_stateless/00718_format_datetime.reference index 4f12a46d7c0..6a0fd451e73 100644 --- a/tests/queries/0_stateless/00718_format_datetime.reference +++ b/tests/queries/0_stateless/00718_format_datetime.reference @@ -28,6 +28,8 @@ PM % no formatting pattern 2018-01-01 00:00:00 +2018-01-01 00:00:00 +1927-01-01 00:00:00 2018-01-01 01:00:00 2018-01-01 04:00:00 +0000 -1100 diff --git a/tests/queries/0_stateless/00718_format_datetime.sql b/tests/queries/0_stateless/00718_format_datetime.sql index 7ed1f0abea4..532a0c02e6f 100644 --- a/tests/queries/0_stateless/00718_format_datetime.sql +++ b/tests/queries/0_stateless/00718_format_datetime.sql @@ -40,6 +40,10 @@ SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%%'); SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), 'no formatting pattern'); SELECT formatDateTime(toDate('2018-01-01'), '%F %T'); + +SELECT formatDateTime(toDate32('2018-01-01'), '%F %T'); +SELECT formatDateTime(toDate32('1927-01-01'), '%F %T'); + SELECT formatDateTime(toDateTime('2018-01-01 01:00:00', 'UTC'), '%F %T', 'UTC'), formatDateTime(toDateTime('2018-01-01 01:00:00', 'UTC'), '%F %T', 'Asia/Istanbul'); From c46ee42a99e785515fe4e4fe1839cdce657b7a58 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 27 Oct 2022 11:12:23 +0000 Subject: [PATCH 091/112] Update version_date.tsv and changelogs after v22.8.8.3-lts --- docs/changelogs/v22.8.8.3-lts.md | 13 +++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 14 insertions(+) create mode 100644 docs/changelogs/v22.8.8.3-lts.md diff --git a/docs/changelogs/v22.8.8.3-lts.md b/docs/changelogs/v22.8.8.3-lts.md new file mode 100644 index 00000000000..deaab51fce9 --- /dev/null +++ b/docs/changelogs/v22.8.8.3-lts.md @@ -0,0 +1,13 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.8.8.3-lts (ac5a6cababc) FIXME as compared to v22.8.7.34-lts (3c38e5e8ab9) + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42677](https://github.com/ClickHouse/ClickHouse/issues/42677): keeper-fix: fix race in accessing logs while snapshot is 
being installed. [#40627](https://github.com/ClickHouse/ClickHouse/pull/40627) ([Antonio Andelic](https://github.com/antonio2368)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 0470152ecff..8977c98eb7e 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -3,6 +3,7 @@ v22.9.4.32-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 +v22.8.8.3-lts 2022-10-27 v22.8.7.34-lts 2022-10-26 v22.8.6.71-lts 2022-09-30 v22.8.5.29-lts 2022-09-13 From 1fa24161e79833ebd333c67a56b0f41ff4ff5d68 Mon Sep 17 00:00:00 2001 From: Jordi Villar Date: Thu, 27 Oct 2022 18:34:37 +0200 Subject: [PATCH 092/112] Minor fix implicit cast CaresPTRResolver --- src/Common/CaresPTRResolver.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/CaresPTRResolver.cpp b/src/Common/CaresPTRResolver.cpp index c6fe70a09fa..ab4883c104a 100644 --- a/src/Common/CaresPTRResolver.cpp +++ b/src/Common/CaresPTRResolver.cpp @@ -128,7 +128,7 @@ namespace DB int number_of_fds_ready = 0; if (!readable_sockets.empty()) { - number_of_fds_ready = poll(readable_sockets.data(), readable_sockets.size(), timeout); + number_of_fds_ready = poll(readable_sockets.data(), static_cast(readable_sockets.size()), static_cast(timeout)); } if (number_of_fds_ready > 0) From 073d81c31f612ccaba3ba23678966e202ef17dca Mon Sep 17 00:00:00 2001 From: Jordi Villar Date: Thu, 27 Oct 2022 20:25:49 +0200 Subject: [PATCH 093/112] Use nfds_t instead --- src/Common/CaresPTRResolver.cpp | 2 +- utils/iotest/iotest_nonblock.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Common/CaresPTRResolver.cpp b/src/Common/CaresPTRResolver.cpp index ab4883c104a..99b4c34dfbd 100644 --- a/src/Common/CaresPTRResolver.cpp +++ b/src/Common/CaresPTRResolver.cpp @@ -128,7 +128,7 @@ namespace DB int number_of_fds_ready = 0; if (!readable_sockets.empty()) { - number_of_fds_ready = poll(readable_sockets.data(), static_cast(readable_sockets.size()), static_cast(timeout)); + number_of_fds_ready = poll(readable_sockets.data(), static_cast(readable_sockets.size()), static_cast(timeout)); } if (number_of_fds_ready > 0) diff --git a/utils/iotest/iotest_nonblock.cpp b/utils/iotest/iotest_nonblock.cpp index 6db00045e03..32c86282743 100644 --- a/utils/iotest/iotest_nonblock.cpp +++ b/utils/iotest/iotest_nonblock.cpp @@ -101,7 +101,7 @@ int mainImpl(int argc, char ** argv) size_t ops = 0; while (ops < count) { - if (poll(polls.data(), static_cast(descriptors), -1) <= 0) + if (poll(polls.data(), static_cast(descriptors), -1) <= 0) throwFromErrno("poll failed", ErrorCodes::SYSTEM_ERROR); for (size_t i = 0; i < descriptors; ++i) { From a4ce0e344a33a3eb862a8178761bdeddf1022a08 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Thu, 27 Oct 2022 21:11:41 +0000 Subject: [PATCH 094/112] Fix build src/Common/CaresPTRResolver.cpp:126:27 error: implicit conversion loses integer precision: 'int64_t' (aka 'long') to 'int' [-Werror,-Wshorten-64-to-32] --- src/Common/CaresPTRResolver.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/CaresPTRResolver.cpp b/src/Common/CaresPTRResolver.cpp index c6fe70a09fa..f5533a416ad 100644 --- a/src/Common/CaresPTRResolver.cpp +++ b/src/Common/CaresPTRResolver.cpp @@ -128,7 +128,7 @@ namespace DB int number_of_fds_ready = 0; if (!readable_sockets.empty()) { - number_of_fds_ready = poll(readable_sockets.data(), readable_sockets.size(), timeout); + 
number_of_fds_ready = poll(readable_sockets.data(), readable_sockets.size(), static_cast(timeout)); } if (number_of_fds_ready > 0) From 51e59405bc33ce3b2edddb6a08de5b7795e143d5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 28 Oct 2022 09:41:29 +0000 Subject: [PATCH 095/112] Update version_date.tsv and changelogs after v22.3.14.18-lts --- docs/changelogs/v22.3.14.18-lts.md | 26 ++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 27 insertions(+) create mode 100644 docs/changelogs/v22.3.14.18-lts.md diff --git a/docs/changelogs/v22.3.14.18-lts.md b/docs/changelogs/v22.3.14.18-lts.md new file mode 100644 index 00000000000..d0c67a2b241 --- /dev/null +++ b/docs/changelogs/v22.3.14.18-lts.md @@ -0,0 +1,26 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.3.14.18-lts (642946f61b2) FIXME as compared to v22.3.13.80-lts (e2708b01fba) + +#### Bug Fix +* Backported in [#42432](https://github.com/ClickHouse/ClickHouse/issues/42432): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Backported in [#42328](https://github.com/ClickHouse/ClickHouse/issues/42328): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Backported in [#42358](https://github.com/ClickHouse/ClickHouse/issues/42358): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42298](https://github.com/ClickHouse/ClickHouse/issues/42298): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42592](https://github.com/ClickHouse/ClickHouse/issues/42592): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 8977c98eb7e..5e50b998ec4 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -36,6 +36,7 @@ v22.4.5.9-stable 2022-05-06 v22.4.4.7-stable 2022-04-29 v22.4.3.3-stable 2022-04-26 v22.4.2.1-stable 2022-04-22 +v22.3.14.18-lts 2022-10-28 v22.3.13.80-lts 2022-09-30 v22.3.12.19-lts 2022-08-29 v22.3.11.12-lts 2022-08-10 From 4f62bd6ba05f44ab992a4eed53dc55c8ad9727d9 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 28 Oct 2022 11:43:06 +0000 Subject: [PATCH 096/112] Update version_date.tsv and changelogs after v22.3.14.23-lts --- docs/changelogs/v22.3.14.23-lts.md | 29 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 30 insertions(+) create mode 100644 docs/changelogs/v22.3.14.23-lts.md diff --git a/docs/changelogs/v22.3.14.23-lts.md b/docs/changelogs/v22.3.14.23-lts.md new file mode 100644 index 00000000000..663d8b43f6f --- /dev/null +++ b/docs/changelogs/v22.3.14.23-lts.md @@ -0,0 +1,29 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.3.14.23-lts (74956bfee4d) FIXME as compared to v22.3.13.80-lts (e2708b01fba) + +#### Improvement +* Backported in [#42527](https://github.com/ClickHouse/ClickHouse/issues/42527): Fix issue with passing MySQL timeouts for MySQL database engine and MySQL table function. Closes [#34168](https://github.com/ClickHouse/ClickHouse/issues/34168)?notification_referrer_id=NT_kwDOAzsV57MzMDMxNjAzNTY5OjU0MjAzODc5. [#40751](https://github.com/ClickHouse/ClickHouse/pull/40751) ([Kseniia Sumarokova](https://github.com/kssenii)). + +#### Bug Fix +* Backported in [#42432](https://github.com/ClickHouse/ClickHouse/issues/42432): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Backported in [#42328](https://github.com/ClickHouse/ClickHouse/issues/42328): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Backported in [#42358](https://github.com/ClickHouse/ClickHouse/issues/42358): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42298](https://github.com/ClickHouse/ClickHouse/issues/42298): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42592](https://github.com/ClickHouse/ClickHouse/issues/42592): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). 
[#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 8977c98eb7e..21197713694 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -36,6 +36,7 @@ v22.4.5.9-stable 2022-05-06 v22.4.4.7-stable 2022-04-29 v22.4.3.3-stable 2022-04-26 v22.4.2.1-stable 2022-04-22 +v22.3.14.23-lts 2022-10-28 v22.3.13.80-lts 2022-09-30 v22.3.12.19-lts 2022-08-29 v22.3.11.12-lts 2022-08-10 From 15f3f56812d82243d0ed89a45916eb2bf5e1be70 Mon Sep 17 00:00:00 2001 From: Roman Vasin Date: Fri, 28 Oct 2022 12:44:47 +0000 Subject: [PATCH 097/112] Change 1 argument of function to first argument of function etc --- src/Functions/formatDateTime.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp index c5240abf7a1..9634768e68a 100644 --- a/src/Functions/formatDateTime.cpp +++ b/src/Functions/formatDateTime.cpp @@ -322,11 +322,11 @@ public: getName(), arguments.size()); if (arguments.size() == 1 && !isInteger(arguments[0].type)) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of 1 argument of function {} when arguments size is 1. Should be integer", + "Illegal type {} of first argument of function {} when arguments size is 1. Should be integer", arguments[0].type->getName(), getName()); if (arguments.size() > 1 && !(isInteger(arguments[0].type) || isDate(arguments[0].type) || isDateTime(arguments[0].type) || isDate32(arguments[0].type) || isDateTime64(arguments[0].type))) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of 1 argument of function {} when arguments size is 2 or 3. Should be a integer or a date with time", + "Illegal type {} of first argument of function {} when arguments size is 2 or 3. Should be a integer or a date with time", arguments[0].type->getName(), getName()); } else @@ -337,18 +337,18 @@ public: getName(), arguments.size()); if (!isDate(arguments[0].type) && !isDateTime(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime64(arguments[0].type)) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of 1 argument of function {}. Should be a date or a date with time", + "Illegal type {} of first argument of function {}. Should be a date or a date with time", arguments[0].type->getName(), getName()); } if (arguments.size() == 2 && !WhichDataType(arguments[1].type).isString()) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of 2 argument of function {}. Must be String.", + "Illegal type {} of second argument of function {}. Must be String.", arguments[1].type->getName(), getName()); if (arguments.size() == 3 && !WhichDataType(arguments[2].type).isString()) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of 3 argument of function {}. Must be String.", + "Illegal type {} of third argument of function {}. 
Must be String.", arguments[2].type->getName(), getName()); if (arguments.size() == 1) From fc16752844eb03adfbcb5c4241a6109243ce4e98 Mon Sep 17 00:00:00 2001 From: Roman Vasin Date: Fri, 28 Oct 2022 12:56:52 +0000 Subject: [PATCH 098/112] Update docs for FROM_UNIXTIME --- docs/en/sql-reference/functions/date-time-functions.md | 4 ++-- docs/ru/sql-reference/functions/date-time-functions.md | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 15fc9ef0c89..f7ea2690b21 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -1068,7 +1068,7 @@ Example: SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600)); SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299); SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0)); -``` +``` ``` text ┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐ │ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │ @@ -1244,7 +1244,7 @@ Result: └──────────────────────────┘ ``` -When there are two arguments: first is an [Integer](../../sql-reference/data-types/int-uint.md) or [DateTime](../../sql-reference/data-types/datetime.md), second is a constant format string — it acts in the same way as [formatDateTime](#formatdatetime) and return [String](../../sql-reference/data-types/string.md#string) type. +When there are two or three arguments, the first an [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md), the second a constant format string and the third an optional constant time zone string — it acts in the same way as [formatDateTime](#formatdatetime) and return [String](../../sql-reference/data-types/string.md#string) type. For example: diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 80e2561a8d7..4acd706b3db 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -318,7 +318,7 @@ SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101; :::note "Attention" Возвращаемое значение для некорректных дат зависит от реализации. ClickHouse может вернуть нулевую дату, выбросить исключение, или выполнить «естественное» перетекание дат между месяцами. ::: - + ## toMonday {#tomonday} Округляет дату или дату-с-временем вниз до ближайшего понедельника. @@ -1126,8 +1126,7 @@ SELECT FROM_UNIXTIME(423543535); └──────────────────────────┘ ``` -В случае, когда есть два аргумента: первый типа [Integer](../../sql-reference/data-types/int-uint.md) или [DateTime](../../sql-reference/data-types/datetime.md), а второй является строкой постоянного формата — функция работает также, как [formatDateTime](#formatdatetime), и возвращает значение типа [String](../../sql-reference/data-types/string.md#string). 
- +В случае, когда есть два или три аргумента: первый типа [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md), а второй является строкой постоянного формата и третий является строкой постоянной временной зоны — функция работает также, как [formatDateTime](#formatdatetime), и возвращает значение типа [String](../../sql-reference/data-types/string.md#string). Запрос: From 5755728b33543d472a00c71dc1789d0532d00785 Mon Sep 17 00:00:00 2001 From: Roman Vasin Date: Fri, 28 Oct 2022 13:11:09 +0000 Subject: [PATCH 099/112] Add Date32 and DateTime64 into exception messages --- src/Functions/formatDateTime.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp index 9634768e68a..4db04d61d84 100644 --- a/src/Functions/formatDateTime.cpp +++ b/src/Functions/formatDateTime.cpp @@ -371,7 +371,7 @@ public: })) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, - "Illegal column {} of function {}, must be Integer or DateTime when arguments size is 1.", + "Illegal column {} of function {}, must be Integer, Date, Date32, DateTime or DateTime64 when arguments size is 1.", arguments[0].column->getName(), getName()); } } @@ -382,7 +382,7 @@ public: using FromDataType = std::decay_t; if (!(res = executeType(arguments, result_type))) throw Exception(ErrorCodes::ILLEGAL_COLUMN, - "Illegal column {} of function {}, must be Integer or DateTime.", + "Illegal column {} of function {}, must be Integer, Date, Date32, DateTime or DateTime64.", arguments[0].column->getName(), getName()); return true; })) From 7a5432feaa39596be26974ef1d84d6e954deafc1 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 28 Oct 2022 15:10:53 +0200 Subject: [PATCH 100/112] Fix possible SIGSEGV for web disks when file does not exists It can be triggered in multiple ways, either when file does not exists and you are trying to create MergeTree table from web (that has special code for UUID handling) or by simply OPTIMIZE TABLE FINAL for MergeTree table that is located on web disk, in both cases you will get the following:
stacktrace 2022.10.28 14:08:40.631226 [ 6043 ] {6165bf5f-e76b-4bca-941c-7c7ff5e3b46b} ContextAccess (default): Access granted: OPTIMIZE ON default.data_from_web 2022.10.28 14:08:40.632017 [ 6043 ] {6165bf5f-e76b-4bca-941c-7c7ff5e3b46b} default.data_from_web (a3e65e1f-5fd4-47ed-9dbd-307f2586b52d) (MergerMutator): Selected 1 parts from all_1_1_0 to all_1_1_0 2022.10.28 14:08:40.632496 [ 6043 ] {6165bf5f-e76b-4bca-941c-7c7ff5e3b46b} default.data_from_web (a3e65e1f-5fd4-47ed-9dbd-307f2586b52d): Trying to reserve 1.00 MiB using storage policy from min volume index 0 2022.10.28 14:08:40.632752 [ 6043 ] {6165bf5f-e76b-4bca-941c-7c7ff5e3b46b} DiskObjectStorage(DiskWebServer): Reserved 1.00 MiB on remote disk `web_disk`, having unreserved 16.00 EiB. 2022.10.28 14:08:40.634155 [ 6043 ] {a3e65e1f-5fd4-47ed-9dbd-307f2586b52d::all_1_1_1} MergeTask::PrepareStage: Merging 1 parts: from all_1_1_0 to all_1_1_0 into Compact 2022.10.28 14:08:40.634498 [ 6043 ] {a3e65e1f-5fd4-47ed-9dbd-307f2586b52d::all_1_1_1} WebObjectStorage: Loading metadata for directory: http://127.0.0.1:8080/store/a3e/a3e65e1f-5fd4-47ed-9dbd-307f2586b52d/tmp_merge_all_1_1_1 2022.10.28 14:08:40.635025 [ 6043 ] {a3e65e1f-5fd4-47ed-9dbd-307f2586b52d::all_1_1_1} DiskWeb: Adding directory: store/a3e/a3e65e1f-5fd4-47ed-9dbd-307f2586b52d/tmp_merge_all_1_1_1/ 2022.10.28 14:08:40.635355 [ 6043 ] {a3e65e1f-5fd4-47ed-9dbd-307f2586b52d::all_1_1_1} ReadWriteBufferFromHTTP: Sending request to http://127.0.0.1:8080/store/a3e/a3e65e1f-5fd4-47ed-9dbd-307f2586b52d/tmp_merge_all_1_1_1/.index 2022.10.28 14:08:40.639618 [ 6043 ] {a3e65e1f-5fd4-47ed-9dbd-307f2586b52d::all_1_1_1} DiskWeb: Cannot load disk metadata. Error: Code: 86. DB::Exception: Received error from remote server /store/a3e/a3e65e1f-5fd4-47ed-9dbd-307f2586b52d/tmp_merge_all_1_1_1/.index. HTTP status code: 404 Not Found, body: 404 Not Found

Not Found
The requested resource /store/a3e/a3e65e1f-5fd4-47ed-9dbd-307f2586b52d/tmp_merge_all_1_1_1/.index was not found on this server.
: while loading disk metadata. (RECEIVED_ERROR_FROM_REMOTE_IO_SERVER) (version 22.11.1.1) 2022.10.28 14:08:40.640527 [ 5488 ] {} BaseDaemon: Received signal 11 2022.10.28 14:08:40.641529 [ 9027 ] {} BaseDaemon: ######################################## 2022.10.28 14:08:40.642759 [ 9027 ] {} BaseDaemon: (version 22.11.1.1, build id: 12145DA78CE5E9EBB10A034177FAE5967EF81A4A) (from thread 6043) (query_id: a3e65e1f-5fd4-47ed-9dbd-307f2586b52d::all_1_1_1) (query: optimize table data_from_web final) Received signal Segmentation fault (11) 2022.10.28 14:08:40.643260 [ 9027 ] {} BaseDaemon: Address: NULL pointer. Access: read. Unknown si_code. 2022.10.28 14:08:40.643769 [ 9027 ] {} BaseDaemon: Stack trace: 0x7ffff416c0f2 0x7ffff7cd1ca8 0x7ffff679ae5e 0x7fffd52e7906 0x7fffd50c65aa 0x7fffca7a0d42 0x7fffcaee79ec 0x7fffcaf242f8 0x7fffcaf242b5 0x7fffcaf2427d 0x7fffcaf24255 0x7fffcaf2421d 0x7ffff65c3686 0x7ffff65c2295 0x7fffcaeee2a9 0x7fffcaef2c43 0x7fffcaee3c0e 0x7fffcc4a7851 0x7fffcc4a768f 0x7fffcc4abb2d 0x7fffcfdce828 0x7fffd03e3eaa 0x7fffd03dfe3b 0x7fffc8ec42d4 0x7fffc8ed51d2 0x7ffff4bdd839 0x7ffff4bde0a8 0x7ffff48ab261 0x7ffff48a769a 0x7ffff48a6335 0x7ffff409f8fd 0x7ffff4121a60 2022.10.28 14:08:40.644411 [ 9027 ] {} BaseDaemon: 4. ? @ 0x7ffff416c0f2 in ? 2022.10.28 14:08:40.676390 [ 9027 ] {} BaseDaemon: 5. /src/ch/clickhouse/src/Common/StringUtils/StringUtils.cpp:9: detail::startsWith(std::__1::basic_string, std::__1::allocator> const&, char const*, unsigned long) @ 0x1ca8 in /src/ch/clickhouse/.cmake/src/Common/StringUtils/libstring_utilsd.so 2022.10.28 14:08:40.730727 [ 9027 ] {} BaseDaemon: 6. /src/ch/clickhouse/src/Common/StringUtils/StringUtils.h:19: startsWith(std::__1::basic_string, std::__1::allocator> const&, std::__1::basic_string, std::__1::allocator> const&) @ 0x59ae5e in /src/ch/clickhouse/.cmake/src/libclickhouse_common_iod.so 2022.10.28 14:08:40.923955 [ 9027 ] {} BaseDaemon: 7. /src/ch/clickhouse/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.cpp:58: DB::MetadataStorageFromStaticFilesWebServer::exists(std::__1::basic_string, std::__1::allocator> const&) const @ 0x6e7906 in /src/ch/clickhouse/.cmake/src/libdbmsd.so 2022.10.28 14:08:41.291996 [ 9027 ] {} BaseDaemon: 8. /src/ch/clickhouse/src/Disks/ObjectStorages/DiskObjectStorage.cpp:181: DB::DiskObjectStorage::exists(std::__1::basic_string, std::__1::allocator> const&) const @ 0x4c65aa in /src/ch/clickhouse/.cmake/src/libdbmsd.so 2022.10.28 14:08:41.704697 [ 9027 ] {} BaseDaemon: 9. /src/ch/clickhouse/src/Storages/MergeTree/DataPartStorageOnDisk.cpp:74: DB::DataPartStorageOnDisk::exists() const @ 0xda0d42 in /src/ch/clickhouse/.cmake/src/libclickhouse_storages_mergetreed.so 2022.10.28 14:08:43.032459 [ 9027 ] {} BaseDaemon: 10. /src/ch/clickhouse/src/Storages/MergeTree/MergeTask.cpp:147: DB::MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() @ 0x14e79ec in /src/ch/clickhouse/.cmake/src/libclickhouse_storages_mergetreed.so ... Segmentation fault (core dumped)
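[Editor's note: the crash quoted above is a dereference of the iterator returned by `std::lower_bound` when the queried path sorts after every stored key, so the lookup yields `end()`; the hunk below adds exactly that guard. A minimal, self-contained sketch of the pattern — the container, key names and prefix check are simplified stand-ins for the real `object_storage.files` lookup, not the actual ClickHouse code:]

```cpp
#include <algorithm>
#include <iostream>
#include <map>
#include <string>

// Illustrative stand-in for the web disk's path -> metadata map.
using Files = std::map<std::string, size_t>;

bool exists(const Files & files, const std::string & path)
{
    auto it = std::lower_bound(
        files.begin(), files.end(), path,
        [](const auto & file, const std::string & path_) { return file.first < path_; });

    // The guard the patch adds: lower_bound() yields end() when the queried
    // path is greater than every stored key, and dereferencing end() is the
    // undefined behaviour behind the reported SIGSEGV.
    if (it == files.end())
        return false;

    return it->first.compare(0, path.size(), path) == 0;
}

int main()
{
    Files files{{"store/a3e/part_1", 0}, {"store/a3e/part_2", 0}};
    std::cout << exists(files, "store/a3e/part_1") << ' '  // 1
              << exists(files, "store/zzz") << '\n';       // 0 instead of a crash
}
```

[With the check in place the lookup simply reports the entry as missing, and the integration test added below verifies that OPTIMIZE on the read-only web disk now fails with a regular error instead of a crash.]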
Signed-off-by: Azat Khuzhin --- .../Web/MetadataStorageFromStaticFilesWebServer.cpp | 3 +++ tests/integration/test_disk_over_web_server/test.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.cpp b/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.cpp index 06e36a2ddd8..aa125e93dee 100644 --- a/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.cpp +++ b/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.cpp @@ -55,6 +55,9 @@ bool MetadataStorageFromStaticFilesWebServer::exists(const std::string & path) c path, [](const auto & file, const std::string & path_) { return file.first < path_; } ); + if (it == object_storage.files.end()) + return false; + if (startsWith(it->first, path) || (it != object_storage.files.begin() && startsWith(std::prev(it)->first, path))) return true; diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py index ea6e407a18f..2ccc17db4f4 100644 --- a/tests/integration/test_disk_over_web_server/test.py +++ b/tests/integration/test_disk_over_web_server/test.py @@ -129,6 +129,9 @@ def test_incorrect_usage(cluster): result = node2.query_and_get_error("TRUNCATE TABLE test0") assert "Table is read-only" in result + result = node2.query_and_get_error("OPTIMIZE TABLE test0 FINAL") + assert "Only read-only operations are supported" in result + node2.query("DROP TABLE test0 SYNC") From 665fcf55aa66c0a19d8c7e4c8b9d7fe6de852aee Mon Sep 17 00:00:00 2001 From: Vitalii S Date: Fri, 28 Oct 2022 10:08:04 -0400 Subject: [PATCH 101/112] Update database.md Minor text correction --- docs/en/sql-reference/statements/create/database.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/create/database.md b/docs/en/sql-reference/statements/create/database.md index 432f5975cc8..7954d1362f1 100644 --- a/docs/en/sql-reference/statements/create/database.md +++ b/docs/en/sql-reference/statements/create/database.md @@ -31,7 +31,7 @@ By default, ClickHouse uses its own [Atomic](../../../engines/database-engines/a ### COMMENT -You can add a comment to the database when you creating it. +You can add a comment to the database when you are creating it. The comment is supported for all database engines. 
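[Editor's note: the sentence above closes the documentation change for the `COMMENT` clause; a minimal illustration of the documented syntax may help — the database name, engine and comment text here are made up for the example:]

```sql
CREATE DATABASE hello ENGINE = Atomic COMMENT 'Temporary database for testing';

SELECT name, comment FROM system.databases WHERE name = 'hello';
```

[The comment is returned in the `comment` column of `system.databases`.]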
From 4a4fb20731019f2b1a66c53b9e1b89bc823e93a9 Mon Sep 17 00:00:00 2001 From: Roman Vasin Date: Fri, 28 Oct 2022 14:08:28 +0000 Subject: [PATCH 102/112] Add Date32 to 01411_from_unixtime and improve 00718_format_datetime --- .../00718_format_datetime.reference | 49 +++++++++-------- .../0_stateless/00718_format_datetime.sql | 54 ++++++++++--------- .../0_stateless/01411_from_unixtime.reference | 34 ++++++------ .../0_stateless/01411_from_unixtime.sql | 34 ++++++------ 4 files changed, 86 insertions(+), 85 deletions(-) diff --git a/tests/queries/0_stateless/00718_format_datetime.reference b/tests/queries/0_stateless/00718_format_datetime.reference index 6a0fd451e73..bc98dd59d5f 100644 --- a/tests/queries/0_stateless/00718_format_datetime.reference +++ b/tests/queries/0_stateless/00718_format_datetime.reference @@ -1,33 +1,32 @@ -20 +20 20 +02 02 +01/02/18 01/02/18 + 2 2 +2018-01-02 2018-01-02 +22 00 02 -01/02/18 - 2 -2018-01-02 -22 -02 -10 +10 12 11 12 -001 -366 -01 -33 -\n -AM +001 001 +366 366 +01 01 +33 00 +\n \n +AM AM AM PM -22:33 -44 -\t -22:33:44 -1 7 -01 01 53 52 -1 0 -18 -2018 -% -no formatting pattern -2018-01-01 00:00:00 +22:33 00:00 +44 00 +\t \t +22:33:44 00:00:00 +1 7 1 7 +01 01 53 52 01 01 53 52 +1 0 1 0 +18 18 +2018 2018 +% % +no formatting pattern no formatting pattern 2018-01-01 00:00:00 1927-01-01 00:00:00 2018-01-01 01:00:00 2018-01-01 04:00:00 diff --git a/tests/queries/0_stateless/00718_format_datetime.sql b/tests/queries/0_stateless/00718_format_datetime.sql index 532a0c02e6f..deb5fb96c6c 100644 --- a/tests/queries/0_stateless/00718_format_datetime.sql +++ b/tests/queries/0_stateless/00718_format_datetime.sql @@ -8,40 +8,42 @@ SELECT formatDateTime(now(), 'unescaped %'); -- { serverError 36 } SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%U'); -- { serverError 48 } SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%W'); -- { serverError 48 } -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%C'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%d'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%D'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%e'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%F'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%H'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%C'), formatDateTime(toDate32('2018-01-02'), '%C'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%d'), formatDateTime(toDate32('2018-01-02'), '%d'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%D'), formatDateTime(toDate32('2018-01-02'), '%D'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%e'), formatDateTime(toDate32('2018-01-02'), '%e'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%F'), formatDateTime(toDate32('2018-01-02'), '%F'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%H'), formatDateTime(toDate32('2018-01-02'), '%H'); SELECT formatDateTime(toDateTime('2018-01-02 02:33:44'), '%H'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%I'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%I'), formatDateTime(toDate32('2018-01-02'), '%I'); SELECT formatDateTime(toDateTime('2018-01-02 11:33:44'), '%I'); SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%I'); -SELECT formatDateTime(toDateTime('2018-01-01 00:33:44'), '%j'); -SELECT formatDateTime(toDateTime('2000-12-31 00:33:44'), '%j'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%m'); -SELECT 
formatDateTime(toDateTime('2018-01-02 22:33:44'), '%M'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%n'); -SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%p'); +SELECT formatDateTime(toDateTime('2018-01-01 00:33:44'), '%j'), formatDateTime(toDate32('2018-01-01'), '%j'); +SELECT formatDateTime(toDateTime('2000-12-31 00:33:44'), '%j'), formatDateTime(toDate32('2000-12-31'), '%j'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%m'), formatDateTime(toDate32('2018-01-02'), '%m'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%M'), formatDateTime(toDate32('2018-01-02'), '%M'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%n'), formatDateTime(toDate32('2018-01-02'), '%n'); +SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%p'), formatDateTime(toDateTime('2018-01-02'), '%p'); SELECT formatDateTime(toDateTime('2018-01-02 11:33:44'), '%p'); SELECT formatDateTime(toDateTime('2018-01-02 12:33:44'), '%p'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%R'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%S'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%t'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%T'); -SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%u'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%u'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%R'), formatDateTime(toDate32('2018-01-02'), '%R'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%S'), formatDateTime(toDate32('2018-01-02'), '%S'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%t'), formatDateTime(toDate32('2018-01-02'), '%t'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%T'), formatDateTime(toDate32('2018-01-02'), '%T'); +SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%u'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%u'), + formatDateTime(toDate32('2018-01-01'), '%u'), formatDateTime(toDate32('2018-01-07'), '%u'); SELECT formatDateTime(toDateTime('1996-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1996-12-31 22:33:44'), '%V'), - formatDateTime(toDateTime('1999-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1999-12-31 22:33:44'), '%V'); -SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%w'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%w'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%y'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%Y'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%%'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), 'no formatting pattern'); + formatDateTime(toDateTime('1999-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1999-12-31 22:33:44'), '%V'), + formatDateTime(toDate32('1996-01-01'), '%V'), formatDateTime(toDate32('1996-12-31'), '%V'), + formatDateTime(toDate32('1999-01-01'), '%V'), formatDateTime(toDate32('1999-12-31'), '%V'); +SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%w'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%w'), + formatDateTime(toDate32('2018-01-01'), '%w'), formatDateTime(toDate32('2018-01-07'), '%w'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%y'), formatDateTime(toDate32('2018-01-02'), '%y'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%Y'), formatDateTime(toDate32('2018-01-02'), '%Y'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%%'), formatDateTime(toDate32('2018-01-02'), '%%'); +SELECT 
formatDateTime(toDateTime('2018-01-02 22:33:44'), 'no formatting pattern'), formatDateTime(toDate32('2018-01-02'), 'no formatting pattern'); SELECT formatDateTime(toDate('2018-01-01'), '%F %T'); - -SELECT formatDateTime(toDate32('2018-01-01'), '%F %T'); SELECT formatDateTime(toDate32('1927-01-01'), '%F %T'); SELECT diff --git a/tests/queries/0_stateless/01411_from_unixtime.reference b/tests/queries/0_stateless/01411_from_unixtime.reference index 1bc7519e668..17086e8c58b 100644 --- a/tests/queries/0_stateless/01411_from_unixtime.reference +++ b/tests/queries/0_stateless/01411_from_unixtime.reference @@ -5,25 +5,25 @@ 11 1970-01-15 1970-01-15 06:52:36 -20 +20 20 +02 02 +01/02/18 01/02/18 + 2 2 +2018-01-02 2018-01-02 +22 00 02 -01/02/18 - 2 -2018-01-02 -22 -02 -10 +10 12 11 12 -001 -366 -01 -33 -\n -AM +001 001 +366 366 +01 01 +33 00 +\n \n +AM AM AM PM -22:33 -44 -\t -22:33:44 +22:33 00:00 +44 00 +\t \t +22:33:44 00:00:00 diff --git a/tests/queries/0_stateless/01411_from_unixtime.sql b/tests/queries/0_stateless/01411_from_unixtime.sql index ec7b4d65b57..9a6655768e0 100644 --- a/tests/queries/0_stateless/01411_from_unixtime.sql +++ b/tests/queries/0_stateless/01411_from_unixtime.sql @@ -5,25 +5,25 @@ SELECT FROM_UNIXTIME(5345345, '%C', 'UTC'); SELECT FROM_UNIXTIME(645123, '%H', 'UTC'); SELECT FROM_UNIXTIME(1232456, '%Y-%m-%d', 'UTC'); SELECT FROM_UNIXTIME(1234356, '%Y-%m-%d %R:%S', 'UTC'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%C'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%d'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%D'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%e'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%F'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%H'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%C'), FROM_UNIXTIME(toDate32('2018-01-02'), '%C'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%d'), FROM_UNIXTIME(toDate32('2018-01-02'), '%d'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%D'), FROM_UNIXTIME(toDate32('2018-01-02'), '%D'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%e'), FROM_UNIXTIME(toDate32('2018-01-02'), '%e'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%F'), FROM_UNIXTIME(toDate32('2018-01-02'), '%F'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%H'), FROM_UNIXTIME(toDate32('2018-01-02'), '%H'); SELECT FROM_UNIXTIME(toDateTime('2018-01-02 02:33:44'), '%H'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%I'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%I'), FROM_UNIXTIME(toDate32('2018-01-02'), '%I'); SELECT FROM_UNIXTIME(toDateTime('2018-01-02 11:33:44'), '%I'); SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%I'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-01 00:33:44'), '%j'); -SELECT FROM_UNIXTIME(toDateTime('2000-12-31 00:33:44'), '%j'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%m'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%M'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%n'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%p'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-01 00:33:44'), '%j'), FROM_UNIXTIME(toDate32('2018-01-01'), '%j'); +SELECT FROM_UNIXTIME(toDateTime('2000-12-31 00:33:44'), '%j'), FROM_UNIXTIME(toDate32('2000-12-31'), '%j'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%m'), FROM_UNIXTIME(toDate32('2018-01-02'), '%m'); +SELECT 
FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%M'), FROM_UNIXTIME(toDate32('2018-01-02'), '%M'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%n'), FROM_UNIXTIME(toDate32('2018-01-02'), '%n'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%p'), FROM_UNIXTIME(toDate32('2018-01-02'), '%p'); SELECT FROM_UNIXTIME(toDateTime('2018-01-02 11:33:44'), '%p'); SELECT FROM_UNIXTIME(toDateTime('2018-01-02 12:33:44'), '%p'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%R'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%S'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%t'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%T'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%R'), FROM_UNIXTIME(toDate32('2018-01-02'), '%R'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%S'), FROM_UNIXTIME(toDate32('2018-01-02'), '%S'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%t'), FROM_UNIXTIME(toDate32('2018-01-02'), '%t'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%T'), FROM_UNIXTIME(toDate32('2018-01-02'), '%T'); From d4c62688069b5d6baad52d328702973860e17ec1 Mon Sep 17 00:00:00 2001 From: UnamedRus Date: Fri, 28 Oct 2022 18:47:32 +0400 Subject: [PATCH 103/112] Fix Polygon dict xml config Missing tag --- .../external-dictionaries/external-dicts-dict-polygon.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md index 912af5b5bce..e5ee48c9166 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md @@ -14,8 +14,10 @@ Example of a polygon dictionary configuration: - key - Array(Array(Array(Array(Float64)))) + + key + Array(Array(Array(Array(Float64)))) + From 0872d5c440ada1cb9966f8e69b832b2c8eb1a026 Mon Sep 17 00:00:00 2001 From: Julio Jimenez Date: Fri, 28 Oct 2022 13:33:12 -0400 Subject: [PATCH 104/112] style Signed-off-by: Julio Jimenez --- .github/workflows/nightly.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 834f74822a1..eb6e131886c 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -141,7 +141,7 @@ jobs: - name: Download and set up sonar-scanner env: SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip - run: | + "run": | mkdir -p $HOME/.sonar curl -sSLo $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }} unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ @@ -149,7 +149,7 @@ jobs: - name: Download and set up build-wrapper env: BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip - run: | + "run": | curl -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }} unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ echo "$HOME/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH From 144648a633e90e5f40b196bb4af668afe74c4e2d Mon Sep 17 00:00:00 2001 From: Julio Jimenez Date: Fri, 28 Oct 2022 13:47:37 -0400 Subject: [PATCH 105/112] disable sc2086 Signed-off-by: Julio Jimenez --- .github/workflows/nightly.yml | 6 ++++-- 1 file 
changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index eb6e131886c..527e3b1f2af 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -141,7 +141,8 @@ jobs: - name: Download and set up sonar-scanner env: SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip - "run": | + # shellcheck disable=SC2086 + run: | mkdir -p $HOME/.sonar curl -sSLo $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }} unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ @@ -149,7 +150,8 @@ jobs: - name: Download and set up build-wrapper env: BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip - "run": | + # shellcheck disable=SC2086 + run: | curl -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }} unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ echo "$HOME/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH From 10bee23c584a2effb509e71d4c7aa09b781e853d Mon Sep 17 00:00:00 2001 From: Julio Jimenez Date: Fri, 28 Oct 2022 13:56:12 -0400 Subject: [PATCH 106/112] no newline at end of file. Signed-off-by: Julio Jimenez --- .github/workflows/nightly.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 527e3b1f2af..1fa6603c960 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -176,4 +176,4 @@ jobs: --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \ --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \ --define sonar.projectKey="ClickHouse_ClickHouse" \ - --define sonar.organization="clickhouse-java" \ No newline at end of file + --define sonar.organization="clickhouse-java" From dc10c7a446d2d18823fe3e1c07d8996aaeb99787 Mon Sep 17 00:00:00 2001 From: Julio Jimenez Date: Fri, 28 Oct 2022 13:57:47 -0400 Subject: [PATCH 107/112] no newline at end of file. 
Signed-off-by: Julio Jimenez --- .github/workflows/nightly.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 1fa6603c960..0bada9eb913 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -123,7 +123,6 @@ jobs: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" SonarCloud: - name: Sonar Cloud runs-on: [self-hosted, builder] env: SONAR_SCANNER_VERSION: 4.7.0.2747 @@ -141,7 +140,6 @@ jobs: - name: Download and set up sonar-scanner env: SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip - # shellcheck disable=SC2086 run: | mkdir -p $HOME/.sonar curl -sSLo $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }} @@ -150,7 +148,6 @@ jobs: - name: Download and set up build-wrapper env: BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip - # shellcheck disable=SC2086 run: | curl -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }} unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ From 51f2d6046c1b40edc6d07d3b63abe2d5d4e1697f Mon Sep 17 00:00:00 2001 From: Julio Jimenez Date: Fri, 28 Oct 2022 14:10:23 -0400 Subject: [PATCH 108/112] sc2086 Signed-off-by: Julio Jimenez --- .github/workflows/nightly.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 0bada9eb913..81878ccd16f 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -142,14 +142,14 @@ jobs: SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip run: | mkdir -p $HOME/.sonar - curl -sSLo $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }} + curl -sSLo $HOME/.sonar/sonar-scanner.zip "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}" unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> $GITHUB_PATH - name: Download and set up build-wrapper env: BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip run: | - curl -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }} + curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}" unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ echo "$HOME/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH - name: Set Up Build Tools From 53acc23856f83727b53eda5c39e979038f80c52e Mon Sep 17 00:00:00 2001 From: Julio Jimenez Date: Fri, 28 Oct 2022 14:17:29 -0400 Subject: [PATCH 109/112] sc2086 Signed-off-by: Julio Jimenez --- .github/workflows/nightly.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 81878ccd16f..e6da4df7200 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -141,17 +141,17 @@ jobs: env: SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip run: | - mkdir -p $HOME/.sonar - curl -sSLo $HOME/.sonar/sonar-scanner.zip "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}" - unzip -o $HOME/.sonar/sonar-scanner.zip -d 
$HOME/.sonar/ - echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> $GITHUB_PATH + mkdir -p "$HOME/.sonar" + curl -sSLo "$HOME/.sonar/sonar-scanner.zip" "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}" + unzip -o "$HOME/.sonar/sonar-scanner.zip" -d "$HOME/.sonar/" + echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> "$GITHUB_PATH" - name: Download and set up build-wrapper env: BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip run: | curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}" - unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ - echo "$HOME/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH + unzip -o "$HOME/.sonar/build-wrapper-linux-x86.zip" -d "$HOME/.sonar/" + echo "$HOME/.sonar/build-wrapper-linux-x86" >> "$GITHUB_PATH" - name: Set Up Build Tools run: | sudo apt-get update From e903efda0a71c9d47d0217d6cbf4e38fa0125a16 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Fri, 28 Oct 2022 16:00:37 -0300 Subject: [PATCH 110/112] Update column.md --- docs/en/sql-reference/statements/alter/column.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index 067a350dca7..4c0b79dd8f7 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -204,8 +204,9 @@ It is used if it is necessary to add or update a column with a complicated expre Syntax: ```sql -ALTER TABLE table MATERIALIZE COLUMN col; +ALTER TABLE table MATERIALIZE COLUMN col [IN PARTITION partition | IN PARTITION ID 'partition_id']; ``` +- If you specify a PARTITION, a column will be materialized with only the specified partition. **Example** From 7fa71d8c04e20436e232e1f8c55f6fb46a10f279 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Fri, 28 Oct 2022 16:04:00 -0300 Subject: [PATCH 111/112] Update column.md --- docs/en/sql-reference/statements/alter/column.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index 4c0b79dd8f7..f36aa1357f4 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -204,7 +204,7 @@ It is used if it is necessary to add or update a column with a complicated expre Syntax: ```sql -ALTER TABLE table MATERIALIZE COLUMN col [IN PARTITION partition | IN PARTITION ID 'partition_id']; +ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE COLUMN col [IN PARTITION partition | IN PARTITION ID 'partition_id']; ``` - If you specify a PARTITION, a column will be materialized with only the specified partition. From 004ca99712c0037f4fc3e162eb451775965dfaba Mon Sep 17 00:00:00 2001 From: Yuko Takagi <70714860+yukotakagi@users.noreply.github.com> Date: Fri, 28 Oct 2022 13:58:14 -0600 Subject: [PATCH 112/112] Update README.md --- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 003b78a3cbb..f90df9686c2 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ ClickHouse® is an open-source column-oriented database management system that a ## Useful Links * [Official website](https://clickhouse.com/) has a quick high-level overview of ClickHouse on the main page. 
-* [ClickHouse Cloud](https://clickhouse.com/cloud) ClickHouse as a service, built by the creators and maintainers. +* [ClickHouse Cloud](https://clickhouse.cloud) ClickHouse as a service, built by the creators and maintainers. * [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster. * [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information. * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format. @@ -16,5 +16,6 @@ ClickHouse® is an open-source column-oriented database management system that a * [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any. ## Upcoming events -* [**v22.10 Release Webinar**](https://clickhouse.com/company/events/v22-10-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap. -* [**Introducing ClickHouse Cloud**](https://clickhouse.com/company/events/cloud-beta) Introducing ClickHouse as a service, built by creators and maintainers of the fastest OLAP database on earth. Join Tanya Bragin for a detailed walkthrough of ClickHouse Cloud capabilities, as well as a peek behind the curtain to understand the unique architecture that makes our service tick. +* [**v22.11 Release Webinar**](https://clickhouse.com/company/events/v22-11-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap. +* [**ClickHouse Meetup at the Deutsche Bank office in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/289311596/) Hear from Deutsche Bank on why they chose ClickHouse for big sensitive data in a regulated environment. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management. +* [**AWS re:Invent**](https://clickhouse.com/company/events/aws-reinvent) Core members of the ClickHouse team -- including 2 of our founders -- will be at re:Invent from November 29 to December 3. We are available on the show floor, but are also determining interest in holding an event during the time there.