diff --git a/contrib/boost b/contrib/boost
index aec12eea7fc..bb179652862 160000
--- a/contrib/boost
+++ b/contrib/boost
@@ -1 +1 @@
-Subproject commit aec12eea7fc762721ae16943d1361340c66c9c17
+Subproject commit bb179652862b528d94a9032a784796c4db846c3f
diff --git a/contrib/boost-cmake/CMakeLists.txt b/contrib/boost-cmake/CMakeLists.txt
index 6f9dce0b042..ef3a1758522 100644
--- a/contrib/boost-cmake/CMakeLists.txt
+++ b/contrib/boost-cmake/CMakeLists.txt
@@ -172,9 +172,9 @@ endif()
# coroutine
set (SRCS_COROUTINE
- "${LIBRARY_DIR}/libs/coroutine/detail/coroutine_context.cpp"
- "${LIBRARY_DIR}/libs/coroutine/exceptions.cpp"
- "${LIBRARY_DIR}/libs/coroutine/posix/stack_traits.cpp"
+ "${LIBRARY_DIR}/libs/coroutine/src/detail/coroutine_context.cpp"
+ "${LIBRARY_DIR}/libs/coroutine/src/exceptions.cpp"
+ "${LIBRARY_DIR}/libs/coroutine/src/posix/stack_traits.cpp"
)
add_library (_boost_coroutine ${SRCS_COROUTINE})
add_library (boost::coroutine ALIAS _boost_coroutine)
diff --git a/contrib/cityhash102/include/city.h b/contrib/cityhash102/include/city.h
index 87363d16444..c98eb7e3585 100644
--- a/contrib/cityhash102/include/city.h
+++ b/contrib/cityhash102/include/city.h
@@ -73,8 +73,8 @@ struct uint128
uint128() = default;
uint128(uint64 low64_, uint64 high64_) : low64(low64_), high64(high64_) {}
- friend bool operator ==(const uint128 & x, const uint128 & y) { return (x.low64 == y.low64) && (x.high64 == y.high64); }
- friend bool operator !=(const uint128 & x, const uint128 & y) { return !(x == y); }
+
+ friend auto operator<=>(const uint128 &, const uint128 &) = default;
};
inline uint64 Uint128Low64(const uint128 & x) { return x.low64; }
diff --git a/contrib/curl b/contrib/curl
index b0edf0b7dae..eb3b049df52 160000
--- a/contrib/curl
+++ b/contrib/curl
@@ -1 +1 @@
-Subproject commit b0edf0b7dae44d9e66f270a257cf654b35d5263d
+Subproject commit eb3b049df526bf125eda23218e680ce7fa9ec46c
diff --git a/contrib/curl-cmake/CMakeLists.txt b/contrib/curl-cmake/CMakeLists.txt
index 70d9c2816dc..733865d5101 100644
--- a/contrib/curl-cmake/CMakeLists.txt
+++ b/contrib/curl-cmake/CMakeLists.txt
@@ -8,125 +8,122 @@ endif()
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/curl")
set (SRCS
- "${LIBRARY_DIR}/lib/fopen.c"
- "${LIBRARY_DIR}/lib/noproxy.c"
- "${LIBRARY_DIR}/lib/idn.c"
- "${LIBRARY_DIR}/lib/cfilters.c"
- "${LIBRARY_DIR}/lib/cf-socket.c"
+ "${LIBRARY_DIR}/lib/altsvc.c"
+ "${LIBRARY_DIR}/lib/amigaos.c"
+ "${LIBRARY_DIR}/lib/asyn-thread.c"
+ "${LIBRARY_DIR}/lib/base64.c"
+ "${LIBRARY_DIR}/lib/bufq.c"
+ "${LIBRARY_DIR}/lib/bufref.c"
+ "${LIBRARY_DIR}/lib/cf-h1-proxy.c"
"${LIBRARY_DIR}/lib/cf-haproxy.c"
"${LIBRARY_DIR}/lib/cf-https-connect.c"
- "${LIBRARY_DIR}/lib/file.c"
- "${LIBRARY_DIR}/lib/timeval.c"
- "${LIBRARY_DIR}/lib/base64.c"
- "${LIBRARY_DIR}/lib/hostip.c"
- "${LIBRARY_DIR}/lib/progress.c"
- "${LIBRARY_DIR}/lib/formdata.c"
- "${LIBRARY_DIR}/lib/cookie.c"
- "${LIBRARY_DIR}/lib/http.c"
- "${LIBRARY_DIR}/lib/sendf.c"
- "${LIBRARY_DIR}/lib/url.c"
- "${LIBRARY_DIR}/lib/dict.c"
- "${LIBRARY_DIR}/lib/if2ip.c"
- "${LIBRARY_DIR}/lib/speedcheck.c"
- "${LIBRARY_DIR}/lib/ldap.c"
- "${LIBRARY_DIR}/lib/version.c"
- "${LIBRARY_DIR}/lib/getenv.c"
- "${LIBRARY_DIR}/lib/escape.c"
- "${LIBRARY_DIR}/lib/mprintf.c"
- "${LIBRARY_DIR}/lib/telnet.c"
- "${LIBRARY_DIR}/lib/netrc.c"
- "${LIBRARY_DIR}/lib/getinfo.c"
- "${LIBRARY_DIR}/lib/transfer.c"
- "${LIBRARY_DIR}/lib/strcase.c"
- "${LIBRARY_DIR}/lib/easy.c"
- "${LIBRARY_DIR}/lib/curl_fnmatch.c"
- "${LIBRARY_DIR}/lib/curl_log.c"
- "${LIBRARY_DIR}/lib/fileinfo.c"
- "${LIBRARY_DIR}/lib/krb5.c"
- "${LIBRARY_DIR}/lib/memdebug.c"
- "${LIBRARY_DIR}/lib/http_chunks.c"
- "${LIBRARY_DIR}/lib/strtok.c"
+ "${LIBRARY_DIR}/lib/cf-socket.c"
+ "${LIBRARY_DIR}/lib/cfilters.c"
+ "${LIBRARY_DIR}/lib/conncache.c"
"${LIBRARY_DIR}/lib/connect.c"
- "${LIBRARY_DIR}/lib/llist.c"
- "${LIBRARY_DIR}/lib/hash.c"
- "${LIBRARY_DIR}/lib/multi.c"
"${LIBRARY_DIR}/lib/content_encoding.c"
- "${LIBRARY_DIR}/lib/share.c"
- "${LIBRARY_DIR}/lib/http_digest.c"
- "${LIBRARY_DIR}/lib/md4.c"
- "${LIBRARY_DIR}/lib/md5.c"
- "${LIBRARY_DIR}/lib/http_negotiate.c"
- "${LIBRARY_DIR}/lib/inet_pton.c"
- "${LIBRARY_DIR}/lib/strtoofft.c"
- "${LIBRARY_DIR}/lib/strerror.c"
- "${LIBRARY_DIR}/lib/amigaos.c"
+ "${LIBRARY_DIR}/lib/cookie.c"
+ "${LIBRARY_DIR}/lib/curl_addrinfo.c"
+ "${LIBRARY_DIR}/lib/curl_des.c"
+ "${LIBRARY_DIR}/lib/curl_endian.c"
+ "${LIBRARY_DIR}/lib/curl_fnmatch.c"
+ "${LIBRARY_DIR}/lib/curl_get_line.c"
+ "${LIBRARY_DIR}/lib/curl_gethostname.c"
+ "${LIBRARY_DIR}/lib/curl_gssapi.c"
+ "${LIBRARY_DIR}/lib/curl_memrchr.c"
+ "${LIBRARY_DIR}/lib/curl_multibyte.c"
+ "${LIBRARY_DIR}/lib/curl_ntlm_core.c"
+ "${LIBRARY_DIR}/lib/curl_ntlm_wb.c"
+ "${LIBRARY_DIR}/lib/curl_path.c"
+ "${LIBRARY_DIR}/lib/curl_range.c"
+ "${LIBRARY_DIR}/lib/curl_rtmp.c"
+ "${LIBRARY_DIR}/lib/curl_sasl.c"
+ "${LIBRARY_DIR}/lib/curl_sspi.c"
+ "${LIBRARY_DIR}/lib/curl_threads.c"
+ "${LIBRARY_DIR}/lib/curl_trc.c"
+ "${LIBRARY_DIR}/lib/dict.c"
+ "${LIBRARY_DIR}/lib/doh.c"
+ "${LIBRARY_DIR}/lib/dynbuf.c"
+ "${LIBRARY_DIR}/lib/dynhds.c"
+ "${LIBRARY_DIR}/lib/easy.c"
+ "${LIBRARY_DIR}/lib/escape.c"
+ "${LIBRARY_DIR}/lib/file.c"
+ "${LIBRARY_DIR}/lib/fileinfo.c"
+ "${LIBRARY_DIR}/lib/fopen.c"
+ "${LIBRARY_DIR}/lib/formdata.c"
+ "${LIBRARY_DIR}/lib/getenv.c"
+ "${LIBRARY_DIR}/lib/getinfo.c"
+ "${LIBRARY_DIR}/lib/gopher.c"
+ "${LIBRARY_DIR}/lib/hash.c"
+ "${LIBRARY_DIR}/lib/headers.c"
+ "${LIBRARY_DIR}/lib/hmac.c"
"${LIBRARY_DIR}/lib/hostasyn.c"
+ "${LIBRARY_DIR}/lib/hostip.c"
"${LIBRARY_DIR}/lib/hostip4.c"
"${LIBRARY_DIR}/lib/hostip6.c"
"${LIBRARY_DIR}/lib/hostsyn.c"
+ "${LIBRARY_DIR}/lib/hsts.c"
+ "${LIBRARY_DIR}/lib/http.c"
+ "${LIBRARY_DIR}/lib/http2.c"
+ "${LIBRARY_DIR}/lib/http_aws_sigv4.c"
+ "${LIBRARY_DIR}/lib/http_chunks.c"
+ "${LIBRARY_DIR}/lib/http_digest.c"
+ "${LIBRARY_DIR}/lib/http_negotiate.c"
+ "${LIBRARY_DIR}/lib/http_ntlm.c"
+ "${LIBRARY_DIR}/lib/http_proxy.c"
+ "${LIBRARY_DIR}/lib/idn.c"
+ "${LIBRARY_DIR}/lib/if2ip.c"
+ "${LIBRARY_DIR}/lib/imap.c"
"${LIBRARY_DIR}/lib/inet_ntop.c"
+ "${LIBRARY_DIR}/lib/inet_pton.c"
+ "${LIBRARY_DIR}/lib/krb5.c"
+ "${LIBRARY_DIR}/lib/ldap.c"
+ "${LIBRARY_DIR}/lib/llist.c"
+ "${LIBRARY_DIR}/lib/md4.c"
+ "${LIBRARY_DIR}/lib/md5.c"
+ "${LIBRARY_DIR}/lib/memdebug.c"
+ "${LIBRARY_DIR}/lib/mime.c"
+ "${LIBRARY_DIR}/lib/mprintf.c"
+ "${LIBRARY_DIR}/lib/mqtt.c"
+ "${LIBRARY_DIR}/lib/multi.c"
+ "${LIBRARY_DIR}/lib/netrc.c"
+ "${LIBRARY_DIR}/lib/nonblock.c"
+ "${LIBRARY_DIR}/lib/noproxy.c"
+ "${LIBRARY_DIR}/lib/openldap.c"
"${LIBRARY_DIR}/lib/parsedate.c"
+ "${LIBRARY_DIR}/lib/pingpong.c"
+ "${LIBRARY_DIR}/lib/pop3.c"
+ "${LIBRARY_DIR}/lib/progress.c"
+ "${LIBRARY_DIR}/lib/psl.c"
+ "${LIBRARY_DIR}/lib/rand.c"
+ "${LIBRARY_DIR}/lib/rename.c"
+ "${LIBRARY_DIR}/lib/rtsp.c"
"${LIBRARY_DIR}/lib/select.c"
- "${LIBRARY_DIR}/lib/splay.c"
- "${LIBRARY_DIR}/lib/strdup.c"
+ "${LIBRARY_DIR}/lib/sendf.c"
+ "${LIBRARY_DIR}/lib/setopt.c"
+ "${LIBRARY_DIR}/lib/sha256.c"
+ "${LIBRARY_DIR}/lib/share.c"
+ "${LIBRARY_DIR}/lib/slist.c"
+ "${LIBRARY_DIR}/lib/smb.c"
+ "${LIBRARY_DIR}/lib/smtp.c"
+ "${LIBRARY_DIR}/lib/socketpair.c"
"${LIBRARY_DIR}/lib/socks.c"
- "${LIBRARY_DIR}/lib/curl_addrinfo.c"
"${LIBRARY_DIR}/lib/socks_gssapi.c"
"${LIBRARY_DIR}/lib/socks_sspi.c"
- "${LIBRARY_DIR}/lib/curl_sspi.c"
- "${LIBRARY_DIR}/lib/slist.c"
- "${LIBRARY_DIR}/lib/nonblock.c"
- "${LIBRARY_DIR}/lib/curl_memrchr.c"
- "${LIBRARY_DIR}/lib/imap.c"
- "${LIBRARY_DIR}/lib/pop3.c"
- "${LIBRARY_DIR}/lib/smtp.c"
- "${LIBRARY_DIR}/lib/pingpong.c"
- "${LIBRARY_DIR}/lib/rtsp.c"
- "${LIBRARY_DIR}/lib/curl_threads.c"
- "${LIBRARY_DIR}/lib/warnless.c"
- "${LIBRARY_DIR}/lib/hmac.c"
- "${LIBRARY_DIR}/lib/curl_rtmp.c"
- "${LIBRARY_DIR}/lib/openldap.c"
- "${LIBRARY_DIR}/lib/curl_gethostname.c"
- "${LIBRARY_DIR}/lib/gopher.c"
- "${LIBRARY_DIR}/lib/http_proxy.c"
- "${LIBRARY_DIR}/lib/asyn-thread.c"
- "${LIBRARY_DIR}/lib/curl_gssapi.c"
- "${LIBRARY_DIR}/lib/http_ntlm.c"
- "${LIBRARY_DIR}/lib/curl_ntlm_wb.c"
- "${LIBRARY_DIR}/lib/curl_ntlm_core.c"
- "${LIBRARY_DIR}/lib/curl_sasl.c"
- "${LIBRARY_DIR}/lib/rand.c"
- "${LIBRARY_DIR}/lib/curl_multibyte.c"
- "${LIBRARY_DIR}/lib/conncache.c"
- "${LIBRARY_DIR}/lib/cf-h1-proxy.c"
- "${LIBRARY_DIR}/lib/http2.c"
- "${LIBRARY_DIR}/lib/smb.c"
- "${LIBRARY_DIR}/lib/curl_endian.c"
- "${LIBRARY_DIR}/lib/curl_des.c"
+ "${LIBRARY_DIR}/lib/speedcheck.c"
+ "${LIBRARY_DIR}/lib/splay.c"
+ "${LIBRARY_DIR}/lib/strcase.c"
+ "${LIBRARY_DIR}/lib/strdup.c"
+ "${LIBRARY_DIR}/lib/strerror.c"
+ "${LIBRARY_DIR}/lib/strtok.c"
+ "${LIBRARY_DIR}/lib/strtoofft.c"
"${LIBRARY_DIR}/lib/system_win32.c"
- "${LIBRARY_DIR}/lib/mime.c"
- "${LIBRARY_DIR}/lib/sha256.c"
- "${LIBRARY_DIR}/lib/setopt.c"
- "${LIBRARY_DIR}/lib/curl_path.c"
- "${LIBRARY_DIR}/lib/curl_range.c"
- "${LIBRARY_DIR}/lib/psl.c"
- "${LIBRARY_DIR}/lib/doh.c"
- "${LIBRARY_DIR}/lib/urlapi.c"
- "${LIBRARY_DIR}/lib/curl_get_line.c"
- "${LIBRARY_DIR}/lib/altsvc.c"
- "${LIBRARY_DIR}/lib/socketpair.c"
- "${LIBRARY_DIR}/lib/bufref.c"
- "${LIBRARY_DIR}/lib/bufq.c"
- "${LIBRARY_DIR}/lib/dynbuf.c"
- "${LIBRARY_DIR}/lib/dynhds.c"
- "${LIBRARY_DIR}/lib/hsts.c"
- "${LIBRARY_DIR}/lib/http_aws_sigv4.c"
- "${LIBRARY_DIR}/lib/mqtt.c"
- "${LIBRARY_DIR}/lib/rename.c"
- "${LIBRARY_DIR}/lib/headers.c"
+ "${LIBRARY_DIR}/lib/telnet.c"
"${LIBRARY_DIR}/lib/timediff.c"
- "${LIBRARY_DIR}/lib/vauth/vauth.c"
+ "${LIBRARY_DIR}/lib/timeval.c"
+ "${LIBRARY_DIR}/lib/transfer.c"
+ "${LIBRARY_DIR}/lib/url.c"
+ "${LIBRARY_DIR}/lib/urlapi.c"
"${LIBRARY_DIR}/lib/vauth/cleartext.c"
"${LIBRARY_DIR}/lib/vauth/cram.c"
"${LIBRARY_DIR}/lib/vauth/digest.c"
@@ -138,23 +135,24 @@ set (SRCS
"${LIBRARY_DIR}/lib/vauth/oauth2.c"
"${LIBRARY_DIR}/lib/vauth/spnego_gssapi.c"
"${LIBRARY_DIR}/lib/vauth/spnego_sspi.c"
+ "${LIBRARY_DIR}/lib/vauth/vauth.c"
+ "${LIBRARY_DIR}/lib/version.c"
"${LIBRARY_DIR}/lib/vquic/vquic.c"
- "${LIBRARY_DIR}/lib/vtls/openssl.c"
+ "${LIBRARY_DIR}/lib/vssh/libssh.c"
+ "${LIBRARY_DIR}/lib/vssh/libssh2.c"
+ "${LIBRARY_DIR}/lib/vtls/bearssl.c"
"${LIBRARY_DIR}/lib/vtls/gtls.c"
- "${LIBRARY_DIR}/lib/vtls/vtls.c"
- "${LIBRARY_DIR}/lib/vtls/nss.c"
- "${LIBRARY_DIR}/lib/vtls/wolfssl.c"
+ "${LIBRARY_DIR}/lib/vtls/hostcheck.c"
+ "${LIBRARY_DIR}/lib/vtls/keylog.c"
+ "${LIBRARY_DIR}/lib/vtls/mbedtls.c"
+ "${LIBRARY_DIR}/lib/vtls/openssl.c"
"${LIBRARY_DIR}/lib/vtls/schannel.c"
"${LIBRARY_DIR}/lib/vtls/schannel_verify.c"
"${LIBRARY_DIR}/lib/vtls/sectransp.c"
- "${LIBRARY_DIR}/lib/vtls/gskit.c"
- "${LIBRARY_DIR}/lib/vtls/mbedtls.c"
- "${LIBRARY_DIR}/lib/vtls/bearssl.c"
- "${LIBRARY_DIR}/lib/vtls/keylog.c"
+ "${LIBRARY_DIR}/lib/vtls/vtls.c"
+ "${LIBRARY_DIR}/lib/vtls/wolfssl.c"
"${LIBRARY_DIR}/lib/vtls/x509asn1.c"
- "${LIBRARY_DIR}/lib/vtls/hostcheck.c"
- "${LIBRARY_DIR}/lib/vssh/libssh2.c"
- "${LIBRARY_DIR}/lib/vssh/libssh.c"
+ "${LIBRARY_DIR}/lib/warnless.c"
)
add_library (_curl ${SRCS})
diff --git a/contrib/krb5 b/contrib/krb5
index b56ce6ba690..1d5c970e936 160000
--- a/contrib/krb5
+++ b/contrib/krb5
@@ -1 +1 @@
-Subproject commit b56ce6ba690e1f320df1a64afa34980c3e462617
+Subproject commit 1d5c970e9369f444caf81d1d06a231a6bad8581f
diff --git a/docker/test/base/setup_export_logs.sh b/docker/test/base/setup_export_logs.sh
index 12fae855b03..9a48c0fcafc 100755
--- a/docker/test/base/setup_export_logs.sh
+++ b/docker/test/base/setup_export_logs.sh
@@ -17,6 +17,9 @@ CONNECTION_PARAMETERS=${CONNECTION_PARAMETERS:=""}
# Create all configured system logs:
clickhouse-client --query "SYSTEM FLUSH LOGS"
+# It doesn't make sense to try creating tables if SYNC fails
+echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client --receive_timeout 180 $CONNECTION_PARAMETERS || exit 0
+
# For each system log table:
clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table
do
@@ -38,7 +41,7 @@ do
echo "Creating destination table ${table}_${hash}" >&2
- echo "$statement" | clickhouse-client $CONNECTION_PARAMETERS
+ echo "$statement" | clickhouse-client --distributed_ddl_task_timeout=10 $CONNECTION_PARAMETERS || continue
echo "Creating table system.${table}_sender" >&2
@@ -46,6 +49,7 @@ do
clickhouse-client --query "
CREATE TABLE system.${table}_sender
ENGINE = Distributed(${CLUSTER}, default, ${table}_${hash})
+ SETTINGS flush_on_detach=0
EMPTY AS
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
FROM system.${table}
diff --git a/docker/test/install/deb/Dockerfile b/docker/test/install/deb/Dockerfile
index 9614473c69b..e9c928b1fe7 100644
--- a/docker/test/install/deb/Dockerfile
+++ b/docker/test/install/deb/Dockerfile
@@ -12,6 +12,7 @@ ENV \
# install systemd packages
RUN apt-get update && \
apt-get install -y --no-install-recommends \
+ sudo \
systemd \
&& \
apt-get clean && \
diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile
index cfd7c613868..d31663f9071 100644
--- a/docker/test/performance-comparison/Dockerfile
+++ b/docker/test/performance-comparison/Dockerfile
@@ -1,18 +1,7 @@
# docker build -t clickhouse/performance-comparison .
-# Using ubuntu:22.04 over 20.04 as all other images, since:
-# a) ubuntu 20.04 has too old parallel, and does not support --memsuspend
-# b) anyway for perf tests it should not be important (backward compatiblity
-# with older ubuntu had been checked lots of times in various tests)
-FROM ubuntu:22.04
-
-# ARG for quick switch to a given ubuntu mirror
-ARG apt_archive="http://archive.ubuntu.com"
-RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
-
-ENV LANG=C.UTF-8
-ENV TZ=Europe/Amsterdam
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+ARG FROM_TAG=latest
+FROM clickhouse/test-base:$FROM_TAG
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
@@ -56,10 +45,9 @@ COPY * /
# node #0 should be less stable because of system interruptions. We bind
# randomly to node 1 or 0 to gather some statistics on that. We have to bind
# both servers and the tmpfs on which the database is stored. How to do it
-# through Yandex Sandbox API is unclear, but by default tmpfs uses
+# is unclear, but by default tmpfs uses
# 'process allocation policy', not sure which process but hopefully the one that
-# writes to it, so just bind the downloader script as well. We could also try to
-# remount it with proper options in Sandbox task.
+# writes to it, so just bind the downloader script as well.
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh
index f949e66ab17..4b1b5c13b9b 100755
--- a/docker/test/performance-comparison/compare.sh
+++ b/docker/test/performance-comparison/compare.sh
@@ -90,7 +90,7 @@ function configure
set +m
wait_for_server $LEFT_SERVER_PORT $left_pid
- echo Server for setup started
+ echo "Server for setup started"
clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||:
clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||:
@@ -156,9 +156,9 @@ function restart
wait_for_server $RIGHT_SERVER_PORT $right_pid
echo right ok
- clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.tables where database != 'system'"
+ clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.tables where database NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema')"
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.build_options"
- clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.tables where database != 'system'"
+ clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.tables where database NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema')"
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.build_options"
# Check again that both servers we started are running -- this is important
@@ -352,14 +352,12 @@ function get_profiles
wait
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
- clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
- clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > right-metric-log.tsv ||: &
diff --git a/docker/test/performance-comparison/config/config.d/zzz-perf-comparison-tweaks-config.xml b/docker/test/performance-comparison/config/config.d/zzz-perf-comparison-tweaks-config.xml
index 39c29bb61ca..292665c4f68 100644
--- a/docker/test/performance-comparison/config/config.d/zzz-perf-comparison-tweaks-config.xml
+++ b/docker/test/performance-comparison/config/config.d/zzz-perf-comparison-tweaks-config.xml
@@ -19,31 +19,6 @@
-
-
- ENGINE = Memory
-
-
-
- ENGINE = Memory
-
-
-
- ENGINE = Memory
-
-
-
- ENGINE = Memory
-
-
-
- ENGINE = Memory
-
-
-
1000000000
10
diff --git a/docker/test/performance-comparison/download.sh b/docker/test/performance-comparison/download.sh
index aee11030068..cb243b655c6 100755
--- a/docker/test/performance-comparison/download.sh
+++ b/docker/test/performance-comparison/download.sh
@@ -31,8 +31,6 @@ function download
# Test all of them.
declare -a urls_to_try=(
"$S3_URL/PRs/$left_pr/$left_sha/$BUILD_NAME/performance.tar.zst"
- "$S3_URL/$left_pr/$left_sha/$BUILD_NAME/performance.tar.zst"
- "$S3_URL/$left_pr/$left_sha/$BUILD_NAME/performance.tgz"
)
for path in "${urls_to_try[@]}"
diff --git a/docker/test/performance-comparison/entrypoint.sh b/docker/test/performance-comparison/entrypoint.sh
index 74571777be0..fb5e6bd2a7a 100755
--- a/docker/test/performance-comparison/entrypoint.sh
+++ b/docker/test/performance-comparison/entrypoint.sh
@@ -130,7 +130,7 @@ then
git -C right/ch diff --name-only "$base" pr -- :!tests/performance :!docker/test/performance-comparison | tee other-changed-files.txt
fi
-# Set python output encoding so that we can print queries with Russian letters.
+# Set python output encoding so that we can print queries with non-ASCII letters.
export PYTHONIOENCODING=utf-8
# By default, use the main comparison script from the tested package, so that we
@@ -151,11 +151,7 @@ export PATH
export REF_PR
export REF_SHA
-# Try to collect some core dumps. I've seen two patterns in Sandbox:
-# 1) |/home/zomb-sandbox/venv/bin/python /home/zomb-sandbox/client/sandbox/bin/coredumper.py %e %p %g %u %s %P %c
-# Not sure what this script does (puts them to sandbox resources, logs some messages?),
-# and it's not accessible from inside docker anyway.
-# 2) something like %e.%p.core.dmp. The dump should end up in the workspace directory.
+# Try to collect some core dumps.
# At least we remove the ulimit and then try to pack some common file names into output.
ulimit -c unlimited
cat /proc/sys/kernel/core_pattern
diff --git a/docker/test/style/Dockerfile b/docker/test/style/Dockerfile
index bd1c26855b7..a4feae27c67 100644
--- a/docker/test/style/Dockerfile
+++ b/docker/test/style/Dockerfile
@@ -1,5 +1,5 @@
# docker build -t clickhouse/style-test .
-FROM ubuntu:20.04
+FROM ubuntu:22.04
ARG ACT_VERSION=0.2.33
ARG ACTIONLINT_VERSION=1.6.22
diff --git a/docs/en/engines/database-engines/materialized-mysql.md b/docs/en/engines/database-engines/materialized-mysql.md
index f7cc52e622e..b7e567c7b6c 100644
--- a/docs/en/engines/database-engines/materialized-mysql.md
+++ b/docs/en/engines/database-engines/materialized-mysql.md
@@ -190,7 +190,7 @@ These are the schema conversion manipulations you can do with table overrides fo
* Modify [column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl).
* Modify [column compression codec](/docs/en/sql-reference/statements/create/table.md/#codecs).
* Add [ALIAS columns](/docs/en/sql-reference/statements/create/table.md/#alias).
- * Add [skipping indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes)
+ * Add [skipping indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes). Note that you need to enable `use_skip_indexes_if_final` setting to make them work (MaterializedMySQL is using `SELECT ... FINAL` by default)
* Add [projections](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#projections). Note that projection optimizations are
disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so their utility is limited here.
`INDEX ... TYPE hypothesis` as [described in the v21.12 blog post]](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/)
diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md
index e2122380510..5ba12eba26a 100644
--- a/docs/en/interfaces/formats.md
+++ b/docs/en/interfaces/formats.md
@@ -2136,6 +2136,7 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t
- [input_format_parquet_case_insensitive_column_matching](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_case_insensitive_column_matching) - ignore case when matching Parquet columns with ClickHouse columns. Default value - `false`.
- [input_format_parquet_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_allow_missing_columns) - allow missing columns while reading Parquet data. Default value - `false`.
- [input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Parquet format. Default value - `false`.
+- [input_format_parquet_local_file_min_bytes_for_seek](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_local_file_min_bytes_for_seek) - min bytes required for local read (file) to do seek, instead of read with ignore in Parquet input format. Default value - `8192`.
- [output_format_parquet_fixed_string_as_fixed_byte_array](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_fixed_string_as_fixed_byte_array) - use Parquet FIXED_LENGTH_BYTE_ARRAY type instead of Binary/String for FixedString columns. Default value - `true`.
- [output_format_parquet_version](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_version) - The version of Parquet format used in output format. Default value - `2.latest`.
- [output_format_parquet_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_compression_method) - compression method used in output Parquet format. Default value - `snappy`.
diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md
index 4122b4af40f..8ea599b9861 100644
--- a/docs/en/operations/settings/merge-tree-settings.md
+++ b/docs/en/operations/settings/merge-tree-settings.md
@@ -56,11 +56,11 @@ Possible values:
- Any positive integer.
-Default value: 300.
+Default value: 3000.
To achieve maximum performance of `SELECT` queries, it is necessary to minimize the number of parts processed, see [Merge Tree](../../development/architecture.md#merge-tree).
-You can set a larger value to 600 (1200), this will reduce the probability of the `Too many parts` error, but at the same time `SELECT` performance might degrade. Also in case of a merge issue (for example, due to insufficient disk space) you will notice it later than it could be with the original 300.
+Prior to 23.6 this setting was set to 300. You can set a higher value, which will reduce the probability of the `Too many parts` error, but at the same time `SELECT` performance might degrade. Also in case of a merge issue (for example, due to insufficient disk space) you will notice it later than it could be with the original 300.
## parts_to_delay_insert {#parts-to-delay-insert}
diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md
index beb1d372e08..86aabae187f 100644
--- a/docs/en/operations/settings/settings-formats.md
+++ b/docs/en/operations/settings/settings-formats.md
@@ -1223,6 +1223,12 @@ Allow skipping columns with unsupported types while schema inference for format
Disabled by default.
+### input_format_parquet_local_file_min_bytes_for_seek {#input_format_parquet_local_file_min_bytes_for_seek}
+
+Min bytes required for local read (file) to do seek, instead of read with ignore in Parquet input format.
+
+Default value - `8192`.
+
### output_format_parquet_string_as_string {#output_format_parquet_string_as_string}
Use Parquet String type instead of Binary for String columns.
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index de3cc00e4c9..29434ef556f 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -98,6 +98,18 @@ Default value: 0.
```
+## mutations_execute_nondeterministic_on_initiator {#mutations_execute_nondeterministic_on_initiator}
+
+If true, constant nondeterministic functions (e.g. function `now()`) are executed on the initiator and replaced with literals in `UPDATE` and `DELETE` queries. It helps to keep data in sync on replicas while executing mutations with constant nondeterministic functions. Default value: `false`.
+
+## mutations_execute_subqueries_on_initiator {#mutations_execute_subqueries_on_initiator}
+
+If true, scalar subqueries are executed on the initiator and replaced with literals in `UPDATE` and `DELETE` queries. Default value: `false`.
+
+## mutations_max_literal_size_to_replace {#mutations_max_literal_size_to_replace}
+
+The maximum size of serialized literal in bytes to replace in `UPDATE` and `DELETE` queries. Takes effect only if at least one of the two settings above is enabled. Default value: 16384 (16 KiB).
+
## distributed_product_mode {#distributed-product-mode}
Changes the behaviour of [distributed subqueries](../../sql-reference/operators/in.md).
@@ -4298,7 +4310,7 @@ Use this setting only for backward compatibility if your use cases depend on old
## session_timezone {#session_timezone}
Sets the implicit time zone of the current session or query.
-The implicit time zone is the time zone applied to values of type DateTime/DateTime64 which have no explicitly specified time zone.
+The implicit time zone is the time zone applied to values of type DateTime/DateTime64 which have no explicitly specified time zone.
The setting takes precedence over the globally configured (server-level) implicit time zone.
A value of '' (empty string) means that the implicit time zone of the current session or query is equal to the [server time zone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone).
@@ -4333,7 +4345,7 @@ SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zuric
```
:::warning
-Not all functions that parse DateTime/DateTime64 respect `session_timezone`. This can lead to subtle errors.
+Not all functions that parse DateTime/DateTime64 respect `session_timezone`. This can lead to subtle errors.
See the following example and explanation.
:::
diff --git a/docs/en/sql-reference/data-types/geo.md b/docs/en/sql-reference/data-types/geo.md
index 3b2787008d2..1d37b829dd5 100644
--- a/docs/en/sql-reference/data-types/geo.md
+++ b/docs/en/sql-reference/data-types/geo.md
@@ -26,9 +26,9 @@ SELECT p, toTypeName(p) FROM geo_point;
Result:
``` text
-┌─p─────┬─toTypeName(p)─┐
+┌─p───────┬─toTypeName(p)─┐
│ (10,10) │ Point │
-└───────┴───────────────┘
+└─────────┴───────────────┘
```
## Ring
diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md
index c95ff5758f4..dd8031461e0 100644
--- a/docs/en/sql-reference/dictionaries/index.md
+++ b/docs/en/sql-reference/dictionaries/index.md
@@ -1092,7 +1092,7 @@ Types of sources (`source_type`):
- [Local file](#local_file)
- [Executable File](#executable)
- [Executable Pool](#executable_pool)
-- [HTTP(s)](#http)
+- [HTTP(S)](#http)
- DBMS
- [ODBC](#odbc)
- [MySQL](#mysql)
@@ -1102,7 +1102,7 @@ Types of sources (`source_type`):
- [Cassandra](#cassandra)
- [PostgreSQL](#postgresql)
-## Local File {#local_file}
+### Local File {#local_file}
Example of settings:
@@ -1132,7 +1132,7 @@ When a dictionary with source `FILE` is created via DDL command (`CREATE DICTION
- [Dictionary function](../../sql-reference/table-functions/dictionary.md#dictionary-function)
-## Executable File {#executable}
+### Executable File {#executable}
Working with executable files depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts the executable file and treats its output as dictionary data.
@@ -1161,7 +1161,7 @@ Setting fields:
That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled; otherwise, the DB user would be able to execute arbitrary binaries on the ClickHouse node.
-## Executable Pool {#executable_pool}
+### Executable Pool {#executable_pool}
Executable pool allows loading data from pool of processes. This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary [is stored](#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, or `complex_key_direct` layouts.
@@ -1196,9 +1196,9 @@ Setting fields:
That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled, otherwise, the DB user would be able to execute arbitrary binary on ClickHouse node.
-## Http(s) {#https}
+### HTTP(S) {#https}
-Working with an HTTP(s) server depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method.
+Working with an HTTP(S) server depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method.
Example of settings:
@@ -1248,7 +1248,55 @@ Setting fields:
When creating a dictionary using the DDL command (`CREATE DICTIONARY ...`) remote hosts for HTTP dictionaries are checked against the contents of `remote_url_allow_hosts` section from config to prevent database users to access arbitrary HTTP server.
-### Known Vulnerability of the ODBC Dictionary Functionality
+### DBMS
+
+#### ODBC
+
+You can use this method to connect any database that has an ODBC driver.
+
+Example of settings:
+
+``` xml
+
+```
+
+or
+
+``` sql
+SOURCE(ODBC(
+ db 'DatabaseName'
+ table 'SchemaName.TableName'
+ connection_string 'DSN=some_parameters'
+ invalidate_query 'SQL_QUERY'
+ query 'SELECT id, value_1, value_2 FROM db_name.table_name'
+))
+```
+
+Setting fields:
+
+- `db` – Name of the database. Omit it if the database name is set in the `` parameters.
+- `table` – Name of the table and schema if exists.
+- `connection_string` – Connection string.
+- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates).
+- `query` – The custom query. Optional parameter.
+
+:::note
+The `table` and `query` fields cannot be used together. And either one of the `table` or `query` fields must be declared.
+:::
+
+ClickHouse receives quoting symbols from the ODBC driver and quotes all settings in queries to the driver, so it’s necessary to set the table name according to the table name case in the database.
+
+If you have problems with encodings when using Oracle, see the corresponding [FAQ](/knowledgebase/oracle-odbc) item.
+
+##### Known Vulnerability of the ODBC Dictionary Functionality
:::note
When connecting to the database through the ODBC driver connection parameter `Servername` can be substituted. In this case values of `USERNAME` and `PASSWORD` from `odbc.ini` are sent to the remote server and can be compromised.
@@ -1277,7 +1325,7 @@ SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db');
ODBC driver will send values of `USERNAME` and `PASSWORD` from `odbc.ini` to `some-server.com`.
-### Example of Connecting Postgresql
+##### Example of Connecting Postgresql
Ubuntu OS.
@@ -1358,7 +1406,7 @@ LIFETIME(MIN 300 MAX 360)
You may need to edit `odbc.ini` to specify the full path to the library with the driver `DRIVER=/usr/local/lib/psqlodbcw.so`.
-### Example of Connecting MS SQL Server
+##### Example of Connecting MS SQL Server
Ubuntu OS.
@@ -1462,55 +1510,7 @@ LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 360)
```
-## DBMS
-
-### ODBC
-
-You can use this method to connect any database that has an ODBC driver.
-
-Example of settings:
-
-``` xml
-
-```
-
-or
-
-``` sql
-SOURCE(ODBC(
- db 'DatabaseName'
- table 'SchemaName.TableName'
- connection_string 'DSN=some_parameters'
- invalidate_query 'SQL_QUERY'
- query 'SELECT id, value_1, value_2 FROM db_name.table_name'
-))
-```
-
-Setting fields:
-
-- `db` – Name of the database. Omit it if the database name is set in the `` parameters.
-- `table` – Name of the table and schema if exists.
-- `connection_string` – Connection string.
-- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates).
-- `query` – The custom query. Optional parameter.
-
-:::note
-The `table` and `query` fields cannot be used together. And either one of the `table` or `query` fields must be declared.
-:::
-
-ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database.
-
-If you have a problems with encodings when using Oracle, see the corresponding [FAQ](/knowledgebase/oracle-odbc) item.
-
-### Mysql
+#### Mysql
Example of settings:
@@ -1627,7 +1627,7 @@ SOURCE(MYSQL(
))
```
-### ClickHouse
+#### ClickHouse
Example of settings:
@@ -1680,7 +1680,7 @@ Setting fields:
The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared.
:::
-### Mongodb
+#### Mongodb
Example of settings:
@@ -1723,7 +1723,7 @@ Setting fields:
- `options` - MongoDB connection string options (optional parameter).
-### Redis
+#### Redis
Example of settings:
@@ -1756,7 +1756,7 @@ Setting fields:
- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` is for simple sources and for hashed single key sources, `hash_map` is for hashed sources with two keys. Ranged sources and cache sources with complex key are unsupported. May be omitted, default value is `simple`.
- `db_index` – The specific numeric index of Redis logical database. May be omitted, default value is 0.
-### Cassandra
+#### Cassandra
Example of settings:
@@ -1798,7 +1798,7 @@ Setting fields:
The `column_family` or `where` fields cannot be used together with the `query` field. And either one of the `column_family` or `query` fields must be declared.
:::
-### PostgreSQL
+#### PostgreSQL
Example of settings:
@@ -1855,7 +1855,7 @@ Setting fields:
The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared.
:::
-## Null
+### Null
A special source that can be used to create dummy (empty) dictionaries. Such dictionaries can useful for tests or with setups with separated data and query nodes at nodes with Distributed tables.
diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md
index 06097d92480..556fe622c27 100644
--- a/docs/en/sql-reference/functions/hash-functions.md
+++ b/docs/en/sql-reference/functions/hash-functions.md
@@ -51,7 +51,7 @@ Calculates the MD5 from a string and returns the resulting set of bytes as Fixed
If you do not need MD5 in particular, but you need a decent cryptographic 128-bit hash, use the ‘sipHash128’ function instead.
If you want to get the same result as output by the md5sum utility, use lower(hex(MD5(s))).
-## sipHash64 (#hash_functions-siphash64)
+## sipHash64 {#hash_functions-siphash64}
Produces a 64-bit [SipHash](https://en.wikipedia.org/wiki/SipHash) hash value.
@@ -63,9 +63,9 @@ This is a cryptographic hash function. It works at least three times faster than
The function [interprets](/docs/en/sql-reference/functions/type-conversion-functions.md/#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the hash value for each of them. It then combines the hashes by the following algorithm:
-1. The first and the second hash value are concatenated to an array which is hashed.
-2. The previously calculated hash value and the hash of the third input parameter are hashed in a similar way.
-3. This calculation is repeated for all remaining hash values of the original input.
+1. The first and the second hash value are concatenated to an array which is hashed.
+2. The previously calculated hash value and the hash of the third input parameter are hashed in a similar way.
+3. This calculation is repeated for all remaining hash values of the original input.
**Arguments**
diff --git a/docs/en/sql-reference/statements/truncate.md b/docs/en/sql-reference/statements/truncate.md
index 457031a2157..4b46210aa09 100644
--- a/docs/en/sql-reference/statements/truncate.md
+++ b/docs/en/sql-reference/statements/truncate.md
@@ -4,8 +4,9 @@ sidebar_position: 52
sidebar_label: TRUNCATE
---
-# TRUNCATE Statement
+# TRUNCATE Statements
+## TRUNCATE TABLE
``` sql
TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
```
@@ -21,3 +22,10 @@ You can specify how long (in seconds) to wait for inactive replicas to execute `
:::note
If the `alter_sync` is set to `2` and some replicas are not active for more than the time, specified by the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown.
:::
+
+## TRUNCATE DATABASE
+``` sql
+TRUNCATE DATABASE [IF EXISTS] [db.]name [ON CLUSTER cluster]
+```
+
+Removes all tables from a database but keeps the database itself. When the clause `IF EXISTS` is omitted, the query returns an error if the database does not exist.
diff --git a/packages/clickhouse-keeper.yaml b/packages/clickhouse-keeper.yaml
index e9c2e929755..3b77d968763 100644
--- a/packages/clickhouse-keeper.yaml
+++ b/packages/clickhouse-keeper.yaml
@@ -32,6 +32,12 @@ contents:
dst: /usr/bin/clickhouse-keeper
- src: clickhouse-keeper.service
dst: /lib/systemd/system/clickhouse-keeper.service
+- src: clickhouse
+ dst: /usr/bin/clickhouse-keeper-client
+ type: symlink
+- src: clickhouse
+ dst: /usr/bin/clickhouse-keeper-converter
+ type: symlink
# docs
- src: ../AUTHORS
dst: /usr/share/doc/clickhouse-keeper/AUTHORS
diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp
index 1723c274fdb..22f0b2c2ac6 100644
--- a/programs/keeper/Keeper.cpp
+++ b/programs/keeper/Keeper.cpp
@@ -110,19 +110,18 @@ void Keeper::createServer(const std::string & listen_host, const char * port_nam
}
catch (const Poco::Exception &)
{
- std::string message = "Listen [" + listen_host + "]:" + std::to_string(port) + " failed: " + getCurrentExceptionMessage(false);
-
if (listen_try)
{
- LOG_WARNING(&logger(), "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
+ LOG_WARNING(&logger(), "Listen [{}]:{} failed: {}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, "
+ "then consider to "
"specify not disabled IPv4 or IPv6 address to listen in element of configuration "
"file. Example for disabled IPv6: 0.0.0.0 ."
" Example for disabled IPv4: ::",
- message);
+ listen_host, port, getCurrentExceptionMessage(false));
}
else
{
- throw Exception::createDeprecated(message, ErrorCodes::NETWORK_ERROR);
+ throw Exception(ErrorCodes::NETWORK_ERROR, "Listen [{}]:{} failed: {}", listen_host, port, getCurrentExceptionMessage(false));
}
}
}
@@ -291,12 +290,6 @@ try
{
path = config().getString("keeper_server.storage_path");
}
- else if (std::filesystem::is_directory(std::filesystem::path{config().getString("path", DBMS_DEFAULT_PATH)} / "coordination"))
- {
- throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
- "By default 'keeper.storage_path' could be assigned to {}, but the directory {} already exists. Please specify 'keeper.storage_path' in the keeper configuration explicitly",
- KEEPER_DEFAULT_PATH, String{std::filesystem::path{config().getString("path", DBMS_DEFAULT_PATH)} / "coordination"});
- }
else if (config().has("keeper_server.log_storage_path"))
{
path = std::filesystem::path(config().getString("keeper_server.log_storage_path")).parent_path();
@@ -305,6 +298,12 @@ try
{
path = std::filesystem::path(config().getString("keeper_server.snapshot_storage_path")).parent_path();
}
+ else if (std::filesystem::is_directory(std::filesystem::path{config().getString("path", DBMS_DEFAULT_PATH)} / "coordination"))
+ {
+ throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
+ "By default 'keeper.storage_path' could be assigned to {}, but the directory {} already exists. Please specify 'keeper.storage_path' in the keeper configuration explicitly",
+ KEEPER_DEFAULT_PATH, String{std::filesystem::path{config().getString("path", DBMS_DEFAULT_PATH)} / "coordination"});
+ }
else
{
path = KEEPER_DEFAULT_PATH;
diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp
index 587c88a2745..b38e17ecade 100644
--- a/programs/local/LocalServer.cpp
+++ b/programs/local/LocalServer.cpp
@@ -2,6 +2,8 @@
#include
#include
+#include
+#include
#include
#include
#include
@@ -655,43 +657,66 @@ void LocalServer::processConfig()
/// There is no need for concurrent queries, override max_concurrent_queries.
global_context->getProcessList().setMaxSize(0);
- /// Size of cache for uncompressed blocks. Zero means disabled.
- String uncompressed_cache_policy = config().getString("uncompressed_cache_policy", "");
- size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", 0);
+ const size_t memory_amount = getMemoryAmount();
+ const double cache_size_to_ram_max_ratio = config().getDouble("cache_size_to_ram_max_ratio", 0.5);
+ const size_t max_cache_size = static_cast(memory_amount * cache_size_to_ram_max_ratio);
+
+ String uncompressed_cache_policy = config().getString("uncompressed_cache_policy", DEFAULT_UNCOMPRESSED_CACHE_POLICY);
+ size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", DEFAULT_UNCOMPRESSED_CACHE_MAX_SIZE);
+ if (uncompressed_cache_size > max_cache_size)
+ {
+ uncompressed_cache_size = max_cache_size;
+ LOG_INFO(log, "Lowered uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+ }
if (uncompressed_cache_size)
global_context->setUncompressedCache(uncompressed_cache_policy, uncompressed_cache_size);
- /// Size of cache for marks (index of MergeTree family of tables).
- String mark_cache_policy = config().getString("mark_cache_policy", "");
- size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120);
+ String mark_cache_policy = config().getString("mark_cache_policy", DEFAULT_MARK_CACHE_POLICY);
+ size_t mark_cache_size = config().getUInt64("mark_cache_size", DEFAULT_MARK_CACHE_MAX_SIZE);
+ if (!mark_cache_size)
+ LOG_ERROR(log, "Too low mark cache size will lead to severe performance degradation.");
+ if (mark_cache_size > max_cache_size)
+ {
+ mark_cache_size = max_cache_size;
+ LOG_INFO(log, "Lowered mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mark_cache_size));
+ }
if (mark_cache_size)
global_context->setMarkCache(mark_cache_policy, mark_cache_size);
- /// Size of cache for uncompressed blocks of MergeTree indices. Zero means disabled.
- size_t index_uncompressed_cache_size = config().getUInt64("index_uncompressed_cache_size", 0);
+ size_t index_uncompressed_cache_size = config().getUInt64("index_uncompressed_cache_size", DEFAULT_INDEX_UNCOMPRESSED_CACHE_MAX_SIZE);
+ if (index_uncompressed_cache_size > max_cache_size)
+ {
+ index_uncompressed_cache_size = max_cache_size;
+ LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_uncompressed_cache_size));
+ }
if (index_uncompressed_cache_size)
global_context->setIndexUncompressedCache(index_uncompressed_cache_size);
- /// Size of cache for index marks (index of MergeTree skip indices).
- size_t index_mark_cache_size = config().getUInt64("index_mark_cache_size", 0);
+ size_t index_mark_cache_size = config().getUInt64("index_mark_cache_size", DEFAULT_INDEX_MARK_CACHE_MAX_SIZE);
+ if (index_mark_cache_size > max_cache_size)
+ {
+ index_mark_cache_size = max_cache_size;
+ LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_mark_cache_size));
+ }
if (index_mark_cache_size)
global_context->setIndexMarkCache(index_mark_cache_size);
- /// A cache for mmapped files.
- size_t mmap_cache_size = config().getUInt64("mmap_cache_size", 1000); /// The choice of default is arbitrary.
+ size_t mmap_cache_size = config().getUInt64("mmap_cache_size", DEFAULT_MMAP_CACHE_MAX_SIZE);
+ if (mmap_cache_size > max_cache_size)
+ {
+ mmap_cache_size = max_cache_size;
+ LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mmap_cache_size));
+ }
if (mmap_cache_size)
global_context->setMMappedFileCache(mmap_cache_size);
+ /// In Server.cpp (./clickhouse-server), we would initialize the query cache here.
+ /// Intentionally not doing this in clickhouse-local as it doesn't make sense.
+
#if USE_EMBEDDED_COMPILER
- /// 128 MB
- constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 128;
- size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", compiled_expression_cache_size_default);
-
- constexpr size_t compiled_expression_cache_elements_size_default = 10000;
- size_t compiled_expression_cache_elements_size
- = config().getUInt64("compiled_expression_cache_elements_size", compiled_expression_cache_elements_size_default);
-
- CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size, compiled_expression_cache_elements_size);
+ size_t compiled_expression_cache_max_size_in_bytes = config().getUInt64("compiled_expression_cache_size", DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_SIZE);
+ size_t compiled_expression_cache_max_elements = config().getUInt64("compiled_expression_cache_elements_size", DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_ENTRIES);
+ CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_max_size_in_bytes, compiled_expression_cache_max_elements);
#endif
/// NOTE: it is important to apply any overrides before
diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp
index 3042ae2bb57..31288b4aa01 100644
--- a/programs/obfuscator/Obfuscator.cpp
+++ b/programs/obfuscator/Obfuscator.cpp
@@ -365,17 +365,14 @@ static void transformFixedString(const UInt8 * src, UInt8 * dst, size_t size, UI
hash.update(seed);
hash.update(i);
+ const auto checksum = getSipHash128AsArray(hash);
if (size >= 16)
{
- char * hash_dst = reinterpret_cast(std::min(pos, end - 16));
- hash.get128(hash_dst);
+ auto * hash_dst = std::min(pos, end - 16);
+ memcpy(hash_dst, checksum.data(), checksum.size());
}
else
- {
- char value[16];
- hash.get128(value);
- memcpy(dst, value, end - dst);
- }
+ memcpy(dst, checksum.data(), end - dst);
pos += 16;
++i;
@@ -401,7 +398,7 @@ static void transformUUID(const UUID & src_uuid, UUID & dst_uuid, UInt64 seed)
hash.update(reinterpret_cast(&src), sizeof(UUID));
/// Saving version and variant from an old UUID
- hash.get128(reinterpret_cast(&dst));
+ dst = hash.get128();
dst.items[1] = (dst.items[1] & 0x1fffffffffffffffull) | (src.items[1] & 0xe000000000000000ull);
dst.items[0] = (dst.items[0] & 0xffffffffffff0fffull) | (src.items[0] & 0x000000000000f000ull);
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index afad5be6b26..738cf90fb7b 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -29,6 +29,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -325,19 +326,18 @@ void Server::createServer(
}
catch (const Poco::Exception &)
{
- std::string message = "Listen [" + listen_host + "]:" + std::to_string(port) + " failed: " + getCurrentExceptionMessage(false);
-
if (listen_try)
{
- LOG_WARNING(&logger(), "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
+ LOG_WARNING(&logger(), "Listen [{}]:{} failed: {}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, "
+ "then consider to "
"specify not disabled IPv4 or IPv6 address to listen in element of configuration "
"file. Example for disabled IPv6: 0.0.0.0 ."
" Example for disabled IPv4: ::",
- message);
+ listen_host, port, getCurrentExceptionMessage(false));
}
else
{
- throw Exception::createDeprecated(message, ErrorCodes::NETWORK_ERROR);
+ throw Exception(ErrorCodes::NETWORK_ERROR, "Listen [{}]:{} failed: {}", listen_host, port, getCurrentExceptionMessage(false));
}
}
}
@@ -658,10 +658,10 @@ try
global_context->addWarningMessage("Server was built with sanitizer. It will work slowly.");
#endif
- const auto memory_amount = getMemoryAmount();
+ const size_t physical_server_memory = getMemoryAmount();
LOG_INFO(log, "Available RAM: {}; physical cores: {}; logical cores: {}.",
- formatReadableSizeWithBinarySuffix(memory_amount),
+ formatReadableSizeWithBinarySuffix(physical_server_memory),
getNumberOfPhysicalCPUCores(), // on ARM processors it can show only enabled at current moment cores
std::thread::hardware_concurrency());
@@ -1136,9 +1136,10 @@ try
server_settings_.loadSettingsFromConfig(*config);
size_t max_server_memory_usage = server_settings_.max_server_memory_usage;
-
double max_server_memory_usage_to_ram_ratio = server_settings_.max_server_memory_usage_to_ram_ratio;
- size_t default_max_server_memory_usage = static_cast(memory_amount * max_server_memory_usage_to_ram_ratio);
+
+ size_t current_physical_server_memory = getMemoryAmount(); /// With cgroups, the amount of memory available to the server can be changed dynamically.
+ size_t default_max_server_memory_usage = static_cast(current_physical_server_memory * max_server_memory_usage_to_ram_ratio);
if (max_server_memory_usage == 0)
{
@@ -1146,7 +1147,7 @@ try
LOG_INFO(log, "Setting max_server_memory_usage was set to {}"
" ({} available * {:.2f} max_server_memory_usage_to_ram_ratio)",
formatReadableSizeWithBinarySuffix(max_server_memory_usage),
- formatReadableSizeWithBinarySuffix(memory_amount),
+ formatReadableSizeWithBinarySuffix(current_physical_server_memory),
max_server_memory_usage_to_ram_ratio);
}
else if (max_server_memory_usage > default_max_server_memory_usage)
@@ -1157,7 +1158,7 @@ try
" calculated as {} available"
" * {:.2f} max_server_memory_usage_to_ram_ratio",
formatReadableSizeWithBinarySuffix(max_server_memory_usage),
- formatReadableSizeWithBinarySuffix(memory_amount),
+ formatReadableSizeWithBinarySuffix(current_physical_server_memory),
max_server_memory_usage_to_ram_ratio);
}
@@ -1167,14 +1168,14 @@ try
size_t merges_mutations_memory_usage_soft_limit = server_settings_.merges_mutations_memory_usage_soft_limit;
- size_t default_merges_mutations_server_memory_usage = static_cast(memory_amount * server_settings_.merges_mutations_memory_usage_to_ram_ratio);
+ size_t default_merges_mutations_server_memory_usage = static_cast(current_physical_server_memory * server_settings_.merges_mutations_memory_usage_to_ram_ratio);
if (merges_mutations_memory_usage_soft_limit == 0)
{
merges_mutations_memory_usage_soft_limit = default_merges_mutations_server_memory_usage;
LOG_INFO(log, "Setting merges_mutations_memory_usage_soft_limit was set to {}"
" ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)",
formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit),
- formatReadableSizeWithBinarySuffix(memory_amount),
+ formatReadableSizeWithBinarySuffix(current_physical_server_memory),
server_settings_.merges_mutations_memory_usage_to_ram_ratio);
}
else if (merges_mutations_memory_usage_soft_limit > default_merges_mutations_server_memory_usage)
@@ -1183,7 +1184,7 @@ try
LOG_WARNING(log, "Setting merges_mutations_memory_usage_soft_limit was set to {}"
" ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)",
formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit),
- formatReadableSizeWithBinarySuffix(memory_amount),
+ formatReadableSizeWithBinarySuffix(current_physical_server_memory),
server_settings_.merges_mutations_memory_usage_to_ram_ratio);
}
@@ -1485,16 +1486,14 @@ try
/// Set up caches.
- size_t max_cache_size = static_cast(memory_amount * server_settings.cache_size_to_ram_max_ratio);
+ const size_t max_cache_size = static_cast(physical_server_memory * server_settings.cache_size_to_ram_max_ratio);
String uncompressed_cache_policy = server_settings.uncompressed_cache_policy;
- LOG_INFO(log, "Uncompressed cache policy name {}", uncompressed_cache_policy);
size_t uncompressed_cache_size = server_settings.uncompressed_cache_size;
if (uncompressed_cache_size > max_cache_size)
{
uncompressed_cache_size = max_cache_size;
- LOG_INFO(log, "Uncompressed cache size was lowered to {} because the system has low amount of memory",
- formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+ LOG_INFO(log, "Lowered uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
}
global_context->setUncompressedCache(uncompressed_cache_policy, uncompressed_cache_size);
@@ -1513,39 +1512,59 @@ try
server_settings.async_insert_queue_flush_on_shutdown));
}
- size_t mark_cache_size = server_settings.mark_cache_size;
String mark_cache_policy = server_settings.mark_cache_policy;
+ size_t mark_cache_size = server_settings.mark_cache_size;
if (!mark_cache_size)
LOG_ERROR(log, "Too low mark cache size will lead to severe performance degradation.");
if (mark_cache_size > max_cache_size)
{
mark_cache_size = max_cache_size;
- LOG_INFO(log, "Mark cache size was lowered to {} because the system has low amount of memory",
- formatReadableSizeWithBinarySuffix(mark_cache_size));
+ LOG_INFO(log, "Lowered mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mark_cache_size));
}
global_context->setMarkCache(mark_cache_policy, mark_cache_size);
- if (server_settings.index_uncompressed_cache_size)
+ size_t index_uncompressed_cache_size = server_settings.index_uncompressed_cache_size;
+ if (index_uncompressed_cache_size > max_cache_size)
+ {
+ index_uncompressed_cache_size = max_cache_size;
+ LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_uncompressed_cache_size));
+ }
+ if (index_uncompressed_cache_size)
global_context->setIndexUncompressedCache(server_settings.index_uncompressed_cache_size);
- if (server_settings.index_mark_cache_size)
+ size_t index_mark_cache_size = server_settings.index_mark_cache_size;
+ if (index_mark_cache_size > max_cache_size)
+ {
+ index_mark_cache_size = max_cache_size;
+ LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_mark_cache_size));
+ }
+ if (index_mark_cache_size)
global_context->setIndexMarkCache(server_settings.index_mark_cache_size);
- if (server_settings.mmap_cache_size)
+ size_t mmap_cache_size = server_settings.mmap_cache_size;
+ if (mmap_cache_size > max_cache_size)
+ {
+ mmap_cache_size = max_cache_size;
+ LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mmap_cache_size));
+ }
+ if (mmap_cache_size)
global_context->setMMappedFileCache(server_settings.mmap_cache_size);
- /// A cache for query results.
- global_context->setQueryCache(config());
+ size_t query_cache_max_size_in_bytes = config().getUInt64("query_cache.max_size_in_bytes", DEFAULT_QUERY_CACHE_MAX_SIZE);
+ size_t query_cache_max_entries = config().getUInt64("query_cache.max_entries", DEFAULT_QUERY_CACHE_MAX_ENTRIES);
+ size_t query_cache_max_entry_size_in_bytes = config().getUInt64("query_cache.max_entry_size_in_bytes", DEFAULT_QUERY_CACHE_MAX_ENTRY_SIZE_IN_BYTES);
+ size_t query_cache_max_entry_size_in_rows = config().getUInt64("query_cache.max_entry_size_in_rows", DEFAULT_QUERY_CACHE_MAX_ENTRY_SIZE_IN_ROWS);
+ if (query_cache_max_size_in_bytes > max_cache_size)
+ {
+ query_cache_max_size_in_bytes = max_cache_size;
+ LOG_INFO(log, "Lowered query cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(query_cache_max_size_in_bytes));
+ }
+ global_context->setQueryCache(query_cache_max_size_in_bytes, query_cache_max_entries, query_cache_max_entry_size_in_bytes, query_cache_max_entry_size_in_rows);
#if USE_EMBEDDED_COMPILER
- /// 128 MB
- constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 128;
- size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", compiled_expression_cache_size_default);
-
- constexpr size_t compiled_expression_cache_elements_size_default = 10000;
- size_t compiled_expression_cache_elements_size = config().getUInt64("compiled_expression_cache_elements_size", compiled_expression_cache_elements_size_default);
-
- CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size, compiled_expression_cache_elements_size);
+ size_t compiled_expression_cache_max_size_in_bytes = config().getUInt64("compiled_expression_cache_size", DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_SIZE);
+ size_t compiled_expression_cache_max_elements = config().getUInt64("compiled_expression_cache_elements_size", DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_ENTRIES);
+ CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_max_size_in_bytes, compiled_expression_cache_max_elements);
#endif
/// Set path for format schema files
diff --git a/src/Access/Common/AccessType.h b/src/Access/Common/AccessType.h
index 06507fd85c8..c8ac0c4a210 100644
--- a/src/Access/Common/AccessType.h
+++ b/src/Access/Common/AccessType.h
@@ -95,7 +95,7 @@ enum class AccessType
M(CREATE_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) /* allows to execute CREATE NAMED COLLECTION */ \
M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \
\
- M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH} DATABASE */\
+ M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH|TRUNCATE} DATABASE */\
M(DROP_TABLE, "", TABLE, DROP) /* allows to execute {DROP|DETACH} TABLE */\
M(DROP_VIEW, "", VIEW, DROP) /* allows to execute {DROP|DETACH} TABLE for views;
implicitly enabled by the grant DROP_TABLE */\
diff --git a/src/Access/LDAPClient.cpp b/src/Access/LDAPClient.cpp
index ac2f1683f0c..2af779aa9ae 100644
--- a/src/Access/LDAPClient.cpp
+++ b/src/Access/LDAPClient.cpp
@@ -549,7 +549,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
if (rc != LDAP_SUCCESS)
{
- String message = "LDAP search failed";
+ String message;
const char * raw_err_str = ldap_err2string(rc);
if (raw_err_str && *raw_err_str != '\0')
@@ -570,7 +570,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
message += matched_msg;
}
- throw Exception::createDeprecated(message, ErrorCodes::LDAP_ERROR);
+ throw Exception(ErrorCodes::LDAP_ERROR, "LDAP search failed{}", message);
}
break;
diff --git a/src/Access/MultipleAccessStorage.cpp b/src/Access/MultipleAccessStorage.cpp
index 0550c140c17..24bee1278c3 100644
--- a/src/Access/MultipleAccessStorage.cpp
+++ b/src/Access/MultipleAccessStorage.cpp
@@ -502,4 +502,15 @@ void MultipleAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
throwBackupNotAllowed();
}
+bool MultipleAccessStorage::containsStorage(std::string_view storage_type) const
+{
+ auto storages = getStoragesInternal();
+
+ for (const auto & storage : *storages)
+ {
+ if (storage->getStorageType() == storage_type)
+ return true;
+ }
+ return false;
+}
}
diff --git a/src/Access/MultipleAccessStorage.h b/src/Access/MultipleAccessStorage.h
index 069d414f601..940606948a0 100644
--- a/src/Access/MultipleAccessStorage.h
+++ b/src/Access/MultipleAccessStorage.h
@@ -57,6 +57,7 @@ public:
bool isRestoreAllowed() const override;
void backup(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, AccessEntityType type) const override;
void restoreFromBackup(RestorerFromBackup & restorer) override;
+ bool containsStorage(std::string_view storage_type) const;
protected:
std::optional findImpl(AccessEntityType type, const String & name) const override;
diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp
index 93a6925d84b..fec4a6fe50a 100644
--- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp
+++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp
@@ -4,6 +4,8 @@
#include
#include
#include
+#include
+#include
namespace DB
@@ -43,6 +45,13 @@ inline AggregateFunctionPtr createAggregateFunctionGroupArrayImpl(const DataType
return std::make_shared>(argument_type, parameters, std::forward(args)...);
}
+size_t getMaxArraySize()
+{
+ if (auto context = Context::getGlobalContextInstance())
+ return context->getServerSettings().aggregate_function_group_array_max_element_size;
+
+ return 0xFFFFFF;
+}
template
AggregateFunctionPtr createAggregateFunctionGroupArray(
@@ -51,7 +60,7 @@ AggregateFunctionPtr createAggregateFunctionGroupArray(
assertUnary(name, argument_types);
bool limit_size = false;
- UInt64 max_elems = std::numeric_limits::max();
+ UInt64 max_elems = getMaxArraySize();
if (parameters.empty())
{
@@ -78,7 +87,7 @@ AggregateFunctionPtr createAggregateFunctionGroupArray(
{
if (Tlast)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "groupArrayLast make sense only with max_elems (groupArrayLast(max_elems)())");
- return createAggregateFunctionGroupArrayImpl>(argument_types[0], parameters);
+ return createAggregateFunctionGroupArrayImpl>(argument_types[0], parameters, max_elems);
}
else
return createAggregateFunctionGroupArrayImpl>(argument_types[0], parameters, max_elems);
diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.h b/src/AggregateFunctions/AggregateFunctionGroupArray.h
index b5905105457..49552b57c82 100644
--- a/src/AggregateFunctions/AggregateFunctionGroupArray.h
+++ b/src/AggregateFunctions/AggregateFunctionGroupArray.h
@@ -21,7 +21,7 @@
#include
-#define AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE 0xFFFFFF
+#define AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ELEMENT_SIZE 0xFFFFFF
namespace DB
@@ -128,7 +128,7 @@ class GroupArrayNumericImpl final
public:
explicit GroupArrayNumericImpl(
- const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max(), UInt64 seed_ = 123456)
+ const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_, UInt64 seed_ = 123456)
: IAggregateFunctionDataHelper, GroupArrayNumericImpl>(
{data_type_}, parameters_, std::make_shared(data_type_))
, max_elems(max_elems_)
@@ -263,10 +263,18 @@ public:
}
}
+ static void checkArraySize(size_t elems, size_t max_elems)
+ {
+ if (unlikely(elems > max_elems))
+ throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
+ "Too large array size {} (maximum: {})", elems, max_elems);
+ }
+
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional /* version */) const override
{
const auto & value = this->data(place).value;
- const size_t size = value.size();
+ const UInt64 size = value.size();
+ checkArraySize(size, max_elems);
writeVarUInt(size, buf);
for (const auto & element : value)
writeBinaryLittleEndian(element, buf);
@@ -287,13 +295,7 @@ public:
{
size_t size = 0;
readVarUInt(size, buf);
-
- if (unlikely(size > AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE))
- throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
- "Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE);
-
- if (limit_num_elems && unlikely(size > max_elems))
- throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size, it should not exceed {}", max_elems);
+ checkArraySize(size, max_elems);
auto & value = this->data(place).value;
@@ -357,9 +359,17 @@ struct GroupArrayNodeBase
const_cast(arena->alignedInsert(reinterpret_cast(this), sizeof(Node) + size, alignof(Node))));
}
+ static void checkElementSize(size_t size, size_t max_size)
+ {
+ if (unlikely(size > max_size))
+ throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
+ "Too large array element size {} (maximum: {})", size, max_size);
+ }
+
/// Write node to buffer
void write(WriteBuffer & buf) const
{
+ checkElementSize(size, AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ELEMENT_SIZE);
writeVarUInt(size, buf);
buf.write(data(), size);
}
@@ -369,9 +379,7 @@ struct GroupArrayNodeBase
{
UInt64 size;
readVarUInt(size, buf);
- if (unlikely(size > AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE))
- throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
- "Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE);
+ checkElementSize(size, AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ELEMENT_SIZE);
Node * node = reinterpret_cast(arena->alignedAlloc(sizeof(Node) + size, alignof(Node)));
node->size = size;
@@ -455,7 +463,7 @@ class GroupArrayGeneralImpl final
UInt64 seed;
public:
- GroupArrayGeneralImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max(), UInt64 seed_ = 123456)
+ GroupArrayGeneralImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_, UInt64 seed_ = 123456)
: IAggregateFunctionDataHelper, GroupArrayGeneralImpl>(
{data_type_}, parameters_, std::make_shared(data_type_))
, data_type(this->argument_types[0])
@@ -596,9 +604,18 @@ public:
}
}
+ static void checkArraySize(size_t elems, size_t max_elems)
+ {
+ if (unlikely(elems > max_elems))
+ throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
+ "Too large array size {} (maximum: {})", elems, max_elems);
+ }
+
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional /* version */) const override
{
- writeVarUInt(data(place).value.size(), buf);
+ UInt64 elems = data(place).value.size();
+ checkArraySize(elems, max_elems);
+ writeVarUInt(elems, buf);
auto & value = data(place).value;
for (auto & node : value)
@@ -624,12 +641,7 @@ public:
if (unlikely(elems == 0))
return;
- if (unlikely(elems > AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE))
- throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
- "Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE);
-
- if (limit_num_elems && unlikely(elems > max_elems))
- throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size, it should not exceed {}", max_elems);
+ checkArraySize(elems, max_elems);
auto & value = data(place).value;
@@ -673,6 +685,6 @@ public:
bool allocatesMemoryInArena() const override { return true; }
};
-#undef AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE
+#undef AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ELEMENT_SIZE
}
diff --git a/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h
index 2810051a82f..1752d5751d5 100644
--- a/src/AggregateFunctions/AggregateFunctionUniq.h
+++ b/src/AggregateFunctions/AggregateFunctionUniq.h
@@ -315,10 +315,9 @@ struct Adder
{
StringRef value = column.getDataAt(row_num);
- UInt128 key;
SipHash hash;
hash.update(value.data, value.size);
- hash.get128(key);
+ const auto key = hash.get128();
data.set.template insert(key);
}
diff --git a/src/AggregateFunctions/UniqVariadicHash.h b/src/AggregateFunctions/UniqVariadicHash.h
index 94f54a7a059..840380e7f0f 100644
--- a/src/AggregateFunctions/UniqVariadicHash.h
+++ b/src/AggregateFunctions/UniqVariadicHash.h
@@ -107,9 +107,7 @@ struct UniqVariadicHash
++column;
}
- UInt128 key;
- hash.get128(key);
- return key;
+ return hash.get128();
}
};
@@ -131,9 +129,7 @@ struct UniqVariadicHash
++column;
}
- UInt128 key;
- hash.get128(key);
- return key;
+ return hash.get128();
}
};
diff --git a/src/Analyzer/HashUtils.h b/src/Analyzer/HashUtils.h
index 2203e7d5203..3727ea1ea14 100644
--- a/src/Analyzer/HashUtils.h
+++ b/src/Analyzer/HashUtils.h
@@ -20,7 +20,7 @@ struct QueryTreeNodeWithHash
{}
QueryTreeNodePtrType node = nullptr;
- std::pair<UInt64, UInt64> hash;
+ CityHash_v1_0_2::uint128 hash;
};
template
@@ -55,6 +55,6 @@ struct std::hash>
{
size_t operator()(const DB::QueryTreeNodeWithHash & node_with_hash) const
{
- return node_with_hash.hash.first;
+ return node_with_hash.hash.low64;
}
};
diff --git a/src/Analyzer/IQueryTreeNode.cpp b/src/Analyzer/IQueryTreeNode.cpp
index f1056975f7f..babc76f29d6 100644
--- a/src/Analyzer/IQueryTreeNode.cpp
+++ b/src/Analyzer/IQueryTreeNode.cpp
@@ -229,10 +229,7 @@ IQueryTreeNode::Hash IQueryTreeNode::getTreeHash() const
}
}
- Hash result;
- hash_state.get128(result);
-
- return result;
+ return getSipHash128AsPair(hash_state);
}
QueryTreeNodePtr IQueryTreeNode::clone() const
diff --git a/src/Analyzer/IQueryTreeNode.h b/src/Analyzer/IQueryTreeNode.h
index 763963b734a..3f6816696b4 100644
--- a/src/Analyzer/IQueryTreeNode.h
+++ b/src/Analyzer/IQueryTreeNode.h
@@ -106,7 +106,7 @@ public:
*/
bool isEqual(const IQueryTreeNode & rhs, CompareOptions compare_options = { .compare_aliases = true }) const;
- using Hash = std::pair<UInt64, UInt64>;
+ using Hash = CityHash_v1_0_2::uint128;
using HashState = SipHash;
/** Get tree hash identifying current tree
diff --git a/src/Analyzer/Passes/QueryAnalysisPass.cpp b/src/Analyzer/Passes/QueryAnalysisPass.cpp
index 0b3d19f1861..0c3dc5e70d7 100644
--- a/src/Analyzer/Passes/QueryAnalysisPass.cpp
+++ b/src/Analyzer/Passes/QueryAnalysisPass.cpp
@@ -2033,7 +2033,7 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
auto & nearest_query_scope_query_node = nearest_query_scope->scope_node->as();
auto & mutable_context = nearest_query_scope_query_node.getMutableContext();
- auto scalar_query_hash_string = std::to_string(node_with_hash.hash.first) + '_' + std::to_string(node_with_hash.hash.second);
+ auto scalar_query_hash_string = DB::toString(node_with_hash.hash);
if (mutable_context->hasQueryContext())
mutable_context->getQueryContext()->addScalar(scalar_query_hash_string, scalar_block);
diff --git a/src/Backups/BackupCoordinationRemote.cpp b/src/Backups/BackupCoordinationRemote.cpp
index 214873fb765..e5fcbf26781 100644
--- a/src/Backups/BackupCoordinationRemote.cpp
+++ b/src/Backups/BackupCoordinationRemote.cpp
@@ -187,7 +187,7 @@ BackupCoordinationRemote::BackupCoordinationRemote(
if (code == Coordination::Error::ZNODEEXISTS)
zk->handleEphemeralNodeExistenceNoFailureInjection(alive_node_path, "");
else if (code != Coordination::Error::ZOK)
- throw zkutil::KeeperException(code, alive_node_path);
+ throw zkutil::KeeperException::fromPath(code, alive_node_path);
}
})
{
@@ -745,7 +745,7 @@ bool BackupCoordinationRemote::startWritingFile(size_t data_file_index)
else if (code == Coordination::Error::ZNODEEXISTS)
host_is_assigned = (zk->get(full_path) == host_index_str); /// The previous retry could write this ZooKeeper's node and then fail.
else
- throw zkutil::KeeperException(code, full_path);
+ throw zkutil::KeeperException::fromPath(code, full_path);
});
if (!host_is_assigned)
@@ -815,7 +815,7 @@ bool BackupCoordinationRemote::hasConcurrentBackups(const std::atomic &)
break;
bool is_last_attempt = (attempt == MAX_ZOOKEEPER_ATTEMPTS - 1);
if ((code != Coordination::Error::ZBADVERSION) || is_last_attempt)
- throw zkutil::KeeperException(code, backup_stage_path);
+ throw zkutil::KeeperException::fromPath(code, backup_stage_path);
}
});
diff --git a/src/Backups/BackupCoordinationStageSync.cpp b/src/Backups/BackupCoordinationStageSync.cpp
index ebddbb8b82f..9b9ddc8515c 100644
--- a/src/Backups/BackupCoordinationStageSync.cpp
+++ b/src/Backups/BackupCoordinationStageSync.cpp
@@ -56,7 +56,7 @@ void BackupCoordinationStageSync::set(const String & current_host, const String
{
auto code = zookeeper->trySet(zookeeper_path, new_stage);
if (code != Coordination::Error::ZOK)
- throw zkutil::KeeperException(code, zookeeper_path);
+ throw zkutil::KeeperException::fromPath(code, zookeeper_path);
}
else
{
@@ -64,7 +64,7 @@ void BackupCoordinationStageSync::set(const String & current_host, const String
String alive_node_path = zookeeper_path + "/alive|" + current_host;
auto code = zookeeper->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral);
if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS)
- throw zkutil::KeeperException(code, alive_node_path);
+ throw zkutil::KeeperException::fromPath(code, alive_node_path);
zookeeper->createIfNotExists(zookeeper_path + "/started|" + current_host, "");
zookeeper->createIfNotExists(zookeeper_path + "/current|" + current_host + "|" + new_stage, message);
@@ -90,7 +90,7 @@ void BackupCoordinationStageSync::setError(const String & current_host, const Ex
/// so the following line tries to preserve the error status.
auto code = zookeeper->trySet(zookeeper_path, Stage::ERROR);
if (code != Coordination::Error::ZOK)
- throw zkutil::KeeperException(code, zookeeper_path);
+ throw zkutil::KeeperException::fromPath(code, zookeeper_path);
});
}
diff --git a/src/Backups/BackupIO_S3.cpp b/src/Backups/BackupIO_S3.cpp
index d32047efae4..7926d0b2564 100644
--- a/src/Backups/BackupIO_S3.cpp
+++ b/src/Backups/BackupIO_S3.cpp
@@ -159,6 +159,7 @@ void BackupReaderS3::copyFileToDisk(const String & path_in_backup, size_t file_s
blob_path.size(), mode);
copyS3File(
+ client,
client,
s3_uri.bucket,
fs::path(s3_uri.key) / path_in_backup,
@@ -218,6 +219,7 @@ void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src
{
LOG_TRACE(log, "Copying file {} from disk {} to S3", src_path, src_disk->getName());
copyS3File(
+ client,
client,
/* src_bucket */ blob_path[1],
/* src_key= */ blob_path[0],
@@ -238,7 +240,7 @@ void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src
void BackupWriterS3::copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length)
{
- copyDataToS3File(create_read_buffer, start_pos, length, client, s3_uri.bucket, fs::path(s3_uri.key) / path_in_backup, request_settings, {},
+ copyDataToS3File(create_read_buffer, start_pos, length, client, client, s3_uri.bucket, fs::path(s3_uri.key) / path_in_backup, request_settings, {},
threadPoolCallbackRunner(getBackupsIOThreadPool().get(), "BackupWriterS3"));
}
diff --git a/src/Backups/RestoreCoordinationRemote.cpp b/src/Backups/RestoreCoordinationRemote.cpp
index f95969b52a1..37abebb26b7 100644
--- a/src/Backups/RestoreCoordinationRemote.cpp
+++ b/src/Backups/RestoreCoordinationRemote.cpp
@@ -46,7 +46,7 @@ RestoreCoordinationRemote::RestoreCoordinationRemote(
if (code == Coordination::Error::ZNODEEXISTS)
zk->handleEphemeralNodeExistenceNoFailureInjection(alive_node_path, "");
else if (code != Coordination::Error::ZOK)
- throw zkutil::KeeperException(code, alive_node_path);
+ throw zkutil::KeeperException::fromPath(code, alive_node_path);
}
})
{
@@ -129,7 +129,7 @@ bool RestoreCoordinationRemote::acquireCreatingTableInReplicatedDatabase(const S
path += "/" + escapeForFileName(table_name);
auto code = zk->tryCreate(path, toString(current_host_index), zkutil::CreateMode::Persistent);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
- throw zkutil::KeeperException(code, path);
+ throw zkutil::KeeperException::fromPath(code, path);
if (code == Coordination::Error::ZOK)
{
@@ -155,7 +155,7 @@ bool RestoreCoordinationRemote::acquireInsertingDataIntoReplicatedTable(const St
String path = zookeeper_path + "/repl_tables_data_acquired/" + escapeForFileName(table_zk_path);
auto code = zk->tryCreate(path, toString(current_host_index), zkutil::CreateMode::Persistent);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
- throw zkutil::KeeperException(code, path);
+ throw zkutil::KeeperException::fromPath(code, path);
if (code == Coordination::Error::ZOK)
{
@@ -181,7 +181,7 @@ bool RestoreCoordinationRemote::acquireReplicatedAccessStorage(const String & ac
String path = zookeeper_path + "/repl_access_storages_acquired/" + escapeForFileName(access_storage_zk_path);
auto code = zk->tryCreate(path, toString(current_host_index), zkutil::CreateMode::Persistent);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
- throw zkutil::KeeperException(code, path);
+ throw zkutil::KeeperException::fromPath(code, path);
if (code == Coordination::Error::ZOK)
{
@@ -217,7 +217,7 @@ bool RestoreCoordinationRemote::acquireReplicatedSQLObjects(const String & loade
auto code = zk->tryCreate(path, "", zkutil::CreateMode::Persistent);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
- throw zkutil::KeeperException(code, path);
+ throw zkutil::KeeperException::fromPath(code, path);
if (code == Coordination::Error::ZOK)
{
@@ -302,7 +302,7 @@ bool RestoreCoordinationRemote::hasConcurrentRestores(const std::atomic
break;
bool is_last_attempt = (attempt == MAX_ZOOKEEPER_ATTEMPTS - 1);
if ((code != Coordination::Error::ZBADVERSION) || is_last_attempt)
- throw zkutil::KeeperException(code, path);
+ throw zkutil::KeeperException::fromPath(code, path);
}
});
diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp
index 9ad6a46866f..495e3bdfd4e 100644
--- a/src/Client/ClientBase.cpp
+++ b/src/Client/ClientBase.cpp
@@ -847,7 +847,9 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
visitor.visit(parsed_query);
/// Get new query after substitutions.
- query = serializeAST(*parsed_query);
+ if (visitor.getNumberOfReplacedParameters())
+ query = serializeAST(*parsed_query);
+ chassert(!query.empty());
}
if (allow_merge_tree_settings && parsed_query->as())
@@ -1332,7 +1334,9 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars
visitor.visit(parsed_query);
/// Get new query after substitutions.
- query = serializeAST(*parsed_query);
+ if (visitor.getNumberOfReplacedParameters())
+ query = serializeAST(*parsed_query);
+ chassert(!query.empty());
}
/// Process the query that requires transferring data blocks to the server.
@@ -1811,7 +1815,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
}
if (const auto * use_query = parsed_query->as())
{
- const String & new_database = use_query->database;
+ const String & new_database = use_query->getDatabase();
/// If the client initiates the reconnection, it takes the settings from the config.
config().setString("database", new_database);
/// If the connection initiates the reconnection, it uses its variable.
diff --git a/src/Client/ConnectionEstablisher.cpp b/src/Client/ConnectionEstablisher.cpp
index 439025447ca..9805838a311 100644
--- a/src/Client/ConnectionEstablisher.cpp
+++ b/src/Client/ConnectionEstablisher.cpp
@@ -170,7 +170,7 @@ bool ConnectionEstablisherAsync::checkTimeout()
epoll_event events[2];
events[0].data.fd = events[1].data.fd = -1;
- size_t ready_count = epoll.getManyReady(2, events, false);
+ size_t ready_count = epoll.getManyReady(2, events, 0);
for (size_t i = 0; i != ready_count; ++i)
{
if (events[i].data.fd == socket_fd)
diff --git a/src/Client/HedgedConnections.cpp b/src/Client/HedgedConnections.cpp
index 0efad1188fa..7d723d02347 100644
--- a/src/Client/HedgedConnections.cpp
+++ b/src/Client/HedgedConnections.cpp
@@ -388,7 +388,7 @@ int HedgedConnections::getReadyFileDescriptor(AsyncCallback async_callback)
bool blocking = !static_cast(async_callback);
while (events_count == 0)
{
- events_count = epoll.getManyReady(1, &event, blocking);
+ events_count = epoll.getManyReady(1, &event, blocking ? -1 : 0);
if (!events_count && async_callback)
async_callback(epoll.getFileDescriptor(), 0, AsyncEventTimeoutType::NONE, epoll.getDescription(), AsyncTaskExecutor::Event::READ | AsyncTaskExecutor::Event::ERROR);
}
diff --git a/src/Client/QueryFuzzer.cpp b/src/Client/QueryFuzzer.cpp
index 5ce95c82528..86cedf65345 100644
--- a/src/Client/QueryFuzzer.cpp
+++ b/src/Client/QueryFuzzer.cpp
@@ -521,8 +521,7 @@ void QueryFuzzer::fuzzCreateQuery(ASTCreateQuery & create)
if (create.storage)
create.storage->updateTreeHash(sip_hash);
- IAST::Hash hash;
- sip_hash.get128(hash);
+ const auto hash = getSipHash128AsPair(sip_hash);
/// Save only tables with unique definition.
if (created_tables_hashes.insert(hash).second)
diff --git a/src/Columns/ColumnAggregateFunction.cpp b/src/Columns/ColumnAggregateFunction.cpp
index 62ec324455e..3ebb30df87e 100644
--- a/src/Columns/ColumnAggregateFunction.cpp
+++ b/src/Columns/ColumnAggregateFunction.cpp
@@ -524,7 +524,7 @@ void ColumnAggregateFunction::insertDefault()
pushBackAndCreateState(data, arena, func.get());
}
-StringRef ColumnAggregateFunction::serializeValueIntoArena(size_t n, Arena & arena, const char *& begin) const
+StringRef ColumnAggregateFunction::serializeValueIntoArena(size_t n, Arena & arena, const char *& begin, const UInt8 *) const
{
WriteBufferFromArena out(arena, begin);
func->serialize(data[n], out, version);
diff --git a/src/Columns/ColumnAggregateFunction.h b/src/Columns/ColumnAggregateFunction.h
index f9ce45708c9..7c7201e585a 100644
--- a/src/Columns/ColumnAggregateFunction.h
+++ b/src/Columns/ColumnAggregateFunction.h
@@ -162,7 +162,7 @@ public:
void insertDefault() override;
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const override;
const char * deserializeAndInsertFromArena(const char * src_arena) override;
diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp
index 74512d1669b..1cb8188bce6 100644
--- a/src/Columns/ColumnArray.cpp
+++ b/src/Columns/ColumnArray.cpp
@@ -205,7 +205,7 @@ void ColumnArray::insertData(const char * pos, size_t length)
}
-StringRef ColumnArray::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
+StringRef ColumnArray::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const
{
size_t array_size = sizeAt(n);
size_t offset = offsetAt(n);
diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h
index f011d9a607b..2a9bfa405e5 100644
--- a/src/Columns/ColumnArray.h
+++ b/src/Columns/ColumnArray.h
@@ -77,7 +77,7 @@ public:
StringRef getDataAt(size_t n) const override;
bool isDefaultAt(size_t n) const override;
void insertData(const char * pos, size_t length) override;
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char * pos) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
diff --git a/src/Columns/ColumnCompressed.h b/src/Columns/ColumnCompressed.h
index bfe7cdb4924..b780fbbf37a 100644
--- a/src/Columns/ColumnCompressed.h
+++ b/src/Columns/ColumnCompressed.h
@@ -88,7 +88,7 @@ public:
void insertData(const char *, size_t) override { throwMustBeDecompressed(); }
void insertDefault() override { throwMustBeDecompressed(); }
void popBack(size_t) override { throwMustBeDecompressed(); }
- StringRef serializeValueIntoArena(size_t, Arena &, char const *&) const override { throwMustBeDecompressed(); }
+ StringRef serializeValueIntoArena(size_t, Arena &, char const *&, const UInt8 *) const override { throwMustBeDecompressed(); }
const char * deserializeAndInsertFromArena(const char *) override { throwMustBeDecompressed(); }
const char * skipSerializedInArena(const char *) const override { throwMustBeDecompressed(); }
void updateHashWithValue(size_t, SipHash &) const override { throwMustBeDecompressed(); }
diff --git a/src/Columns/ColumnConst.h b/src/Columns/ColumnConst.h
index f769dd6cc2a..dc84e0c2402 100644
--- a/src/Columns/ColumnConst.h
+++ b/src/Columns/ColumnConst.h
@@ -151,7 +151,7 @@ public:
s -= n;
}
- StringRef serializeValueIntoArena(size_t, Arena & arena, char const *& begin) const override
+ StringRef serializeValueIntoArena(size_t, Arena & arena, char const *& begin, const UInt8 *) const override
{
return data->serializeValueIntoArena(0, arena, begin);
}
diff --git a/src/Columns/ColumnDecimal.cpp b/src/Columns/ColumnDecimal.cpp
index 8e5792934cf..142ee6c271d 100644
--- a/src/Columns/ColumnDecimal.cpp
+++ b/src/Columns/ColumnDecimal.cpp
@@ -59,9 +59,26 @@ bool ColumnDecimal::hasEqualValues() const
}
template
-StringRef ColumnDecimal::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
+StringRef ColumnDecimal::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 * null_bit) const
{
- auto * pos = arena.allocContinue(sizeof(T), begin);
+ constexpr size_t null_bit_size = sizeof(UInt8);
+ StringRef res;
+ char * pos;
+ if (null_bit)
+ {
+ res.size = * null_bit ? null_bit_size : null_bit_size + sizeof(T);
+ pos = arena.allocContinue(res.size, begin);
+ res.data = pos;
+ memcpy(pos, null_bit, null_bit_size);
+ if (*null_bit) return res;
+ pos += null_bit_size;
+ }
+ else
+ {
+ res.size = sizeof(T);
+ pos = arena.allocContinue(res.size, begin);
+ res.data = pos;
+ }
memcpy(pos, &data[n], sizeof(T));
- return StringRef(pos, sizeof(T));
+ return res;
}
diff --git a/src/Columns/ColumnDecimal.h b/src/Columns/ColumnDecimal.h
index 03e0b9be558..fb24ae4554b 100644
--- a/src/Columns/ColumnDecimal.h
+++ b/src/Columns/ColumnDecimal.h
@@ -80,7 +80,7 @@ public:
Float64 getFloat64(size_t n) const final { return DecimalUtils::convertTo(data[n], scale); }
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 * null_bit) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char * pos) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
diff --git a/src/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp
index 24b5c435ecd..a18e5c522a1 100644
--- a/src/Columns/ColumnFixedString.cpp
+++ b/src/Columns/ColumnFixedString.cpp
@@ -86,11 +86,28 @@ void ColumnFixedString::insertData(const char * pos, size_t length)
memset(chars.data() + old_size + length, 0, n - length);
}
-StringRef ColumnFixedString::serializeValueIntoArena(size_t index, Arena & arena, char const *& begin) const
+StringRef ColumnFixedString::serializeValueIntoArena(size_t index, Arena & arena, char const *& begin, const UInt8 * null_bit) const
{
- auto * pos = arena.allocContinue(n, begin);
+ constexpr size_t null_bit_size = sizeof(UInt8);
+ StringRef res;
+ char * pos;
+ if (null_bit)
+ {
+ res.size = * null_bit ? null_bit_size : null_bit_size + n;
+ pos = arena.allocContinue(res.size, begin);
+ res.data = pos;
+ memcpy(pos, null_bit, null_bit_size);
+ if (*null_bit) return res;
+ pos += null_bit_size;
+ }
+ else
+ {
+ res.size = n;
+ pos = arena.allocContinue(res.size, begin);
+ res.data = pos;
+ }
memcpy(pos, &chars[n * index], n);
- return StringRef(pos, n);
+ return res;
}
const char * ColumnFixedString::deserializeAndInsertFromArena(const char * pos)
diff --git a/src/Columns/ColumnFixedString.h b/src/Columns/ColumnFixedString.h
index 39497e3403e..445432b7b28 100644
--- a/src/Columns/ColumnFixedString.h
+++ b/src/Columns/ColumnFixedString.h
@@ -115,7 +115,7 @@ public:
chars.resize_assume_reserved(chars.size() - n * elems);
}
- StringRef serializeValueIntoArena(size_t index, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t index, Arena & arena, char const *& begin, const UInt8 *) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
diff --git a/src/Columns/ColumnFunction.h b/src/Columns/ColumnFunction.h
index a1f6245c2bd..c21e88744e0 100644
--- a/src/Columns/ColumnFunction.h
+++ b/src/Columns/ColumnFunction.h
@@ -96,7 +96,7 @@ public:
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot insert into {}", getName());
}
- StringRef serializeValueIntoArena(size_t, Arena &, char const *&) const override
+ StringRef serializeValueIntoArena(size_t, Arena &, char const *&, const UInt8 *) const override
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot serialize from {}", getName());
}
diff --git a/src/Columns/ColumnLowCardinality.cpp b/src/Columns/ColumnLowCardinality.cpp
index 9269ea4ee4d..41358a4e538 100644
--- a/src/Columns/ColumnLowCardinality.cpp
+++ b/src/Columns/ColumnLowCardinality.cpp
@@ -255,7 +255,7 @@ void ColumnLowCardinality::insertData(const char * pos, size_t length)
idx.insertPosition(dictionary.getColumnUnique().uniqueInsertData(pos, length));
}
-StringRef ColumnLowCardinality::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
+StringRef ColumnLowCardinality::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const
{
return getDictionary().serializeValueIntoArena(getIndexes().getUInt(n), arena, begin);
}
diff --git a/src/Columns/ColumnLowCardinality.h b/src/Columns/ColumnLowCardinality.h
index dcd07ff3b34..91bd5945fd9 100644
--- a/src/Columns/ColumnLowCardinality.h
+++ b/src/Columns/ColumnLowCardinality.h
@@ -87,7 +87,7 @@ public:
void popBack(size_t n) override { idx.popBack(n); }
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp
index 797700e87b0..ddcde43ca23 100644
--- a/src/Columns/ColumnMap.cpp
+++ b/src/Columns/ColumnMap.cpp
@@ -111,7 +111,7 @@ void ColumnMap::popBack(size_t n)
nested->popBack(n);
}
-StringRef ColumnMap::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
+StringRef ColumnMap::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const
{
return nested->serializeValueIntoArena(n, arena, begin);
}
diff --git a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h
index e5bc26127df..fde8a7e0e67 100644
--- a/src/Columns/ColumnMap.h
+++ b/src/Columns/ColumnMap.h
@@ -58,7 +58,7 @@ public:
void insert(const Field & x) override;
void insertDefault() override;
void popBack(size_t n) override;
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char * pos) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp
index 2eb2ff0bf69..fcd95e5c963 100644
--- a/src/Columns/ColumnNullable.cpp
+++ b/src/Columns/ColumnNullable.cpp
@@ -4,6 +4,10 @@
#include
#include
#include
+#include
+#include
+#include
+#include
#include
#include
#include
@@ -34,6 +38,7 @@ ColumnNullable::ColumnNullable(MutableColumnPtr && nested_column_, MutableColumn
{
/// ColumnNullable cannot have constant nested column. But constant argument could be passed. Materialize it.
nested_column = getNestedColumn().convertToFullColumnIfConst();
+ nested_type = nested_column->getDataType();
if (!getNestedColumn().canBeInsideNullable())
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "{} cannot be inside Nullable column", getNestedColumn().getName());
@@ -134,21 +139,77 @@ void ColumnNullable::insertData(const char * pos, size_t length)
}
}
-StringRef ColumnNullable::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
+StringRef ColumnNullable::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const
{
const auto & arr = getNullMapData();
static constexpr auto s = sizeof(arr[0]);
+ char * pos;
- auto * pos = arena.allocContinue(s, begin);
- memcpy(pos, &arr[n], s);
-
- if (arr[n])
- return StringRef(pos, s);
-
- auto nested_ref = getNestedColumn().serializeValueIntoArena(n, arena, begin);
-
- /// serializeValueIntoArena may reallocate memory. Have to use ptr from nested_ref.data and move it back.
- return StringRef(nested_ref.data - s, nested_ref.size + s);
+    switch (nested_type)
+    {
+        case TypeIndex::UInt8:
+            return static_cast<const ColumnUInt8 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::UInt16:
+            return static_cast<const ColumnUInt16 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::UInt32:
+            return static_cast<const ColumnUInt32 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::UInt64:
+            return static_cast<const ColumnUInt64 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::UInt128:
+            return static_cast<const ColumnUInt128 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::UInt256:
+            return static_cast<const ColumnUInt256 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Int8:
+            return static_cast<const ColumnInt8 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Int16:
+            return static_cast<const ColumnInt16 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Int32:
+            return static_cast<const ColumnInt32 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Int64:
+            return static_cast<const ColumnInt64 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Int128:
+            return static_cast<const ColumnInt128 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Int256:
+            return static_cast<const ColumnInt256 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Float32:
+            return static_cast<const ColumnFloat32 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Float64:
+            return static_cast<const ColumnFloat64 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Date:
+            return static_cast<const ColumnDate *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Date32:
+            return static_cast<const ColumnDate32 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::DateTime:
+            return static_cast<const ColumnDateTime *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::DateTime64:
+            return static_cast<const ColumnDateTime64 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::String:
+            return static_cast<const ColumnString *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::FixedString:
+            return static_cast<const ColumnFixedString *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Decimal32:
+            return static_cast<const ColumnDecimal<Decimal32> *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Decimal64:
+            return static_cast<const ColumnDecimal<Decimal64> *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Decimal128:
+            return static_cast<const ColumnDecimal<Decimal128> *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::Decimal256:
+            return static_cast<const ColumnDecimal<Decimal256> *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::UUID:
+            return static_cast<const ColumnUUID *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::IPv4:
+            return static_cast<const ColumnIPv4 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+        case TypeIndex::IPv6:
+            return static_cast<const ColumnIPv6 *>(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]);
+ default:
+ pos = arena.allocContinue(s, begin);
+ memcpy(pos, &arr[n], s);
+ if (arr[n])
+ return StringRef(pos, s);
+ auto nested_ref = getNestedColumn().serializeValueIntoArena(n, arena, begin);
+ /// serializeValueIntoArena may reallocate memory. Have to use ptr from nested_ref.data and move it back.
+ return StringRef(nested_ref.data - s, nested_ref.size + s);
+ }
}
const char * ColumnNullable::deserializeAndInsertFromArena(const char * pos)
diff --git a/src/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h
index bc95eca69b9..b57fdf3064d 100644
--- a/src/Columns/ColumnNullable.h
+++ b/src/Columns/ColumnNullable.h
@@ -6,6 +6,7 @@
#include
#include
+#include "Core/TypeId.h"
#include "config.h"
@@ -62,7 +63,7 @@ public:
StringRef getDataAt(size_t) const override;
/// Will insert null value if pos=nullptr
void insertData(const char * pos, size_t length) override;
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 * null_bit) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char * pos) const override;
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
@@ -212,6 +213,8 @@ public:
private:
WrappedPtr nested_column;
WrappedPtr null_map;
+ // optimize serializeValueIntoArena
+ TypeIndex nested_type;
     template <bool negative>
void applyNullMapImpl(const NullMap & map);
diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h
index bc5a6b69bb0..36a33a8f10f 100644
--- a/src/Columns/ColumnObject.h
+++ b/src/Columns/ColumnObject.h
@@ -244,7 +244,7 @@ public:
StringRef getDataAt(size_t) const override { throwMustBeConcrete(); }
bool isDefaultAt(size_t) const override { throwMustBeConcrete(); }
void insertData(const char *, size_t) override { throwMustBeConcrete(); }
- StringRef serializeValueIntoArena(size_t, Arena &, char const *&) const override { throwMustBeConcrete(); }
+ StringRef serializeValueIntoArena(size_t, Arena &, char const *&, const UInt8 *) const override { throwMustBeConcrete(); }
const char * deserializeAndInsertFromArena(const char *) override { throwMustBeConcrete(); }
const char * skipSerializedInArena(const char *) const override { throwMustBeConcrete(); }
void updateHashWithValue(size_t, SipHash &) const override { throwMustBeConcrete(); }
diff --git a/src/Columns/ColumnSparse.cpp b/src/Columns/ColumnSparse.cpp
index 4f76a9be4b9..057c0cd7112 100644
--- a/src/Columns/ColumnSparse.cpp
+++ b/src/Columns/ColumnSparse.cpp
@@ -150,7 +150,7 @@ void ColumnSparse::insertData(const char * pos, size_t length)
insertSingleValue([&](IColumn & column) { column.insertData(pos, length); });
}
-StringRef ColumnSparse::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
+StringRef ColumnSparse::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const
{
return values->serializeValueIntoArena(getValueIndex(n), arena, begin);
}
diff --git a/src/Columns/ColumnSparse.h b/src/Columns/ColumnSparse.h
index 26e05655f60..48c7422dd27 100644
--- a/src/Columns/ColumnSparse.h
+++ b/src/Columns/ColumnSparse.h
@@ -78,7 +78,7 @@ public:
/// Will insert null value if pos=nullptr
void insertData(const char * pos, size_t length) override;
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char *) const override;
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
diff --git a/src/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp
index 38c7b2c0dd6..50fe90ad8ef 100644
--- a/src/Columns/ColumnString.cpp
+++ b/src/Columns/ColumnString.cpp
@@ -213,17 +213,30 @@ ColumnPtr ColumnString::permute(const Permutation & perm, size_t limit) const
}
-StringRef ColumnString::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
+StringRef ColumnString::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 * null_bit) const
{
size_t string_size = sizeAt(n);
size_t offset = offsetAt(n);
-
+ constexpr size_t null_bit_size = sizeof(UInt8);
StringRef res;
- res.size = sizeof(string_size) + string_size;
- char * pos = arena.allocContinue(res.size, begin);
+ char * pos;
+ if (null_bit)
+ {
+ res.size = * null_bit ? null_bit_size : null_bit_size + sizeof(string_size) + string_size;
+ pos = arena.allocContinue(res.size, begin);
+ res.data = pos;
+ memcpy(pos, null_bit, null_bit_size);
+ if (*null_bit) return res;
+ pos += null_bit_size;
+ }
+ else
+ {
+ res.size = sizeof(string_size) + string_size;
+ pos = arena.allocContinue(res.size, begin);
+ res.data = pos;
+ }
memcpy(pos, &string_size, sizeof(string_size));
memcpy(pos + sizeof(string_size), &chars[offset], string_size);
- res.data = pos;
return res;
}
diff --git a/src/Columns/ColumnString.h b/src/Columns/ColumnString.h
index 08c876a803d..e8e5ebbcbf9 100644
--- a/src/Columns/ColumnString.h
+++ b/src/Columns/ColumnString.h
@@ -11,6 +11,7 @@
#include
#include
#include
+#include
class Collator;
@@ -168,7 +169,7 @@ public:
offsets.resize_assume_reserved(offsets.size() - n);
}
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 * null_bit) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp
index 9702d275114..d8992125be4 100644
--- a/src/Columns/ColumnTuple.cpp
+++ b/src/Columns/ColumnTuple.cpp
@@ -171,7 +171,7 @@ void ColumnTuple::popBack(size_t n)
column->popBack(n);
}
-StringRef ColumnTuple::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
+StringRef ColumnTuple::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const
{
StringRef res(begin, 0);
for (const auto & column : columns)
diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h
index e7dee9b8ff9..79099f4c098 100644
--- a/src/Columns/ColumnTuple.h
+++ b/src/Columns/ColumnTuple.h
@@ -61,7 +61,7 @@ public:
void insertFrom(const IColumn & src_, size_t n) override;
void insertDefault() override;
void popBack(size_t n) override;
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char * pos) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
diff --git a/src/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h
index 377255d80c7..7c916fe77a6 100644
--- a/src/Columns/ColumnUnique.h
+++ b/src/Columns/ColumnUnique.h
@@ -79,7 +79,7 @@ public:
Float32 getFloat32(size_t n) const override { return getNestedColumn()->getFloat32(n); }
bool getBool(size_t n) const override { return getNestedColumn()->getBool(n); }
bool isNullAt(size_t n) const override { return is_nullable && n == getNullValueIndex(); }
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 * null_bit) const override;
const char * skipSerializedInArena(const char * pos) const override;
void updateHashWithValue(size_t n, SipHash & hash_func) const override
{
@@ -373,7 +373,7 @@ size_t ColumnUnique::uniqueInsertData(const char * pos, size_t lengt
}
 template <typename ColumnType>
-StringRef ColumnUnique<ColumnType>::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
+StringRef ColumnUnique<ColumnType>::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 *) const
{
if (is_nullable)
{
@@ -670,8 +670,9 @@ UInt128 ColumnUnique::IncrementalHash::getHash(const ColumnType & co
for (size_t i = 0; i < column_size; ++i)
column.updateHashWithValue(i, sip_hash);
+ hash = sip_hash.get128();
+
std::lock_guard lock(mutex);
- sip_hash.get128(hash);
cur_hash = hash;
num_added_rows.store(column_size);
}
diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp
index f2fe343a371..a9b8c0ccacb 100644
--- a/src/Columns/ColumnVector.cpp
+++ b/src/Columns/ColumnVector.cpp
@@ -49,11 +49,28 @@ namespace ErrorCodes
}
 template <typename T>
-StringRef ColumnVector<T>::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
+StringRef ColumnVector<T>::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 * null_bit) const
{
- auto * pos = arena.allocContinue(sizeof(T), begin);
+ constexpr size_t null_bit_size = sizeof(UInt8);
+ StringRef res;
+ char * pos;
+ if (null_bit)
+ {
+ res.size = * null_bit ? null_bit_size : null_bit_size + sizeof(T);
+ pos = arena.allocContinue(res.size, begin);
+ res.data = pos;
+ memcpy(pos, null_bit, null_bit_size);
+ if (*null_bit) return res;
+ pos += null_bit_size;
+ }
+ else
+ {
+ res.size = sizeof(T);
+ pos = arena.allocContinue(res.size, begin);
+ res.data = pos;
+ }
     unalignedStore<T>(pos, data[n]);
- return StringRef(pos, sizeof(T));
+ return res;
}
 template <typename T>
diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h
index b8ebff2a5d5..7bb69656c5a 100644
--- a/src/Columns/ColumnVector.h
+++ b/src/Columns/ColumnVector.h
@@ -174,7 +174,7 @@ public:
data.resize_assume_reserved(data.size() - n);
}
- StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
+ StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 * null_bit) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h
index b4eaf5c28f5..12ac1102efd 100644
--- a/src/Columns/IColumn.h
+++ b/src/Columns/IColumn.h
@@ -218,7 +218,7 @@ public:
* For example, to obtain unambiguous representation of Array of strings, strings data should be interleaved with their sizes.
* Parameter begin should be used with Arena::allocContinue.
*/
- virtual StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const = 0;
+ virtual StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin, const UInt8 * null_bit = nullptr) const = 0;
/// Deserializes a value that was serialized using IColumn::serializeValueIntoArena method.
/// Returns pointer to the position after the read data.
diff --git a/src/Columns/IColumnDummy.h b/src/Columns/IColumnDummy.h
index 82d4c857b29..4cadae2bc3d 100644
--- a/src/Columns/IColumnDummy.h
+++ b/src/Columns/IColumnDummy.h
@@ -57,7 +57,7 @@ public:
++s;
}
- StringRef serializeValueIntoArena(size_t /*n*/, Arena & arena, char const *& begin) const override
+ StringRef serializeValueIntoArena(size_t /*n*/, Arena & arena, char const *& begin, const UInt8 *) const override
{
/// Has to put one useless byte into Arena, because serialization into zero number of bytes is ambiguous.
char * res = arena.allocContinue(1, begin);
diff --git a/src/Columns/tests/gtest_column_unique.cpp b/src/Columns/tests/gtest_column_unique.cpp
index 15208da70fb..ab2cb42b603 100644
--- a/src/Columns/tests/gtest_column_unique.cpp
+++ b/src/Columns/tests/gtest_column_unique.cpp
@@ -117,7 +117,7 @@ void column_unique_unique_deserialize_from_arena_impl(ColumnType & column, const
const char * pos = nullptr;
for (size_t i = 0; i < num_values; ++i)
{
- auto ref = column_unique_pattern->serializeValueIntoArena(idx->getUInt(i), arena, pos);
+ auto ref = column_unique_pattern->serializeValueIntoArena(idx->getUInt(i), arena, pos, nullptr);
const char * new_pos;
column_unique->uniqueDeserializeAndInsertFromArena(ref.data, new_pos);
ASSERT_EQ(new_pos - ref.data, ref.size) << "Deserialized data has different sizes at position " << i;
@@ -140,8 +140,8 @@ void column_unique_unique_deserialize_from_arena_impl(ColumnType & column, const
const char * pos_lc = nullptr;
for (size_t i = 0; i < num_values; ++i)
{
- auto ref_string = column.serializeValueIntoArena(i, arena_string, pos_string);
- auto ref_lc = column_unique->serializeValueIntoArena(idx->getUInt(i), arena_lc, pos_lc);
+ auto ref_string = column.serializeValueIntoArena(i, arena_string, pos_string, nullptr);
+ auto ref_lc = column_unique->serializeValueIntoArena(idx->getUInt(i), arena_lc, pos_lc, nullptr);
ASSERT_EQ(ref_string, ref_lc) << "Serialized data is different from pattern at position " << i;
}
}
diff --git a/src/Common/CacheBase.h b/src/Common/CacheBase.h
index 84cbd5b5c6f..aa7b3ea10cf 100644
--- a/src/Common/CacheBase.h
+++ b/src/Common/CacheBase.h
@@ -51,10 +51,11 @@ public:
{
auto on_weight_loss_function = [&](size_t weight_loss) { onRemoveOverflowWeightLoss(weight_loss); };
- static constexpr std::string_view default_cache_policy = "SLRU";
-
if (cache_policy_name.empty())
+ {
+ static constexpr auto default_cache_policy = "SLRU";
cache_policy_name = default_cache_policy;
+ }
if (cache_policy_name == "LRU")
{
diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp
index a8ff347f399..285362e32f1 100644
--- a/src/Common/DNSResolver.cpp
+++ b/src/Common/DNSResolver.cpp
@@ -313,8 +313,8 @@ bool DNSResolver::updateCacheImpl(
UpdateF && update_func,
ElemsT && elems,
UInt32 max_consecutive_failures,
- const String & notfound_log_msg,
- const String & dropped_log_msg)
+    FormatStringHelper<String> notfound_log_msg,
+    FormatStringHelper<String> dropped_log_msg)
{
bool updated = false;
String lost_elems;
@@ -351,7 +351,7 @@ bool DNSResolver::updateCacheImpl(
}
if (!lost_elems.empty())
- LOG_INFO(log, fmt::runtime(notfound_log_msg), lost_elems);
+ LOG_INFO(log, notfound_log_msg.format(std::move(lost_elems)));
if (elements_to_drop.size())
{
updated = true;
@@ -363,7 +363,7 @@ bool DNSResolver::updateCacheImpl(
deleted_elements += cacheElemToString(it->first);
elems.erase(it);
}
- LOG_INFO(log, fmt::runtime(dropped_log_msg), deleted_elements);
+ LOG_INFO(log, dropped_log_msg.format(std::move(deleted_elements)));
}
return updated;
diff --git a/src/Common/DNSResolver.h b/src/Common/DNSResolver.h
index 84715b392a8..1017607a5bd 100644
--- a/src/Common/DNSResolver.h
+++ b/src/Common/DNSResolver.h
@@ -5,6 +5,7 @@
#include
#include
#include
+#include <Common/LoggingFormatStringHelpers.h>
namespace Poco { class Logger; }
@@ -61,13 +62,12 @@ public:
private:
     template <typename UpdateF, typename ElemsT>
-
bool updateCacheImpl(
UpdateF && update_func,
ElemsT && elems,
UInt32 max_consecutive_failures,
- const String & notfound_log_msg,
- const String & dropped_log_msg);
+        FormatStringHelper<String> notfound_log_msg,
+        FormatStringHelper<String> dropped_log_msg);
DNSResolver();
diff --git a/src/Common/DateLUTImpl.cpp b/src/Common/DateLUTImpl.cpp
index 4c21d9c9783..bb677b3a62d 100644
--- a/src/Common/DateLUTImpl.cpp
+++ b/src/Common/DateLUTImpl.cpp
@@ -3,7 +3,7 @@
#include
#include
#include
-#include <Poco/Exception.h>
+#include <Common/Exception.h>
#include
#include
@@ -12,6 +12,14 @@
#include
+namespace DB
+{
+namespace ErrorCodes
+{
+ extern const int BAD_ARGUMENTS;
+}
+}
+
/// Embedded timezones.
std::string_view getTimeZone(const char * name);
@@ -66,7 +74,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
cctz::time_zone cctz_time_zone;
if (!cctz::load_time_zone(time_zone, &cctz_time_zone))
- throw Poco::Exception("Cannot load time zone " + time_zone_);
+ throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Cannot load time zone {}", time_zone_);
constexpr cctz::civil_day epoch{1970, 1, 1};
constexpr cctz::civil_day lut_start{DATE_LUT_MIN_YEAR, 1, 1};
diff --git a/src/Common/Epoll.cpp b/src/Common/Epoll.cpp
index 182981aca27..ac06f044beb 100644
--- a/src/Common/Epoll.cpp
+++ b/src/Common/Epoll.cpp
@@ -2,6 +2,7 @@
#include "Epoll.h"
#include
+#include <Common/Stopwatch.h>
#include
#include
@@ -57,21 +58,35 @@ void Epoll::remove(int fd)
throwFromErrno("Cannot remove descriptor from epoll", DB::ErrorCodes::EPOLL_ERROR);
}
-size_t Epoll::getManyReady(int max_events, epoll_event * events_out, bool blocking) const
+size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout) const
{
if (events_count == 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no events in epoll");
+ Stopwatch watch;
int ready_size;
- int timeout = blocking ? -1 : 0;
- do
+ while (true)
{
ready_size = epoll_wait(epoll_fd, events_out, max_events, timeout);
- if (ready_size == -1 && errno != EINTR)
- throwFromErrno("Error in epoll_wait", DB::ErrorCodes::EPOLL_ERROR);
+ /// If `ready_size` = 0, it's timeout.
+ if (ready_size < 0)
+ {
+ if (errno == EINTR)
+ {
+ if (timeout >= 0)
+ {
+                    timeout = std::max(0, static_cast<int>(timeout - watch.elapsedMilliseconds()));
+ watch.restart();
+ }
+ continue;
+ }
+ else
+ throwFromErrno("Error in epoll_wait", DB::ErrorCodes::EPOLL_ERROR);
+ }
+ else
+ break;
}
- while (ready_size <= 0 && (ready_size != 0 || blocking));
return ready_size;
}
diff --git a/src/Common/Epoll.h b/src/Common/Epoll.h
index ac42a8fc56d..5eadd5a7a65 100644
--- a/src/Common/Epoll.h
+++ b/src/Common/Epoll.h
@@ -30,10 +30,11 @@ public:
/// Remove file descriptor to epoll.
void remove(int fd);
- /// Get events from epoll. Events are written in events_out, this function returns an amount of ready events.
- /// If blocking is false and there are no ready events,
- /// return empty vector, otherwise wait for ready events.
- size_t getManyReady(int max_events, epoll_event * events_out, bool blocking) const;
+ /// Get events from epoll. Events are written in events_out, this function returns an amount of
+ /// ready events. The timeout argument specifies the number of milliseconds to wait for ready
+ /// events. Timeout of -1 causes epoll_wait() to block indefinitely, while specifying a timeout
+ /// equal to zero will return immediately, even if no events are available.
+ size_t getManyReady(int max_events, epoll_event * events_out, int timeout) const;
int getFileDescriptor() const { return epoll_fd; }
diff --git a/src/Common/MemoryTracker.cpp b/src/Common/MemoryTracker.cpp
index 7005a57d175..93bd50a0b49 100644
--- a/src/Common/MemoryTracker.cpp
+++ b/src/Common/MemoryTracker.cpp
@@ -151,8 +151,11 @@ void MemoryTracker::logPeakMemoryUsage()
{
log_peak_memory_usage_in_destructor = false;
const auto * description = description_ptr.load(std::memory_order_relaxed);
+ auto peak_bytes = peak.load(std::memory_order::relaxed);
+ if (peak_bytes < 128 * 1024)
+ return;
LOG_DEBUG(&Poco::Logger::get("MemoryTracker"),
- "Peak memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(peak));
+ "Peak memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(peak_bytes));
}
void MemoryTracker::logMemoryUsage(Int64 current) const
diff --git a/src/Common/NamePrompter.h b/src/Common/NamePrompter.h
index 79e78529d95..65a2c76a102 100644
--- a/src/Common/NamePrompter.h
+++ b/src/Common/NamePrompter.h
@@ -117,6 +117,11 @@ public:
DB::appendHintsMessage(error_message, hints);
}
+ String getHintsMessage(const String & name) const
+ {
+ return getHintsErrorMessageSuffix(getHints(name));
+ }
+
IHints() = default;
IHints(const IHints &) = default;
diff --git a/src/Common/ShellCommandSettings.cpp b/src/Common/ShellCommandSettings.cpp
new file mode 100644
index 00000000000..951a20e949c
--- /dev/null
+++ b/src/Common/ShellCommandSettings.cpp
@@ -0,0 +1,27 @@
+#include <Common/ShellCommandSettings.h>
+
+#include <magic_enum.hpp>
+#include <Poco/String.h>
+#include <Common/Exception.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+ extern const int BAD_ARGUMENTS;
+}
+
+ExternalCommandStderrReaction parseExternalCommandStderrReaction(const std::string & config)
+{
+    auto reaction = magic_enum::enum_cast<ExternalCommandStderrReaction>(Poco::toUpper(config));
+ if (!reaction)
+ throw Exception(
+ ErrorCodes::BAD_ARGUMENTS,
+ "Unknown stderr_reaction: {}. Possible values are 'none', 'log', 'log_first', 'log_last' and 'throw'",
+ config);
+
+ return *reaction;
+}
+
+}
diff --git a/src/Common/ShellCommandSettings.h b/src/Common/ShellCommandSettings.h
new file mode 100644
index 00000000000..4cfd862b873
--- /dev/null
+++ b/src/Common/ShellCommandSettings.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include <string>
+
+namespace DB
+{
+
+enum class ExternalCommandStderrReaction
+{
+ NONE, /// Do nothing.
+ LOG, /// Try to log all outputs of stderr from the external command immediately.
+ LOG_FIRST, /// Try to log first 1_KiB outputs of stderr from the external command after exit.
+ LOG_LAST, /// Same as above, but log last 1_KiB outputs.
+ THROW /// Immediately throw exception when the external command outputs something to its stderr.
+};
+
+ExternalCommandStderrReaction parseExternalCommandStderrReaction(const std::string & config);
+
+}
diff --git a/src/Common/SipHash.h b/src/Common/SipHash.h
index cdec00d4bcc..43d620adde0 100644
--- a/src/Common/SipHash.h
+++ b/src/Common/SipHash.h
@@ -13,6 +13,8 @@
* (~ 700 MB/sec, 15 million strings per second)
*/
+#include "TransformEndianness.hpp"
+
#include
#include
#include
@@ -22,14 +24,12 @@
#include
#include
+#include
-namespace DB
-{
-namespace ErrorCodes
+namespace DB::ErrorCodes
{
extern const int LOGICAL_ERROR;
}
-}
#define SIPROUND \
do \
@@ -161,71 +161,50 @@ public:
}
}
-    template <typename T>
+    template <typename Transform = void, typename T>
     ALWAYS_INLINE void update(const T & x)
     {
         if constexpr (std::endian::native == std::endian::big)
         {
-            T rev_x = x;
-            char *start = reinterpret_cast<char *>(&rev_x);
-            char *end = start + sizeof(T);
-            std::reverse(start, end);
-            update(reinterpret_cast<const char *>(&rev_x), sizeof(rev_x)); /// NOLINT
+            auto transformed_x = x;
+            if constexpr (!std::is_same_v<Transform, void>)
+                transformed_x = Transform()(x);
+            else
+                DB::transformEndianness(transformed_x);
+
+            update(reinterpret_cast<const char *>(&transformed_x), sizeof(transformed_x)); /// NOLINT
         }
         else
             update(reinterpret_cast<const char *>(&x), sizeof(x)); /// NOLINT
     }
- ALWAYS_INLINE void update(const std::string & x)
- {
- update(x.data(), x.length());
- }
+ ALWAYS_INLINE void update(const std::string & x) { update(x.data(), x.length()); }
+ ALWAYS_INLINE void update(const std::string_view x) { update(x.data(), x.size()); }
+ ALWAYS_INLINE void update(const char * s) { update(std::string_view(s)); }
- ALWAYS_INLINE void update(const std::string_view x)
- {
- update(x.data(), x.size());
- }
-
- /// Get the result in some form. This can only be done once!
-
- ALWAYS_INLINE void get128(char * out)
- {
- finalize();
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-        unalignedStore<UInt64>(out + 8, v0 ^ v1);
-        unalignedStore<UInt64>(out, v2 ^ v3);
-#else
-        unalignedStore<UInt64>(out, v0 ^ v1);
-        unalignedStore<UInt64>(out + 8, v2 ^ v3);
-#endif
-    }
-
-    template <typename T>
-    ALWAYS_INLINE void get128(T & lo, T & hi)
-    {
-        static_assert(sizeof(T) == 8);
-        finalize();
-        lo = v0 ^ v1;
-        hi = v2 ^ v3;
-    }
-
-    template <typename T>
-    ALWAYS_INLINE void get128(T & dst)
-    {
-        static_assert(sizeof(T) == 16);
-        get128(reinterpret_cast<char *>(&dst));
- }
-
- UInt64 get64()
+ ALWAYS_INLINE UInt64 get64()
{
finalize();
return v0 ^ v1 ^ v2 ^ v3;
}
- UInt128 get128()
+    template <typename T>
+    requires (sizeof(T) == 8)
+ ALWAYS_INLINE void get128(T & lo, T & hi)
+ {
+ finalize();
+ lo = v0 ^ v1;
+ hi = v2 ^ v3;
+ }
+
+ ALWAYS_INLINE UInt128 get128()
{
UInt128 res;
- get128(res);
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ get128(res.items[1], res.items[0]);
+#else
+ get128(res.items[0], res.items[1]);
+#endif
return res;
}
@@ -247,9 +226,7 @@ public:
{
lo = std::byteswap(lo);
hi = std::byteswap(hi);
- auto tmp = hi;
- hi = lo;
- lo = tmp;
+ std::swap(lo, hi);
}
UInt128 res = hi;
@@ -265,11 +242,18 @@ public:
#include
-inline void sipHash128(const char * data, const size_t size, char * out)
+inline std::array<UInt8, 16> getSipHash128AsArray(SipHash & sip_hash)
 {
-    SipHash hash;
-    hash.update(data, size);
-    hash.get128(out);
+    std::array<UInt8, 16> arr;
+    *reinterpret_cast<UInt128 *>(arr.data()) = sip_hash.get128();
+ return arr;
+}
+
+inline CityHash_v1_0_2::uint128 getSipHash128AsPair(SipHash & sip_hash)
+{
+ CityHash_v1_0_2::uint128 result;
+ sip_hash.get128(result.low64, result.high64);
+ return result;
}
inline UInt128 sipHash128Keyed(UInt64 key0, UInt64 key1, const char * data, const size_t size)
@@ -309,7 +293,7 @@ inline UInt64 sipHash64(const char * data, const size_t size)
}
 template <typename T>
-UInt64 sipHash64(const T & x)
+inline UInt64 sipHash64(const T & x)
{
SipHash hash;
hash.update(x);
diff --git a/src/Common/TransformEndianness.hpp b/src/Common/TransformEndianness.hpp
index 05f7778a12e..1657305acda 100644
--- a/src/Common/TransformEndianness.hpp
+++ b/src/Common/TransformEndianness.hpp
@@ -2,6 +2,7 @@
#include
#include
+#include
#include
@@ -48,7 +49,7 @@ inline void transformEndianness(T & value)
}
 template <typename T>
-requires std::is_scoped_enum_v<T>
+requires std::is_enum_v<T> || std::is_scoped_enum_v<T>
 inline void transformEndianness(T & x)
 {
     using UnderlyingType = std::underlying_type_t<T>;
diff --git a/src/Common/ZooKeeper/IKeeper.cpp b/src/Common/ZooKeeper/IKeeper.cpp
index f0a07241735..5897d04b8a2 100644
--- a/src/Common/ZooKeeper/IKeeper.cpp
+++ b/src/Common/ZooKeeper/IKeeper.cpp
@@ -21,29 +21,33 @@ namespace ProfileEvents
namespace Coordination
{
-Exception::Exception(const std::string & msg, const Error code_, int)
- : DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION), code(code_)
+void Exception::incrementErrorMetrics(const Error code_)
{
- if (Coordination::isUserError(code))
+ if (Coordination::isUserError(code_))
ProfileEvents::increment(ProfileEvents::ZooKeeperUserExceptions);
- else if (Coordination::isHardwareError(code))
+ else if (Coordination::isHardwareError(code_))
ProfileEvents::increment(ProfileEvents::ZooKeeperHardwareExceptions);
else
ProfileEvents::increment(ProfileEvents::ZooKeeperOtherExceptions);
}
-Exception::Exception(const std::string & msg, const Error code_)
- : Exception(msg + " (" + errorMessage(code_) + ")", code_, 0)
+Exception::Exception(const std::string & msg, const Error code_, int)
+ : DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION)
+ , code(code_)
{
+ incrementErrorMetrics(code);
+}
+
+Exception::Exception(PreformattedMessage && msg, const Error code_)
+ : DB::Exception(std::move(msg), DB::ErrorCodes::KEEPER_EXCEPTION)
+ , code(code_)
+{
+ extendedMessage(errorMessage(code));
+ incrementErrorMetrics(code);
}
Exception::Exception(const Error code_)
- : Exception(errorMessage(code_), code_, 0)
-{
-}
-
-Exception::Exception(const Error code_, const std::string & path)
- : Exception(std::string{errorMessage(code_)} + ", path: " + path, code_, 0)
+ : Exception(code_, "Coordination error: {}", errorMessage(code_))
{
}
@@ -56,10 +60,10 @@ using namespace DB;
static void addRootPath(String & path, const String & root_path)
{
if (path.empty())
- throw Exception("Path cannot be empty", Error::ZBADARGUMENTS);
+ throw Exception::fromMessage(Error::ZBADARGUMENTS, "Path cannot be empty");
if (path[0] != '/')
- throw Exception("Path must begin with /, got path '" + path + "'", Error::ZBADARGUMENTS);
+ throw Exception(Error::ZBADARGUMENTS, "Path must begin with /, got path '{}'", path);
if (root_path.empty())
return;
@@ -76,7 +80,7 @@ static void removeRootPath(String & path, const String & root_path)
return;
if (path.size() <= root_path.size())
- throw Exception("Received path is not longer than root_path", Error::ZDATAINCONSISTENCY);
+ throw Exception::fromMessage(Error::ZDATAINCONSISTENCY, "Received path is not longer than root_path");
path = path.substr(root_path.size());
}
diff --git a/src/Common/ZooKeeper/IKeeper.h b/src/Common/ZooKeeper/IKeeper.h
index 43f0f9c5f6f..ec23b52ceb1 100644
--- a/src/Common/ZooKeeper/IKeeper.h
+++ b/src/Common/ZooKeeper/IKeeper.h
@@ -17,6 +17,13 @@
* - ZooKeeper emulation layer on top of Etcd, FoundationDB, whatever.
*/
+namespace DB
+{
+namespace ErrorCodes
+{
+ extern const int KEEPER_EXCEPTION;
+}
+}
namespace Coordination
{
@@ -158,6 +165,10 @@ struct WatchResponse : virtual Response
};
 using WatchCallback = std::function<void(const WatchResponse &)>;
+/// Passing watch callback as a shared_ptr allows to
+/// - avoid copying of the callback
+/// - registering the same callback only once per path
+using WatchCallbackPtr = std::shared_ptr<WatchCallback>;
struct SetACLRequest : virtual Request
{
@@ -450,17 +461,46 @@ class Exception : public DB::Exception
private:
/// Delegate constructor, used to minimize repetition; last parameter used for overload resolution.
Exception(const std::string & msg, const Error code_, int); /// NOLINT
+ Exception(PreformattedMessage && msg, const Error code_);
+
+ /// Message must be a compile-time constant
+ template <typename T>
+ requires std::is_convertible_v<T, String>
+ Exception(T && message, const Error code_) : DB::Exception(DB::ErrorCodes::KEEPER_EXCEPTION, std::forward<T>(message)), code(code_)
+ {
+ incrementErrorMetrics(code);
+ }
+
+ static void incrementErrorMetrics(const Error code_);
public:
explicit Exception(const Error code_); /// NOLINT
- Exception(const std::string & msg, const Error code_); /// NOLINT
- Exception(const Error code_, const std::string & path); /// NOLINT
Exception(const Exception & exc);
template <typename... Args>
- Exception(const Error code_, fmt::format_string<Args...> fmt, Args &&... args)
- : Exception(fmt::format(fmt, std::forward<Args>(args)...), code_)
+ Exception(const Error code_, FormatStringHelper<Args...> fmt, Args &&... args)
+ : DB::Exception(DB::ErrorCodes::KEEPER_EXCEPTION, std::move(fmt), std::forward<Args>(args)...)
+ , code(code_)
{
+ incrementErrorMetrics(code);
+ }
+
+ inline static Exception createDeprecated(const std::string & msg, const Error code_)
+ {
+ return Exception(msg, code_, 0);
+ }
+
+ inline static Exception fromPath(const Error code_, const std::string & path)
+ {
+ return Exception(code_, "Coordination error: {}, path {}", errorMessage(code_), path);
+ }
+
+ /// Message must be a compile-time constant
+ template <typename T>
+ requires std::is_convertible_v<T, String>
+ inline static Exception fromMessage(const Error code_, T && message)
+ {
+ return Exception(std::forward<T>(message), code_);
}
const char * name() const noexcept override { return "Coordination::Exception"; }
@@ -521,12 +561,12 @@ public:
virtual void exists(
const String & path,
ExistsCallback callback,
- WatchCallback watch) = 0;
+ WatchCallbackPtr watch) = 0;
virtual void get(
const String & path,
GetCallback callback,
- WatchCallback watch) = 0;
+ WatchCallbackPtr watch) = 0;
virtual void set(
const String & path,
@@ -538,7 +578,7 @@ public:
const String & path,
ListRequestType list_request_type,
ListCallback callback,
- WatchCallback watch) = 0;
+ WatchCallbackPtr watch) = 0;
virtual void check(
const String & path,
diff --git a/src/Common/ZooKeeper/KeeperException.h b/src/Common/ZooKeeper/KeeperException.h
index 4442c41a19d..f957bd2288f 100644
--- a/src/Common/ZooKeeper/KeeperException.h
+++ b/src/Common/ZooKeeper/KeeperException.h
@@ -24,6 +24,9 @@ public:
static void check(Coordination::Error code, const Coordination::Requests & requests, const Coordination::Responses & responses);
KeeperMultiException(Coordination::Error code, const Coordination::Requests & requests, const Coordination::Responses & responses);
+
+private:
+ KeeperMultiException(Coordination::Error code, size_t failed_op_index_, const Coordination::Requests & requests_, const Coordination::Responses & responses_);
};
size_t getFailedOpIndex(Coordination::Error code, const Coordination::Responses & responses);
diff --git a/src/Common/ZooKeeper/TestKeeper.cpp b/src/Common/ZooKeeper/TestKeeper.cpp
index dabc0ae4eef..f46c3f814a9 100644
--- a/src/Common/ZooKeeper/TestKeeper.cpp
+++ b/src/Common/ZooKeeper/TestKeeper.cpp
@@ -42,9 +42,9 @@ static void processWatchesImpl(const String & path, TestKeeper::Watches & watche
auto it = watches.find(watch_response.path);
if (it != watches.end())
{
- for (auto & callback : it->second)
+ for (const auto & callback : it->second)
if (callback)
- callback(watch_response);
+ (*callback)(watch_response);
watches.erase(it);
}
@@ -55,9 +55,9 @@ static void processWatchesImpl(const String & path, TestKeeper::Watches & watche
it = list_watches.find(watch_list_response.path);
if (it != list_watches.end())
{
- for (auto & callback : it->second)
+ for (const auto & callback : it->second)
if (callback)
- callback(watch_list_response);
+ (*callback)(watch_list_response);
list_watches.erase(it);
}
@@ -177,7 +177,7 @@ struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest
requests.push_back(std::make_shared<TestKeeperCheckRequest>(*concrete_request_check));
}
else
- throw Exception("Illegal command as part of multi ZooKeeper request", Error::ZBADARGUMENTS);
+ throw Exception::fromMessage(Error::ZBADARGUMENTS, "Illegal command as part of multi ZooKeeper request");
}
}
@@ -389,7 +389,7 @@ std::pair TestKeeperListRequest::process(TestKeeper::Containe
{
auto path_prefix = path;
if (path_prefix.empty())
- throw Exception("Logical error: path cannot be empty", Error::ZSESSIONEXPIRED);
+ throw Exception::fromMessage(Error::ZSESSIONEXPIRED, "Logical error: path cannot be empty");
if (path_prefix.back() != '/')
path_prefix += '/';
@@ -587,11 +587,11 @@ void TestKeeper::processingThread()
? list_watches
: watches;
- watches_type[info.request->getPath()].emplace_back(std::move(info.watch));
+ watches_type[info.request->getPath()].insert(info.watch);
}
else if (response->error == Error::ZNONODE && dynamic_cast<const TestKeeperExistsRequest *>(info.request.get()))
{
- watches[info.request->getPath()].emplace_back(std::move(info.watch));
+ watches[info.request->getPath()].insert(info.watch);
}
}
@@ -634,13 +634,13 @@ void TestKeeper::finalize(const String &)
response.state = EXPIRED_SESSION;
response.error = Error::ZSESSIONEXPIRED;
- for (auto & callback : path_watch.second)
+ for (const auto & callback : path_watch.second)
{
if (callback)
{
try
{
- callback(response);
+ (*callback)(response);
}
catch (...)
{
@@ -677,7 +677,7 @@ void TestKeeper::finalize(const String &)
response.error = Error::ZSESSIONEXPIRED;
try
{
- info.watch(response);
+ (*info.watch)(response);
}
catch (...)
{
@@ -705,10 +705,10 @@ void TestKeeper::pushRequest(RequestInfo && request)
std::lock_guard lock(push_request_mutex);
if (expired)
- throw Exception("Session expired", Error::ZSESSIONEXPIRED);
+ throw Exception::fromMessage(Error::ZSESSIONEXPIRED, "Session expired");
if (!requests_queue.tryPush(std::move(request), args.operation_timeout_ms))
- throw Exception("Cannot push request to queue within operation timeout", Error::ZOPERATIONTIMEOUT);
+ throw Exception::fromMessage(Error::ZOPERATIONTIMEOUT, "Cannot push request to queue within operation timeout");
}
catch (...)
{
@@ -756,7 +756,7 @@ void TestKeeper::remove(
void TestKeeper::exists(
const String & path,
ExistsCallback callback,
- WatchCallback watch)
+ WatchCallbackPtr watch)
{
TestKeeperExistsRequest request;
request.path = path;
@@ -771,7 +771,7 @@ void TestKeeper::exists(
void TestKeeper::get(
const String & path,
GetCallback callback,
- WatchCallback watch)
+ WatchCallbackPtr watch)
{
TestKeeperGetRequest request;
request.path = path;
@@ -804,7 +804,7 @@ void TestKeeper::list(
const String & path,
ListRequestType list_request_type,
ListCallback callback,
- WatchCallback watch)
+ WatchCallbackPtr watch)
{
TestKeeperFilteredListRequest request;
request.path = path;
diff --git a/src/Common/ZooKeeper/TestKeeper.h b/src/Common/ZooKeeper/TestKeeper.h
index 991f689394e..3a760270207 100644
--- a/src/Common/ZooKeeper/TestKeeper.h
+++ b/src/Common/ZooKeeper/TestKeeper.h
@@ -59,12 +59,12 @@ public:
void exists(
const String & path,
ExistsCallback callback,
- WatchCallback watch) override;
+ WatchCallbackPtr watch) override;
void get(
const String & path,
GetCallback callback,
- WatchCallback watch) override;
+ WatchCallbackPtr watch) override;
void set(
const String & path,
@@ -76,7 +76,7 @@ public:
const String & path,
ListRequestType list_request_type,
ListCallback callback,
- WatchCallback watch) override;
+ WatchCallbackPtr watch) override;
void check(
const String & path,
@@ -117,7 +117,7 @@ public:
using Container = std::map<std::string, Node>;
- using WatchCallbacks = std::vector<WatchCallback>;
+ using WatchCallbacks = std::unordered_set<WatchCallbackPtr>;
using Watches = std::map;
private:
@@ -127,7 +127,7 @@ private:
{
TestKeeperRequestPtr request;
ResponseCallback callback;
- WatchCallback watch;
+ WatchCallbackPtr watch;
clock::time_point time;
};
diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp
index 10331a4e410..d4127b76a9d 100644
--- a/src/Common/ZooKeeper/ZooKeeper.cpp
+++ b/src/Common/ZooKeeper/ZooKeeper.cpp
@@ -51,7 +51,7 @@ const int CreateMode::EphemeralSequential = 3;
static void check(Coordination::Error code, const std::string & path)
{
if (code != Coordination::Error::ZOK)
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
}
@@ -64,7 +64,7 @@ void ZooKeeper::init(ZooKeeperArgs args_)
if (args.implementation == "zookeeper")
{
if (args.hosts.empty())
- throw KeeperException("No hosts passed to ZooKeeper constructor.", Coordination::Error::ZBADARGUMENTS);
+ throw KeeperException::fromMessage(Coordination::Error::ZBADARGUMENTS, "No hosts passed to ZooKeeper constructor.");
Coordination::ZooKeeper::Nodes nodes;
nodes.reserve(args.hosts.size());
@@ -107,9 +107,9 @@ void ZooKeeper::init(ZooKeeperArgs args_)
{
/// For DNS errors we throw exception with ZCONNECTIONLOSS code, so it will be considered as hardware error, not user error
if (dns_error)
- throw KeeperException("Cannot resolve any of provided ZooKeeper hosts due to DNS error", Coordination::Error::ZCONNECTIONLOSS);
+ throw KeeperException::fromMessage(Coordination::Error::ZCONNECTIONLOSS, "Cannot resolve any of provided ZooKeeper hosts due to DNS error");
else
- throw KeeperException("Cannot use any of provided ZooKeeper nodes", Coordination::Error::ZCONNECTIONLOSS);
+ throw KeeperException::fromMessage(Coordination::Error::ZCONNECTIONLOSS, "Cannot use any of provided ZooKeeper nodes");
}
impl = std::make_unique<Coordination::ZooKeeper>(nodes, args, zk_log, [this](size_t node_idx, const Coordination::ZooKeeper::Node & node)
@@ -145,11 +145,11 @@ void ZooKeeper::init(ZooKeeperArgs args_)
auto future = asyncExists("/");
auto res = future.wait_for(std::chrono::milliseconds(args.operation_timeout_ms));
if (res != std::future_status::ready)
- throw KeeperException("Cannot check if zookeeper root exists.", Coordination::Error::ZOPERATIONTIMEOUT);
+ throw KeeperException::fromMessage(Coordination::Error::ZOPERATIONTIMEOUT, "Cannot check if zookeeper root exists.");
auto code = future.get().error;
if (!(code == Coordination::Error::ZOK || code == Coordination::Error::ZNONODE))
- throw KeeperException(code, "/");
+ throw KeeperException::fromPath(code, "/");
if (code == Coordination::Error::ZNONODE)
throw KeeperException(Coordination::Error::ZNONODE, "ZooKeeper root doesn't exist. You should create root node {} before start.", args.chroot);
@@ -212,7 +212,7 @@ static Coordination::WatchCallback callbackForEvent(const EventPtr & watch)
Coordination::Error ZooKeeper::getChildrenImpl(const std::string & path, Strings & res,
Coordination::Stat * stat,
- Coordination::WatchCallback watch_callback,
+ Coordination::WatchCallbackPtr watch_callback,
Coordination::ListRequestType list_request_type)
{
auto future_result = asyncTryGetChildrenNoThrow(path, watch_callback, list_request_type);
@@ -250,6 +250,13 @@ Strings ZooKeeper::getChildrenWatch(const std::string & path, Coordination::Stat
return res;
}
+Strings ZooKeeper::getChildrenWatch(const std::string & path, Coordination::Stat * stat, Coordination::WatchCallbackPtr watch_callback, Coordination::ListRequestType list_request_type)
+{
+ Strings res;
+ check(tryGetChildrenWatch(path, res, stat, watch_callback, list_request_type), path);
+ return res;
+}
+
Coordination::Error ZooKeeper::tryGetChildren(
const std::string & path,
Strings & res,
@@ -257,12 +264,9 @@ Coordination::Error ZooKeeper::tryGetChildren(
const EventPtr & watch,
Coordination::ListRequestType list_request_type)
{
- Coordination::Error code = getChildrenImpl(path, res, stat, callbackForEvent(watch), list_request_type);
-
- if (!(code == Coordination::Error::ZOK || code == Coordination::Error::ZNONODE))
- throw KeeperException(code, path);
-
- return code;
+ return tryGetChildrenWatch(path, res, stat,
+ watch ? std::make_shared<Coordination::WatchCallback>(callbackForEvent(watch)) : Coordination::WatchCallbackPtr{},
+ list_request_type);
}
Coordination::Error ZooKeeper::tryGetChildrenWatch(
@@ -271,11 +275,23 @@ Coordination::Error ZooKeeper::tryGetChildrenWatch(
Coordination::Stat * stat,
Coordination::WatchCallback watch_callback,
Coordination::ListRequestType list_request_type)
+{
+ return tryGetChildrenWatch(path, res, stat,
+ watch_callback ? std::make_shared<Coordination::WatchCallback>(watch_callback) : Coordination::WatchCallbackPtr{},
+ list_request_type);
+}
+
+Coordination::Error ZooKeeper::tryGetChildrenWatch(
+ const std::string & path,
+ Strings & res,
+ Coordination::Stat * stat,
+ Coordination::WatchCallbackPtr watch_callback,
+ Coordination::ListRequestType list_request_type)
{
Coordination::Error code = getChildrenImpl(path, res, stat, watch_callback, list_request_type);
if (!(code == Coordination::Error::ZOK || code == Coordination::Error::ZNONODE))
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
return code;
}
@@ -314,7 +330,7 @@ Coordination::Error ZooKeeper::tryCreate(const std::string & path, const std::st
code == Coordination::Error::ZNONODE ||
code == Coordination::Error::ZNODEEXISTS ||
code == Coordination::Error::ZNOCHILDRENFOREPHEMERALS))
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
return code;
}
@@ -333,7 +349,7 @@ void ZooKeeper::createIfNotExists(const std::string & path, const std::string &
if (code == Coordination::Error::ZOK || code == Coordination::Error::ZNODEEXISTS)
return;
else
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
}
void ZooKeeper::createAncestors(const std::string & path)
@@ -355,14 +371,14 @@ void ZooKeeper::createAncestors(const std::string & path)
/// The parent node doesn't exist. Save the current node and try with the parent
last_pos = current_node.rfind('/');
if (last_pos == std::string::npos || last_pos == 0)
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
pending_nodes.emplace_back(std::move(current_node));
current_node = path.substr(0, last_pos);
}
else if (code == Coordination::Error::ZOK || code == Coordination::Error::ZNODEEXISTS)
break;
else
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
}
for (const std::string & pending : pending_nodes | std::views::reverse)
@@ -423,7 +439,7 @@ Coordination::Error ZooKeeper::tryRemove(const std::string & path, int32_t versi
code == Coordination::Error::ZNONODE ||
code == Coordination::Error::ZBADVERSION ||
code == Coordination::Error::ZNOTEMPTY))
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
return code;
}
@@ -457,7 +473,7 @@ bool ZooKeeper::existsWatch(const std::string & path, Coordination::Stat * stat,
Coordination::Error code = existsImpl(path, stat, watch_callback);
if (!(code == Coordination::Error::ZOK || code == Coordination::Error::ZNONODE))
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
return code != Coordination::Error::ZNONODE;
}
@@ -524,7 +540,7 @@ bool ZooKeeper::tryGetWatch(
Coordination::Error code = getImpl(path, res, stat, watch_callback);
if (!(code == Coordination::Error::ZOK || code == Coordination::Error::ZNONODE))
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
if (return_code)
*return_code = code;
@@ -566,7 +582,7 @@ void ZooKeeper::createOrUpdate(const std::string & path, const std::string & dat
create(path, data, mode);
}
else if (code != Coordination::Error::ZOK)
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
}
Coordination::Error ZooKeeper::trySet(const std::string & path, const std::string & data,
@@ -577,7 +593,7 @@ Coordination::Error ZooKeeper::trySet(const std::string & path, const std::strin
if (!(code == Coordination::Error::ZOK ||
code == Coordination::Error::ZNONODE ||
code == Coordination::Error::ZBADVERSION))
- throw KeeperException(code, path);
+ throw KeeperException::fromPath(code, path);
return code;
}
@@ -756,7 +772,7 @@ bool ZooKeeper::tryRemoveChildrenRecursive(const std::string & path, bool probab
continue;
}
- throw KeeperException(res.error, batch[i]);
+ throw KeeperException::fromPath(res.error, batch[i]);
}
}
return removed_as_expected;
@@ -814,7 +830,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
do
{
/// Use getData insteand of exists to avoid watch leak.
- impl->get(path, callback, watch);
+ impl->get(path, callback, std::make_shared<Coordination::WatchCallback>(watch));
if (!state->event.tryWait(1000))
continue;
@@ -823,7 +839,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
return true;
if (state->code)
- throw KeeperException(static_cast(state->code.load(std::memory_order_seq_cst)), path);
+ throw KeeperException::fromPath(static_cast<Coordination::Error>(state->code.load(std::memory_order_seq_cst)), path);
if (state->event_type == Coordination::DELETED)
return true;
@@ -844,7 +860,7 @@ void ZooKeeper::handleEphemeralNodeExistence(const std::string & path, const std
{
auto code = tryRemove(path, stat.version);
if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNONODE)
- throw Coordination::Exception(code, path);
+ throw Coordination::Exception::fromPath(code, path);
}
else
{
@@ -893,7 +909,7 @@ std::future ZooKeeper::asyncCreate(const std::stri
auto callback = [promise, path](const Coordination::CreateResponse & response) mutable
{
if (response.error != Coordination::Error::ZOK)
- promise->set_exception(std::make_exception_ptr(KeeperException(path, response.error)));
+ promise->set_exception(std::make_exception_ptr(KeeperException::fromPath(response.error, path)));
else
promise->set_value(response);
};
@@ -924,12 +940,13 @@ std::future ZooKeeper::asyncGet(const std::string & p
auto callback = [promise, path](const Coordination::GetResponse & response) mutable
{
if (response.error != Coordination::Error::ZOK)
- promise->set_exception(std::make_exception_ptr(KeeperException(path, response.error)));
+ promise->set_exception(std::make_exception_ptr(KeeperException::fromPath(response.error, path)));
else
promise->set_value(response);
};
- impl->get(path, std::move(callback), watch_callback);
+ impl->get(path, std::move(callback),
+ watch_callback ? std::make_shared<Coordination::WatchCallback>(watch_callback) : Coordination::WatchCallbackPtr{});
return future;
}
@@ -943,7 +960,8 @@ std::future ZooKeeper::asyncTryGetNoThrow(const std::
promise->set_value(response);
};
- impl->get(path, std::move(callback), watch_callback);
+ impl->get(path, std::move(callback),
+ watch_callback ? std::make_shared<Coordination::WatchCallback>(watch_callback) : Coordination::WatchCallbackPtr{});
return future;
}
@@ -956,7 +974,7 @@ std::future ZooKeeper::asyncTryGet(const std::string
auto callback = [promise, path](const Coordination::GetResponse & response) mutable
{
if (response.error != Coordination::Error::ZOK && response.error != Coordination::Error::ZNONODE)
- promise->set_exception(std::make_exception_ptr(KeeperException(path, response.error)));
+ promise->set_exception(std::make_exception_ptr(KeeperException::fromPath(response.error, path)));
else
promise->set_value(response);
};
@@ -973,12 +991,13 @@ std::future ZooKeeper::asyncExists(const std::stri
auto callback = [promise, path](const Coordination::ExistsResponse & response) mutable
{
if (response.error != Coordination::Error::ZOK && response.error != Coordination::Error::ZNONODE)
- promise->set_exception(std::make_exception_ptr(KeeperException(path, response.error)));
+ promise->set_exception(std::make_exception_ptr(KeeperException::fromPath(response.error, path)));
else
promise->set_value(response);
};
- impl->exists(path, std::move(callback), watch_callback);
+ impl->exists(path, std::move(callback),
+ watch_callback ? std::make_shared<Coordination::WatchCallback>(watch_callback) : Coordination::WatchCallbackPtr{});
return future;
}
@@ -992,7 +1011,8 @@ std::future ZooKeeper::asyncTryExistsNoThrow(const
promise->set_value(response);
};
- impl->exists(path, std::move(callback), watch_callback);
+ impl->exists(path, std::move(callback),
+ watch_callback ? std::make_shared<Coordination::WatchCallback>(watch_callback) : Coordination::WatchCallbackPtr{});
return future;
}
@@ -1004,7 +1024,7 @@ std::future ZooKeeper::asyncSet(const std::string & p
auto callback = [promise, path](const Coordination::SetResponse & response) mutable
{
if (response.error != Coordination::Error::ZOK)
- promise->set_exception(std::make_exception_ptr(KeeperException(path, response.error)));
+ promise->set_exception(std::make_exception_ptr(KeeperException::fromPath(response.error, path)));
else
promise->set_value(response);
};
@@ -1037,17 +1057,18 @@ std::future ZooKeeper::asyncGetChildren(
auto callback = [promise, path](const Coordination::ListResponse & response) mutable
{
if (response.error != Coordination::Error::ZOK)
- promise->set_exception(std::make_exception_ptr(KeeperException(path, response.error)));
+ promise->set_exception(std::make_exception_ptr(KeeperException::fromPath(response.error, path)));
else
promise->set_value(response);
};
- impl->list(path, list_request_type, std::move(callback), watch_callback);
+ impl->list(path, list_request_type, std::move(callback),
+ watch_callback ? std::make_shared<Coordination::WatchCallback>(watch_callback) : Coordination::WatchCallbackPtr{});
return future;
}
std::future ZooKeeper::asyncTryGetChildrenNoThrow(
- const std::string & path, Coordination::WatchCallback watch_callback, Coordination::ListRequestType list_request_type)
+ const std::string & path, Coordination::WatchCallbackPtr watch_callback, Coordination::ListRequestType list_request_type)
{
auto promise = std::make_shared<std::promise<Coordination::ListResponse>>();
auto future = promise->get_future();
@@ -1070,7 +1091,7 @@ ZooKeeper::asyncTryGetChildren(const std::string & path, Coordination::ListReque
auto callback = [promise, path](const Coordination::ListResponse & response) mutable
{
if (response.error != Coordination::Error::ZOK && response.error != Coordination::Error::ZNONODE)
- promise->set_exception(std::make_exception_ptr(KeeperException(path, response.error)));
+ promise->set_exception(std::make_exception_ptr(KeeperException::fromPath(response.error, path)));
else
promise->set_value(response);
};
@@ -1087,7 +1108,7 @@ std::future ZooKeeper::asyncRemove(const std::stri
auto callback = [promise, path](const Coordination::RemoveResponse & response) mutable
{
if (response.error != Coordination::Error::ZOK)
- promise->set_exception(std::make_exception_ptr(KeeperException(path, response.error)));
+ promise->set_exception(std::make_exception_ptr(KeeperException::fromPath(response.error, path)));
else
promise->set_value(response);
};
@@ -1108,7 +1129,7 @@ std::future ZooKeeper::asyncTryRemove(const std::s
&& response.error != Coordination::Error::ZBADVERSION
&& response.error != Coordination::Error::ZNOTEMPTY)
{
- promise->set_exception(std::make_exception_ptr(KeeperException(path, response.error)));
+ promise->set_exception(std::make_exception_ptr(KeeperException::fromPath(response.error, path)));
}
else
promise->set_value(response);
@@ -1243,11 +1264,16 @@ size_t getFailedOpIndex(Coordination::Error exception_code, const Coordination::
}
-KeeperMultiException::KeeperMultiException(Coordination::Error exception_code, const Coordination::Requests & requests_, const Coordination::Responses & responses_)
- : KeeperException("Transaction failed", exception_code),
- requests(requests_), responses(responses_), failed_op_index(getFailedOpIndex(exception_code, responses))
+KeeperMultiException::KeeperMultiException(Coordination::Error exception_code, size_t failed_op_index_, const Coordination::Requests & requests_, const Coordination::Responses & responses_)
+ : KeeperException(exception_code, "Transaction failed: Op #{}, path", failed_op_index_),
+ requests(requests_), responses(responses_), failed_op_index(failed_op_index_)
+{
+ addMessage(getPathForFirstFailedOp());
+}
+
+KeeperMultiException::KeeperMultiException(Coordination::Error exception_code, const Coordination::Requests & requests_, const Coordination::Responses & responses_)
+ : KeeperMultiException(exception_code, getFailedOpIndex(exception_code, responses_), requests_, responses_)
{
- addMessage("Op #" + std::to_string(failed_op_index) + ", path: " + getPathForFirstFailedOp());
}
diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h
index c2cba7ef401..38c81369c73 100644
--- a/src/Common/ZooKeeper/ZooKeeper.h
+++ b/src/Common/ZooKeeper/ZooKeeper.h
@@ -333,6 +333,11 @@ public:
Coordination::WatchCallback watch_callback,
Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
+ Strings getChildrenWatch(const std::string & path,
+ Coordination::Stat * stat,
+ Coordination::WatchCallbackPtr watch_callback,
+ Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
+
using MultiGetChildrenResponse = MultiReadResponses;
using MultiTryGetChildrenResponse = MultiReadResponses;
@@ -369,6 +374,13 @@ public:
Coordination::WatchCallback watch_callback,
Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
+ Coordination::Error tryGetChildrenWatch(
+ const std::string & path,
+ Strings & res,
+ Coordination::Stat * stat,
+ Coordination::WatchCallbackPtr watch_callback,
+ Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
+
template <typename TIter>
MultiTryGetChildrenResponse
tryGetChildren(TIter start, TIter end, Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL)
@@ -474,7 +486,7 @@ public:
/// Like the previous one but don't throw any exceptions on future.get()
FutureGetChildren asyncTryGetChildrenNoThrow(
const std::string & path,
- Coordination::WatchCallback watch_callback = {},
+ Coordination::WatchCallbackPtr watch_callback = {},
Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL);
using FutureSet = std::future;
@@ -545,7 +557,7 @@ private:
const std::string & path,
Strings & res,
Coordination::Stat * stat,
- Coordination::WatchCallback watch_callback,
+ Coordination::WatchCallbackPtr watch_callback,
Coordination::ListRequestType list_request_type);
Coordination::Error multiImpl(const Coordination::Requests & requests, Coordination::Responses & responses);
Coordination::Error existsImpl(const std::string & path, Coordination::Stat * stat_, Coordination::WatchCallback watch_callback);
diff --git a/src/Common/ZooKeeper/ZooKeeperArgs.cpp b/src/Common/ZooKeeper/ZooKeeperArgs.cpp
index 4c73b9ffc6d..5d01294e9b0 100644
--- a/src/Common/ZooKeeper/ZooKeeperArgs.cpp
+++ b/src/Common/ZooKeeper/ZooKeeperArgs.cpp
@@ -36,7 +36,7 @@ ZooKeeperArgs::ZooKeeperArgs(const Poco::Util::AbstractConfiguration & config, c
}
if (session_timeout_ms < 0 || operation_timeout_ms < 0 || connection_timeout_ms < 0)
- throw KeeperException("Timeout cannot be negative", Coordination::Error::ZBADARGUMENTS);
+ throw KeeperException::fromMessage(Coordination::Error::ZBADARGUMENTS, "Timeout cannot be negative");
/// init get_priority_load_balancing
get_priority_load_balancing.hostname_differences.resize(hosts.size());
@@ -63,7 +63,7 @@ void ZooKeeperArgs::initFromKeeperServerSection(const Poco::Util::AbstractConfig
auto tcp_port_secure = config.getString(key);
if (tcp_port_secure.empty())
- throw KeeperException("Empty tcp_port_secure in config file", Coordination::Error::ZBADARGUMENTS);
+ throw KeeperException::fromMessage(Coordination::Error::ZBADARGUMENTS, "Empty tcp_port_secure in config file");
}
bool secure{false};
@@ -81,7 +81,7 @@ void ZooKeeperArgs::initFromKeeperServerSection(const Poco::Util::AbstractConfig
}
if (tcp_port.empty())
- throw KeeperException("No tcp_port or tcp_port_secure in config file", Coordination::Error::ZBADARGUMENTS);
+ throw KeeperException::fromMessage(Coordination::Error::ZBADARGUMENTS, "No tcp_port or tcp_port_secure in config file");
if (auto coordination_key = std::string{config_name} + ".coordination_settings";
config.has(coordination_key))
diff --git a/src/Common/ZooKeeper/ZooKeeperCommon.cpp b/src/Common/ZooKeeper/ZooKeeperCommon.cpp
index e88d66e5444..8341199cd1e 100644
--- a/src/Common/ZooKeeper/ZooKeeperCommon.cpp
+++ b/src/Common/ZooKeeper/ZooKeeperCommon.cpp
@@ -461,8 +461,7 @@ void ZooKeeperErrorResponse::readImpl(ReadBuffer & in)
Coordination::read(read_error, in);
if (read_error != error)
- throw Exception(fmt::format("Error code in ErrorResponse ({}) doesn't match error code in header ({})", read_error, error),
- Error::ZMARSHALLINGERROR);
+ throw Exception(Error::ZMARSHALLINGERROR, "Error code in ErrorResponse ({}) doesn't match error code in header ({})", read_error, error);
}
void ZooKeeperErrorResponse::writeImpl(WriteBuffer & out) const
@@ -534,7 +533,7 @@ ZooKeeperMultiRequest::ZooKeeperMultiRequest(const Requests & generic_requests,
requests.push_back(std::make_shared(*concrete_request_list));
}
else
- throw Exception("Illegal command as part of multi ZooKeeper request", Error::ZBADARGUMENTS);
+ throw Exception::fromMessage(Error::ZBADARGUMENTS, "Illegal command as part of multi ZooKeeper request");
}
}
@@ -577,9 +576,9 @@ void ZooKeeperMultiRequest::readImpl(ReadBuffer & in)
if (done)
{
if (op_num != OpNum::Error)
- throw Exception("Unexpected op_num received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Unexpected op_num received at the end of results for multi transaction");
if (error != -1)
- throw Exception("Unexpected error value received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Unexpected error value received at the end of results for multi transaction");
break;
}
@@ -588,7 +587,7 @@ void ZooKeeperMultiRequest::readImpl(ReadBuffer & in)
requests.push_back(request);
if (in.eof())
- throw Exception("Not enough results received for multi transaction", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Not enough results received for multi transaction");
}
}
@@ -621,7 +620,7 @@ void ZooKeeperMultiResponse::readImpl(ReadBuffer & in)
Coordination::read(op_error, in);
if (done)
- throw Exception("Not enough results received for multi transaction", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Not enough results received for multi transaction");
/// op_num == -1 is special for multi transaction.
/// For unknown reason, error code is duplicated in header and in response body.
@@ -657,11 +656,11 @@ void ZooKeeperMultiResponse::readImpl(ReadBuffer & in)
Coordination::read(error_read, in);
if (!done)
- throw Exception("Too many results received for multi transaction", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Too many results received for multi transaction");
if (op_num != OpNum::Error)
- throw Exception("Unexpected op_num received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Unexpected op_num received at the end of results for multi transaction");
if (error_read != -1)
- throw Exception("Unexpected error value received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Unexpected error value received at the end of results for multi transaction");
}
}
diff --git a/src/Common/ZooKeeper/ZooKeeperCommon.h b/src/Common/ZooKeeper/ZooKeeperCommon.h
index e4b2cc97744..5b662c7f4c1 100644
--- a/src/Common/ZooKeeper/ZooKeeperCommon.h
+++ b/src/Common/ZooKeeper/ZooKeeperCommon.h
@@ -163,7 +163,7 @@ struct ZooKeeperWatchResponse final : WatchResponse, ZooKeeperResponse
OpNum getOpNum() const override
{
chassert(false);
- throw Exception("OpNum for watch response doesn't exist", Error::ZRUNTIMEINCONSISTENCY);
+ throw Exception::fromMessage(Error::ZRUNTIMEINCONSISTENCY, "OpNum for watch response doesn't exist");
}
void fillLogElements(LogElements & elems, size_t idx) const override;
@@ -214,7 +214,7 @@ struct ZooKeeperCloseResponse final : ZooKeeperResponse
{
void readImpl(ReadBuffer &) override
{
- throw Exception("Received response for close request", Error::ZRUNTIMEINCONSISTENCY);
+ throw Exception::fromMessage(Error::ZRUNTIMEINCONSISTENCY, "Received response for close request");
}
void writeImpl(WriteBuffer &) const override {}
diff --git a/src/Common/ZooKeeper/ZooKeeperConstants.cpp b/src/Common/ZooKeeper/ZooKeeperConstants.cpp
index 9bb9c7b0488..7fffea1d08d 100644
--- a/src/Common/ZooKeeper/ZooKeeperConstants.cpp
+++ b/src/Common/ZooKeeper/ZooKeeperConstants.cpp
@@ -33,7 +33,7 @@ static const std::unordered_set VALID_OPERATIONS =
OpNum getOpNum(int32_t raw_op_num)
{
if (!VALID_OPERATIONS.contains(raw_op_num))
- throw Exception("Operation " + std::to_string(raw_op_num) + " is unknown", Error::ZUNIMPLEMENTED);
+ throw Exception(Error::ZUNIMPLEMENTED, "Operation {} is unknown", raw_op_num);
 return static_cast<OpNum>(raw_op_num);
}
diff --git a/src/Common/ZooKeeper/ZooKeeperIO.cpp b/src/Common/ZooKeeper/ZooKeeperIO.cpp
index 2911d511254..6a51ffb36fa 100644
--- a/src/Common/ZooKeeper/ZooKeeperIO.cpp
+++ b/src/Common/ZooKeeper/ZooKeeperIO.cpp
@@ -62,10 +62,10 @@ void read(std::string & s, ReadBuffer & in)
}
if (size < 0)
- throw Exception("Negative size while reading string from ZooKeeper", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Negative size while reading string from ZooKeeper");
if (size > MAX_STRING_OR_ARRAY_SIZE)
- throw Exception("Too large string size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Too large string size while reading from ZooKeeper");
s.resize(size);
size_t read_bytes = in.read(s.data(), size);
diff --git a/src/Common/ZooKeeper/ZooKeeperIO.h b/src/Common/ZooKeeper/ZooKeeperIO.h
index 81b56a02e27..83973c1ae22 100644
--- a/src/Common/ZooKeeper/ZooKeeperIO.h
+++ b/src/Common/ZooKeeper/ZooKeeperIO.h
@@ -62,7 +62,7 @@ void read(std::array & s, ReadBuffer & in)
int32_t size = 0;
read(size, in);
if (size != N)
- throw Exception("Unexpected array size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Unexpected array size while reading from ZooKeeper");
in.readStrict(s.data(), N);
}
@@ -72,9 +72,9 @@ void read(std::vector & arr, ReadBuffer & in)
int32_t size = 0;
read(size, in);
if (size < 0)
- throw Exception("Negative size while reading array from ZooKeeper", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Negative size while reading array from ZooKeeper");
if (size > MAX_STRING_OR_ARRAY_SIZE)
- throw Exception("Too large array size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);
+ throw Exception::fromMessage(Error::ZMARSHALLINGERROR, "Too large array size while reading from ZooKeeper");
arr.resize(size);
for (auto & elem : arr)
read(elem, in);
diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp
index d84d5fa3a69..886522687bd 100644
--- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp
+++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp
@@ -289,7 +289,7 @@ static void removeRootPath(String & path, const String & chroot)
return;
if (path.size() <= chroot.size())
- throw Exception(Error::ZDATAINCONSISTENCY, "Received path is not longer than chroot");
+ throw Exception::fromMessage(Error::ZDATAINCONSISTENCY, "Received path is not longer than chroot");
path = path.substr(chroot.size());
}
@@ -387,7 +387,7 @@ void ZooKeeper::connect(
Poco::Timespan connection_timeout)
{
if (nodes.empty())
- throw Exception(Error::ZBADARGUMENTS, "No nodes passed to ZooKeeper constructor");
+ throw Exception::fromMessage(Error::ZBADARGUMENTS, "No nodes passed to ZooKeeper constructor");
static constexpr size_t num_tries = 3;
bool connected = false;
@@ -479,8 +479,6 @@ void ZooKeeper::connect(
if (!connected)
{
WriteBufferFromOwnString message;
-
- message << "All connection tries failed while connecting to ZooKeeper. nodes: ";
bool first = true;
for (const auto & node : nodes)
{
@@ -496,7 +494,7 @@ void ZooKeeper::connect(
}
message << fail_reasons.str() << "\n";
- throw Exception(Error::ZCONNECTIONLOSS, message.str());
+ throw Exception(Error::ZCONNECTIONLOSS, "All connection tries failed while connecting to ZooKeeper. nodes: {}", message.str());
}
else
{
@@ -543,7 +541,7 @@ void ZooKeeper::receiveHandshake()
/// It's better for faster failover than just connection drop.
/// Implemented in clickhouse-keeper.
if (protocol_version_read == KEEPER_PROTOCOL_VERSION_CONNECTION_REJECT)
- throw Exception(Error::ZCONNECTIONLOSS,
+ throw Exception::fromMessage(Error::ZCONNECTIONLOSS,
"Keeper server rejected the connection during the handshake. "
"Possibly it's overloaded, doesn't see leader or stale");
else
@@ -784,9 +782,9 @@ void ZooKeeper::receiveEvent()
}
else
{
- for (auto & callback : it->second)
+ for (const auto & callback : it->second)
if (callback)
- callback(watch_response); /// NOTE We may process callbacks not under mutex.
+ (*callback)(watch_response); /// NOTE We may process callbacks not under mutex.
CurrentMetrics::sub(CurrentMetrics::ZooKeeperWatch, it->second.size());
watches.erase(it);
@@ -800,7 +798,7 @@ void ZooKeeper::receiveEvent()
auto it = operations.find(xid);
if (it == operations.end())
- throw Exception("Received response for unknown xid " + DB::toString(xid), Error::ZRUNTIMEINCONSISTENCY);
+ throw Exception(Error::ZRUNTIMEINCONSISTENCY, "Received response for unknown xid {}", xid);
/// After this point, we must invoke callback, that we've grabbed from 'operations'.
/// Invariant: all callbacks are invoked either in case of success or in case of error.
@@ -848,13 +846,17 @@ void ZooKeeper::receiveEvent()
if (add_watch)
{
- CurrentMetrics::add(CurrentMetrics::ZooKeeperWatch);
/// The key of wathces should exclude the args.chroot
String req_path = request_info.request->getPath();
removeRootPath(req_path, args.chroot);
std::lock_guard lock(watches_mutex);
- watches[req_path].emplace_back(std::move(request_info.watch));
+ auto & callbacks = watches[req_path];
+ if (request_info.watch && *request_info.watch)
+ {
+ if (callbacks.insert(request_info.watch).second)
+ CurrentMetrics::add(CurrentMetrics::ZooKeeperWatch);
+ }
}
}
@@ -1004,14 +1006,14 @@ void ZooKeeper::finalize(bool error_send, bool error_receive, const String & rea
response.state = EXPIRED_SESSION;
response.error = Error::ZSESSIONEXPIRED;
- for (auto & callback : path_watches.second)
+ for (const auto & callback : path_watches.second)
{
watch_callback_count += 1;
if (callback)
{
try
{
- callback(response);
+ (*callback)(response);
}
catch (...)
{
@@ -1056,7 +1058,7 @@ void ZooKeeper::finalize(bool error_send, bool error_receive, const String & rea
response.error = Error::ZSESSIONEXPIRED;
try
{
- info.watch(response);
+ (*info.watch)(response);
}
catch (...)
{
@@ -1088,9 +1090,9 @@ void ZooKeeper::pushRequest(RequestInfo && info)
{
info.request->xid = next_xid.fetch_add(1);
if (info.request->xid == CLOSE_XID)
- throw Exception(Error::ZSESSIONEXPIRED, "xid equal to close_xid");
+ throw Exception::fromMessage(Error::ZSESSIONEXPIRED, "xid equal to close_xid");
if (info.request->xid < 0)
- throw Exception(Error::ZSESSIONEXPIRED, "XID overflow");
+ throw Exception::fromMessage(Error::ZSESSIONEXPIRED, "XID overflow");
 if (auto * multi_request = dynamic_cast<ZooKeeperMultiRequest *>(info.request.get()))
{
@@ -1104,7 +1106,7 @@ void ZooKeeper::pushRequest(RequestInfo && info)
if (!requests_queue.tryPush(std::move(info), args.operation_timeout_ms))
{
if (requests_queue.isFinished())
- throw Exception(Error::ZSESSIONEXPIRED, "Session expired");
+ throw Exception::fromMessage(Error::ZSESSIONEXPIRED, "Session expired");
throw Exception(Error::ZOPERATIONTIMEOUT, "Cannot push request to queue within operation timeout of {} ms", args.operation_timeout_ms);
}
@@ -1234,7 +1236,7 @@ void ZooKeeper::remove(
void ZooKeeper::exists(
const String & path,
ExistsCallback callback,
- WatchCallback watch)
+ WatchCallbackPtr watch)
{
ZooKeeperExistsRequest request;
request.path = path;
@@ -1252,7 +1254,7 @@ void ZooKeeper::exists(
void ZooKeeper::get(
const String & path,
GetCallback callback,
- WatchCallback watch)
+ WatchCallbackPtr watch)
{
ZooKeeperGetRequest request;
request.path = path;
@@ -1291,13 +1293,13 @@ void ZooKeeper::list(
const String & path,
ListRequestType list_request_type,
ListCallback callback,
- WatchCallback watch)
+ WatchCallbackPtr watch)
{
 std::shared_ptr<ZooKeeperListRequest> request{nullptr};
if (!isFeatureEnabled(KeeperFeatureFlag::FILTERED_LIST))
{
if (list_request_type != ListRequestType::ALL)
- throw Exception(Error::ZBADARGUMENTS, "Filtered list request type cannot be used because it's not supported by the server");
+ throw Exception::fromMessage(Error::ZBADARGUMENTS, "Filtered list request type cannot be used because it's not supported by the server");
 request = std::make_shared<ZooKeeperListRequest>();
}
@@ -1312,7 +1314,8 @@ void ZooKeeper::list(
RequestInfo request_info;
request_info.callback = [callback](const Response & response) { callback(dynamic_cast(response)); };
- request_info.watch = watch;
+ if (watch)
+ request_info.watch = std::move(watch);
request_info.request = std::move(request);
pushRequest(std::move(request_info));
@@ -1380,7 +1383,7 @@ void ZooKeeper::multi(
ZooKeeperMultiRequest request(requests, default_acls);
if (request.getOpNum() == OpNum::MultiRead && !isFeatureEnabled(KeeperFeatureFlag::MULTI_READ))
- throw Exception(Error::ZBADARGUMENTS, "MultiRead request type cannot be used because it's not supported by the server");
+ throw Exception::fromMessage(Error::ZBADARGUMENTS, "MultiRead request type cannot be used because it's not supported by the server");
RequestInfo request_info;
 request_info.request = std::make_shared<ZooKeeperMultiRequest>(std::move(request));
@@ -1502,7 +1505,7 @@ void ZooKeeper::setupFaultDistributions()
void ZooKeeper::checkSessionDeadline() const
{
if (unlikely(hasReachedDeadline()))
- throw Exception(Error::ZSESSIONEXPIRED, "Session expired (force expiry client-side)");
+ throw Exception::fromMessage(Error::ZSESSIONEXPIRED, "Session expired (force expiry client-side)");
}
bool ZooKeeper::hasReachedDeadline() const
@@ -1513,13 +1516,13 @@ bool ZooKeeper::hasReachedDeadline() const
void ZooKeeper::maybeInjectSendFault()
{
if (unlikely(inject_setup.test() && send_inject_fault && send_inject_fault.value()(thread_local_rng)))
- throw Exception(Error::ZSESSIONEXPIRED, "Session expired (fault injected on recv)");
+ throw Exception::fromMessage(Error::ZSESSIONEXPIRED, "Session expired (fault injected on send)");
}
void ZooKeeper::maybeInjectRecvFault()
{
if (unlikely(inject_setup.test() && recv_inject_fault && recv_inject_fault.value()(thread_local_rng)))
- throw Exception(Error::ZSESSIONEXPIRED, "Session expired (fault injected on recv)");
+ throw Exception::fromMessage(Error::ZSESSIONEXPIRED, "Session expired (fault injected on recv)");
}
void ZooKeeper::maybeInjectSendSleep()
diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.h b/src/Common/ZooKeeper/ZooKeeperImpl.h
index 6a6f34b8b3d..56e199352e9 100644
--- a/src/Common/ZooKeeper/ZooKeeperImpl.h
+++ b/src/Common/ZooKeeper/ZooKeeperImpl.h
@@ -154,12 +154,12 @@ public:
void exists(
const String & path,
ExistsCallback callback,
- WatchCallback watch) override;
+ WatchCallbackPtr watch) override;
void get(
const String & path,
GetCallback callback,
- WatchCallback watch) override;
+ WatchCallbackPtr watch) override;
void set(
const String & path,
@@ -171,7 +171,7 @@ public:
const String & path,
ListRequestType list_request_type,
ListCallback callback,
- WatchCallback watch) override;
+ WatchCallbackPtr watch) override;
void check(
const String & path,
@@ -252,7 +252,7 @@ private:
{
ZooKeeperRequestPtr request;
ResponseCallback callback;
- WatchCallback watch;
+ WatchCallbackPtr watch;
clock::time_point time;
};
@@ -267,7 +267,7 @@ private:
Operations operations TSA_GUARDED_BY(operations_mutex);
std::mutex operations_mutex;
- using WatchCallbacks = std::vector<WatchCallback>;
+ using WatchCallbacks = std::unordered_set<WatchCallbackPtr>;
 using Watches = std::map<String, WatchCallbacks>;
Watches watches TSA_GUARDED_BY(watches_mutex);
diff --git a/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h b/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h
index 9d02d674010..4887e896e9b 100644
--- a/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h
+++ b/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h
@@ -29,7 +29,7 @@ public:
if (distribution(rndgen) || must_fail_before_op)
{
must_fail_before_op = false;
- throw zkutil::KeeperException("Fault injection before operation", Coordination::Error::ZSESSIONEXPIRED);
+ throw zkutil::KeeperException::fromMessage(Coordination::Error::ZSESSIONEXPIRED, "Fault injection before operation");
}
}
void afterOperation()
@@ -37,7 +37,7 @@ public:
if (distribution(rndgen) || must_fail_after_op)
{
must_fail_after_op = false;
- throw zkutil::KeeperException("Fault injection after operation", Coordination::Error::ZOPERATIONTIMEOUT);
+ throw zkutil::KeeperException::fromMessage(Coordination::Error::ZOPERATIONTIMEOUT, "Fault injection after operation");
}
}
@@ -263,7 +263,7 @@ public:
auto code = tryCreate(path, data, mode, path_created);
if (code != Coordination::Error::ZOK)
- throw zkutil::KeeperException(code, path);
+ throw zkutil::KeeperException::fromPath(code, path);
return path_created;
}
@@ -327,7 +327,7 @@ public:
if (code == Coordination::Error::ZOK || code == Coordination::Error::ZNODEEXISTS)
return;
- throw zkutil::KeeperException(code, path);
+ throw zkutil::KeeperException::fromPath(code, path);
}
Coordination::Responses multi(const Coordination::Requests & requests)
@@ -507,8 +507,8 @@ private:
++calls_total;
if (!keeper)
- throw zkutil::KeeperException(
- "Session is considered to be expired due to fault injection", Coordination::Error::ZSESSIONEXPIRED);
+ throw zkutil::KeeperException::fromMessage(Coordination::Error::ZSESSIONEXPIRED,
+ "Session is considered to be expired due to fault injection");
if constexpr (inject_failure_before_op)
{
diff --git a/src/Common/ZooKeeper/examples/zkutil_test_commands_new_lib.cpp b/src/Common/ZooKeeper/examples/zkutil_test_commands_new_lib.cpp
index 021f444386a..fe38b486ada 100644
--- a/src/Common/ZooKeeper/examples/zkutil_test_commands_new_lib.cpp
+++ b/src/Common/ZooKeeper/examples/zkutil_test_commands_new_lib.cpp
@@ -5,6 +5,7 @@
#include
#include
#include
+#include
#include
@@ -72,13 +73,15 @@ try
//event.set();
},
- [](const WatchResponse & response)
- {
- if (response.error != Coordination::Error::ZOK)
- std::cerr << "Watch (get) on /test, Error: " << errorMessage(response.error) << '\n';
- else
- std::cerr << "Watch (get) on /test, path: " << response.path << ", type: " << response.type << '\n';
- });
+ std::make_shared<WatchCallback>(
+ [](const WatchResponse & response)
+ {
+ if (response.error != Coordination::Error::ZOK)
+ std::cerr << "Watch (get) on /test, Error: " << errorMessage(response.error) << '\n';
+ else
+ std::cerr << "Watch (get) on /test, path: " << response.path << ", type: " << response.type << '\n';
+ })
+ );
//event.wait();
@@ -114,13 +117,15 @@ try
//event.set();
},
- [](const WatchResponse & response)
- {
- if (response.error != Coordination::Error::ZOK)
- std::cerr << "Watch (list) on /, Error: " << errorMessage(response.error) << '\n';
- else
- std::cerr << "Watch (list) on /, path: " << response.path << ", type: " << response.type << '\n';
- });
+ std::make_shared<WatchCallback>(
+ [](const WatchResponse & response)
+ {
+ if (response.error != Coordination::Error::ZOK)
+ std::cerr << "Watch (list) on /, Error: " << errorMessage(response.error) << '\n';
+ else
+ std::cerr << "Watch (list) on /, path: " << response.path << ", type: " << response.type << '\n';
+ })
+ );
//event.wait();
@@ -136,13 +141,15 @@ try
//event.set();
},
- [](const WatchResponse & response)
- {
- if (response.error != Coordination::Error::ZOK)
- std::cerr << "Watch (exists) on /test, Error: " << errorMessage(response.error) << '\n';
- else
- std::cerr << "Watch (exists) on /test, path: " << response.path << ", type: " << response.type << '\n';
- });
+ std::make_shared<WatchCallback>(
+ [](const WatchResponse & response)
+ {
+ if (response.error != Coordination::Error::ZOK)
+ std::cerr << "Watch (exists) on /test, Error: " << errorMessage(response.error) << '\n';
+ else
+ std::cerr << "Watch (exists) on /test, path: " << response.path << ", type: " << response.type << '\n';
+ })
+ );
//event.wait();
diff --git a/src/Common/examples/hashes_test.cpp b/src/Common/examples/hashes_test.cpp
index eccf7c9b2e6..99479e79302 100644
--- a/src/Common/examples/hashes_test.cpp
+++ b/src/Common/examples/hashes_test.cpp
@@ -94,7 +94,8 @@ int main(int, char **)
{
SipHash hash;
hash.update(strings[i].data(), strings[i].size());
- hash.get128(&hashes[i * 16]);
+ const auto hashed_value = getSipHash128AsArray(hash);
+ memcpy(&hashes[i * 16], hashed_value.data(), hashed_value.size());
}
watch.stop();
diff --git a/src/Common/getHashOfLoadedBinary.cpp b/src/Common/getHashOfLoadedBinary.cpp
index b81300b8536..6487bcd4f1c 100644
--- a/src/Common/getHashOfLoadedBinary.cpp
+++ b/src/Common/getHashOfLoadedBinary.cpp
@@ -37,8 +37,7 @@ SipHash getHashOfLoadedBinary()
std::string getHashOfLoadedBinaryHex()
{
SipHash hash = getHashOfLoadedBinary();
- UInt128 checksum;
- hash.get128(checksum);
+ const auto checksum = hash.get128();
return getHexUIntUppercase(checksum);
}
diff --git a/src/Common/randomSeed.cpp b/src/Common/randomSeed.cpp
index 9f0ffd8a6c7..e1aa56fa811 100644
--- a/src/Common/randomSeed.cpp
+++ b/src/Common/randomSeed.cpp
@@ -39,7 +39,7 @@ DB::UInt64 randomSeed()
#if defined(__linux__)
struct utsname sysinfo;
if (uname(&sysinfo) == 0)
- hash.update(sysinfo);
+ hash.update(sysinfo);
#endif
return hash.get64();
diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp
index 99c28674273..1fd67aef72c 100644
--- a/src/Coordination/KeeperDispatcher.cpp
+++ b/src/Coordination/KeeperDispatcher.cpp
@@ -687,7 +687,7 @@ int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
}
if (response->error != Coordination::Error::ZOK)
- promise->set_exception(std::make_exception_ptr(zkutil::KeeperException("SessionID request failed with error", response->error)));
+ promise->set_exception(std::make_exception_ptr(zkutil::KeeperException::fromMessage(response->error, "SessionID request failed with error")));
promise->set_value(session_id_response.session_id);
};
diff --git a/src/Core/Defines.h b/src/Core/Defines.h
index efe14b93a3d..3039f0a67cf 100644
--- a/src/Core/Defines.h
+++ b/src/Core/Defines.h
@@ -1,6 +1,7 @@
#pragma once
#include
+#include <base/unit.h>
#define DBMS_DEFAULT_PORT 9000
#define DBMS_DEFAULT_SECURE_PORT 9440
@@ -64,6 +65,21 @@
/// Max depth of hierarchical dictionary
#define DBMS_HIERARCHICAL_DICTIONARY_MAX_DEPTH 1000
+/// Default maximum (total and entry) sizes and policies of various caches
+static constexpr auto DEFAULT_UNCOMPRESSED_CACHE_MAX_SIZE = 0_MiB;
+static constexpr auto DEFAULT_UNCOMPRESSED_CACHE_POLICY = "SLRU";
+static constexpr auto DEFAULT_MARK_CACHE_MAX_SIZE = 5368_MiB;
+static constexpr auto DEFAULT_MARK_CACHE_POLICY = "SLRU";
+static constexpr auto DEFAULT_INDEX_UNCOMPRESSED_CACHE_MAX_SIZE = 0_MiB;
+static constexpr auto DEFAULT_INDEX_MARK_CACHE_MAX_SIZE = 0_MiB;
+static constexpr auto DEFAULT_MMAP_CACHE_MAX_SIZE = 1_KiB; /// chosen by rolling dice
+static constexpr auto DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_SIZE = 128_MiB;
+static constexpr auto DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_ENTRIES = 10'000;
+static constexpr auto DEFAULT_QUERY_CACHE_MAX_SIZE = 1_GiB;
+static constexpr auto DEFAULT_QUERY_CACHE_MAX_ENTRIES = 1024uz;
+static constexpr auto DEFAULT_QUERY_CACHE_MAX_ENTRY_SIZE_IN_BYTES = 1_MiB;
+static constexpr auto DEFAULT_QUERY_CACHE_MAX_ENTRY_SIZE_IN_ROWS = 30'000'000uz;
+
/// Query profiler cannot work with sanitizers.
/// Sanitizers are using quick "frame walking" stack unwinding (this implies -fno-omit-frame-pointer)
/// And they do unwinding frequently (on every malloc/free, thread/mutex operations, etc).
diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h
index 7678e8c3f24..c50633f11c8 100644
--- a/src/Core/ServerSettings.h
+++ b/src/Core/ServerSettings.h
@@ -2,6 +2,7 @@
#include
+#include <Core/Defines.h>