Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-26 17:41:59 +00:00)

Merge branch 'master' into fix-crash-in-totals-structure

This commit is contained in: commit f421b4bf13

LICENSE (4 changes)
@ -1,4 +1,4 @@
Copyright 2016-2022 ClickHouse, Inc.
Copyright 2016-2023 ClickHouse, Inc.

Apache License
Version 2.0, January 2004
@ -188,7 +188,7 @@ Copyright 2016-2022 ClickHouse, Inc.
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2016-2022 ClickHouse, Inc.
Copyright 2016-2023 ClickHouse, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -10,7 +10,7 @@
#include <base/MoveOrCopyIfThrow.h>

/** Pool for limited size objects that cannot be used from different threads simultaneously.
  * The main use case is to have fixed size of objects that can be reused in difference threads during their lifetime
  * The main use case is to have fixed size of objects that can be reused in different threads during their lifetime
  * and have to be initialized on demand.
  * Two main properties of pool are allocated objects size and borrowed objects size.
  * Allocated objects size is size of objects that are currently allocated by the pool.
@ -8,16 +8,13 @@ set (SRCS
|
||||
getPageSize.cpp
|
||||
getThreadId.cpp
|
||||
JSON.cpp
|
||||
LineReader.cpp
|
||||
mremap.cpp
|
||||
phdr_cache.cpp
|
||||
preciseExp10.cpp
|
||||
setTerminalEcho.cpp
|
||||
shift10.cpp
|
||||
sleep.cpp
|
||||
terminalColors.cpp
|
||||
errnoToString.cpp
|
||||
ReplxxLineReader.cpp
|
||||
StringRef.cpp
|
||||
safeExit.cpp
|
||||
throwError.cpp
|
||||
@ -40,11 +37,6 @@ else ()
|
||||
target_compile_definitions(common PUBLIC WITH_COVERAGE=0)
|
||||
endif ()
|
||||
|
||||
# FIXME: move libraries for line reading out from base
|
||||
if (TARGET ch_rust::skim)
|
||||
target_link_libraries(common PUBLIC ch_rust::skim)
|
||||
endif()
|
||||
|
||||
target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")
|
||||
|
||||
if (OS_DARWIN AND NOT USE_STATIC_LIBRARIES)
|
||||
|
@ -1,28 +0,0 @@
|
||||
#include <base/setTerminalEcho.h>
|
||||
#include <base/errnoToString.h>
|
||||
#include <stdexcept>
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
#include <termios.h>
|
||||
#include <unistd.h>
|
||||
|
||||
|
||||
void setTerminalEcho(bool enable)
|
||||
{
|
||||
/// Obtain terminal attributes,
|
||||
/// toggle the ECHO flag
|
||||
/// and set them back.
|
||||
|
||||
struct termios tty{};
|
||||
|
||||
if (0 != tcgetattr(STDIN_FILENO, &tty))
|
||||
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString());
|
||||
|
||||
if (enable)
|
||||
tty.c_lflag |= ECHO;
|
||||
else
|
||||
tty.c_lflag &= ~ECHO;
|
||||
|
||||
if (0 != tcsetattr(STDIN_FILENO, TCSANOW, &tty))
|
||||
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString());
|
||||
}
|
@ -1,4 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
/// Enable or disable echoing of typed characters. Throws std::runtime_error on error.
|
||||
void setTerminalEcho(bool enable);
|
@ -102,6 +102,11 @@ elseif (ARCH_AMD64)
|
||||
SET(ENABLE_AVX512_FOR_SPEC_OP 0)
|
||||
endif()
|
||||
|
||||
# ClickHouse can be cross-compiled (e.g. on an ARM host for x86) but it is also possible to build ClickHouse on x86 w/o AVX for x86 w/
|
||||
# AVX. We only check that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary.
|
||||
# Therefore, use check_cxx_source_compiles (= does the code compile+link?) instead of check_cxx_source_runs (= does the code
|
||||
# compile+link+run).
|
||||
|
||||
set (TEST_FLAG "-mssse3")
|
||||
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
|
||||
check_cxx_source_compiles("
|
||||
|
contrib/cctz (vendored submodule, 2 changes)
@ -1 +1 @@
Subproject commit 5c8528fb35e89ee0b3a7157490423fba0d4dd7b5
Subproject commit 7c78edd52b4d65acc103c2f195818ffcabe6fe0d
@ -107,8 +107,11 @@ fi
|
||||
mv ./programs/clickhouse* /output
|
||||
[ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
|
||||
mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
|
||||
find . -name '*.so' -print -exec mv '{}' /output \;
|
||||
find . -name '*.so.*' -print -exec mv '{}' /output \;
|
||||
|
||||
# Exclude cargo build directory since it may have some shared libraries
|
||||
# (even though they are not required for the clickhouse binary)
|
||||
find . -name '*.so' -not -path '*/cargo/*' -print -exec mv '{}' /output \;
|
||||
find . -name '*.so.*' -not -path '*/cargo/*' -print -exec mv '{}' /output \;
|
||||
|
||||
prepare_combined_output () {
|
||||
local OUTPUT
|
||||
|
@ -80,7 +80,7 @@ do
|
||||
done
|
||||
|
||||
# if clickhouse user is defined - create it (user "default" already exists out of box)
|
||||
if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ]; then
|
||||
if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ] || [ "$CLICKHOUSE_ACCESS_MANAGEMENT" != "0" ]; then
|
||||
echo "$0: create new user '$CLICKHOUSE_USER' instead 'default'"
|
||||
cat <<EOT > /etc/clickhouse-server/users.d/default-user.xml
|
||||
<clickhouse>
|
||||
|
@ -51,7 +51,6 @@ function clone
|
||||
)
|
||||
|
||||
ls -lath ||:
|
||||
|
||||
}
|
||||
|
||||
function wget_with_retry
|
||||
@ -158,7 +157,7 @@ function fuzz
|
||||
mkdir -p /var/run/clickhouse-server
|
||||
|
||||
# NOTE: we use process substitution here to preserve keep $! as a pid of clickhouse-server
|
||||
clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>&1 | pigz > server.log.gz &
|
||||
clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > server.log 2>&1 &
|
||||
server_pid=$!
|
||||
|
||||
kill -0 $server_pid
|
||||
@ -263,7 +262,7 @@ quit
|
||||
if [ "$server_died" == 1 ]
|
||||
then
|
||||
# The server has died.
|
||||
if ! zgrep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log.gz > description.txt
|
||||
if ! grep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt
|
||||
then
|
||||
echo "Lost connection to server. See the logs." > description.txt
|
||||
fi
|
||||
@ -343,24 +342,28 @@ case "$stage" in
|
||||
time fuzz
|
||||
;&
|
||||
"report")
|
||||
|
||||
CORE_LINK=''
|
||||
if [ -f core.gz ]; then
|
||||
CORE_LINK='<a href="core.gz">core.gz</a>'
|
||||
fi
|
||||
|
||||
grep --text -F '<Fatal>' server.log > fatal.log ||:
|
||||
|
||||
pigz server.log
|
||||
|
||||
cat > report.html <<EOF ||:
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<style>
|
||||
body { font-family: "DejaVu Sans", "Noto Sans", Arial, sans-serif; background: #EEE; }
|
||||
h1 { margin-left: 10px; }
|
||||
th, td { border: 0; padding: 5px 10px 5px 10px; text-align: left; vertical-align: top; line-height: 1.5; background-color: #FFF;
|
||||
td { white-space: pre; font-family: Monospace, Courier New; }
|
||||
border: 0; box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.05), 0 8px 25px -5px rgba(0, 0, 0, 0.1); }
|
||||
th, td { border: 0; padding: 5px 10px 5px 10px; text-align: left; vertical-align: top; line-height: 1.5; background-color: #FFF; }
|
||||
td { white-space: pre; font-family: Monospace, Courier New; box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.05), 0 8px 25px -5px rgba(0, 0, 0, 0.1); }
|
||||
a { color: #06F; text-decoration: none; }
|
||||
a:hover, a:active { color: #F40; text-decoration: underline; }
|
||||
table { border: 0; }
|
||||
p.links a { padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-space: nowrap; box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.05), 0 8px 25px -5px rgba(0, 0, 0, 0.1); }
|
||||
th { cursor: pointer; }
|
||||
|
||||
</style>
|
||||
<title>AST Fuzzer for PR #${PR_TO_TEST} @ ${SHA_TO_TEST}</title>
|
||||
@ -385,7 +388,14 @@ th { cursor: pointer; }
|
||||
<tr>
|
||||
<td>AST Fuzzer</td>
|
||||
<td>$(cat status.txt)</td>
|
||||
<td style="white-space: pre;">$(clickhouse-local --input-format RawBLOB --output-format RawBLOB --query "SELECT encodeXMLComponent(*) FROM table" < description.txt)</td>
|
||||
<td>$(
|
||||
clickhouse-local --input-format RawBLOB --output-format RawBLOB --query "SELECT encodeXMLComponent(*) FROM table" < description.txt || cat description.txt
|
||||
)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td colspan="3" style="white-space: pre-wrap;">$(
|
||||
clickhouse-local --input-format RawBLOB --output-format RawBLOB --query "SELECT encodeXMLComponent(*) FROM table" < fatal.log || cat fatal.log
|
||||
)</td>
|
||||
</tr>
|
||||
</table>
|
||||
</body>
|
||||
|
@ -75,7 +75,7 @@ fi
|
||||
TEST_PATH=${1:-/usr/share/clickhouse-test}
|
||||
MINIO_DATA_PATH=${TEST_PATH}/queries/${QUERY_DIR}/data_minio
|
||||
|
||||
# Iterating over globs will cause redudant FILE variale to be a path to a file, not a filename
|
||||
# Iterating over globs will cause redundant FILE variable to be a path to a file, not a filename
|
||||
# shellcheck disable=SC2045
|
||||
for FILE in $(ls "${MINIO_DATA_PATH}"); do
|
||||
echo "$FILE";
|
||||
|
@ -11,31 +11,6 @@ set -x
|
||||
# core.COMM.PID-TID
|
||||
sysctl kernel.core_pattern='core.%e.%p-%P'
|
||||
|
||||
# Thread Fuzzer allows to check more permutations of possible thread scheduling
|
||||
# and find more potential issues.
|
||||
# Temporarily disable ThreadFuzzer with tsan because of https://github.com/google/sanitizers/issues/1540
|
||||
is_tsan_build=$(clickhouse local -q "select value like '% -fsanitize=thread %' from system.build_options where name='CXX_FLAGS'")
|
||||
if [ "$is_tsan_build" -eq "0" ]; then
|
||||
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
|
||||
export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
|
||||
export THREAD_FUZZER_SLEEP_TIME_US=100000
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
|
||||
fi
|
||||
|
||||
|
||||
function install_packages()
|
||||
{
|
||||
@ -54,7 +29,7 @@ function configure()
|
||||
|
||||
# we mount tests folder from repo to /usr/share
|
||||
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
|
||||
ln -s /usr/share/clickhouse-test/ci/download_release_packets.py /usr/bin/download_release_packets
|
||||
ln -s /usr/share/clickhouse-test/ci/download_release_packages.py /usr/bin/download_release_packages
|
||||
ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag
|
||||
|
||||
# avoid too slow startup
|
||||
@ -226,6 +201,31 @@ quit
|
||||
|
||||
install_packages package_folder
|
||||
|
||||
# Thread Fuzzer allows to check more permutations of possible thread scheduling
|
||||
# and find more potential issues.
|
||||
# Temporarily disable ThreadFuzzer with tsan because of https://github.com/google/sanitizers/issues/1540
|
||||
is_tsan_build=$(clickhouse local -q "select value like '% -fsanitize=thread %' from system.build_options where name='CXX_FLAGS'")
|
||||
if [ "$is_tsan_build" -eq "0" ]; then
|
||||
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
|
||||
export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
|
||||
export THREAD_FUZZER_SLEEP_TIME_US=100000
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
|
||||
fi
|
||||
|
||||
export ZOOKEEPER_FAULT_INJECTION=1
|
||||
configure
|
||||
|
||||
@ -360,10 +360,10 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
|
||||
echo "Clone previous release repository"
|
||||
git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository
|
||||
|
||||
echo "Download previous release server"
|
||||
echo "Download clickhouse-server from the previous release"
|
||||
mkdir previous_release_package_folder
|
||||
|
||||
echo $previous_release_tag | download_release_packets && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \
|
||||
echo $previous_release_tag | download_release_packages && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv
|
||||
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log
|
||||
@ -380,10 +380,10 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
|
||||
echo -e "Backward compatibility check: Failed to clone previous release tests\tFAIL" >> /test_output/test_results.tsv
|
||||
elif ! [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ]
|
||||
then
|
||||
echo -e "Backward compatibility check: Failed to download previous release packets\tFAIL" >> /test_output/test_results.tsv
|
||||
echo -e "Backward compatibility check: Failed to download previous release packages\tFAIL" >> /test_output/test_results.tsv
|
||||
else
|
||||
echo -e "Successfully cloned previous release tests\tOK" >> /test_output/test_results.tsv
|
||||
echo -e "Successfully downloaded previous release packets\tOK" >> /test_output/test_results.tsv
|
||||
echo -e "Successfully downloaded previous release packages\tOK" >> /test_output/test_results.tsv
|
||||
|
||||
# Uninstall current packages
|
||||
dpkg --remove clickhouse-client
|
||||
@ -447,7 +447,13 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
|
||||
|| echo -e 'Backward compatibility check: Test script failed\tFAIL' >> /test_output/test_results.tsv
|
||||
rm -rf tmp_stress_output
|
||||
|
||||
clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables"
|
||||
# We experienced deadlocks in this command in very rare cases. Let's debug it:
|
||||
timeout 10m clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables" ||
|
||||
(
|
||||
echo "thread apply all backtrace (on select tables count)" >> /test_output/gdb.log
|
||||
timeout 30m gdb -batch -ex 'thread apply all backtrace' -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log
|
||||
clickhouse stop --force
|
||||
)
|
||||
|
||||
stop 1
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.stress.log
|
||||
@ -455,7 +461,8 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
|
||||
# Start new server
|
||||
mv package_folder/clickhouse /usr/bin/
|
||||
mv package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||
export ZOOKEEPER_FAULT_INJECTION=1
|
||||
# Disable fault injections on start (we don't test them here, and it can lead to tons of requests in case of huge number of tables).
|
||||
export ZOOKEEPER_FAULT_INJECTION=0
|
||||
configure
|
||||
start 500
|
||||
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|
||||
@ -468,7 +475,7 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
|
||||
sleep 60
|
||||
|
||||
stop
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.clean.log
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.dirty.log
|
||||
|
||||
# Error messages (we should ignore some errors)
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/38643 ("Unknown index: idx.")
|
||||
@ -516,7 +523,7 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
|
||||
-e "MutateFromLogEntryTask" \
|
||||
-e "No connection to ZooKeeper, cannot get shared table ID" \
|
||||
-e "Session expired" \
|
||||
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
||||
/var/log/clickhouse-server/clickhouse-server.backward.dirty.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
||||
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
|
@ -14,9 +14,6 @@ def get_options(i, backward_compatibility_check):
|
||||
if 0 < i:
|
||||
options.append("--order=random")
|
||||
|
||||
if i % 3 == 1:
|
||||
options.append("--db-engine=Ordinary")
|
||||
|
||||
if i % 3 == 2 and not backward_compatibility_check:
|
||||
options.append(
|
||||
'''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i)
|
||||
|
@ -1,82 +0,0 @@
|
||||
# docker build -t clickhouse/testflows-runner .
|
||||
FROM ubuntu:20.04
|
||||
|
||||
# ARG for quick switch to a given ubuntu mirror
|
||||
ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
RUN apt-get update \
|
||||
&& env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
||||
ca-certificates \
|
||||
bash \
|
||||
btrfs-progs \
|
||||
e2fsprogs \
|
||||
iptables \
|
||||
xfsprogs \
|
||||
tar \
|
||||
pigz \
|
||||
wget \
|
||||
git \
|
||||
iproute2 \
|
||||
cgroupfs-mount \
|
||||
python3-pip \
|
||||
tzdata \
|
||||
libicu-dev \
|
||||
bsdutils \
|
||||
curl \
|
||||
liblua5.1-dev \
|
||||
luajit \
|
||||
libssl-dev \
|
||||
libcurl4-openssl-dev \
|
||||
gdb \
|
||||
&& rm -rf \
|
||||
/var/lib/apt/lists/* \
|
||||
/var/cache/debconf \
|
||||
/tmp/* \
|
||||
&& apt-get clean
|
||||
|
||||
ENV TZ=Europe/Moscow
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
RUN pip3 install urllib3 testflows==1.7.20 docker-compose==1.29.2 docker==5.0.0 dicttoxml kazoo tzlocal==2.1 pytz python-dateutil numpy
|
||||
|
||||
ENV DOCKER_CHANNEL stable
|
||||
ENV DOCKER_VERSION 20.10.6
|
||||
|
||||
# Architecture of the image when BuildKit/buildx is used
|
||||
ARG TARGETARCH
|
||||
|
||||
# Install docker
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& case $arch in \
|
||||
amd64) rarch=x86_64 ;; \
|
||||
arm64) rarch=aarch64 ;; \
|
||||
esac \
|
||||
&& set -eux \
|
||||
&& if ! wget -nv -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/${rarch}/docker-${DOCKER_VERSION}.tgz"; then \
|
||||
echo >&2 "error: failed to download 'docker-${DOCKER_VERSION}' from '${DOCKER_CHANNEL}' for '${rarch}'" \
|
||||
&& exit 1; \
|
||||
fi \
|
||||
&& tar --extract \
|
||||
--file docker.tgz \
|
||||
--strip-components 1 \
|
||||
--directory /usr/local/bin/ \
|
||||
&& rm docker.tgz \
|
||||
&& dockerd --version \
|
||||
&& docker --version
|
||||
|
||||
COPY modprobe.sh /usr/local/bin/modprobe
|
||||
COPY dockerd-entrypoint.sh /usr/local/bin/
|
||||
COPY process_testflows_result.py /usr/local/bin/
|
||||
|
||||
RUN set -x \
|
||||
&& addgroup --system dockremap \
|
||||
&& adduser --system dockremap \
|
||||
&& adduser dockremap dockremap \
|
||||
&& echo 'dockremap:165536:65536' >> /etc/subuid \
|
||||
&& echo 'dockremap:165536:65536' >> /etc/subgid
|
||||
|
||||
VOLUME /var/lib/docker
|
||||
EXPOSE 2375
|
||||
ENTRYPOINT ["dockerd-entrypoint.sh"]
|
||||
CMD ["sh", "-c", "python3 regression.py --no-color -o new-fails --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv; find * -type f | grep _instances | grep clickhouse-server | xargs -n1 tar -rvf clickhouse_logs.tar; gzip -9 clickhouse_logs.tar"]
|
@ -1,39 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
echo "Configure to use Yandex dockerhub-proxy"
|
||||
mkdir -p /etc/docker/
|
||||
cat > /etc/docker/daemon.json << EOF
|
||||
{
|
||||
"insecure-registries" : ["dockerhub-proxy.dockerhub-proxy-zone:5000"],
|
||||
"registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"]
|
||||
}
|
||||
EOF
|
||||
|
||||
# In case of test hung it is convenient to use pytest --pdb to debug it,
|
||||
# and on hung you can simply press Ctrl-C and it will spawn a python pdb,
|
||||
# but on SIGINT dockerd will exit, so ignore it to preserve the daemon.
|
||||
trap '' INT
|
||||
dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 &>/var/log/somefile &
|
||||
|
||||
set +e
|
||||
reties=0
|
||||
while true; do
|
||||
docker info &>/dev/null && break
|
||||
reties=$((reties+1))
|
||||
if [[ $reties -ge 100 ]]; then # 10 sec max
|
||||
echo "Can't start docker daemon, timeout exceeded." >&2
|
||||
exit 1;
|
||||
fi
|
||||
sleep 0.1
|
||||
done
|
||||
set -e
|
||||
|
||||
echo "Start tests"
|
||||
export CLICKHOUSE_TESTS_SERVER_BIN_PATH=/clickhouse
|
||||
export CLICKHOUSE_TESTS_CLIENT_BIN_PATH=/clickhouse
|
||||
export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=/clickhouse-config
|
||||
export CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH=/clickhouse-odbc-bridge
|
||||
|
||||
cd /ClickHouse/tests/testflows
|
||||
exec "$@"
|
@ -1,20 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -eu
|
||||
|
||||
# "modprobe" without modprobe
|
||||
# https://twitter.com/lucabruno/status/902934379835662336
|
||||
|
||||
# this isn't 100% fool-proof, but it'll have a much higher success rate than simply using the "real" modprobe
|
||||
|
||||
# Docker often uses "modprobe -va foo bar baz"
|
||||
# so we ignore modules that start with "-"
|
||||
for module; do
|
||||
if [ "${module#-}" = "$module" ]; then
|
||||
ip link show "$module" || true
|
||||
lsmod | grep "$module" || true
|
||||
fi
|
||||
done
|
||||
|
||||
# remove /usr/local/... from PATH so we can exec the real modprobe as a last resort
|
||||
export PATH='/usr/sbin:/usr/bin:/sbin:/bin'
|
||||
exec modprobe "$@"
|
@ -1,71 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import logging
|
||||
import argparse
|
||||
import csv
|
||||
import json
|
||||
|
||||
|
||||
def process_result(result_folder):
|
||||
json_path = os.path.join(result_folder, "results.json")
|
||||
if not os.path.exists(json_path):
|
||||
return "success", "No testflows in branch", None, []
|
||||
|
||||
test_binary_log = os.path.join(result_folder, "test.log")
|
||||
with open(json_path) as source:
|
||||
results = json.loads(source.read())
|
||||
|
||||
total_tests = 0
|
||||
total_ok = 0
|
||||
total_fail = 0
|
||||
total_other = 0
|
||||
test_results = []
|
||||
for test in results["tests"]:
|
||||
test_name = test["test"]["test_name"]
|
||||
test_result = test["result"]["result_type"].upper()
|
||||
test_time = str(test["result"]["message_rtime"])
|
||||
total_tests += 1
|
||||
if test_result == "OK":
|
||||
total_ok += 1
|
||||
elif test_result == "FAIL" or test_result == "ERROR":
|
||||
total_fail += 1
|
||||
else:
|
||||
total_other += 1
|
||||
|
||||
test_results.append((test_name, test_result, test_time))
|
||||
if total_fail != 0:
|
||||
status = "failure"
|
||||
else:
|
||||
status = "success"
|
||||
|
||||
description = "failed: {}, passed: {}, other: {}".format(
|
||||
total_fail, total_ok, total_other
|
||||
)
|
||||
return status, description, test_results, [json_path, test_binary_log]
|
||||
|
||||
|
||||
def write_results(results_file, status_file, results, status):
|
||||
with open(results_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerows(results)
|
||||
with open(status_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerow(status)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
parser = argparse.ArgumentParser(
|
||||
description="ClickHouse script for parsing results of Testflows tests"
|
||||
)
|
||||
parser.add_argument("--in-results-dir", default="./")
|
||||
parser.add_argument("--out-results-file", default="./test_results.tsv")
|
||||
parser.add_argument("--out-status-file", default="./check_status.tsv")
|
||||
args = parser.parse_args()
|
||||
|
||||
state, description, test_results, logs = process_result(args.in_results_dir)
|
||||
logging.info("Result parsed")
|
||||
status = (state, description)
|
||||
write_results(args.out_results_file, args.out_status_file, test_results, status)
|
||||
logging.info("Result written")
|
@ -34,7 +34,14 @@ SETTINGS
|
||||
[kafka_max_block_size = 0,]
|
||||
[kafka_skip_broken_messages = N,]
|
||||
[kafka_commit_every_batch = 0,]
|
||||
[kafka_thread_per_consumer = 0]
|
||||
[kafka_client_id = '',]
|
||||
[kafka_poll_timeout_ms = 0,]
|
||||
[kafka_poll_max_batch_size = 0,]
|
||||
[kafka_flush_interval_ms = 0,]
|
||||
[kafka_thread_per_consumer = 0,]
|
||||
[kafka_handle_error_mode = 'default',]
|
||||
[kafka_commit_on_select = false,]
|
||||
[kafka_max_rows_per_message = 1];
|
||||
```
|
||||
|
||||
Required parameters:
|
||||
@ -46,13 +53,20 @@ Required parameters:
|
||||
|
||||
Optional parameters:
|
||||
|
||||
- `kafka_row_delimiter` — Delimiter character, which ends the message.
|
||||
- `kafka_row_delimiter` — Delimiter character, which ends the message. **This setting is deprecated and is no longer used; it is left only for compatibility reasons.**
|
||||
- `kafka_schema` — Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
|
||||
- `kafka_num_consumers` — The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition, and must not be greater than the number of physical cores on the server where ClickHouse is deployed.
|
||||
- `kafka_max_block_size` — The maximum batch size (in messages) for poll (default: `max_block_size`).
|
||||
- `kafka_skip_broken_messages` — Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data).
|
||||
- `kafka_commit_every_batch` — Commit every consumed and handled batch instead of a single commit after writing a whole block (default: `0`).
|
||||
- `kafka_thread_per_consumer` — Provide independent thread for each consumer (default: `0`). When enabled, every consumer flush the data independently, in parallel (otherwise — rows from several consumers squashed to form one block).
|
||||
- `kafka_num_consumers` — The number of consumers per table. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition, and must not be greater than the number of physical cores on the server where ClickHouse is deployed. Default: `1`.
|
||||
- `kafka_max_block_size` — The maximum batch size (in messages) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
|
||||
- `kafka_skip_broken_messages` — Kafka message parser tolerance to schema-incompatible messages per block. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data). Default: `0`.
|
||||
- `kafka_commit_every_batch` — Commit every consumed and handled batch instead of a single commit after writing a whole block. Default: `0`.
|
||||
- `kafka_client_id` — Client identifier. Empty by default.
|
||||
- `kafka_poll_timeout_ms` — Timeout for single poll from Kafka. Default: [stream_poll_timeout_ms](../../../operations/settings/settings.md#stream_poll_timeout_ms).
|
||||
- `kafka_poll_max_batch_size` — Maximum amount of messages to be polled in a single Kafka poll. Default: [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).
|
||||
- `kafka_flush_interval_ms` — Timeout for flushing data from Kafka. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
|
||||
- `kafka_thread_per_consumer` — Provide an independent thread for each consumer. When enabled, every consumer flushes data independently and in parallel (otherwise, rows from several consumers are squashed to form one block). Default: `0`.
|
||||
- `kafka_handle_error_mode` — How to handle errors for Kafka engine. Possible values: default, stream.
|
||||
- `kafka_commit_on_select` — Commit messages when select query is made. Default: `false`.
|
||||
- `kafka_max_rows_per_message` — The maximum number of rows written in one kafka message for row-based formats. Default : `1`.
|
||||
|
||||
Examples:
|
||||
|
||||
@ -94,7 +108,7 @@ Do not use this method in new projects. If possible, switch old projects to the
|
||||
|
||||
``` sql
|
||||
Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
|
||||
[, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages])
|
||||
[, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_max_block_size, kafka_skip_broken_messages, kafka_commit_every_batch, kafka_client_id, kafka_poll_timeout_ms, kafka_poll_max_batch_size, kafka_flush_interval_ms, kafka_thread_per_consumer, kafka_handle_error_mode, kafka_commit_on_select, kafka_max_rows_per_message]);
|
||||
```
|
||||
|
||||
</details>
|
||||
@ -193,6 +207,14 @@ Example:
|
||||
- `_headers.name` — Array of message's headers keys.
|
||||
- `_headers.value` — Array of message's headers values.
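To illustrate, a minimal read that surfaces the new header columns alongside the long-standing `_topic` and `_key` virtual columns; the table name `kafka_events` is hypothetical:

```sql
-- Hypothetical table name; reading from a Kafka table consumes messages from the topic.
SELECT
    _topic,
    _key,
    _headers.name,
    _headers.value
FROM kafka_events
LIMIT 5;
```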
|
||||
|
||||
## Data formats support {#data-formats-support}
|
||||
|
||||
Kafka engine supports all [formats](../../../interfaces/formats.md) supported in ClickHouse.
|
||||
The number of rows in one Kafka message depends on whether the format is row-based or block-based:
|
||||
|
||||
- For row-based formats the number of rows in one Kafka message can be controlled by setting `kafka_max_rows_per_message`.
|
||||
- For block-based formats we cannot divide block into smaller parts, but the number of rows in one block can be controlled by general setting [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).
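A short sketch of the write path these settings describe, with hypothetical broker, topic, and table names; `kafka_max_rows_per_message` only takes effect here because `JSONEachRow` is a row-based format:

```sql
-- All names and endpoints below are placeholders.
CREATE TABLE kafka_out (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'localhost:9092',
         kafka_topic_list = 'events_out',
         kafka_group_name = 'events_out_group',
         kafka_format = 'JSONEachRow',
         kafka_max_rows_per_message = 100;

-- Each produced Kafka message will carry at most 100 rows.
INSERT INTO kafka_out SELECT number, toString(number) FROM numbers(1000);
```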
|
||||
|
||||
**See Also**
|
||||
|
||||
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
|
||||
|
@ -37,8 +37,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
[nats_max_block_size = N,]
|
||||
[nats_flush_interval_ms = N,]
|
||||
[nats_username = 'user',]
|
||||
[nats_password = 'password']
|
||||
[redis_password = 'clickhouse']
|
||||
[nats_password = 'password',]
|
||||
[nats_token = 'clickhouse',]
|
||||
[nats_startup_connect_tries = '5']
|
||||
[nats_max_rows_per_message = 1]
|
||||
```
|
||||
|
||||
Required parameters:
|
||||
@ -49,7 +51,7 @@ Required parameters:
|
||||
|
||||
Optional parameters:
|
||||
|
||||
- `nats_row_delimiter` – Delimiter character, which ends the message.
|
||||
- `nats_row_delimiter` – Delimiter character, which ends the message. **This setting is deprecated and is no longer used; it is left only for compatibility reasons.**
|
||||
- `nats_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
|
||||
- `nats_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient.
|
||||
- `nats_queue_group` – Name for queue group of NATS subscribers. Default is the table name.
|
||||
@ -57,11 +59,13 @@ Optional parameters:
|
||||
- `nats_reconnect_wait` – Amount of time in milliseconds to sleep between each reconnect attempt. Default: `5000`.
|
||||
- `nats_server_list` - Server list for connection. Can be specified to connect to NATS cluster.
|
||||
- `nats_skip_broken_messages` - NATS message parser tolerance to schema-incompatible messages per block. Default: `0`. If `nats_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data).
|
||||
- `nats_max_block_size` - Number of row collected by poll(s) for flushing data from NATS.
|
||||
- `nats_flush_interval_ms` - Timeout for flushing data read from NATS.
|
||||
- `nats_max_block_size` - Number of rows collected by poll(s) for flushing data from NATS. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
|
||||
- `nats_flush_interval_ms` - Timeout for flushing data read from NATS. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
|
||||
- `nats_username` - NATS username.
|
||||
- `nats_password` - NATS password.
|
||||
- `nats_token` - NATS auth token.
|
||||
- `nats_startup_connect_tries` - Number of connect tries at startup. Default: `5`.
|
||||
- `nats_max_rows_per_message` — The maximum number of rows written in one NATS message for row-based formats. Default: `1`.
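For example, a table using the token-based authentication and startup-retry settings listed above; the server address, subject, and token values are placeholders:

```sql
-- Placeholder endpoint, subject, and token.
CREATE TABLE nats_queue (key UInt64, value String)
ENGINE = NATS
SETTINGS nats_url = 'localhost:4222',
         nats_subjects = 'subject1',
         nats_format = 'JSONEachRow',
         nats_token = 'clickhouse',
         nats_startup_connect_tries = '5',
         nats_max_rows_per_message = 10;
```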
|
||||
|
||||
SSL connection:
|
||||
|
||||
@ -159,6 +163,14 @@ If you want to change the target table by using `ALTER`, we recommend disabling
|
||||
|
||||
## Virtual Columns {#virtual-columns}
|
||||
|
||||
- `_subject` - NATS message subject.
|
||||
- `_subject` - NATS message subject.
|
||||
|
||||
## Data formats support {#data-formats-support}
|
||||
|
||||
NATS engine supports all [formats](../../../interfaces/formats.md) supported in ClickHouse.
|
||||
The number of rows in one NATS message depends on whether the format is row-based or block-based:
|
||||
|
||||
- For row-based formats the number of rows in one NATS message can be controlled by setting `nats_max_rows_per_message`.
|
||||
- For block-based formats we cannot divide block into smaller parts, but the number of rows in one block can be controlled by general setting [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/nats/) <!--hide-->
|
||||
|
@ -37,8 +37,16 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
[rabbitmq_persistent = 0,]
|
||||
[rabbitmq_skip_broken_messages = N,]
|
||||
[rabbitmq_max_block_size = N,]
|
||||
[rabbitmq_flush_interval_ms = N]
|
||||
[rabbitmq_queue_settings_list = 'x-dead-letter-exchange=my-dlx,x-max-length=10,x-overflow=reject-publish']
|
||||
[rabbitmq_flush_interval_ms = N,]
|
||||
[rabbitmq_queue_settings_list = 'x-dead-letter-exchange=my-dlx,x-max-length=10,x-overflow=reject-publish',]
|
||||
[rabbitmq_queue_consume = false,]
|
||||
[rabbitmq_address = '',]
|
||||
[rabbitmq_vhost = '/',]
|
||||
[rabbitmq_queue_consume = false,]
|
||||
[rabbitmq_username = '',]
|
||||
[rabbitmq_password = '',]
|
||||
[rabbitmq_commit_on_select = false,]
|
||||
[rabbitmq_max_rows_per_message = 1]
|
||||
```
|
||||
|
||||
Required parameters:
|
||||
@ -49,19 +57,27 @@ Required parameters:
|
||||
|
||||
Optional parameters:
|
||||
|
||||
- `rabbitmq_exchange_type` – The type of RabbitMQ exchange: `direct`, `fanout`, `topic`, `headers`, `consistent_hash`. Default: `fanout`.
|
||||
- `rabbitmq_routing_key_list` – A comma-separated list of routing keys.
|
||||
- `rabbitmq_row_delimiter` – Delimiter character, which ends the message.
|
||||
- `rabbitmq_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
|
||||
- `rabbitmq_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient.
|
||||
- `rabbitmq_num_queues` – Total number of queues. Default: `1`. Increasing this number can significantly improve performance.
|
||||
- `rabbitmq_queue_base` - Specify a hint for queue names. Use cases of this setting are described below.
|
||||
- `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to dead letter exchange. By default dead letter exchange is not specified.
|
||||
- `rabbitmq_persistent` - If set to 1 (true), in insert query delivery mode will be set to 2 (marks messages as 'persistent'). Default: `0`.
|
||||
- `rabbitmq_skip_broken_messages` – RabbitMQ message parser tolerance to schema-incompatible messages per block. Default: `0`. If `rabbitmq_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data).
|
||||
- `rabbitmq_max_block_size`
|
||||
- `rabbitmq_flush_interval_ms`
|
||||
- `rabbitmq_queue_settings_list` - allows to set RabbitMQ settings when creating a queue. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting is enabled automatically for the queue.
|
||||
- `rabbitmq_exchange_type` – The type of RabbitMQ exchange: `direct`, `fanout`, `topic`, `headers`, `consistent_hash`. Default: `fanout`.
|
||||
- `rabbitmq_routing_key_list` – A comma-separated list of routing keys.
|
||||
- `rabbitmq_row_delimiter` – Delimiter character, which ends the message. **This setting is deprecated and is no longer used; it is left only for compatibility reasons.**
|
||||
- `rabbitmq_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
|
||||
- `rabbitmq_num_consumers` – The number of consumers per table. Specify more consumers if the throughput of one consumer is insufficient. Default: `1`
|
||||
- `rabbitmq_num_queues` – Total number of queues. Increasing this number can significantly improve performance. Default: `1`.
|
||||
- `rabbitmq_queue_base` - Specify a hint for queue names. Use cases of this setting are described below.
|
||||
- `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to dead letter exchange. By default dead letter exchange is not specified.
|
||||
- `rabbitmq_persistent` - If set to 1 (true), in insert query delivery mode will be set to 2 (marks messages as 'persistent'). Default: `0`.
|
||||
- `rabbitmq_skip_broken_messages` – RabbitMQ message parser tolerance to schema-incompatible messages per block. If `rabbitmq_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data). Default: `0`.
|
||||
- `rabbitmq_max_block_size` - Number of rows collected before flushing data from RabbitMQ. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
|
||||
- `rabbitmq_flush_interval_ms` - Timeout for flushing data from RabbitMQ. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
|
||||
- `rabbitmq_queue_settings_list` - allows to set RabbitMQ settings when creating a queue. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting is enabled automatically for the queue.
|
||||
- `rabbitmq_address` - Address for connection. Use either this setting or `rabbitmq_host_port`.
|
||||
- `rabbitmq_vhost` - RabbitMQ vhost. Default: `'/'`.
|
||||
- `rabbitmq_queue_consume` - Use user-defined queues and do not make any RabbitMQ setup: declaring exchanges, queues, bindings. Default: `false`.
|
||||
- `rabbitmq_username` - RabbitMQ username.
|
||||
- `rabbitmq_password` - RabbitMQ password.
|
||||
- `rabbitmq_commit_on_select` - Commit messages when select query is made. Default: `false`.
|
||||
- `rabbitmq_max_rows_per_message` — The maximum number of rows written in one RabbitMQ message for row-based formats. Default : `1`.
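As a sketch, a table that combines the connection-related settings above; the host, exchange, and credentials are placeholders:

```sql
-- Placeholder host, exchange, and credentials.
CREATE TABLE rabbitmq_queue (key UInt64, value String)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'localhost:5672',
         rabbitmq_exchange_name = 'exchange1',
         rabbitmq_format = 'JSONEachRow',
         rabbitmq_vhost = '/',
         rabbitmq_username = 'clickhouse',
         rabbitmq_password = 'secret',
         rabbitmq_num_consumers = 2;
```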
|
||||
|
||||
|
||||
SSL connection:
|
||||
|
||||
@ -166,11 +182,20 @@ Example:
|
||||
|
||||
## Virtual Columns {#virtual-columns}
|
||||
|
||||
- `_exchange_name` - RabbitMQ exchange name.
|
||||
- `_channel_id` - ChannelID, on which consumer, who received the message, was declared.
|
||||
- `_delivery_tag` - DeliveryTag of the received message. Scoped per channel.
|
||||
- `_redelivered` - `redelivered` flag of the message.
|
||||
- `_message_id` - messageID of the received message; non-empty if was set, when message was published.
|
||||
- `_timestamp` - timestamp of the received message; non-empty if was set, when message was published.
|
||||
- `_exchange_name` - RabbitMQ exchange name.
|
||||
- `_channel_id` - ChannelID, on which consumer, who received the message, was declared.
|
||||
- `_delivery_tag` - DeliveryTag of the received message. Scoped per channel.
|
||||
- `_redelivered` - `redelivered` flag of the message.
|
||||
- `_message_id` - messageID of the received message; non-empty if was set, when message was published.
|
||||
- `_timestamp` - timestamp of the received message; non-empty if was set, when message was published.
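For illustration, the virtual columns can be read together with ordinary columns; `rabbitmq_queue` is the hypothetical table from the sketch above:

```sql
-- Reading from a RabbitMQ table consumes messages from the bound queue.
SELECT
    _exchange_name,
    _channel_id,
    _delivery_tag,
    _redelivered,
    value
FROM rabbitmq_queue
LIMIT 5;
```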
|
||||
|
||||
## Data formats support {#data-formats-support}
|
||||
|
||||
RabbitMQ engine supports all [formats](../../../interfaces/formats.md) supported in ClickHouse.
|
||||
The number of rows in one RabbitMQ message depends on whether the format is row-based or block-based:
|
||||
|
||||
- For row-based formats the number of rows in one RabbitMQ message can be controlled by setting `rabbitmq_max_rows_per_message`.
|
||||
- For block-based formats we cannot divide block into smaller parts, but the number of rows in one block can be controlled by general setting [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).
|
||||
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/) <!--hide-->
|
||||
|
File diff suppressed because it is too large.

docs/en/interfaces/schema-inference.md (new file, 1573 lines): file diff suppressed because it is too large.
@ -127,6 +127,13 @@ Default value: 100000.
|
||||
|
||||
A large number of parts in a table reduces performance of ClickHouse queries and increases ClickHouse boot time. Most often this is a consequence of an incorrect design (mistakes when choosing a partitioning strategy - too small partitions).
|
||||
|
||||
## simultaneous_parts_removal_limit {#simultaneous-parts-removal-limit}
|
||||
|
||||
If there are a lot of outdated parts, the cleanup thread will try to delete up to `simultaneous_parts_removal_limit` parts during one iteration.
|
||||
`simultaneous_parts_removal_limit` set to `0` means unlimited.
|
||||
|
||||
Default value: 0.
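For instance, assuming this setting can be changed per table like other MergeTree-level settings, the limit could be tightened on an existing table (the table name is hypothetical):

```sql
-- Hypothetical table; caps one cleanup iteration at 100 outdated parts.
ALTER TABLE events MODIFY SETTING simultaneous_parts_removal_limit = 100;
```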
|
||||
|
||||
## replicated_deduplication_window {#replicated-deduplication-window}
|
||||
|
||||
The number of most recently inserted blocks for which ClickHouse Keeper stores hash sums to check for duplicates.
|
||||
|
@ -1011,6 +1011,12 @@ The default value is 7500.
|
||||
|
||||
The smaller the value, the more often data is flushed into the table. Setting the value too low leads to poor performance.
|
||||
|
||||
## stream_poll_timeout_ms {#stream_poll_timeout_ms}
|
||||
|
||||
Timeout for polling data from/to streaming storages.
|
||||
|
||||
Default value: 500.
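For example, the timeout can be raised for the current session (a minimal sketch; the value is in milliseconds):

```sql
SET stream_poll_timeout_ms = 1000;
```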
|
||||
|
||||
## load_balancing {#settings-load_balancing}
|
||||
|
||||
Specifies the algorithm of replicas selection that is used for distributed query processing.
|
||||
@ -3625,7 +3631,7 @@ z IPv4
|
||||
Controls making inferred types `Nullable` in schema inference for formats without information about nullability.
|
||||
If the setting is enabled, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference.
|
||||
|
||||
Default value: `false`.
|
||||
Default value: `true`.
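For example, assuming a `data.jsonl` file in `user_files_path` (as in the schema-inference examples elsewhere in this change), disabling the setting keeps the inferred columns non-Nullable:

```sql
-- With the setting disabled, inferred columns are not wrapped in Nullable.
DESC file('data.jsonl') SETTINGS schema_inference_make_columns_nullable = 0;
```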
|
||||
|
||||
## input_format_try_infer_integers {#input_format_try_infer_integers}
|
||||
|
||||
|
docs/en/operations/system-tables/schema_inference_cache.md (new file, 70 lines)
@ -0,0 +1,70 @@
|
||||
---
|
||||
slug: /en/operations/system-tables/schema_inference_cache
|
||||
---
|
||||
# Schema inference cache
|
||||
|
||||
Contains information about all cached file schemas.
|
||||
|
||||
Columns:
|
||||
- `storage` ([String](/docs/en/sql-reference/data-types/string.md)) — Storage name: File, URL, S3 or HDFS.
|
||||
- `source` ([String](/docs/en/sql-reference/data-types/string.md)) — File source.
|
||||
- `format` ([String](/docs/en/sql-reference/data-types/string.md)) — Format name.
|
||||
- `additional_format_info` ([String](/docs/en/sql-reference/data-types/string.md)) - Additional information required to identify the schema. For example, format specific settings.
|
||||
- `registration_time` ([DateTime](/docs/en/sql-reference/data-types/datetime.md)) — Timestamp when schema was added in cache.
|
||||
- `schema` ([String](/docs/en/sql-reference/data-types/string.md)) - Cached schema.
|
||||
|
||||
**Example**
|
||||
|
||||
Let's say we have a file `data.jsonl` with this content:
|
||||
```json
|
||||
{"id" : 1, "age" : 25, "name" : "Josh", "hobbies" : ["football", "cooking", "music"]}
|
||||
{"id" : 2, "age" : 19, "name" : "Alan", "hobbies" : ["tennis", "art"]}
|
||||
{"id" : 3, "age" : 32, "name" : "Lana", "hobbies" : ["fitness", "reading", "shopping"]}
|
||||
{"id" : 4, "age" : 47, "name" : "Brayan", "hobbies" : ["movies", "skydiving"]}
|
||||
```
|
||||
|
||||
:::tip
|
||||
Place `data.jsonl` in the `user_files_path` directory. You can find this by looking
|
||||
in your ClickHouse configuration files. The default is:
|
||||
```
|
||||
<user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
|
||||
```
|
||||
:::
|
||||
|
||||
Open `clickhouse-client` and run the `DESCRIBE` query:
|
||||
|
||||
```sql
|
||||
DESCRIBE file('data.jsonl') SETTINGS input_format_try_infer_integers=0;
|
||||
```
|
||||
|
||||
```response
|
||||
┌─name────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
|
||||
│ id │ Nullable(Float64) │ │ │ │ │ │
|
||||
│ age │ Nullable(Float64) │ │ │ │ │ │
|
||||
│ name │ Nullable(String) │ │ │ │ │ │
|
||||
│ hobbies │ Array(Nullable(String)) │ │ │ │ │ │
|
||||
└─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
Let's see the content of the `system.schema_inference_cache` table:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
FROM system.schema_inference_cache
|
||||
FORMAT Vertical
|
||||
```
|
||||
```response
|
||||
Row 1:
|
||||
──────
|
||||
storage: File
|
||||
source: /home/droscigno/user_files/data.jsonl
|
||||
format: JSONEachRow
|
||||
additional_format_info: schema_inference_hints=, max_rows_to_read_for_schema_inference=25000, schema_inference_make_columns_nullable=true, try_infer_integers=false, try_infer_dates=true, try_infer_datetimes=true, try_infer_numbers_from_strings=true, read_bools_as_numbers=true, try_infer_objects=false
|
||||
registration_time: 2022-12-29 17:49:52
|
||||
schema: id Nullable(Float64), age Nullable(Float64), name Nullable(String), hobbies Array(Nullable(String))
|
||||
```
|
||||
|
||||
|
||||
**See also**
|
||||
- [Automatic schema inference from input data](/docs/en/interfaces/schema-inference.md)
|
||||
|
@ -0,0 +1,41 @@
|
||||
---
|
||||
slug: /en/sql-reference/aggregate-functions/reference/grouparraylast
|
||||
sidebar_position: 110
|
||||
---
|
||||
|
||||
# groupArrayLast
|
||||
|
||||
Syntax: `groupArrayLast(max_size)(x)`
|
||||
|
||||
Creates an array of the last argument values.
For example, `groupArrayLast(1)(x)` is equivalent to `[anyLast(x)]`.
|
||||
|
||||
In some cases, you can still rely on the order of execution. This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
select groupArrayLast(2)(number+1) numbers from numbers(10)
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─numbers─┐
|
||||
│ [9,10] │
|
||||
└─────────┘
|
||||
```
|
||||
|
||||
Compared to `groupArray`:
|
||||
|
||||
```sql
|
||||
select groupArray(2)(number+1) numbers from numbers(10)
|
||||
```
|
||||
|
||||
```text
|
||||
┌─numbers─┐
|
||||
│ [1,2] │
|
||||
└─────────┘
|
||||
```
|
@ -32,6 +32,7 @@ ClickHouse-specific aggregate functions:
|
||||
- [topK](../../../sql-reference/aggregate-functions/reference/topk.md)
|
||||
- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md)
|
||||
- [groupArray](../../../sql-reference/aggregate-functions/reference/grouparray.md)
|
||||
- [groupArrayLast](../../../sql-reference/aggregate-functions/reference/grouparraylast.md)
|
||||
- [groupUniqArray](../../../sql-reference/aggregate-functions/reference/groupuniqarray.md)
|
||||
- [groupArrayInsertAt](../../../sql-reference/aggregate-functions/reference/grouparrayinsertat.md)
|
||||
- [groupArrayMovingAvg](../../../sql-reference/aggregate-functions/reference/grouparraymovingavg.md)
|
||||
|
@ -410,35 +410,35 @@ Converts a date with time to a certain fixed date, while preserving the time.
|
||||
|
||||
## toRelativeYearNum
|
||||
|
||||
Converts a date with time or date to the number of the year, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the year, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeQuarterNum
|
||||
|
||||
Converts a date with time or date to the number of the quarter, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the quarter, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeMonthNum
|
||||
|
||||
Converts a date with time or date to the number of the month, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the month, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeWeekNum
|
||||
|
||||
Converts a date with time or date to the number of the week, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the week, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeDayNum
|
||||
|
||||
Converts a date with time or date to the number of the day, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the day, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeHourNum
|
||||
|
||||
Converts a date with time or date to the number of the hour, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the hour, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeMinuteNum
|
||||
|
||||
Converts a date with time or date to the number of the minute, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the minute, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeSecondNum
|
||||
|
||||
Converts a date with time or date to the number of the second, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the second, starting from a certain fixed point in the past.
|
||||
|
||||
## toISOYear
|
||||
|
||||
@ -517,6 +517,154 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
|
||||
└────────────┴───────────┴───────────┴───────────┘
|
||||
```
|
||||
|
||||
## age
|
||||
|
||||
Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
|
||||
E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for `day` unit, 0 months for `month` unit, 0 years for `year` unit.
|
||||
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
age('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `second` (possible abbreviations: `ss`, `s`)
|
||||
- `minute` (possible abbreviations: `mi`, `n`)
|
||||
- `hour` (possible abbreviations: `hh`, `h`)
|
||||
- `day` (possible abbreviations: `dd`, `d`)
|
||||
- `week` (possible abbreviations: `wk`, `ww`)
|
||||
- `month` (possible abbreviations: `mm`, `m`)
|
||||
- `quarter` (possible abbreviations: `qq`, `q`)
|
||||
- `year` (possible abbreviations: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
Difference between `enddate` and `startdate` expressed in `unit`.
|
||||
|
||||
Type: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 24 │
|
||||
└───────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
age('day', s, e) AS day_age,
|
||||
age('month', s, e) AS month__age,
|
||||
age('year', s, e) AS year_age;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
|
||||
└────────────┴────────────┴─────────┴────────────┴──────────┘
|
||||
```
|
||||
|
||||
|
||||
## date\_diff
|
||||
|
||||
Returns the count of the specified `unit` boundaries crossed between the `startdate` and `enddate`.
|
||||
The difference is calculated using relative units, e.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).
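
The contrast with [age](#age) is easiest to see on the dates from the example above; the following illustrative query compares the two functions (the expected values follow directly from the descriptions):

``` sql
SELECT
    age('month', toDate('2021-12-29'), toDate('2022-01-01'))      AS age_month,      -- 0: less than a full month has elapsed
    dateDiff('month', toDate('2021-12-29'), toDate('2022-01-01')) AS date_diff_month -- 1: one month boundary was crossed
```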
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Aliases: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `unit` — The type of interval for the result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `second` (possible abbreviations: `ss`, `s`)
|
||||
- `minute` (possible abbreviations: `mi`, `n`)
|
||||
- `hour` (possible abbreviations: `hh`, `h`)
|
||||
- `day` (possible abbreviations: `dd`, `d`)
|
||||
- `week` (possible abbreviations: `wk`, `ww`)
|
||||
- `month` (possible abbreviations: `mm`, `m`)
|
||||
- `quarter` (possible abbreviations: `qq`, `q`)
|
||||
- `year` (possible abbreviations: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
Difference between `enddate` and `startdate` expressed in `unit`.
|
||||
|
||||
Type: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
dateDiff('day', s, e) AS day_diff,
|
||||
dateDiff('month', s, e) AS month__diff,
|
||||
dateDiff('year', s, e) AS year_diff;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
|
||||
└────────────┴────────────┴──────────┴─────────────┴───────────┘
|
||||
```
|
||||
|
||||
## date\_trunc
|
||||
|
||||
Truncates date and time data to the specified part of date.
|
||||
@ -637,80 +785,6 @@ Result:
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date\_diff
|
||||
|
||||
Returns the difference between two dates or dates with time values.
|
||||
The difference is calculated using relative units, e.g. the difference between `2022-01-01` and `2021-12-29` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Aliases: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `second`
|
||||
- `minute`
|
||||
- `hour`
|
||||
- `day`
|
||||
- `week`
|
||||
- `month`
|
||||
- `quarter`
|
||||
- `year`
|
||||
|
||||
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
Difference between `enddate` and `startdate` expressed in `unit`.
|
||||
|
||||
Type: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
dateDiff('day', s, e) AS day_diff,
|
||||
dateDiff('month', s, e) AS month__diff,
|
||||
dateDiff('year', s, e) AS year_diff;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
|
||||
└────────────┴────────────┴──────────┴─────────────┴───────────┘
|
||||
```
|
||||
|
||||
## date\_sub
|
||||
|
||||
Subtracts the time interval or date interval from the provided date or date with time.
|
||||
@ -1085,6 +1159,8 @@ SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64
|
||||
|
||||
Formats a date and time according to the given format string. The format is a constant expression, so you cannot have multiple formats for a single result column.
|
||||
|
||||
formatDateTime uses the MySQL datetime format style; refer to https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format for details.
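
For example, an illustrative call using a few common MySQL-style substitutions (`%Y` — year, `%m` — month, `%d` — day of month) could look like this:

``` sql
SELECT formatDateTime(toDateTime('2010-01-04 12:34:56'), '%Y-%m-%d') AS formatted
-- expected to return '2010-01-04'
```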
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
@ -1158,6 +1234,64 @@ Result:
|
||||
└─────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [formatDateTimeInJodaSyntax](#formatDateTimeInJodaSyntax)
|
||||
|
||||
|
||||
## formatDateTimeInJodaSyntax
|
||||
|
||||
Similar to formatDateTime, except that it formats datetime in Joda style instead of MySQL style. Refer to https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html.
|
||||
|
||||
|
||||
**Replacement fields**
|
||||
|
||||
Using replacement fields, you can define a pattern for the resulting string.
|
||||
|
||||
|
||||
| Placeholder | Description | Presentation | Examples |
|
||||
| ----------- | ----------- | ------------- | -------- |
|
||||
| G | era | text | AD |
|
||||
| C | century of era (>=0) | number | 20 |
|
||||
| Y | year of era (>=0) | year | 1996 |
|
||||
| x | weekyear (not supported yet) | year | 1996 |
|
||||
| w | week of weekyear (not supported yet) | number | 27 |
|
||||
| e | day of week | number | 2 |
|
||||
| E | day of week | text | Tuesday; Tue |
|
||||
| y | year | year | 1996 |
|
||||
| D | day of year | number | 189 |
|
||||
| M | month of year | month | July; Jul; 07 |
|
||||
| d | day of month | number | 10 |
|
||||
| a | halfday of day | text | PM |
|
||||
| K | hour of halfday (0~11) | number | 0 |
|
||||
| h | clockhour of halfday (1~12) | number | 12 |
|
||||
| H | hour of day (0~23) | number | 0 |
|
||||
| k | clockhour of day (1~24) | number | 24 |
|
||||
| m | minute of hour | number | 30 |
|
||||
| s | second of minute | number | 55 |
|
||||
| S | fraction of second (not supported yet) | number | 978 |
|
||||
| z | time zone (short name not supported yet) | text | Pacific Standard Time; PST |
|
||||
| Z | time zone offset/id (not supported yet) | zone | -0800; -08:00; America/Los_Angeles |
|
||||
| ' | escape for text | delimiter | |
|
||||
| '' | single quote | literal | ' |
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT formatDateTimeInJodaSyntax(toDateTime('2010-01-04 12:34:56'), 'yyyy-MM-dd HH:mm:ss')
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```
|
||||
┌─formatDateTimeInJodaSyntax(toDateTime('2010-01-04 12:34:56'), 'yyyy-MM-dd HH:mm:ss')─┐
|
||||
│ 2010-01-04 12:34:56 │
|
||||
└─────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
|
||||
## dateName
|
||||
|
||||
Returns specified part of date.
|
||||
@ -1241,6 +1375,8 @@ Result:
|
||||
|
||||
The function converts a Unix timestamp to a calendar date and a time of day. When there is only a single argument of [Integer](../../sql-reference/data-types/int-uint.md) type, it acts in the same way as [toDateTime](../../sql-reference/functions/type-conversion-functions.md#todatetime) and returns the [DateTime](../../sql-reference/data-types/datetime.md) type.
|
||||
|
||||
FROM_UNIXTIME uses the MySQL datetime format style; refer to https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format for details.
|
||||
|
||||
Alias: `fromUnixTimestamp`.
|
||||
|
||||
**Example:**
|
||||
@ -1273,6 +1409,28 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime;
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [fromUnixTimestampInJodaSyntax](#fromUnixTimestampInJodaSyntax)
|
||||
|
||||
|
||||
## fromUnixTimestampInJodaSyntax
|
||||
Similar to FROM_UNIXTIME, except that it formats time in Joda style instead of MySQL style. Refer to https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html.
|
||||
|
||||
**Example:**
|
||||
Query:
|
||||
``` sql
|
||||
SELECT fromUnixTimestampInJodaSyntax(1669804872, 'yyyy-MM-dd HH:mm:ss', 'UTC');
|
||||
```
|
||||
|
||||
Result:
|
||||
```
|
||||
┌─fromUnixTimestampInJodaSyntax(1669804872, 'yyyy-MM-dd HH:mm:ss', 'UTC')─┐
|
||||
│ 2022-11-30 10:41:12 │
|
||||
└────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
|
||||
## toModifiedJulianDay
|
||||
|
||||
Converts a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD` to a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number in Int32. This function supports dates from `0000-01-01` to `9999-12-31`. It raises an exception if the argument cannot be parsed as a date, or if the date is invalid.
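
An illustrative call (the expected value follows from the Modified Julian Day epoch of 1858-11-17):

``` sql
SELECT toModifiedJulianDay('2020-01-01') AS mjd
-- expected to return 58849
```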
|
||||
|
@ -14,7 +14,7 @@ ClickHouse has the [same behavior as C++ programs](https://en.cppreference.com/w
|
||||
|
||||
## toInt(8\|16\|32\|64\|128\|256)
|
||||
|
||||
Converts an input value to the [Int](../../sql-reference/data-types/int-uint.md) data type. This function family includes:
|
||||
Converts an input value to the [Int](/docs/en/sql-reference/data-types/int-uint.md) data type. This function family includes:
|
||||
|
||||
- `toInt8(expr)` — Results in the `Int8` data type.
|
||||
- `toInt16(expr)` — Results in the `Int16` data type.
|
||||
@ -25,7 +25,7 @@ Converts an input value to the [Int](../../sql-reference/data-types/int-uint.md)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped.
|
||||
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped.
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -33,7 +33,7 @@ Integer value in the `Int8`, `Int16`, `Int32`, `Int64`, `Int128` or `Int256` dat
|
||||
|
||||
Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers.
|
||||
|
||||
The behavior of functions for the [NaN and Inf](../../sql-reference/data-types/float.md#data_type-float-nan-inf) arguments is undefined. Remember about [numeric conversions issues](#numeric-conversion-issues), when using the functions.
|
||||
The behavior of functions for the [NaN and Inf](/docs/en/sql-reference/data-types/float.md/#data_type-float-nan-inf) arguments is undefined. Remember about [numeric conversions issues](#numeric-conversion-issues), when using the functions.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -114,7 +114,7 @@ Result:
|
||||
|
||||
## toUInt(8\|16\|32\|64\|256)
|
||||
|
||||
Converts an input value to the [UInt](../../sql-reference/data-types/int-uint.md) data type. This function family includes:
|
||||
Converts an input value to the [UInt](/docs/en/sql-reference/data-types/int-uint.md) data type. This function family includes:
|
||||
|
||||
- `toUInt8(expr)` — Results in the `UInt8` data type.
|
||||
- `toUInt16(expr)` — Results in the `UInt16` data type.
|
||||
@ -124,7 +124,7 @@ Converts an input value to the [UInt](../../sql-reference/data-types/int-uint.md
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped.
|
||||
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped.
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -132,7 +132,7 @@ Integer value in the `UInt8`, `UInt16`, `UInt32`, `UInt64` or `UInt256` data typ
|
||||
|
||||
Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers.
|
||||
|
||||
The behavior of functions for negative arguments and for the [NaN and Inf](../../sql-reference/data-types/float.md#data_type-float-nan-inf) arguments is undefined. If you pass a string with a negative number, for example `'-32'`, ClickHouse raises an exception. Remember about [numeric conversions issues](#numeric-conversion-issues), when using the functions.
|
||||
The behavior of functions for negative arguments and for the [NaN and Inf](/docs/en/sql-reference/data-types/float.md/#data_type-float-nan-inf) arguments is undefined. If you pass a string with a negative number, for example `'-32'`, ClickHouse raises an exception. Remember about [numeric conversions issues](#numeric-conversion-issues), when using the functions.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -166,7 +166,111 @@ Result:
|
||||
|
||||
## toDate
|
||||
|
||||
Alias: `DATE`.
|
||||
Converts the argument to the `Date` data type.
|
||||
|
||||
If the argument is `DateTime` or `DateTime64`, it truncates it, leaving the date component of the DateTime:
|
||||
```sql
|
||||
SELECT
|
||||
now() AS x,
|
||||
toDate(x)
|
||||
```
|
||||
```response
|
||||
┌───────────────────x─┬─toDate(now())─┐
|
||||
│ 2022-12-30 13:44:17 │ 2022-12-30 │
|
||||
└─────────────────────┴───────────────┘
|
||||
```
|
||||
|
||||
If the argument is a string, it is parsed as a Date or a DateTime. If it was parsed as a DateTime, the date component is used:
|
||||
```sql
|
||||
SELECT
|
||||
toDate('2022-12-30') AS x,
|
||||
toTypeName(x)
|
||||
```
|
||||
```response
|
||||
┌──────────x─┬─toTypeName(toDate('2022-12-30'))─┐
|
||||
│ 2022-12-30 │ Date │
|
||||
└────────────┴──────────────────────────────────┘
|
||||
|
||||
1 row in set. Elapsed: 0.001 sec.
|
||||
```
|
||||
```sql
|
||||
SELECT
|
||||
toDate('2022-12-30 01:02:03') AS x,
|
||||
toTypeName(x)
|
||||
```
|
||||
```response
|
||||
┌──────────x─┬─toTypeName(toDate('2022-12-30 01:02:03'))─┐
|
||||
│ 2022-12-30 │ Date │
|
||||
└────────────┴───────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
If the argument is a number and it looks like a UNIX timestamp (is greater than 65535), it is interpreted as a DateTime, then truncated to Date in the current timezone. The timezone argument can be specified as a second argument of the function. The truncation to Date depends on the timezone:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
now() AS current_time,
|
||||
toUnixTimestamp(current_time) AS ts,
|
||||
toDateTime(ts) AS time_Amsterdam,
|
||||
toDateTime(ts, 'Pacific/Apia') AS time_Samoa,
|
||||
toDate(time_Amsterdam) AS date_Amsterdam,
|
||||
toDate(time_Samoa) AS date_Samoa,
|
||||
toDate(ts) AS date_Amsterdam_2,
|
||||
toDate(ts, 'Pacific/Apia') AS date_Samoa_2
|
||||
```
|
||||
```response
|
||||
Row 1:
|
||||
──────
|
||||
current_time: 2022-12-30 13:51:54
|
||||
ts: 1672404714
|
||||
time_Amsterdam: 2022-12-30 13:51:54
|
||||
time_Samoa: 2022-12-31 01:51:54
|
||||
date_Amsterdam: 2022-12-30
|
||||
date_Samoa: 2022-12-31
|
||||
date_Amsterdam_2: 2022-12-30
|
||||
date_Samoa_2: 2022-12-31
|
||||
```
|
||||
|
||||
The example above demonstrates how the same UNIX timestamp can be interpreted as different dates in different time zones.
|
||||
|
||||
If the argument is a number and it is smaller than 65536, it is interpreted as the number of days since 1970-01-01 (a UNIX day) and converted to Date. It corresponds to the internal numeric representation of the `Date` data type. Example:
|
||||
|
||||
```sql
|
||||
SELECT toDate(12345)
|
||||
```
|
||||
```response
|
||||
┌─toDate(12345)─┐
|
||||
│ 2003-10-20 │
|
||||
└───────────────┘
|
||||
```
|
||||
|
||||
This conversion does not depend on timezones.
|
||||
|
||||
If the argument does not fit in the range of the Date type, it results in implementation-defined behavior that can saturate to the maximum supported date or overflow:
|
||||
```sql
|
||||
SELECT toDate(10000000000.)
|
||||
```
|
||||
```response
|
||||
┌─toDate(10000000000.)─┐
|
||||
│ 2106-02-07 │
|
||||
└──────────────────────┘
|
||||
```
|
||||
|
||||
The function `toDate` can also be written in alternative forms:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
now() AS time,
|
||||
toDate(time),
|
||||
DATE(time),
|
||||
CAST(time, 'Date')
|
||||
```
|
||||
```response
|
||||
┌────────────────time─┬─toDate(now())─┬─DATE(now())─┬─CAST(now(), 'Date')─┐
|
||||
│ 2022-12-30 13:54:58 │ 2022-12-30 │ 2022-12-30 │ 2022-12-30 │
|
||||
└─────────────────────┴───────────────┴─────────────┴─────────────────────┘
|
||||
```
|
||||
|
||||
Have a nice day working with dates and times.
|
||||
|
||||
## toDateOrZero
|
||||
|
||||
@ -184,7 +288,7 @@ Alias: `DATE`.
|
||||
|
||||
## toDate32
|
||||
|
||||
Converts the argument to the [Date32](../../sql-reference/data-types/date32.md) data type. If the value is outside the range returns the border values supported by `Date32`. If the argument has [Date](../../sql-reference/data-types/date.md) type, borders of `Date` are taken into account.
|
||||
Converts the argument to the [Date32](/docs/en/sql-reference/data-types/date32.md) data type. If the value is outside the range, `toDate32` returns the border values supported by `Date32`. If the argument has [Date](/docs/en/sql-reference/data-types/date.md) type, borders of `Date` are taken into account.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -194,13 +298,13 @@ toDate32(expr)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `expr` — The value. [String](../../sql-reference/data-types/string.md), [UInt32](../../sql-reference/data-types/int-uint.md) or [Date](../../sql-reference/data-types/date.md).
|
||||
- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [UInt32](/docs/en/sql-reference/data-types/int-uint.md) or [Date](/docs/en/sql-reference/data-types/date.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A calendar date.
|
||||
|
||||
Type: [Date32](../../sql-reference/data-types/date32.md).
|
||||
Type: [Date32](/docs/en/sql-reference/data-types/date32.md).
|
||||
|
||||
**Example**
|
||||
|
||||
@ -242,7 +346,7 @@ SELECT toDate32(toDate('1899-01-01')) AS value, toTypeName(value);
|
||||
|
||||
## toDate32OrZero
|
||||
|
||||
The same as [toDate32](#todate32) but returns the min value of [Date32](../../sql-reference/data-types/date32.md) if invalid argument is received.
|
||||
The same as [toDate32](#todate32) but returns the min value of [Date32](/docs/en/sql-reference/data-types/date32.md) if an invalid argument is received.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -262,7 +366,7 @@ Result:
|
||||
|
||||
## toDate32OrNull
|
||||
|
||||
The same as [toDate32](#todate32) but returns `NULL` if invalid argument is received.
|
||||
The same as [toDate32](#todate32) but returns `NULL` if an invalid argument is received.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -282,7 +386,7 @@ Result:
|
||||
|
||||
## toDate32OrDefault
|
||||
|
||||
Converts the argument to the [Date32](../../sql-reference/data-types/date32.md) data type. If the value is outside the range returns the lower border value supported by `Date32`. If the argument has [Date](../../sql-reference/data-types/date.md) type, borders of `Date` are taken into account. Returns default value if invalid argument is received.
|
||||
Converts the argument to the [Date32](/docs/en/sql-reference/data-types/date32.md) data type. If the value is outside the range, `toDate32OrDefault` returns the lower border value supported by `Date32`. If the argument has [Date](/docs/en/sql-reference/data-types/date.md) type, borders of `Date` are taken into account. Returns default value if an invalid argument is received.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -304,7 +408,7 @@ Result:
|
||||
|
||||
## toDateTime64
|
||||
|
||||
Converts the argument to the [DateTime64](../../sql-reference/data-types/datetime64.md) data type.
|
||||
Converts the argument to the [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) data type.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -314,7 +418,7 @@ toDateTime64(expr, scale, [timezone])
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `expr` — The value. [String](../../sql-reference/data-types/string.md), [UInt32](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [DateTime](../../sql-reference/data-types/datetime.md).
|
||||
- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [UInt32](/docs/en/sql-reference/data-types/int-uint.md), [Float](/docs/en/sql-reference/data-types/float.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md).
|
||||
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
|
||||
- `timezone` - Time zone of the specified datetime64 object.
|
||||
|
||||
@ -322,7 +426,7 @@ toDateTime64(expr, scale, [timezone])
|
||||
|
||||
- A calendar date and time of day, with sub-second precision.
|
||||
|
||||
Type: [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
Type: [DateTime64](/docs/en/sql-reference/data-types/datetime64.md).
|
||||
|
||||
**Example**
|
||||
|
||||
@ -378,7 +482,7 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN
|
||||
|
||||
## toDecimal(32\|64\|128\|256)
|
||||
|
||||
Converts `value` to the [Decimal](../../sql-reference/data-types/decimal.md) data type with precision of `S`. The `value` can be a number or a string. The `S` (scale) parameter specifies the number of decimal places.
|
||||
Converts `value` to the [Decimal](/docs/en/sql-reference/data-types/decimal.md) data type with precision of `S`. The `value` can be a number or a string. The `S` (scale) parameter specifies the number of decimal places.
|
||||
|
||||
- `toDecimal32(value, S)`
|
||||
- `toDecimal64(value, S)`
|
||||
@ -387,7 +491,7 @@ Converts `value` to the [Decimal](../../sql-reference/data-types/decimal.md) dat
|
||||
|
||||
## toDecimal(32\|64\|128\|256)OrNull
|
||||
|
||||
Converts an input string to a [Nullable(Decimal(P,S))](../../sql-reference/data-types/decimal.md) data type value. This family of functions include:
|
||||
Converts an input string to a [Nullable(Decimal(P,S))](/docs/en/sql-reference/data-types/decimal.md) data type value. This family of functions includes:
|
||||
|
||||
- `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` data type.
|
||||
- `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` data type.
|
||||
@ -398,7 +502,7 @@ These functions should be used instead of `toDecimal*()` functions, if you prefe
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions), returns a value in the [String](../../sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
|
||||
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions), returns a value in the [String](/docs/en/sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
|
||||
- `S` — Scale, the number of decimal places in the resulting value.
|
||||
|
||||
**Returned value**
|
||||
@ -441,7 +545,7 @@ Result:
|
||||
|
||||
## toDecimal(32\|64\|128\|256)OrDefault
|
||||
|
||||
Converts an input string to a [Decimal(P,S)](../../sql-reference/data-types/decimal.md) data type value. This family of functions include:
|
||||
Converts an input string to a [Decimal(P,S)](/docs/en/sql-reference/data-types/decimal.md) data type value. This family of functions includes:
|
||||
|
||||
- `toDecimal32OrDefault(expr, S)` — Results in `Decimal32(S)` data type.
|
||||
- `toDecimal64OrDefault(expr, S)` — Results in `Decimal64(S)` data type.
|
||||
@ -452,7 +556,7 @@ These functions should be used instead of `toDecimal*()` functions, if you prefe
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions), returns a value in the [String](../../sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
|
||||
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions), returns a value in the [String](/docs/en/sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
|
||||
- `S` — Scale, the number of decimal places in the resulting value.
|
||||
|
||||
**Returned value**
|
||||
@ -494,7 +598,7 @@ Result:
|
||||
|
||||
## toDecimal(32\|64\|128\|256)OrZero
|
||||
|
||||
Converts an input value to the [Decimal(P,S)](../../sql-reference/data-types/decimal.md) data type. This family of functions include:
|
||||
Converts an input value to the [Decimal(P,S)](/docs/en/sql-reference/data-types/decimal.md) data type. This family of functions includes:
|
||||
|
||||
- `toDecimal32OrZero( expr, S)` — Results in `Decimal32(S)` data type.
|
||||
- `toDecimal64OrZero( expr, S)` — Results in `Decimal64(S)` data type.
|
||||
@ -505,7 +609,7 @@ These functions should be used instead of `toDecimal*()` functions, if you prefe
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions), returns a value in the [String](../../sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
|
||||
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions), returns a value in the [String](/docs/en/sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
|
||||
- `S` — Scale, the number of decimal places in the resulting value.
|
||||
|
||||
**Returned value**
|
||||
@ -564,7 +668,7 @@ YYYY-MM-DD hh:mm:ss
|
||||
|
||||
As an exception, if converting from UInt32, Int32, UInt64, or Int64 numeric types to Date, and if the number is greater than or equal to 65536, the number is interpreted as a Unix timestamp (and not as the number of days) and is rounded to the date. This allows support for the common occurrence of writing ‘toDate(unix_timestamp)’, which otherwise would be an error and would require writing the more cumbersome ‘toDate(toDateTime(unix_timestamp))’.
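
An illustrative query showing both interpretations (the UTC timezone is passed explicitly so that the timestamp case does not depend on the server timezone):

``` sql
SELECT
    toDate(65535)        AS from_day_number, -- below 65536: interpreted as days since 1970-01-01, i.e. 2149-06-06
    toDate(86400, 'UTC') AS from_timestamp   -- 65536 or more: interpreted as a Unix timestamp, i.e. 1970-01-02
```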
|
||||
|
||||
Conversion between a date and date with time is performed the natural way: by adding a null time or dropping the time.
|
||||
Conversion between a date and a date with time is performed the natural way: by adding a null time or dropping the time.
|
||||
|
||||
Conversion between numeric types uses the same rules as assignments between different numeric types in C++.
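
For example, an out-of-range integer wraps the same way a narrowing assignment would in C++ (illustrative query):

``` sql
SELECT toInt8(200) AS wrapped
-- 200 does not fit into Int8; as with a narrowing conversion in C++, the result wraps to -56
```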
|
||||
|
||||
@ -643,15 +747,15 @@ These functions accept a string and interpret the bytes placed at the beginning
|
||||
|
||||
## reinterpretAsString
|
||||
|
||||
This function accepts a number or date or date with time, and returns a string containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a string that is one byte long.
|
||||
This function accepts a number or date or date with time and returns a string containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a string that is one byte long.
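
The one-byte example from the paragraph above can be checked with an illustrative query:

``` sql
SELECT
    reinterpretAsString(toUInt32(255)) AS s,           -- the single byte 0xFF
    length(s)                          AS byte_length  -- 1: the trailing null bytes of the UInt32 are dropped
```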
|
||||
|
||||
## reinterpretAsFixedString
|
||||
|
||||
This function accepts a number or date or date with time, and returns a FixedString containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a FixedString that is one byte long.
|
||||
This function accepts a number or date or date with time and returns a FixedString containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a FixedString that is one byte long.
|
||||
|
||||
## reinterpretAsUUID
|
||||
|
||||
Accepts 16 bytes string and returns UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the function works as if the string is padded with the necessary number of null bytes to the end. If the string longer than 16 bytes, the extra bytes at the end are ignored.
|
||||
Accepts a 16-byte string and returns a UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the function works as if the string is padded with the necessary number of null bytes to the end. If the string is longer than 16 bytes, the extra bytes at the end are ignored.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -661,11 +765,11 @@ reinterpretAsUUID(fixed_string)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `fixed_string` — Big-endian byte string. [FixedString](../../sql-reference/data-types/fixedstring.md#fixedstring).
|
||||
- `fixed_string` — Big-endian byte string. [FixedString](/docs/en/sql-reference/data-types/fixedstring.md/#fixedstring).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The UUID type value. [UUID](../../sql-reference/data-types/uuid.md#uuid-data-type).
|
||||
- The UUID type value. [UUID](/docs/en/sql-reference/data-types/uuid.md/#uuid-data-type).
|
||||
|
||||
**Examples**
|
||||
|
||||
@ -718,7 +822,7 @@ reinterpret(x, type)
|
||||
**Arguments**
|
||||
|
||||
- `x` — Any type.
|
||||
- `type` — Destination type. [String](../../sql-reference/data-types/string.md).
|
||||
- `type` — Destination type. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -757,7 +861,7 @@ x::t
|
||||
**Arguments**
|
||||
|
||||
- `x` — A value to convert. May be of any type.
|
||||
- `T` — The name of the target data type. [String](../../sql-reference/data-types/string.md).
|
||||
- `T` — The name of the target data type. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
- `t` — The target data type.
|
||||
|
||||
**Returned value**
|
||||
@ -806,9 +910,9 @@ Result:
|
||||
└─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘
|
||||
```
|
||||
|
||||
Conversion to FixedString(N) only works for arguments of type [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
Conversion to FixedString(N) only works for arguments of type [String](/docs/en/sql-reference/data-types/string.md) or [FixedString](/docs/en/sql-reference/data-types/fixedstring.md).
|
||||
|
||||
Type conversion to [Nullable](../../sql-reference/data-types/nullable.md) and back is supported.
|
||||
Type conversion to [Nullable](/docs/en/sql-reference/data-types/nullable.md) and back is supported.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -844,7 +948,7 @@ Result:
|
||||
|
||||
**See also**
|
||||
|
||||
- [cast_keep_nullable](../../operations/settings/settings.md#cast_keep_nullable) setting
|
||||
- [cast_keep_nullable](/docs/en/operations/settings/settings.md/#cast_keep_nullable) setting
|
||||
|
||||
## accurateCast(x, T)
|
||||
|
||||
@ -882,7 +986,7 @@ Code: 70. DB::Exception: Received from localhost:9000. DB::Exception: Value in c
|
||||
|
||||
## accurateCastOrNull(x, T)
|
||||
|
||||
Converts input value `x` to the specified data type `T`. Always returns [Nullable](../../sql-reference/data-types/nullable.md) type and returns [NULL](../../sql-reference/syntax.md#null-literal) if the casted value is not representable in the target type.
|
||||
Converts input value `x` to the specified data type `T`. Always returns [Nullable](/docs/en/sql-reference/data-types/nullable.md) type and returns [NULL](/docs/en/sql-reference/syntax.md/#null-literal) if the casted value is not representable in the target type.
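
A minimal illustrative query (values that are not representable in the target type come back as `NULL`):

``` sql
SELECT
    accurateCastOrNull(-1, 'UInt8') AS a, -- NULL: negative values are not representable in UInt8
    accurateCastOrNull(128, 'Int8') AS b, -- NULL: 128 is out of range for Int8
    accurateCastOrNull(127, 'Int8') AS c  -- 127: fits, so the value is returned
```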
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -991,7 +1095,7 @@ Result:
|
||||
|
||||
## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second)
|
||||
|
||||
Converts a Number type argument to an [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type.
|
||||
Converts a Number type argument to an [Interval](/docs/en/sql-reference/data-types/special-data-types/interval.md) data type.
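
Illustrative usage of one member of the family, adding the resulting interval to a date:

``` sql
SELECT toDate('2019-01-01') + toIntervalWeek(1) AS next_week
-- expected to return 2019-01-08
```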
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1039,7 +1143,7 @@ Result:
|
||||
## parseDateTimeBestEffort
|
||||
## parseDateTime32BestEffort
|
||||
|
||||
Converts a date and time in the [String](../../sql-reference/data-types/string.md) representation to [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) data type.
|
||||
Converts a date and time in the [String](/docs/en/sql-reference/data-types/string.md) representation to [DateTime](/docs/en/sql-reference/data-types/datetime.md/#data_type-datetime) data type.
|
||||
|
||||
The function parses [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC 1123 - 5.2.14 RFC-822 Date and Time Specification](https://tools.ietf.org/html/rfc1123#page-55), ClickHouse’s and some other date and time formats.
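
For instance, RFC 1123 and ISO 8601 representations of the same moment are both accepted (illustrative query; the displayed result depends on the server timezone):

``` sql
SELECT
    parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT') AS rfc1123,
    parseDateTimeBestEffort('2018-08-18T07:22:16Z')          AS iso8601
```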
|
||||
|
||||
@ -1051,8 +1155,8 @@ parseDateTimeBestEffort(time_string [, time_zone])
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `time_string` — String containing a date and time to convert. [String](../../sql-reference/data-types/string.md).
|
||||
- `time_zone` — Time zone. The function parses `time_string` according to the time zone. [String](../../sql-reference/data-types/string.md).
|
||||
- `time_string` — String containing a date and time to convert. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
- `time_zone` — Time zone. The function parses `time_string` according to the time zone. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Supported non-standard formats**
|
||||
|
||||
@ -1175,7 +1279,7 @@ Same as [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function except
|
||||
|
||||
## parseDateTime64BestEffort
|
||||
|
||||
Same as [parseDateTimeBestEffort](#parsedatetimebesteffort) function but also parse milliseconds and microseconds and returns [DateTime](../../sql-reference/functions/type-conversion-functions.md#data_type-datetime) data type.
|
||||
Same as the [parseDateTimeBestEffort](#parsedatetimebesteffort) function but also parses milliseconds and microseconds and returns the [DateTime](/docs/en/sql-reference/functions/type-conversion-functions.md/#data_type-datetime) data type.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1185,13 +1289,13 @@ parseDateTime64BestEffort(time_string [, precision [, time_zone]])
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `time_string` — String containing a date or date with time to convert. [String](../../sql-reference/data-types/string.md).
|
||||
- `precision` — Required precision. `3` — for milliseconds, `6` — for microseconds. Default — `3`. Optional. [UInt8](../../sql-reference/data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
|
||||
- `time_string` — String containing a date or date with time to convert. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
- `precision` — Required precision. `3` — for milliseconds, `6` — for microseconds. Default — `3`. Optional. [UInt8](/docs/en/sql-reference/data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- `time_string` converted to the [DateTime](../../sql-reference/data-types/datetime.md) data type.
|
||||
- `time_string` converted to the [DateTime](/docs/en/sql-reference/data-types/datetime.md) data type.
|
||||
|
||||
**Examples**
|
||||
|
||||
@ -1242,7 +1346,7 @@ Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that
|
||||
|
||||
## toLowCardinality
|
||||
|
||||
Converts input parameter to the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) version of same data type.
|
||||
Converts an input parameter to the [LowCardinality](/docs/en/sql-reference/data-types/lowcardinality.md) version of the same data type.
|
||||
|
||||
To convert data from the `LowCardinality` data type use the [CAST](#type_conversion_function-cast) function. For example, `CAST(x as String)`.
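
An illustrative round trip, using `toTypeName` to show the resulting types:

``` sql
SELECT
    toTypeName(toLowCardinality('1'))                 AS lc,    -- LowCardinality(String)
    toTypeName(CAST(toLowCardinality('1') AS String)) AS plain  -- String
```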
|
||||
|
||||
@ -1254,7 +1358,7 @@ toLowCardinality(expr)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions) resulting in one of the [supported data types](../../sql-reference/data-types/index.md#data_types).
|
||||
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions) resulting in one of the [supported data types](/docs/en/sql-reference/data-types/index.md/#data_types).
|
||||
|
||||
**Returned values**
|
||||
|
||||
@ -1388,12 +1492,12 @@ formatRow(format, x, y, ...)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `format` — Text format. For example, [CSV](../../interfaces/formats.md#csv), [TSV](../../interfaces/formats.md#tabseparated).
|
||||
- `format` — Text format. For example, [CSV](/docs/en/interfaces/formats.md/#csv), [TSV](/docs/en/interfaces/formats.md/#tabseparated).
|
||||
- `x`,`y`, ... — Expressions.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A formatted string (for text formats it's usually terminated with the new line character).
|
||||
- A formatted string. (For text formats it is usually terminated with a newline character.)
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1417,9 +1521,39 @@ Result:
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Note**: If the format contains a suffix/prefix, it will be written in each row.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT formatRow('CustomSeparated', number, 'good')
|
||||
FROM numbers(3)
|
||||
SETTINGS format_custom_result_before_delimiter='<prefix>\n', format_custom_result_after_delimiter='<suffix>'
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─formatRow('CustomSeparated', number, 'good')─┐
|
||||
│ <prefix>
|
||||
0 good
|
||||
<suffix> │
|
||||
│ <prefix>
|
||||
1 good
|
||||
<suffix> │
|
||||
│ <prefix>
|
||||
2 good
|
||||
<suffix> │
|
||||
└──────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Note: Only row-based formats are supported in this function.
|
||||
|
||||
## formatRowNoNewline
|
||||
|
||||
Converts arbitrary expressions into a string via given format. The function trims the last `\n` if any.
|
||||
Converts arbitrary expressions into a string via the given format. Differs from formatRow in that this function trims the last `\n`, if any.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1429,7 +1563,7 @@ formatRowNoNewline(format, x, y, ...)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `format` — Text format. For example, [CSV](../../interfaces/formats.md#csv), [TSV](../../interfaces/formats.md#tabseparated).
|
||||
- `format` — Text format. For example, [CSV](/docs/en/interfaces/formats.md/#csv), [TSV](/docs/en/interfaces/formats.md/#tabseparated).
|
||||
- `x`,`y`, ... — Expressions.
|
||||
|
||||
**Returned value**
|
||||
@ -1457,7 +1591,7 @@ Result:
|
||||
|
||||
## snowflakeToDateTime
|
||||
|
||||
Extracts time from [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as [DateTime](../data-types/datetime.md) format.
|
||||
Extracts the time from a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a [DateTime](/docs/en/sql-reference/data-types/datetime.md) value.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1467,12 +1601,12 @@ snowflakeToDateTime(value [, time_zone])
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `value` — Snowflake ID. [Int64](../data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
|
||||
- `value` — Snowflake ID. [Int64](/docs/en/sql-reference/data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). The timezone of the returned value. Optional. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Input value converted to the [DateTime](../data-types/datetime.md) data type.
|
||||
- Input value converted to the [DateTime](/docs/en/sql-reference/data-types/datetime.md) data type.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1493,7 +1627,7 @@ Result:
|
||||
|
||||
## snowflakeToDateTime64
|
||||
|
||||
Extracts time from [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as [DateTime64](../data-types/datetime64.md) format.
|
||||
Extracts the time from a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) value.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1503,12 +1637,12 @@ snowflakeToDateTime64(value [, time_zone])
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `value` — Snowflake ID. [Int64](../data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
|
||||
- `value` — Snowflake ID. [Int64](/docs/en/sql-reference/data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). The timezone of the returned value. Optional. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Input value converted to the [DateTime64](../data-types/datetime64.md) data type.
|
||||
- Input value converted to the [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) data type.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1529,7 +1663,7 @@ Result:
|
||||
|
||||
## dateTimeToSnowflake
|
||||
|
||||
Converts [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the giving time.
|
||||
Converts a [DateTime](/docs/en/sql-reference/data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1539,11 +1673,11 @@ dateTimeToSnowflake(value)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md).
|
||||
- `value` — Date and time. [DateTime](/docs/en/sql-reference/data-types/datetime.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Input value converted to the [Int64](../data-types/int-uint.md) data type as the first Snowflake ID at that time.
|
||||
- Input value converted to the [Int64](/docs/en/sql-reference/data-types/int-uint.md) data type as the first Snowflake ID at that time.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1563,7 +1697,7 @@ Result:
|
||||
|
||||
## dateTime64ToSnowflake
|
||||
|
||||
Convert [DateTime64](../data-types/datetime64.md) to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the giving time.
|
||||
Converts a [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1573,11 +1707,11 @@ dateTime64ToSnowflake(value)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `value` — Date and time. [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
- `value` — Date and time. [DateTime64](/docs/en/sql-reference/data-types/datetime64.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Input value converted to the [Int64](../data-types/int-uint.md) data type as the first Snowflake ID at that time.
|
||||
- Input value converted to the [Int64](/docs/en/sql-reference/data-types/int-uint.md) data type as the first Snowflake ID at that time.
|
||||
|
||||
**Example**
|
||||
|
||||
|
@ -424,23 +424,23 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
|
||||
|
||||
## toRelativeYearNum {#torelativeyearnum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер года, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер года, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeQuarterNum {#torelativequarternum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер квартала, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер квартала, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeMonthNum {#torelativemonthnum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер месяца, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер месяца, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeWeekNum {#torelativeweeknum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер недели, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер недели, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeDayNum {#torelativedaynum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер дня, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер дня, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeHourNum {#torelativehournum}
|
||||
|
||||
@ -456,7 +456,7 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
|
||||
|
||||
## toISOYear {#toisoyear}
|
||||
|
||||
Переводит дату-с-временем или дату в число типа UInt16, содержащее номер ISO года. ISO год отличается от обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) ISO год начинается необязательно первого января.
|
||||
Переводит дату или дату-с-временем в число типа UInt16, содержащее номер ISO года. ISO год отличается от обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) ISO год начинается необязательно первого января.
|
||||
|
||||
**Пример**
|
||||
|
||||
@ -479,7 +479,7 @@ SELECT
|
||||
|
||||
## toISOWeek {#toisoweek}
|
||||
|
||||
Переводит дату-с-временем или дату в число типа UInt8, содержащее номер ISO недели.
|
||||
Переводит дату или дату-с-временем в число типа UInt8, содержащее номер ISO недели.
|
||||
Начало ISO года отличается от начала обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) первая неделя года - это неделя с четырьмя или более днями в этом году.
|
||||
|
||||
1 Января 2017 г. - воскресение, т.е. первая ISO неделя 2017 года началась в понедельник 2 января, поэтому 1 января 2017 это последняя неделя 2016 года.
|
||||
@ -503,7 +503,7 @@ SELECT
|
||||
```
|
||||
|
||||
## toWeek(date\[, mode\]\[, timezone\]) {#toweek}
|
||||
Переводит дату-с-временем или дату в число UInt8, содержащее номер недели. Второй аргументам mode задает режим, начинается ли неделя с воскресенья или с понедельника и должно ли возвращаемое значение находиться в диапазоне от 0 до 53 или от 1 до 53. Если аргумент mode опущен, то используется режим 0.
|
||||
Переводит дату или дату-с-временем в число UInt8, содержащее номер недели. Второй аргумент mode задает режим: начинается ли неделя с воскресенья или с понедельника и должно ли возвращаемое значение находиться в диапазоне от 0 до 53 или от 1 до 53. Если аргумент mode опущен, то используется режим 0.
|
||||
|
||||
`toISOWeek() ` эквивалентно `toWeek(date,3)`.
|
||||
|
||||
@ -569,6 +569,132 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
|
||||
└────────────┴───────────┴───────────┴───────────┘
|
||||
```
|
||||
|
||||
## age
|
||||
|
||||
Вычисляет компонент `unit` разницы между `startdate` и `enddate`. Разница вычисляется с точностью в 1 секунду.
|
||||
Например, разница между `2021-12-29` и `2022-01-01` составляет 3 дня для единицы `day`, 0 месяцев для единицы `month` и 0 лет для единицы `year`.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
age('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second` (возможные сокращения: `ss`, `s`)
|
||||
- `minute` (возможные сокращения: `mi`, `n`)
|
||||
- `hour` (возможные сокращения: `hh`, `h`)
|
||||
- `day` (возможные сокращения: `dd`, `d`)
|
||||
- `week` (возможные сокращения: `wk`, `ww`)
|
||||
- `month` (возможные сокращения: `mm`, `m`)
|
||||
- `quarter` (возможные сокращения: `qq`, `q`)
|
||||
- `year` (возможные сокращения: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 24 │
|
||||
└───────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
age('day', s, e) AS day_age,
|
||||
age('month', s, e) AS month__age,
|
||||
age('year', s, e) AS year_age;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
|
||||
└────────────┴────────────┴─────────┴────────────┴──────────┘
|
||||
```
|
||||
|
||||
## date\_diff {#date_diff}
|
||||
|
||||
Вычисляет количество границ `unit`, пересекаемых между `startdate` и `enddate`.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Синонимы: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second` (возможные сокращения: `ss`, `s`)
|
||||
- `minute` (возможные сокращения: `mi`, `n`)
|
||||
- `hour` (возможные сокращения: `hh`, `h`)
|
||||
- `day` (возможные сокращения: `dd`, `d`)
|
||||
- `week` (возможные сокращения: `wk`, `ww`)
|
||||
- `month` (возможные сокращения: `mm`, `m`)
|
||||
- `quarter` (возможные сокращения: `qq`, `q`)
|
||||
- `year` (возможные сокращения: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
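
Because `date_diff` counts crossed boundaries rather than elapsed time, two timestamps that are only minutes apart can still differ by a whole `day`. A minimal sketch of this edge case; the results in the comment are expectations that follow from the rules above, not output quoted from the documentation.

``` sql
-- Only two minutes elapse, but the midnight 'day' boundary is crossed:
-- dateDiff should return 1, while age (complete units only) should return 0.
SELECT
    dateDiff('day', toDateTime('2022-01-01 23:59:00'), toDateTime('2022-01-02 00:01:00')) AS crossed_days,
    age('day', toDateTime('2022-01-01 23:59:00'), toDateTime('2022-01-02 00:01:00')) AS full_days;
```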

## date_trunc {#date_trunc}

Truncates a date or date with time to the specified part, discarding all smaller parts.
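
A minimal usage sketch of the truncation behaviour; the literal value in the comment is an expectation consistent with the description, since the rest of this section falls outside the excerpt.

``` sql
-- Everything below the 'hour' part is zeroed out.
SELECT date_trunc('hour', toDateTime('2022-03-01 12:55:33')) AS truncated;
-- Expected: 2022-03-01 12:00:00
```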
@ -689,60 +815,6 @@ SELECT date_add(YEAR, 3, toDate('2018-01-01'));
└───────────────────────────────────────────────┘
```

## date\_diff {#date_diff}

Calculates the difference between two date or date-with-time values.

**Syntax**

``` sql
date_diff('unit', startdate, enddate, [timezone])
```

Aliases: `dateDiff`, `DATE_DIFF`.

**Arguments**

- `unit` — the unit of time in which the returned value is expressed. [String](../../sql-reference/data-types/string.md).
    Possible values:

    - `second`
    - `minute`
    - `hour`
    - `day`
    - `week`
    - `month`
    - `quarter`
    - `year`

- `startdate` — the first date or date with time, which is subtracted from `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

- `enddate` — the second date or date with time, from which `startdate` is subtracted. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

- `timezone` — [time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, the time zones of `startdate` and `enddate` are used; if they are not the same, the result is undefined. [String](../../sql-reference/data-types/string.md).

**Returned value**

The difference between `enddate` and `startdate`, expressed in `unit`.

Type: [Int](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```

Result:

``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│                                                                                      25 │
└─────────────────────────────────────────────────────────────────────────────────────────┘
```
## date\_sub {#date_sub}

Subtracts a time or date interval from the specified date or date with time.
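
A minimal usage sketch, mirroring the `date_add` example referenced in the hunk above; the value in the comment is an expectation, not output copied from this excerpt.

``` sql
-- Subtract an interval of 3 years from a date.
SELECT date_sub(YEAR, 3, toDate('2018-01-01'));
-- Expected: 2015-01-01
```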
@ -1316,7 +1316,7 @@ formatRow(format, x, y, ...)

**Returned value**

- The formatted string (in text formats it is usually terminated with a newline character).

**Example**
@ -1340,9 +1340,39 @@ FROM numbers(3);
└──────────────────────────────────┘
```

**Note**: if the format contains a prefix/suffix, it is written in every row.

**Example**

Query:

``` sql
SELECT formatRow('CustomSeparated', number, 'good')
FROM numbers(3)
SETTINGS format_custom_result_before_delimiter='<prefix>\n', format_custom_result_after_delimiter='<suffix>'
```

Result:

``` text
┌─formatRow('CustomSeparated', number, 'good')─┐
│ <prefix>
0	good
<suffix> │
│ <prefix>
1	good
<suffix> │
│ <prefix>
2	good
<suffix> │
└──────────────────────────────────────────────┘
```

**Note**: this function supports only row-based output formats.

## formatRowNoNewline {#formatrownonewline}

Converts arbitrary expressions into a string of the given format. It differs from formatRow in that it removes the trailing newline character `\n`, if there is one.

**Syntax**
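
The formal syntax block for this function falls outside this excerpt. As an illustrative sketch of the difference from `formatRow`, the following query compares the two; the behaviour is what the description above implies, and the column aliases are invented for the example.

``` sql
-- formatRow keeps the format's trailing row delimiter, formatRowNoNewline strips it.
SELECT
    formatRow('CSV', number, 'good')          AS with_newline,
    formatRowNoNewline('CSV', number, 'good') AS without_newline
FROM numbers(2);
```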
@ -11,7 +11,7 @@ sidebar_position: 29

This family includes the following engines (a minimal table-creation sketch follows the list):

- [StripeLog](stripelog.md)
- [Log](log.md)
- [TinyLog](tinylog.md)
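
A hedged sketch of creating and filling a table with one of these engines; the table and column names are invented for the example.

``` sql
-- TinyLog: the simplest engine of the family, suitable for small tables written in one go.
CREATE TABLE log_example
(
    id UInt64,
    message String
)
ENGINE = TinyLog;

INSERT INTO log_example VALUES (1, 'hello'), (2, 'world');
SELECT * FROM log_example;
```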

## Common properties {#table_engines-log-engine-family-common-properties}
@ -13,6 +13,10 @@ set (CLICKHOUSE_CLIENT_LINK
|
||||
string_utils
|
||||
)
|
||||
|
||||
if (TARGET ch_rust::skim)
|
||||
list(APPEND CLICKHOUSE_CLIENT_LINK PRIVATE ch_rust::skim)
|
||||
endif()
|
||||
|
||||
# Always use internal readpassphrase
|
||||
list(APPEND CLICKHOUSE_CLIENT_LINK PRIVATE readpassphrase)
|
||||
|
||||
|
@ -30,9 +30,10 @@
|
||||
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/WriteBufferFromOStream.h>
|
||||
#include <IO/UseSSL.h>
|
||||
#include <IO/WriteBufferFromOStream.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/copyData.h>
|
||||
|
||||
#include <Parsers/ASTCreateQuery.h>
|
||||
#include <Parsers/ASTDropQuery.h>
|
||||
@ -41,6 +42,8 @@
|
||||
#include <Parsers/ASTInsertQuery.h>
|
||||
#include <Parsers/ASTSelectQuery.h>
|
||||
|
||||
#include <Processors/Transforms/getSourceFromASTInsertQuery.h>
|
||||
|
||||
#include <Interpreters/InterpreterSetQuery.h>
|
||||
|
||||
#include <Functions/registerFunctions.h>
|
||||
@ -827,6 +830,20 @@ bool Client::processWithFuzzing(const String & full_query)
|
||||
WriteBufferFromOStream ast_buf(std::cout, 4096);
|
||||
formatAST(*query, ast_buf, false /*highlight*/);
|
||||
ast_buf.next();
|
||||
if (const auto * insert = query->as<ASTInsertQuery>())
|
||||
{
|
||||
/// For inserts with data it's really useful to have the data itself available in the logs, as formatAST doesn't print it
|
||||
if (insert->hasInlinedData())
|
||||
{
|
||||
String bytes;
|
||||
{
|
||||
auto read_buf = getReadBufferFromASTInsertQuery(query);
|
||||
WriteBufferFromString write_buf(bytes);
|
||||
copyData(*read_buf, write_buf);
|
||||
}
|
||||
std::cout << std::endl << bytes;
|
||||
}
|
||||
}
|
||||
std::cout << std::endl << std::endl;
|
||||
|
||||
try
|
||||
|
@ -18,6 +18,10 @@ if(NOT CLICKHOUSE_ONE_SHARED)
|
||||
target_link_libraries(clickhouse-local-lib PRIVATE clickhouse-server-lib)
|
||||
endif()
|
||||
|
||||
if (TARGET ch_rust::skim)
|
||||
target_link_libraries(clickhouse-local-lib PRIVATE ch_rust::skim)
|
||||
endif()
|
||||
|
||||
# Always use internal readpassphrase
|
||||
target_link_libraries(clickhouse-local-lib PRIVATE readpassphrase)
|
||||
|
||||
|
@ -39,5 +39,21 @@ function(clickhouse_import_crate)
|
||||
corrosion_import_crate(NO_STD ${ARGN})
|
||||
endfunction()
|
||||
|
||||
add_subdirectory (BLAKE3)
|
||||
add_subdirectory (skim)
|
||||
# Add crate from the build directory.
|
||||
#
|
||||
# Our crates have configuration files:
|
||||
# - config for cargo (see config.toml.in)
|
||||
# - and possibly config for build (build.rs.in)
|
||||
#
|
||||
# And to avoid clashes between different builds of the same source directory, the crate will
|
||||
# be copied from source directory to the binary directory.
|
||||
file(COPY ".cargo" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
|
||||
function(add_rust_subdirectory src)
|
||||
set(dst "${CMAKE_CURRENT_BINARY_DIR}/${src}")
|
||||
message(STATUS "Copy ${src} to ${dst}")
|
||||
file(COPY "${src}" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
|
||||
add_subdirectory("${dst}" "${dst}")
|
||||
endfunction()
|
||||
|
||||
add_rust_subdirectory (BLAKE3)
|
||||
add_rust_subdirectory (skim)
|
||||
|
@ -1,647 +0,0 @@
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
#include <AggregateFunctions/FactoryHelpers.h>
|
||||
#include <Common/HashTable/HashMap.h>
|
||||
#include <Common/SymbolIndex.h>
|
||||
#include <Common/ArenaAllocator.h>
|
||||
#include <Core/Settings.h>
|
||||
#include <Columns/ColumnArray.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/Operators.h>
|
||||
#include <filesystem>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int FUNCTION_NOT_ALLOWED;
|
||||
extern const int NOT_IMPLEMENTED;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
}
|
||||
|
||||
struct AggregateFunctionFlameGraphTree
|
||||
{
|
||||
struct ListNode;
|
||||
|
||||
struct TreeNode
|
||||
{
|
||||
TreeNode * parent = nullptr;
|
||||
ListNode * children = nullptr;
|
||||
UInt64 ptr = 0;
|
||||
size_t allocated = 0;
|
||||
};
|
||||
|
||||
struct ListNode
|
||||
{
|
||||
ListNode * next = nullptr;
|
||||
TreeNode * child = nullptr;
|
||||
};
|
||||
|
||||
TreeNode root;
|
||||
|
||||
static ListNode * createChild(TreeNode * parent, UInt64 ptr, Arena * arena)
|
||||
{
|
||||
|
||||
ListNode * list_node = reinterpret_cast<ListNode *>(arena->alloc(sizeof(ListNode)));
|
||||
TreeNode * tree_node = reinterpret_cast<TreeNode *>(arena->alloc(sizeof(TreeNode)));
|
||||
|
||||
list_node->child = tree_node;
|
||||
list_node->next = nullptr;
|
||||
|
||||
tree_node->parent = parent;
|
||||
tree_node->children = nullptr;
|
||||
tree_node->ptr = ptr;
|
||||
tree_node->allocated = 0;
|
||||
|
||||
return list_node;
|
||||
}
|
||||
|
||||
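/// Walk the tree along the given stack of addresses, creating missing nodes on the way,
/// and return the node that corresponds to the full trace.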
TreeNode * find(const UInt64 * stack, size_t stack_size, Arena * arena)
|
||||
{
|
||||
TreeNode * node = &root;
|
||||
for (size_t i = 0; i < stack_size; ++i)
|
||||
{
|
||||
UInt64 ptr = stack[i];
|
||||
if (ptr == 0)
|
||||
break;
|
||||
|
||||
if (!node->children)
|
||||
{
|
||||
node->children = createChild(node, ptr, arena);
|
||||
node = node->children->child;
|
||||
}
|
||||
else
|
||||
{
|
||||
ListNode * list = node->children;
|
||||
while (list->child->ptr != ptr && list->next)
|
||||
list = list->next;
|
||||
|
||||
if (list->child->ptr != ptr)
|
||||
{
|
||||
list->next = createChild(node, ptr, arena);
|
||||
list = list->next;
|
||||
}
|
||||
|
||||
node = list->child;
|
||||
}
|
||||
}
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
static void append(DB::PaddedPODArray<UInt64> & values, DB::PaddedPODArray<UInt64> & offsets, std::vector<UInt64> & frame)
|
||||
{
|
||||
UInt64 prev = offsets.empty() ? 0 : offsets.back();
|
||||
offsets.push_back(prev + frame.size());
|
||||
for (UInt64 val : frame)
|
||||
values.push_back(val);
|
||||
}
|
||||
|
||||
struct Trace
|
||||
{
|
||||
using Frames = std::vector<UInt64>;
|
||||
|
||||
Frames frames;
|
||||
|
||||
/// The total number of bytes allocated for traces with the same prefix.
|
||||
size_t allocated_total = 0;
|
||||
/// This counter is relevant in case we want to filter out traces with a small number of bytes.
|
||||
/// It shows the total number of bytes for *filtered* traces with the same prefix.
|
||||
/// This is the value which is used in flamegraph.
|
||||
size_t allocated_self = 0;
|
||||
};
|
||||
|
||||
using Traces = std::vector<Trace>;
|
||||
|
||||
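/// Flatten the tree into a list of traces: branches with less than min_bytes allocated
/// are skipped, and traversal stops at max_depth (0 means unlimited depth).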
Traces dump(size_t max_depth, size_t min_bytes) const
|
||||
{
|
||||
Traces traces;
|
||||
Trace::Frames frames;
|
||||
std::vector<size_t> allocated_total;
|
||||
std::vector<size_t> allocated_self;
|
||||
std::vector<ListNode *> nodes;
|
||||
|
||||
nodes.push_back(root.children);
|
||||
allocated_total.push_back(root.allocated);
|
||||
allocated_self.push_back(root.allocated);
|
||||
|
||||
while (!nodes.empty())
|
||||
{
|
||||
if (nodes.back() == nullptr)
|
||||
{
|
||||
traces.push_back({frames, allocated_total.back(), allocated_self.back()});
|
||||
|
||||
nodes.pop_back();
|
||||
allocated_total.pop_back();
|
||||
allocated_self.pop_back();
|
||||
|
||||
/// We don't have the root's frame, so frames end up empty at the end.
|
||||
if (!frames.empty())
|
||||
frames.pop_back();
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
TreeNode * current = nodes.back()->child;
|
||||
nodes.back() = nodes.back()->next;
|
||||
|
||||
bool enough_bytes = current->allocated >= min_bytes;
|
||||
bool enough_depth = max_depth == 0 || nodes.size() < max_depth;
|
||||
|
||||
if (enough_bytes)
|
||||
{
|
||||
frames.push_back(current->ptr);
|
||||
allocated_self.back() -= current->allocated;
|
||||
|
||||
if (enough_depth)
|
||||
{
|
||||
allocated_total.push_back(current->allocated);
|
||||
allocated_self.push_back(current->allocated);
|
||||
nodes.push_back(current->children);
|
||||
}
|
||||
else
|
||||
{
|
||||
traces.push_back({frames, current->allocated, current->allocated});
|
||||
frames.pop_back();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return traces;
|
||||
}
|
||||
};
|
||||
|
||||
static void insertData(DB::PaddedPODArray<UInt8> & chars, DB::PaddedPODArray<UInt64> & offsets, const char * pos, size_t length)
|
||||
{
|
||||
const size_t old_size = chars.size();
|
||||
const size_t new_size = old_size + length + 1;
|
||||
|
||||
chars.resize(new_size);
|
||||
if (length)
|
||||
memcpy(chars.data() + old_size, pos, length);
|
||||
chars[old_size + length] = 0;
|
||||
offsets.push_back(new_size);
|
||||
}
|
||||
|
||||
/// Split str by line feed and write each part as a separate row to ColumnString.
|
||||
static void fillColumn(DB::PaddedPODArray<UInt8> & chars, DB::PaddedPODArray<UInt64> & offsets, const std::string & str)
|
||||
{
|
||||
size_t start = 0;
|
||||
size_t end = 0;
|
||||
size_t size = str.size();
|
||||
|
||||
while (end < size)
|
||||
{
|
||||
if (str[end] == '\n')
|
||||
{
|
||||
insertData(chars, offsets, str.data() + start, end - start);
|
||||
start = end + 1;
|
||||
}
|
||||
|
||||
++end;
|
||||
}
|
||||
|
||||
if (start < end)
|
||||
insertData(chars, offsets, str.data() + start, end - start);
|
||||
}
|
||||
|
||||
void dumpFlameGraph(
|
||||
const AggregateFunctionFlameGraphTree::Traces & traces,
|
||||
DB::PaddedPODArray<UInt8> & chars,
|
||||
DB::PaddedPODArray<UInt64> & offsets)
|
||||
{
|
||||
DB::WriteBufferFromOwnString out;
|
||||
|
||||
std::unordered_map<uintptr_t, size_t> mapping;
|
||||
|
||||
#if defined(__ELF__) && !defined(OS_FREEBSD)
|
||||
auto symbol_index_ptr = DB::SymbolIndex::instance();
|
||||
const DB::SymbolIndex & symbol_index = *symbol_index_ptr;
|
||||
#endif
|
||||
|
||||
for (const auto & trace : traces)
|
||||
{
|
||||
if (trace.allocated_self == 0)
|
||||
continue;
|
||||
|
||||
for (size_t i = 0; i < trace.frames.size(); ++i)
|
||||
{
|
||||
if (i)
|
||||
out << ";";
|
||||
|
||||
const void * ptr = reinterpret_cast<const void *>(trace.frames[i]);
|
||||
|
||||
#if defined(__ELF__) && !defined(OS_FREEBSD)
|
||||
if (const auto * symbol = symbol_index.findSymbol(ptr))
|
||||
writeString(demangle(symbol->name), out);
|
||||
else
|
||||
DB::writePointerHex(ptr, out);
|
||||
#else
|
||||
DB::writePointerHex(ptr, out);
|
||||
#endif
|
||||
}
|
||||
|
||||
out << ' ' << trace.allocated_self << "\n";
|
||||
}
|
||||
|
||||
fillColumn(chars, offsets, out.str());
|
||||
}
|
||||
|
||||
struct AggregateFunctionFlameGraphData
|
||||
{
|
||||
struct Entry
|
||||
{
|
||||
AggregateFunctionFlameGraphTree::TreeNode * trace;
|
||||
UInt64 size;
|
||||
Entry * next = nullptr;
|
||||
};
|
||||
|
||||
struct Pair
|
||||
{
|
||||
Entry * allocation = nullptr;
|
||||
Entry * deallocation = nullptr;
|
||||
};
|
||||
|
||||
using Entries = HashMap<UInt64, Pair>;
|
||||
|
||||
AggregateFunctionFlameGraphTree tree;
|
||||
Entries entries;
|
||||
Entry * free_list = nullptr;
|
||||
|
||||
Entry * alloc(Arena * arena)
|
||||
{
|
||||
if (free_list)
|
||||
{
|
||||
auto * res = free_list;
|
||||
free_list = free_list->next;
|
||||
return res;
|
||||
}
|
||||
|
||||
return reinterpret_cast<Entry *>(arena->alloc(sizeof(Entry)));
|
||||
}
|
||||
|
||||
void release(Entry * entry)
|
||||
{
|
||||
entry->next = free_list;
|
||||
free_list = entry;
|
||||
}
|
||||
|
||||
static void track(Entry * allocation)
|
||||
{
|
||||
auto * node = allocation->trace;
|
||||
while (node)
|
||||
{
|
||||
node->allocated += allocation->size;
|
||||
node = node->parent;
|
||||
}
|
||||
}
|
||||
|
||||
static void untrack(Entry * allocation)
|
||||
{
|
||||
auto * node = allocation->trace;
|
||||
while (node)
|
||||
{
|
||||
node->allocated -= allocation->size;
|
||||
node = node->parent;
|
||||
}
|
||||
}
|
||||
|
||||
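/// Find the first entry in the singly linked list with the given size, unlink it and
/// return it; returns nullptr if no entry with this size exists.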
static Entry * tryFindMatchAndRemove(Entry *& list, UInt64 size)
|
||||
{
|
||||
if (!list)
|
||||
return nullptr;
|
||||
|
||||
if (list->size == size)
|
||||
{
|
||||
Entry * entry = list;
|
||||
list = list->next;
|
||||
return entry;
|
||||
}
|
||||
else
|
||||
{
|
||||
Entry * parent = list;
|
||||
while (parent->next && parent->next->size != size)
|
||||
parent = parent->next;
|
||||
|
||||
if (parent->next && parent->next->size == size)
|
||||
{
|
||||
Entry * entry = parent->next;
|
||||
parent->next = entry->next;
|
||||
return entry;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void add(UInt64 ptr, Int64 size, const UInt64 * stack, size_t stack_size, Arena * arena)
|
||||
{
|
||||
/// If the address argument is zero, only account the allocation size in the tree
/// (there is nothing to match a deallocation against).
|
||||
if (ptr == 0)
|
||||
{
|
||||
if (size > 0)
|
||||
{
|
||||
auto * node = tree.find(stack, stack_size, arena);
|
||||
Entry entry{.trace = node, .size = UInt64(size)};
|
||||
track(&entry);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
auto & place = entries[ptr];
|
||||
if (size > 0)
|
||||
{
|
||||
if (auto * deallocation = tryFindMatchAndRemove(place.deallocation, size))
|
||||
{
|
||||
release(deallocation);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto * node = tree.find(stack, stack_size, arena);
|
||||
|
||||
auto * allocation = alloc(arena);
|
||||
allocation->size = UInt64(size);
|
||||
allocation->trace = node;
|
||||
|
||||
track(allocation);
|
||||
|
||||
allocation->next = place.allocation;
|
||||
place.allocation = allocation;
|
||||
}
|
||||
}
|
||||
else if (size < 0)
|
||||
{
|
||||
UInt64 abs_size = -size;
|
||||
if (auto * allocation = tryFindMatchAndRemove(place.allocation, abs_size))
|
||||
{
|
||||
untrack(allocation);
|
||||
release(allocation);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto * deallocation = alloc(arena);
|
||||
deallocation->size = abs_size;
|
||||
|
||||
deallocation->next = place.deallocation;
|
||||
place.deallocation = deallocation;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void merge(const AggregateFunctionFlameGraphTree & other_tree, Arena * arena)
|
||||
{
|
||||
AggregateFunctionFlameGraphTree::Trace::Frames frames;
|
||||
std::vector<AggregateFunctionFlameGraphTree::ListNode *> nodes;
|
||||
|
||||
nodes.push_back(other_tree.root.children);
|
||||
|
||||
while (!nodes.empty())
|
||||
{
|
||||
if (nodes.back() == nullptr)
|
||||
{
|
||||
nodes.pop_back();
|
||||
|
||||
/// We don't have the root's frame, so frames end up empty at the end.
|
||||
if (!frames.empty())
|
||||
frames.pop_back();
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
AggregateFunctionFlameGraphTree::TreeNode * current = nodes.back()->child;
|
||||
nodes.back() = nodes.back()->next;
|
||||
|
||||
frames.push_back(current->ptr);
|
||||
|
||||
if (current->children)
|
||||
nodes.push_back(current->children);
|
||||
else
|
||||
{
|
||||
if (current->allocated)
|
||||
add(0, current->allocated, frames.data(), frames.size(), arena);
|
||||
|
||||
frames.pop_back();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void merge(const AggregateFunctionFlameGraphData & other, Arena * arena)
|
||||
{
|
||||
AggregateFunctionFlameGraphTree::Trace::Frames frames;
|
||||
for (const auto & entry : other.entries)
|
||||
{
|
||||
for (auto * allocation = entry.value.second.allocation; allocation; allocation = allocation->next)
|
||||
{
|
||||
frames.clear();
|
||||
const auto * node = allocation->trace;
|
||||
while (node->ptr)
|
||||
{
|
||||
frames.push_back(node->ptr);
|
||||
node = node->parent;
|
||||
}
|
||||
|
||||
std::reverse(frames.begin(), frames.end());
|
||||
add(entry.value.first, allocation->size, frames.data(), frames.size(), arena);
|
||||
untrack(allocation);
|
||||
}
|
||||
|
||||
for (auto * deallocation = entry.value.second.deallocation; deallocation; deallocation = deallocation->next)
|
||||
{
|
||||
add(entry.value.first, -Int64(deallocation->size), nullptr, 0, arena);
|
||||
}
|
||||
}
|
||||
|
||||
merge(other.tree, arena);
|
||||
}
|
||||
|
||||
void dumpFlameGraph(
|
||||
DB::PaddedPODArray<UInt8> & chars,
|
||||
DB::PaddedPODArray<UInt64> & offsets,
|
||||
size_t max_depth, size_t min_bytes) const
|
||||
{
|
||||
DB::dumpFlameGraph(tree.dump(max_depth, min_bytes), chars, offsets);
|
||||
}
|
||||
};
|
||||
|
||||
/// Aggregate function which builds a flamegraph using the list of stacktraces.
|
||||
/// The output is an array of strings which can be used by flamegraph.pl util.
|
||||
/// See https://github.com/brendangregg/FlameGraph
|
||||
///
|
||||
/// Syntax: flameGraph(traces, [size = 1], [ptr = 0])
|
||||
/// - trace : Array(UInt64), a stacktrace
|
||||
/// - size : Int64, an allocation size (for memory profiling)
|
||||
/// - ptr : UInt64, an allocation address
|
||||
/// If ptr != 0, flameGraph will match allocations (size > 0) with deallocations (size < 0) that have the same size and ptr.
|
||||
/// Only allocations that were not freed are shown. Unmatched deallocations are ignored.
|
||||
///
|
||||
/// Usage:
|
||||
///
|
||||
/// * Build a flamegraph based on CPU query profiler
|
||||
/// set query_profiler_cpu_time_period_ns=10000000;
|
||||
/// SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
|
||||
/// clickhouse client --allow_introspection_functions=1
|
||||
/// -q "select arrayJoin(flameGraph(arrayReverse(trace))) from system.trace_log where trace_type = 'CPU' and query_id = 'xxx'"
|
||||
/// | ~/dev/FlameGraph/flamegraph.pl > flame_cpu.svg
|
||||
///
|
||||
/// * Build a flamegraph based on memory query profiler, showing all allocations
|
||||
/// set memory_profiler_sample_probability=1, max_untracked_memory=1;
|
||||
/// SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
|
||||
/// clickhouse client --allow_introspection_functions=1
|
||||
/// -q "select arrayJoin(flameGraph(trace, size)) from system.trace_log where trace_type = 'MemorySample' and query_id = 'xxx'"
|
||||
/// | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem.svg
|
||||
///
|
||||
/// * Build a flamegraph based on memory query profiler, showing allocations which were not deallocated in query context
|
||||
/// set memory_profiler_sample_probability=1, max_untracked_memory=1, use_uncompressed_cache=1, merge_tree_max_rows_to_use_cache=100000000000, merge_tree_max_bytes_to_use_cache=1000000000000;
|
||||
/// SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
|
||||
/// clickhouse client --allow_introspection_functions=1
|
||||
/// -q "select arrayJoin(flameGraph(trace, size, ptr)) from system.trace_log where trace_type = 'MemorySample' and query_id = 'xxx'"
|
||||
/// | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_untracked.svg
|
||||
///
|
||||
/// * Build a flamegraph based on memory query profiler, showing active allocations at the fixed point of time
|
||||
/// set memory_profiler_sample_probability=1, max_untracked_memory=1;
|
||||
/// SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
|
||||
/// 1. Memory usage per second
|
||||
/// select event_time, m, formatReadableSize(max(s) as m) from (select event_time, sum(size) over (order by event_time) as s from system.trace_log where query_id = 'xxx' and trace_type = 'MemorySample') group by event_time order by event_time;
|
||||
/// 2. Find a time point with maximal memory usage
|
||||
/// select argMax(event_time, s), max(s) from (select event_time, sum(size) over (order by event_time) as s from system.trace_log where query_id = 'xxx' and trace_type = 'MemorySample');
|
||||
/// 3. Fix active allocations at fixed point of time
|
||||
/// clickhouse client --allow_introspection_functions=1
|
||||
/// -q "select arrayJoin(flameGraph(trace, size, ptr)) from (select * from system.trace_log where trace_type = 'MemorySample' and query_id = 'xxx' and event_time <= 'yyy' order by event_time)"
|
||||
/// | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_time_point_pos.svg
|
||||
/// 4. Find deallocations at fixed point of time
|
||||
/// clickhouse client --allow_introspection_functions=1
|
||||
/// -q "select arrayJoin(flameGraph(trace, -size, ptr)) from (select * from system.trace_log where trace_type = 'MemorySample' and query_id = 'xxx' and event_time > 'yyy' order by event_time desc)"
|
||||
/// | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_time_point_neg.svg
|
||||
class AggregateFunctionFlameGraph final : public IAggregateFunctionDataHelper<AggregateFunctionFlameGraphData, AggregateFunctionFlameGraph>
|
||||
{
|
||||
public:
|
||||
explicit AggregateFunctionFlameGraph(const DataTypes & argument_types_)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionFlameGraphData, AggregateFunctionFlameGraph>(argument_types_, {}, createResultType())
|
||||
{}
|
||||
|
||||
String getName() const override { return "flameGraph"; }
|
||||
|
||||
static DataTypePtr createResultType()
|
||||
{
|
||||
return std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>());
|
||||
}
|
||||
|
||||
bool allocatesMemoryInArena() const override { return true; }
|
||||
|
||||
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
|
||||
{
|
||||
const auto * trace = typeid_cast<const ColumnArray *>(columns[0]);
|
||||
|
||||
const auto & trace_offsets = trace->getOffsets();
|
||||
const auto & trace_values = typeid_cast<const ColumnUInt64 *>(&trace->getData())->getData();
|
||||
UInt64 prev_offset = 0;
|
||||
if (row_num)
|
||||
prev_offset = trace_offsets[row_num - 1];
|
||||
UInt64 trace_size = trace_offsets[row_num] - prev_offset;
|
||||
|
||||
Int64 allocated = 1;
|
||||
if (argument_types.size() >= 2)
|
||||
{
|
||||
const auto & sizes = typeid_cast<const ColumnInt64 *>(columns[1])->getData();
|
||||
allocated = sizes[row_num];
|
||||
}
|
||||
|
||||
UInt64 ptr = 0;
|
||||
if (argument_types.size() >= 3)
|
||||
{
|
||||
const auto & ptrs = typeid_cast<const ColumnUInt64 *>(columns[2])->getData();
|
||||
ptr = ptrs[row_num];
|
||||
}
|
||||
|
||||
this->data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
|
||||
}
|
||||
|
||||
void addManyDefaults(
|
||||
AggregateDataPtr __restrict /*place*/,
|
||||
const IColumn ** /*columns*/,
|
||||
size_t /*length*/,
|
||||
Arena * /*arena*/) const override
|
||||
{
|
||||
}
|
||||
|
||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
|
||||
{
|
||||
this->data(place).merge(this->data(rhs), arena);
|
||||
}
|
||||
|
||||
void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional<size_t> /* version */) const override
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Serialization for function flameGraph is not implemented.");
|
||||
}
|
||||
|
||||
void deserialize(AggregateDataPtr __restrict, ReadBuffer &, std::optional<size_t> /* version */, Arena *) const override
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Deserialization for function flameGraph is not implemented.");
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||
{
|
||||
auto & array = assert_cast<ColumnArray &>(to);
|
||||
auto & str = assert_cast<ColumnString &>(array.getData());
|
||||
|
||||
this->data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);
|
||||
|
||||
array.getOffsets().push_back(str.size());
|
||||
}
|
||||
};
|
||||
|
||||
static void check(const std::string & name, const DataTypes & argument_types, const Array & params)
|
||||
{
|
||||
assertNoParameters(name, params);
|
||||
|
||||
if (argument_types.empty() || argument_types.size() > 3)
|
||||
throw Exception(
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||
"Aggregate function {} requires 1 to 3 arguments : trace, [size = 1], [ptr = 0]",
|
||||
name);
|
||||
|
||||
auto ptr_type = std::make_shared<DataTypeUInt64>();
|
||||
auto trace_type = std::make_shared<DataTypeArray>(ptr_type);
|
||||
auto size_type = std::make_shared<DataTypeInt64>();
|
||||
|
||||
if (!argument_types[0]->equals(*trace_type))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"First argument (trace) for function {} must be Array(UInt64), but it has type {}",
|
||||
name, argument_types[0]->getName());
|
||||
|
||||
if (argument_types.size() >= 2 && !argument_types[1]->equals(*size_type))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Second argument (size) for function {} must be Int64, but it has type {}",
|
||||
name, argument_types[1]->getName());
|
||||
|
||||
if (argument_types.size() >= 3 && !argument_types[2]->equals(*ptr_type))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Third argument (ptr) for function {} must be UInt64, but it has type {}",
|
||||
name, argument_types[2]->getName());
|
||||
}
|
||||
|
||||
AggregateFunctionPtr createAggregateFunctionFlameGraph(const std::string & name, const DataTypes & argument_types, const Array & params, const Settings * settings)
|
||||
{
|
||||
if (!settings->allow_introspection_functions)
|
||||
throw Exception(ErrorCodes::FUNCTION_NOT_ALLOWED,
|
||||
"Introspection functions are disabled, because setting 'allow_introspection_functions' is set to 0");
|
||||
|
||||
check(name, argument_types, params);
|
||||
return std::make_shared<AggregateFunctionFlameGraph>(argument_types);
|
||||
}
|
||||
|
||||
void registerAggregateFunctionFlameGraph(AggregateFunctionFactory & factory)
|
||||
{
|
||||
AggregateFunctionProperties properties = { .returns_default_when_only_null = true, .is_order_dependent = true };
|
||||
|
||||
factory.registerFunction("flameGraph", { createAggregateFunctionFlameGraph, properties });
|
||||
}
|
||||
|
||||
}
|
@ -40,15 +40,10 @@ inline AggregateFunctionPtr createAggregateFunctionGroupArrayImpl(const DataType
|
||||
return std::make_shared<GroupArrayGeneralImpl<GroupArrayNodeString, Trait>>(argument_type, parameters, std::forward<TArgs>(args)...);
|
||||
|
||||
return std::make_shared<GroupArrayGeneralImpl<GroupArrayNodeGeneral, Trait>>(argument_type, parameters, std::forward<TArgs>(args)...);
|
||||
|
||||
// A linked list implementation doesn't show a noticeable performance improvement
|
||||
// if (which.idx == TypeIndex::String)
|
||||
// return std::make_shared<GroupArrayGeneralListImpl<GroupArrayListNodeString, Trait>>(argument_type, std::forward<TArgs>(args)...);
|
||||
|
||||
// return std::make_shared<GroupArrayGeneralListImpl<GroupArrayListNodeGeneral, Trait>>(argument_type, std::forward<TArgs>(args)...);
|
||||
}
|
||||
|
||||
|
||||
template <bool Tlast>
|
||||
AggregateFunctionPtr createAggregateFunctionGroupArray(
|
||||
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
|
||||
{
|
||||
@ -79,9 +74,13 @@ AggregateFunctionPtr createAggregateFunctionGroupArray(
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
|
||||
if (!limit_size)
|
||||
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait<false, Sampler::NONE>>(argument_types[0], parameters);
|
||||
{
|
||||
if (Tlast)
|
||||
throw Exception("groupArrayLast make sense only with max_elems (groupArrayLast(max_elems)())", ErrorCodes::BAD_ARGUMENTS);
|
||||
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait</* Thas_limit= */ false, Tlast, /* Tsampler= */ Sampler::NONE>>(argument_types[0], parameters);
|
||||
}
|
||||
else
|
||||
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait<true, Sampler::NONE>>(argument_types[0], parameters, max_elems);
|
||||
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait</* Thas_limit= */ true, Tlast, /* Tsampler= */ Sampler::NONE>>(argument_types[0], parameters, max_elems);
|
||||
}
|
||||
|
||||
AggregateFunctionPtr createAggregateFunctionGroupArraySample(
|
||||
@ -114,7 +113,7 @@ AggregateFunctionPtr createAggregateFunctionGroupArraySample(
|
||||
else
|
||||
seed = thread_local_rng();
|
||||
|
||||
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait<true, Sampler::RNG>>(argument_types[0], parameters, max_elems, seed);
|
||||
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait</* Thas_limit= */ true, /* Tlast= */ false, /* Tsampler= */ Sampler::RNG>>(argument_types[0], parameters, max_elems, seed);
|
||||
}
|
||||
|
||||
}
|
||||
@ -124,8 +123,9 @@ void registerAggregateFunctionGroupArray(AggregateFunctionFactory & factory)
|
||||
{
|
||||
AggregateFunctionProperties properties = { .returns_default_when_only_null = false, .is_order_dependent = true };
|
||||
|
||||
factory.registerFunction("groupArray", { createAggregateFunctionGroupArray, properties });
|
||||
factory.registerFunction("groupArray", { createAggregateFunctionGroupArray<false>, properties });
|
||||
factory.registerFunction("groupArraySample", { createAggregateFunctionGroupArraySample, properties });
|
||||
factory.registerFunction("groupArrayLast", { createAggregateFunctionGroupArray<true>, properties });
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -37,24 +37,25 @@ enum class Sampler
|
||||
{
|
||||
NONE,
|
||||
RNG,
|
||||
DETERMINATOR // TODO
|
||||
};
|
||||
|
||||
template <bool Thas_limit, Sampler Tsampler>
|
||||
template <bool Thas_limit, bool Tlast, Sampler Tsampler>
|
||||
struct GroupArrayTrait
|
||||
{
|
||||
static constexpr bool has_limit = Thas_limit;
|
||||
static constexpr bool last = Tlast;
|
||||
static constexpr Sampler sampler = Tsampler;
|
||||
};
|
||||
|
||||
template <typename Trait>
|
||||
static constexpr const char * getNameByTrait()
|
||||
{
|
||||
if (Trait::last)
|
||||
return "groupArrayLast";
|
||||
if (Trait::sampler == Sampler::NONE)
|
||||
return "groupArray";
|
||||
else if (Trait::sampler == Sampler::RNG)
|
||||
return "groupArraySample";
|
||||
// else if (Trait::sampler == Sampler::DETERMINATOR) // TODO
|
||||
|
||||
UNREACHABLE();
|
||||
}
|
||||
@ -100,6 +101,8 @@ struct GroupArrayNumericData<T, false>
|
||||
using Allocator = MixedAlignedArenaAllocator<alignof(T), 4096>;
|
||||
using Array = PODArray<T, 32, Allocator>;
|
||||
|
||||
// For groupArrayLast()
|
||||
size_t total_values = 0;
|
||||
Array value;
|
||||
};
|
||||
|
||||
@ -129,7 +132,7 @@ public:
|
||||
|
||||
String getName() const override { return getNameByTrait<Trait>(); }
|
||||
|
||||
void insert(Data & a, const T & v, Arena * arena) const
|
||||
void insertWithSampler(Data & a, const T & v, Arena * arena) const
|
||||
{
|
||||
++a.total_values;
|
||||
if (a.value.size() < max_elems)
|
||||
@ -151,88 +154,107 @@ public:
|
||||
|
||||
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
|
||||
{
|
||||
const auto & row_value = assert_cast<const ColumnVector<T> &>(*columns[0]).getData()[row_num];
|
||||
auto & cur_elems = this->data(place);
|
||||
|
||||
++cur_elems.total_values;
|
||||
|
||||
if constexpr (Trait::sampler == Sampler::NONE)
|
||||
{
|
||||
if (limit_num_elems && this->data(place).value.size() >= max_elems)
|
||||
if (limit_num_elems && cur_elems.value.size() >= max_elems)
|
||||
{
|
||||
if constexpr (Trait::last)
|
||||
cur_elems.value[(cur_elems.total_values - 1) % max_elems] = row_value;
|
||||
return;
|
||||
}
|
||||
|
||||
this->data(place).value.push_back(assert_cast<const ColumnVector<T> &>(*columns[0]).getData()[row_num], arena);
|
||||
cur_elems.value.push_back(row_value, arena);
|
||||
}
|
||||
|
||||
if constexpr (Trait::sampler == Sampler::RNG)
|
||||
{
|
||||
auto & a = this->data(place);
|
||||
++a.total_values;
|
||||
if (a.value.size() < max_elems)
|
||||
a.value.push_back(assert_cast<const ColumnVector<T> &>(*columns[0]).getData()[row_num], arena);
|
||||
if (cur_elems.value.size() < max_elems)
|
||||
cur_elems.value.push_back(row_value, arena);
|
||||
else
|
||||
{
|
||||
UInt64 rnd = a.genRandom(a.total_values);
|
||||
UInt64 rnd = cur_elems.genRandom(cur_elems.total_values);
|
||||
if (rnd < max_elems)
|
||||
a.value[rnd] = assert_cast<const ColumnVector<T> &>(*columns[0]).getData()[row_num];
|
||||
cur_elems.value[rnd] = row_value;
|
||||
}
|
||||
}
|
||||
// TODO
|
||||
// if constexpr (Trait::sampler == Sampler::DETERMINATOR)
|
||||
}
|
||||
|
||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
|
||||
{
|
||||
if constexpr (Trait::sampler == Sampler::NONE)
|
||||
{
|
||||
auto & cur_elems = this->data(place);
|
||||
auto & rhs_elems = this->data(rhs);
|
||||
auto & cur_elems = this->data(place);
|
||||
auto & rhs_elems = this->data(rhs);
|
||||
|
||||
if (!limit_num_elems)
|
||||
if (rhs_elems.value.empty())
|
||||
return;
|
||||
|
||||
if constexpr (Trait::last)
|
||||
mergeNoSamplerLast(cur_elems, rhs_elems, arena);
|
||||
else if constexpr (Trait::sampler == Sampler::NONE)
|
||||
mergeNoSampler(cur_elems, rhs_elems, arena);
|
||||
else if constexpr (Trait::sampler == Sampler::RNG)
|
||||
mergeWithRNGSampler(cur_elems, rhs_elems, arena);
|
||||
}
|
||||
|
||||
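/// Merge for groupArrayLast: rhs values are written into the destination as a ring
/// buffer of at most max_elems elements, so older values are overwritten.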
void mergeNoSamplerLast(Data & cur_elems, const Data & rhs_elems, Arena * arena) const
|
||||
{
|
||||
UInt64 new_elements = std::min(static_cast<size_t>(max_elems), cur_elems.value.size() + rhs_elems.value.size());
|
||||
cur_elems.value.resize_exact(new_elements, arena);
|
||||
for (auto & value : rhs_elems.value)
|
||||
{
|
||||
cur_elems.value[cur_elems.total_values % max_elems] = value;
|
||||
++cur_elems.total_values;
|
||||
}
|
||||
assert(rhs_elems.total_values >= rhs_elems.value.size());
|
||||
cur_elems.total_values += rhs_elems.total_values - rhs_elems.value.size();
|
||||
}
|
||||
|
||||
void mergeNoSampler(Data & cur_elems, const Data & rhs_elems, Arena * arena) const
|
||||
{
|
||||
if (!limit_num_elems)
|
||||
{
|
||||
if (rhs_elems.value.size())
|
||||
cur_elems.value.insertByOffsets(rhs_elems.value, 0, rhs_elems.value.size(), arena);
|
||||
}
|
||||
else
|
||||
{
|
||||
UInt64 elems_to_insert = std::min(static_cast<size_t>(max_elems) - cur_elems.value.size(), rhs_elems.value.size());
|
||||
if (elems_to_insert)
|
||||
cur_elems.value.insertByOffsets(rhs_elems.value, 0, elems_to_insert, arena);
|
||||
}
|
||||
}
|
||||
|
||||
void mergeWithRNGSampler(Data & cur_elems, const Data & rhs_elems, Arena * arena) const
|
||||
{
|
||||
if (rhs_elems.total_values <= max_elems)
|
||||
{
|
||||
for (size_t i = 0; i < rhs_elems.value.size(); ++i)
|
||||
insertWithSampler(cur_elems, rhs_elems.value[i], arena);
|
||||
}
|
||||
else if (cur_elems.total_values <= max_elems)
|
||||
{
|
||||
decltype(cur_elems.value) from;
|
||||
from.swap(cur_elems.value, arena);
|
||||
cur_elems.value.assign(rhs_elems.value.begin(), rhs_elems.value.end(), arena);
|
||||
cur_elems.total_values = rhs_elems.total_values;
|
||||
for (size_t i = 0; i < from.size(); ++i)
|
||||
insertWithSampler(cur_elems, from[i], arena);
|
||||
}
|
||||
else
|
||||
{
|
||||
cur_elems.randomShuffle();
|
||||
cur_elems.total_values += rhs_elems.total_values;
|
||||
for (size_t i = 0; i < max_elems; ++i)
|
||||
{
|
||||
if (rhs_elems.value.size())
|
||||
cur_elems.value.insertByOffsets(rhs_elems.value, 0, rhs_elems.value.size(), arena);
|
||||
}
|
||||
else
|
||||
{
|
||||
UInt64 elems_to_insert = std::min(static_cast<size_t>(max_elems) - cur_elems.value.size(), rhs_elems.value.size());
|
||||
if (elems_to_insert)
|
||||
cur_elems.value.insertByOffsets(rhs_elems.value, 0, elems_to_insert, arena);
|
||||
UInt64 rnd = cur_elems.genRandom(cur_elems.total_values);
|
||||
if (rnd < rhs_elems.total_values)
|
||||
cur_elems.value[i] = rhs_elems.value[i];
|
||||
}
|
||||
}
|
||||
|
||||
if constexpr (Trait::sampler == Sampler::RNG)
|
||||
{
|
||||
if (this->data(rhs).value.empty()) /// rhs state is empty
|
||||
return;
|
||||
|
||||
auto & a = this->data(place);
|
||||
auto & b = this->data(rhs);
|
||||
|
||||
if (b.total_values <= max_elems)
|
||||
{
|
||||
for (size_t i = 0; i < b.value.size(); ++i)
|
||||
insert(a, b.value[i], arena);
|
||||
}
|
||||
else if (a.total_values <= max_elems)
|
||||
{
|
||||
decltype(a.value) from;
|
||||
from.swap(a.value, arena);
|
||||
a.value.assign(b.value.begin(), b.value.end(), arena);
|
||||
a.total_values = b.total_values;
|
||||
for (size_t i = 0; i < from.size(); ++i)
|
||||
insert(a, from[i], arena);
|
||||
}
|
||||
else
|
||||
{
|
||||
a.randomShuffle();
|
||||
a.total_values += b.total_values;
|
||||
for (size_t i = 0; i < max_elems; ++i)
|
||||
{
|
||||
UInt64 rnd = a.genRandom(a.total_values);
|
||||
if (rnd < b.total_values)
|
||||
a.value[i] = b.value[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO
|
||||
// if constexpr (Trait::sampler == Sampler::DETERMINATOR)
|
||||
}
|
||||
|
||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
|
||||
@ -242,6 +264,9 @@ public:
|
||||
writeVarUInt(size, buf);
|
||||
buf.write(reinterpret_cast<const char *>(value.data()), size * sizeof(value[0]));
|
||||
|
||||
if constexpr (Trait::last)
|
||||
DB::writeIntBinary<size_t>(this->data(place).total_values, buf);
|
||||
|
||||
if constexpr (Trait::sampler == Sampler::RNG)
|
||||
{
|
||||
DB::writeIntBinary<size_t>(this->data(place).total_values, buf);
|
||||
@ -249,9 +274,6 @@ public:
|
||||
rng_buf << this->data(place).rng;
|
||||
DB::writeStringBinary(rng_buf.str(), buf);
|
||||
}
|
||||
|
||||
// TODO
|
||||
// if constexpr (Trait::sampler == Sampler::DETERMINATOR)
|
||||
}
|
||||
|
||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
|
||||
@ -267,9 +289,12 @@ public:
|
||||
|
||||
auto & value = this->data(place).value;
|
||||
|
||||
value.resize(size, arena);
|
||||
value.resize_exact(size, arena);
|
||||
buf.readStrict(reinterpret_cast<char *>(value.data()), size * sizeof(value[0]));
|
||||
|
||||
if constexpr (Trait::last)
|
||||
DB::readIntBinary<size_t>(this->data(place).total_values, buf);
|
||||
|
||||
if constexpr (Trait::sampler == Sampler::RNG)
|
||||
{
|
||||
DB::readIntBinary<size_t>(this->data(place).total_values, buf);
|
||||
@ -278,9 +303,6 @@ public:
|
||||
ReadBufferFromString rng_buf(rng_string);
|
||||
rng_buf >> this->data(place).rng;
|
||||
}
|
||||
|
||||
// TODO
|
||||
// if constexpr (Trait::sampler == Sampler::DETERMINATOR)
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||
@ -396,6 +418,8 @@ struct GroupArrayGeneralData<Node, false>
|
||||
using Allocator = MixedAlignedArenaAllocator<alignof(Node *), 4096>;
|
||||
using Array = PODArray<Node *, 32, Allocator>;
|
||||
|
||||
// For groupArrayLast()
|
||||
size_t total_values = 0;
|
||||
Array value;
|
||||
};
|
||||
|
||||
@ -430,7 +454,7 @@ public:
|
||||
|
||||
String getName() const override { return getNameByTrait<Trait>(); }
|
||||
|
||||
void insert(Data & a, const Node * v, Arena * arena) const
|
||||
void insertWithSampler(Data & a, const Node * v, Arena * arena) const
|
||||
{
|
||||
++a.total_values;
|
||||
if (a.value.size() < max_elems)
|
||||
@ -452,96 +476,110 @@ public:
|
||||
|
||||
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
|
||||
{
|
||||
auto & cur_elems = data(place);
|
||||
|
||||
++cur_elems.total_values;
|
||||
|
||||
if constexpr (Trait::sampler == Sampler::NONE)
|
||||
{
|
||||
if (limit_num_elems && data(place).value.size() >= max_elems)
|
||||
if (limit_num_elems && cur_elems.value.size() >= max_elems)
|
||||
{
|
||||
if (Trait::last)
|
||||
{
|
||||
Node * node = Node::allocate(*columns[0], row_num, arena);
|
||||
cur_elems.value[(cur_elems.total_values - 1) % max_elems] = node;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
Node * node = Node::allocate(*columns[0], row_num, arena);
|
||||
data(place).value.push_back(node, arena);
|
||||
cur_elems.value.push_back(node, arena);
|
||||
}
|
||||
|
||||
if constexpr (Trait::sampler == Sampler::RNG)
|
||||
{
|
||||
auto & a = data(place);
|
||||
++a.total_values;
|
||||
if (a.value.size() < max_elems)
|
||||
a.value.push_back(Node::allocate(*columns[0], row_num, arena), arena);
|
||||
if (cur_elems.value.size() < max_elems)
|
||||
cur_elems.value.push_back(Node::allocate(*columns[0], row_num, arena), arena);
|
||||
else
|
||||
{
|
||||
UInt64 rnd = a.genRandom(a.total_values);
|
||||
UInt64 rnd = cur_elems.genRandom(cur_elems.total_values);
|
||||
if (rnd < max_elems)
|
||||
a.value[rnd] = Node::allocate(*columns[0], row_num, arena);
|
||||
cur_elems.value[rnd] = Node::allocate(*columns[0], row_num, arena);
|
||||
}
|
||||
}
|
||||
// TODO
|
||||
// if constexpr (Trait::sampler == Sampler::DETERMINATOR)
|
||||
}
|
||||
|
||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
|
||||
{
|
||||
if constexpr (Trait::sampler == Sampler::NONE)
|
||||
mergeNoSampler(place, rhs, arena);
|
||||
else if constexpr (Trait::sampler == Sampler::RNG)
|
||||
mergeWithRNGSampler(place, rhs, arena);
|
||||
// TODO
|
||||
// else if constexpr (Trait::sampler == Sampler::DETERMINATOR)
|
||||
}
|
||||
auto & cur_elems = data(place);
|
||||
auto & rhs_elems = data(rhs);
|
||||
|
||||
void ALWAYS_INLINE mergeNoSampler(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const
|
||||
{
|
||||
if (data(rhs).value.empty()) /// rhs state is empty
|
||||
if (rhs_elems.value.empty())
|
||||
return;
|
||||
|
||||
if constexpr (Trait::last)
|
||||
mergeNoSamplerLast(cur_elems, rhs_elems, arena);
|
||||
else if constexpr (Trait::sampler == Sampler::NONE)
|
||||
mergeNoSampler(cur_elems, rhs_elems, arena);
|
||||
else if constexpr (Trait::sampler == Sampler::RNG)
|
||||
mergeWithRNGSampler(cur_elems, rhs_elems, arena);
|
||||
}
|
||||
|
||||
void ALWAYS_INLINE mergeNoSamplerLast(Data & cur_elems, const Data & rhs_elems, Arena * arena) const
|
||||
{
|
||||
UInt64 new_elements = std::min(static_cast<size_t>(max_elems), cur_elems.value.size() + rhs_elems.value.size());
|
||||
cur_elems.value.resize_exact(new_elements, arena);
|
||||
for (auto & value : rhs_elems.value)
|
||||
{
|
||||
cur_elems.value[cur_elems.total_values % max_elems] = value->clone(arena);
|
||||
++cur_elems.total_values;
|
||||
}
|
||||
assert(rhs_elems.total_values >= rhs_elems.value.size());
|
||||
cur_elems.total_values += rhs_elems.total_values - rhs_elems.value.size();
|
||||
}
|
||||
|
||||
void ALWAYS_INLINE mergeNoSampler(Data & cur_elems, const Data & rhs_elems, Arena * arena) const
|
||||
{
|
||||
UInt64 new_elems;
|
||||
if (limit_num_elems)
|
||||
{
|
||||
if (data(place).value.size() >= max_elems)
|
||||
if (cur_elems.value.size() >= max_elems)
|
||||
return;
|
||||
|
||||
new_elems = std::min(data(rhs).value.size(), static_cast<size_t>(max_elems) - data(place).value.size());
|
||||
new_elems = std::min(rhs_elems.value.size(), static_cast<size_t>(max_elems) - cur_elems.value.size());
|
||||
}
|
||||
else
|
||||
new_elems = data(rhs).value.size();
|
||||
new_elems = rhs_elems.value.size();
|
||||
|
||||
auto & a = data(place).value;
|
||||
auto & b = data(rhs).value;
|
||||
for (UInt64 i = 0; i < new_elems; ++i)
|
||||
a.push_back(b[i]->clone(arena), arena);
|
||||
cur_elems.value.push_back(rhs_elems.value[i]->clone(arena), arena);
|
||||
}
|
||||
|
||||
void ALWAYS_INLINE mergeWithRNGSampler(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const
|
||||
void ALWAYS_INLINE mergeWithRNGSampler(Data & cur_elems, const Data & rhs_elems, Arena * arena) const
|
||||
{
|
||||
if (data(rhs).value.empty()) /// rhs state is empty
|
||||
return;
|
||||
|
||||
auto & a = data(place);
|
||||
auto & b = data(rhs);
|
||||
|
||||
if (b.total_values <= max_elems)
|
||||
if (rhs_elems.total_values <= max_elems)
|
||||
{
|
||||
for (size_t i = 0; i < b.value.size(); ++i)
|
||||
insert(a, b.value[i], arena);
|
||||
for (size_t i = 0; i < rhs_elems.value.size(); ++i)
|
||||
insertWithSampler(cur_elems, rhs_elems.value[i], arena);
|
||||
}
|
||||
else if (a.total_values <= max_elems)
|
||||
else if (cur_elems.total_values <= max_elems)
|
||||
{
|
||||
decltype(a.value) from;
|
||||
from.swap(a.value, arena);
|
||||
for (auto & node : b.value)
|
||||
a.value.push_back(node->clone(arena), arena);
|
||||
a.total_values = b.total_values;
|
||||
decltype(cur_elems.value) from;
|
||||
from.swap(cur_elems.value, arena);
|
||||
for (auto & node : rhs_elems.value)
|
||||
cur_elems.value.push_back(node->clone(arena), arena);
|
||||
cur_elems.total_values = rhs_elems.total_values;
|
||||
for (size_t i = 0; i < from.size(); ++i)
|
||||
insert(a, from[i], arena);
|
||||
insertWithSampler(cur_elems, from[i], arena);
|
||||
}
|
||||
else
|
||||
{
|
||||
a.randomShuffle();
|
||||
a.total_values += b.total_values;
|
||||
cur_elems.randomShuffle();
|
||||
cur_elems.total_values += rhs_elems.total_values;
|
||||
for (size_t i = 0; i < max_elems; ++i)
|
||||
{
|
||||
UInt64 rnd = a.genRandom(a.total_values);
|
||||
if (rnd < b.total_values)
|
||||
a.value[i] = b.value[i]->clone(arena);
|
||||
UInt64 rnd = cur_elems.genRandom(cur_elems.total_values);
|
||||
if (rnd < rhs_elems.total_values)
|
||||
cur_elems.value[i] = rhs_elems.value[i]->clone(arena);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -554,6 +592,9 @@ public:
|
||||
for (auto & node : value)
|
||||
node->write(buf);
|
||||
|
||||
if constexpr (Trait::last)
|
||||
DB::writeIntBinary<size_t>(data(place).total_values, buf);
|
||||
|
||||
if constexpr (Trait::sampler == Sampler::RNG)
|
||||
{
|
||||
DB::writeIntBinary<size_t>(data(place).total_values, buf);
|
||||
@ -561,9 +602,6 @@ public:
|
||||
rng_buf << data(place).rng;
|
||||
DB::writeStringBinary(rng_buf.str(), buf);
|
||||
}
|
||||
|
||||
// TODO
|
||||
// if constexpr (Trait::sampler == Sampler::DETERMINATOR)
|
||||
}
|
||||
|
||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
|
||||
@ -582,10 +620,13 @@ public:
|
||||
|
||||
auto & value = data(place).value;
|
||||
|
||||
value.resize(elems, arena);
|
||||
value.resize_exact(elems, arena);
|
||||
for (UInt64 i = 0; i < elems; ++i)
|
||||
value[i] = Node::read(buf, arena);
|
||||
|
||||
if constexpr (Trait::last)
|
||||
DB::readIntBinary<size_t>(data(place).total_values, buf);
|
||||
|
||||
if constexpr (Trait::sampler == Sampler::RNG)
|
||||
{
|
||||
DB::readIntBinary<size_t>(data(place).total_values, buf);
|
||||
@ -594,9 +635,6 @@ public:
|
||||
ReadBufferFromString rng_buf(rng_string);
|
||||
rng_buf >> data(place).rng;
|
||||
}
|
||||
|
||||
// TODO
|
||||
// if constexpr (Trait::sampler == Sampler::DETERMINATOR)
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||
@ -622,222 +660,6 @@ public:
|
||||
bool allocatesMemoryInArena() const override { return true; }
|
||||
};
|
||||
|
||||
template <typename Node>
|
||||
struct GroupArrayListNodeBase : public GroupArrayNodeBase<Node>
|
||||
{
|
||||
Node * next;
|
||||
};
|
||||
|
||||
struct GroupArrayListNodeString : public GroupArrayListNodeBase<GroupArrayListNodeString>
|
||||
{
|
||||
using Node = GroupArrayListNodeString;
|
||||
|
||||
/// Create node from string
|
||||
static Node * allocate(const IColumn & column, size_t row_num, Arena * arena)
|
||||
{
|
||||
StringRef string = assert_cast<const ColumnString &>(column).getDataAt(row_num);
|
||||
|
||||
Node * node = reinterpret_cast<Node *>(arena->alignedAlloc(sizeof(Node) + string.size, alignof(Node)));
|
||||
node->next = nullptr;
|
||||
node->size = string.size;
|
||||
memcpy(node->data(), string.data, string.size);
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
void insertInto(IColumn & column) { assert_cast<ColumnString &>(column).insertData(data(), size); }
|
||||
};
|
||||
|
||||
struct GroupArrayListNodeGeneral : public GroupArrayListNodeBase<GroupArrayListNodeGeneral>
|
||||
{
|
||||
using Node = GroupArrayListNodeGeneral;
|
||||
|
||||
static Node * allocate(const IColumn & column, size_t row_num, Arena * arena)
|
||||
{
|
||||
const char * begin = arena->alignedAlloc(sizeof(Node), alignof(Node));
|
||||
StringRef value = column.serializeValueIntoArena(row_num, *arena, begin);
|
||||
|
||||
Node * node = reinterpret_cast<Node *>(const_cast<char *>(begin));
|
||||
node->next = nullptr;
|
||||
node->size = value.size;
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
void insertInto(IColumn & column) { column.deserializeAndInsertFromArena(data()); }
|
||||
};
|
||||
|
||||
|
||||
template <typename Node>
|
||||
struct GroupArrayGeneralListData
|
||||
{
|
||||
UInt64 elems = 0;
|
||||
Node * first = nullptr;
|
||||
Node * last = nullptr;
|
||||
};
|
||||
|
||||
|
||||
/// Implementation of groupArray for String or any ComplexObject via linked list
|
||||
/// It has poor performance in case of many small objects
|
||||
template <typename Node, typename Trait>
|
||||
class GroupArrayGeneralListImpl final
|
||||
: public IAggregateFunctionDataHelper<GroupArrayGeneralListData<Node>, GroupArrayGeneralListImpl<Node, Trait>>
|
||||
{
|
||||
static constexpr bool limit_num_elems = Trait::has_limit;
|
||||
using Data = GroupArrayGeneralListData<Node>;
|
||||
static Data & data(AggregateDataPtr __restrict place) { return *reinterpret_cast<Data *>(place); }
|
||||
static const Data & data(ConstAggregateDataPtr __restrict place) { return *reinterpret_cast<const Data *>(place); }
|
||||
|
||||
DataTypePtr & data_type;
|
||||
UInt64 max_elems;
|
||||
|
||||
public:
|
||||
GroupArrayGeneralListImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
|
||||
: IAggregateFunctionDataHelper<GroupArrayGeneralListData<Node>, GroupArrayGeneralListImpl<Node, Trait>>({data_type_}, parameters_, std::make_shared<DataTypeArray>(data_type_))
|
||||
, data_type(this->argument_types[0])
|
||||
, max_elems(max_elems_)
|
||||
{
|
||||
}
|
||||
|
||||
String getName() const override { return getNameByTrait<Trait>(); }
|
||||
|
||||
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
|
||||
{
|
||||
if (limit_num_elems && data(place).elems >= max_elems)
|
||||
return;
|
||||
|
||||
Node * node = Node::allocate(*columns[0], row_num, arena);
|
||||
|
||||
if (unlikely(!data(place).first))
|
||||
{
|
||||
data(place).first = node;
|
||||
data(place).last = node;
|
||||
}
|
||||
else
|
||||
{
|
||||
data(place).last->next = node;
|
||||
data(place).last = node;
|
||||
}
|
||||
|
||||
++data(place).elems;
|
||||
}
|
||||
|
||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
|
||||
{
|
||||
/// Sadly, rhs's Arena could already be destroyed
|
||||
|
||||
if (!data(rhs).first) /// rhs state is empty
|
||||
return;
|
||||
|
||||
UInt64 new_elems;
|
||||
UInt64 cur_elems = data(place).elems;
|
||||
if (limit_num_elems)
|
||||
{
|
||||
if (data(place).elems >= max_elems)
|
||||
return;
|
||||
|
||||
new_elems = std::min(data(place).elems + data(rhs).elems, static_cast<size_t>(max_elems));
|
||||
}
|
||||
else
|
||||
{
|
||||
new_elems = data(place).elems + data(rhs).elems;
|
||||
}
|
||||
|
||||
Node * p_rhs = data(rhs).first;
|
||||
Node * p_lhs;
|
||||
|
||||
if (unlikely(!data(place).last)) /// lhs state is empty
|
||||
{
|
||||
p_lhs = p_rhs->clone(arena);
|
||||
data(place).first = data(place).last = p_lhs;
|
||||
p_rhs = p_rhs->next;
|
||||
++cur_elems;
|
||||
}
|
||||
else
|
||||
{
|
||||
p_lhs = data(place).last;
|
||||
}
|
||||
|
||||
for (; cur_elems < new_elems; ++cur_elems)
|
||||
{
|
||||
Node * p_new = p_rhs->clone(arena);
|
||||
p_lhs->next = p_new;
|
||||
p_rhs = p_rhs->next;
|
||||
p_lhs = p_new;
|
||||
}
|
||||
|
||||
p_lhs->next = nullptr;
|
||||
data(place).last = p_lhs;
|
||||
data(place).elems = new_elems;
|
||||
}
|
||||
|
||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
|
||||
{
|
||||
writeVarUInt(data(place).elems, buf);
|
||||
|
||||
Node * p = data(place).first;
|
||||
while (p)
|
||||
{
|
||||
p->write(buf);
|
||||
p = p->next;
|
||||
}
|
||||
}
|
||||
|
||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
|
||||
{
|
||||
UInt64 elems;
|
||||
readVarUInt(elems, buf);
|
||||
data(place).elems = elems;
|
||||
|
||||
if (unlikely(elems == 0))
|
||||
return;
|
||||
|
||||
if (unlikely(elems > AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE))
|
||||
throw Exception("Too large array size", ErrorCodes::TOO_LARGE_ARRAY_SIZE);
|
||||
|
||||
if (limit_num_elems && unlikely(elems > max_elems))
|
||||
throw Exception("Too large array size, it should not exceed " + toString(max_elems), ErrorCodes::TOO_LARGE_ARRAY_SIZE);
|
||||
|
||||
Node * prev = Node::read(buf, arena);
|
||||
data(place).first = prev;
|
||||
|
||||
for (UInt64 i = 1; i < elems; ++i)
|
||||
{
|
||||
Node * cur = Node::read(buf, arena);
|
||||
prev->next = cur;
|
||||
prev = cur;
|
||||
}
|
||||
|
||||
prev->next = nullptr;
|
||||
data(place).last = prev;
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||
{
|
||||
auto & column_array = assert_cast<ColumnArray &>(to);
|
||||
|
||||
auto & offsets = column_array.getOffsets();
|
||||
offsets.push_back(offsets.back() + data(place).elems);
|
||||
|
||||
auto & column_data = column_array.getData();
|
||||
|
||||
if (std::is_same_v<Node, GroupArrayListNodeString>)
|
||||
{
|
||||
auto & string_offsets = assert_cast<ColumnString &>(column_data).getOffsets();
|
||||
string_offsets.reserve(string_offsets.size() + data(place).elems);
|
||||
}
|
||||
|
||||
Node * p = data(place).first;
|
||||
while (p)
|
||||
{
|
||||
p->insertInto(column_data);
|
||||
p = p->next;
|
||||
}
|
||||
}
|
||||
|
||||
bool allocatesMemoryInArena() const override { return true; }
|
||||
};
|
||||
|
||||
#undef AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE
|
||||
|
||||
}
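
To make the linked-list layout above easier to follow, here is a minimal, self-contained sketch of the same append pattern: nodes are allocated from a bump arena and chained through first/last pointers, so add() is O(1) and nothing is freed individually. The Arena and node types here are simplified stand-ins, not the ClickHouse classes.

#include <cstddef>
#include <cstring>
#include <memory>
#include <string>
#include <vector>

/// Simplified stand-in for an arena: memory is only released when the arena dies.
struct SketchArena
{
    std::vector<std::unique_ptr<char[]>> chunks;
    char * alloc(size_t size)
    {
        chunks.emplace_back(new char[size]);
        return chunks.back().get();
    }
};

struct StringNode
{
    StringNode * next = nullptr;
    size_t size = 0;
    char * data() { return reinterpret_cast<char *>(this) + sizeof(StringNode); }

    static StringNode * allocate(const std::string & s, SketchArena & arena)
    {
        auto * node = reinterpret_cast<StringNode *>(arena.alloc(sizeof(StringNode) + s.size()));
        node->next = nullptr;
        node->size = s.size();
        memcpy(node->data(), s.data(), s.size());
        return node;
    }
};

struct ListState
{
    size_t elems = 0;
    StringNode * first = nullptr;
    StringNode * last = nullptr;

    void add(const std::string & s, SketchArena & arena)
    {
        StringNode * node = StringNode::allocate(s, arena);
        if (!first)
            first = last = node;   /// empty state: the node becomes both head and tail
        else
        {
            last->next = node;     /// O(1) append at the tail
            last = node;
        }
        ++elems;
    }
};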
|
||||
|
@ -468,6 +468,7 @@ public:
|
||||
bool keepKey(const T &) const { return true; }
|
||||
};
|
||||
|
||||
|
||||
template <typename T, bool overflow, bool tuple_argument>
|
||||
class AggregateFunctionSumMapFiltered final :
|
||||
public AggregateFunctionMapBase<T,
|
||||
@ -502,6 +503,8 @@ public:
|
||||
"Aggregate function {} requires an Array as a parameter",
|
||||
getNameImpl());
|
||||
|
||||
this->parameters = params_;
|
||||
|
||||
keys_to_keep.reserve(keys_to_keep_values.size());
|
||||
|
||||
for (const Field & f : keys_to_keep_values)
|
||||
@ -509,7 +512,16 @@ public:
|
||||
}
|
||||
|
||||
static String getNameImpl()
|
||||
{ return overflow ? "sumMapFilteredWithOverflow" : "sumMapFiltered"; }
|
||||
{
|
||||
if constexpr (overflow)
|
||||
{
|
||||
return "sumMapFilteredWithOverflow";
|
||||
}
|
||||
else
|
||||
{
|
||||
return "sumMapFiltered";
|
||||
}
|
||||
}
|
||||
|
||||
bool keepKey(const T & key) const { return keys_to_keep.count(key); }
|
||||
};
|
||||
|
@ -73,7 +73,6 @@ void registerAggregateFunctionExponentialMovingAverage(AggregateFunctionFactory
|
||||
void registerAggregateFunctionSparkbar(AggregateFunctionFactory &);
|
||||
void registerAggregateFunctionIntervalLengthSum(AggregateFunctionFactory &);
|
||||
void registerAggregateFunctionAnalysisOfVariance(AggregateFunctionFactory &);
|
||||
void registerAggregateFunctionFlameGraph(AggregateFunctionFactory &);
|
||||
|
||||
class AggregateFunctionCombinatorFactory;
|
||||
void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory &);
|
||||
@ -159,7 +158,6 @@ void registerAggregateFunctions()
|
||||
registerAggregateFunctionExponentialMovingAverage(factory);
|
||||
registerAggregateFunctionSparkbar(factory);
|
||||
registerAggregateFunctionAnalysisOfVariance(factory);
|
||||
registerAggregateFunctionFlameGraph(factory);
|
||||
|
||||
registerWindowFunctions(factory);
|
||||
}
|
||||
|
@ -4307,6 +4307,8 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
|
||||
}
|
||||
}
|
||||
|
||||
const auto & settings = scope.context->getSettingsRef();
|
||||
|
||||
if (function_node.isWindowFunction())
|
||||
{
|
||||
if (!AggregateFunctionFactory::instance().isAggregateFunctionName(function_name))
|
||||
@ -4324,8 +4326,12 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
|
||||
"Window function '{}' does not support lambda arguments",
|
||||
function_name);
|
||||
|
||||
bool need_add_or_null = settings.aggregate_functions_null_for_empty && !function_name.ends_with("OrNull");
|
||||
|
||||
AggregateFunctionProperties properties;
|
||||
auto aggregate_function = AggregateFunctionFactory::instance().get(function_name, argument_types, parameters, properties);
|
||||
auto aggregate_function = need_add_or_null
|
||||
? AggregateFunctionFactory::instance().get(function_name + "OrNull", argument_types, parameters, properties)
|
||||
: AggregateFunctionFactory::instance().get(function_name, argument_types, parameters, properties);
|
||||
|
||||
function_node.resolveAsWindowFunction(aggregate_function);
|
||||
|
||||
@ -4384,8 +4390,12 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
|
||||
"Aggregate function '{}' does not support lambda arguments",
|
||||
function_name);
|
||||
|
||||
bool need_add_or_null = settings.aggregate_functions_null_for_empty && !function_name.ends_with("OrNull");
|
||||
|
||||
AggregateFunctionProperties properties;
|
||||
auto aggregate_function = AggregateFunctionFactory::instance().get(function_name, argument_types, parameters, properties);
|
||||
auto aggregate_function = need_add_or_null
|
||||
? AggregateFunctionFactory::instance().get(function_name + "OrNull", argument_types, parameters, properties)
|
||||
: AggregateFunctionFactory::instance().get(function_name, argument_types, parameters, properties);
|
||||
function_node.resolveAsAggregateFunction(aggregate_function);
|
||||
return result_projection_names;
|
||||
}
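
The aggregate_functions_null_for_empty handling above boils down to a name rewrite before the factory lookup. A hedged sketch of that decision, with a hypothetical nameForLookup() helper standing in for the factory call:

#include <string>
#include <string_view>

/// Hypothetical helper: returns the function name to look up in the factory.
/// If the setting is enabled and the name is not already an *OrNull variant,
/// the "OrNull" combinator is appended so empty groups produce NULL instead of a default value.
std::string nameForLookup(std::string_view function_name, bool aggregate_functions_null_for_empty)
{
    bool need_add_or_null = aggregate_functions_null_for_empty && !function_name.ends_with("OrNull");
    std::string name(function_name);
    return need_add_or_null ? name + "OrNull" : name;
}

/// nameForLookup("sum", true)       -> "sumOrNull"
/// nameForLookup("sumOrNull", true) -> "sumOrNull"  (no double suffix)
/// nameForLookup("sum", false)      -> "sum"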
|
||||
|
@ -188,15 +188,6 @@ std::optional<FileInfo> BackupCoordinationLocal::getFileInfo(const SizeAndChecks
|
||||
return it->second;
|
||||
}
|
||||
|
||||
std::optional<SizeAndChecksum> BackupCoordinationLocal::getFileSizeAndChecksum(const String & file_name) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto it = file_names.find(file_name);
|
||||
if (it == file_names.end())
|
||||
return std::nullopt;
|
||||
return it->second;
|
||||
}
|
||||
|
||||
String BackupCoordinationLocal::getNextArchiveSuffix()
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
|
@ -48,7 +48,6 @@ public:
|
||||
|
||||
std::optional<FileInfo> getFileInfo(const String & file_name) const override;
|
||||
std::optional<FileInfo> getFileInfo(const SizeAndChecksum & size_and_checksum) const override;
|
||||
std::optional<SizeAndChecksum> getFileSizeAndChecksum(const String & file_name) const override;
|
||||
|
||||
String getNextArchiveSuffix() override;
|
||||
Strings getAllArchiveSuffixes() const override;
|
||||
|
@ -575,15 +575,6 @@ std::optional<FileInfo> BackupCoordinationRemote::getFileInfo(const SizeAndCheck
|
||||
return deserializeFileInfo(file_info_str);
|
||||
}
|
||||
|
||||
std::optional<SizeAndChecksum> BackupCoordinationRemote::getFileSizeAndChecksum(const String & file_name) const
|
||||
{
|
||||
auto zk = getZooKeeper();
|
||||
String size_and_checksum;
|
||||
if (!zk->tryGet(zookeeper_path + "/file_names/" + escapeForFileName(file_name), size_and_checksum))
|
||||
return std::nullopt;
|
||||
return deserializeSizeAndChecksum(size_and_checksum);
|
||||
}
|
||||
|
||||
String BackupCoordinationRemote::getNextArchiveSuffix()
|
||||
{
|
||||
auto zk = getZooKeeper();
|
||||
|
@ -51,7 +51,6 @@ public:
|
||||
bool hasFiles(const String & directory) const override;
|
||||
std::optional<FileInfo> getFileInfo(const String & file_name) const override;
|
||||
std::optional<FileInfo> getFileInfo(const SizeAndChecksum & size_and_checksum) const override;
|
||||
std::optional<SizeAndChecksum> getFileSizeAndChecksum(const String & file_name) const override;
|
||||
|
||||
String getNextArchiveSuffix() override;
|
||||
Strings getAllArchiveSuffixes() const override;
|
||||
|
@ -34,6 +34,7 @@ public:
|
||||
bool is_internal_backup = false;
|
||||
std::shared_ptr<IBackupCoordination> backup_coordination;
|
||||
std::optional<UUID> backup_uuid;
|
||||
bool deduplicate_files = true;
|
||||
};
|
||||
|
||||
static BackupFactory & instance();
|
||||
|
@ -80,6 +80,12 @@ namespace
|
||||
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
|
||||
return outcome.GetResult().GetContents();
|
||||
}
|
||||
|
||||
bool isNotFoundError(Aws::S3::S3Errors error)
|
||||
{
|
||||
return error == Aws::S3::S3Errors::RESOURCE_NOT_FOUND
|
||||
|| error == Aws::S3::S3Errors::NO_SUCH_KEY;
|
||||
}
|
||||
}
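
The intent of isNotFoundError() is to make removals idempotent: deleting an object that is already gone should not fail the backup. A minimal sketch of that pattern, with a hypothetical Outcome type instead of the AWS SDK classes:

#include <stdexcept>

enum class SketchS3Error { None, ResourceNotFound, NoSuchKey, AccessDenied };

struct SketchOutcome
{
    bool success = false;
    SketchS3Error error = SketchS3Error::None;
};

bool isNotFoundErrorSketch(SketchS3Error error)
{
    return error == SketchS3Error::ResourceNotFound || error == SketchS3Error::NoSuchKey;
}

/// Treat "already deleted" the same as success; anything else is a real failure.
void checkRemoveOutcome(const SketchOutcome & outcome)
{
    if (!outcome.success && !isNotFoundErrorSketch(outcome.error))
        throw std::runtime_error("S3 delete failed");
}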
|
||||
|
||||
|
||||
@ -370,7 +376,7 @@ void BackupWriterS3::removeFile(const String & file_name)
|
||||
request.SetBucket(s3_uri.bucket);
|
||||
request.SetKey(fs::path(s3_uri.key) / file_name);
|
||||
auto outcome = client->DeleteObject(request);
|
||||
if (!outcome.IsSuccess())
|
||||
if (!outcome.IsSuccess() && !isNotFoundError(outcome.GetError().GetErrorType()))
|
||||
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
|
||||
}
|
||||
|
||||
@ -428,7 +434,7 @@ void BackupWriterS3::removeFilesBatch(const Strings & file_names)
|
||||
request.SetDelete(delkeys);
|
||||
|
||||
auto outcome = client->DeleteObjects(request);
|
||||
if (!outcome.IsSuccess())
|
||||
if (!outcome.IsSuccess() && !isNotFoundError(outcome.GetError().GetErrorType()))
|
||||
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
|
||||
}
|
||||
}
|
||||
|
@ -167,17 +167,19 @@ BackupImpl::BackupImpl(
|
||||
const ContextPtr & context_,
|
||||
bool is_internal_backup_,
|
||||
const std::shared_ptr<IBackupCoordination> & coordination_,
|
||||
const std::optional<UUID> & backup_uuid_)
|
||||
const std::optional<UUID> & backup_uuid_,
|
||||
bool deduplicate_files_)
|
||||
: backup_name_for_logging(backup_name_for_logging_)
|
||||
, archive_params(archive_params_)
|
||||
, use_archives(!archive_params.archive_name.empty())
|
||||
, open_mode(OpenMode::WRITE)
|
||||
, writer(std::move(writer_))
|
||||
, is_internal_backup(is_internal_backup_)
|
||||
, coordination(coordination_ ? coordination_ : std::make_shared<BackupCoordinationLocal>())
|
||||
, coordination(coordination_)
|
||||
, uuid(backup_uuid_)
|
||||
, version(CURRENT_BACKUP_VERSION)
|
||||
, base_backup_info(base_backup_info_)
|
||||
, deduplicate_files(deduplicate_files_)
|
||||
, log(&Poco::Logger::get("BackupImpl"))
|
||||
{
|
||||
open(context_);
|
||||
@ -287,6 +289,7 @@ void BackupImpl::writeBackupMetadata()
|
||||
|
||||
Poco::AutoPtr<Poco::Util::XMLConfiguration> config{new Poco::Util::XMLConfiguration()};
|
||||
config->setInt("version", CURRENT_BACKUP_VERSION);
|
||||
config->setBool("deduplicate_files", deduplicate_files);
|
||||
config->setString("timestamp", toString(LocalDateTime{timestamp}));
|
||||
config->setString("uuid", toString(*uuid));
|
||||
|
||||
@ -759,7 +762,7 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
|
||||
};
|
||||
|
||||
/// Empty file, nothing to back up
|
||||
if (info.size == 0)
|
||||
if (info.size == 0 && deduplicate_files)
|
||||
{
|
||||
coordination->addFileInfo(info);
|
||||
return;
|
||||
@ -828,7 +831,7 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
|
||||
}
|
||||
|
||||
/// Maybe we have a copy of this file in the backup already.
|
||||
if (coordination->getFileInfo(std::pair{info.size, info.checksum}))
|
||||
if (coordination->getFileInfo(std::pair{info.size, info.checksum}) && deduplicate_files)
|
||||
{
|
||||
LOG_TRACE(log, "File {} already exist in current backup, adding reference", adjusted_path);
|
||||
coordination->addFileInfo(info);
|
||||
@ -861,7 +864,7 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
|
||||
|
||||
bool is_data_file_required;
|
||||
coordination->addFileInfo(info, is_data_file_required);
|
||||
if (!is_data_file_required)
|
||||
if (!is_data_file_required && deduplicate_files)
|
||||
{
|
||||
LOG_TRACE(log, "File {} doesn't exist in current backup, but we have file with same size and checksum", adjusted_path);
|
||||
return; /// We copy data only if it's a new combination of size & checksum.
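
The deduplication above keys files by a (size, checksum) pair: if the coordination layer already knows such a pair, only a reference is recorded and the data itself is skipped. A simplified sketch of that lookup, using std::map instead of the coordination interface; the names are illustrative only:

#include <cstdint>
#include <map>
#include <string>
#include <utility>

using SizeAndChecksumSketch = std::pair<uint64_t, uint64_t>;

struct BackupIndexSketch
{
    /// Maps (size, checksum) -> path of the data file already stored in the backup.
    std::map<SizeAndChecksumSketch, std::string> stored;

    /// Returns true if the data must actually be copied; false if a reference is enough.
    bool needToCopy(const SizeAndChecksumSketch & key, const std::string & data_path, bool deduplicate_files)
    {
        if (!deduplicate_files)
            return true;                                  /// deduplication disabled: always copy
        return stored.emplace(key, data_path).second;     /// copy only for a new (size, checksum) pair
    }
};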
|
||||
|
@ -47,9 +47,10 @@ public:
|
||||
const std::optional<BackupInfo> & base_backup_info_,
|
||||
std::shared_ptr<IBackupWriter> writer_,
|
||||
const ContextPtr & context_,
|
||||
bool is_internal_backup_ = false,
|
||||
const std::shared_ptr<IBackupCoordination> & coordination_ = {},
|
||||
const std::optional<UUID> & backup_uuid_ = {});
|
||||
bool is_internal_backup_,
|
||||
const std::shared_ptr<IBackupCoordination> & coordination_,
|
||||
const std::optional<UUID> & backup_uuid_,
|
||||
bool deduplicate_files_);
|
||||
|
||||
~BackupImpl() override;
|
||||
|
||||
@ -132,6 +133,7 @@ private:
|
||||
String lock_file_name;
|
||||
std::atomic<size_t> num_files_written = 0;
|
||||
bool writing_finalized = false;
|
||||
bool deduplicate_files = true;
|
||||
const Poco::Logger * log;
|
||||
};
|
||||
|
||||
|
@ -65,6 +65,7 @@ namespace
|
||||
M(String, password) \
|
||||
M(Bool, structure_only) \
|
||||
M(Bool, async) \
|
||||
M(Bool, deduplicate_files) \
|
||||
M(UInt64, shard_num) \
|
||||
M(UInt64, replica_num) \
|
||||
M(Bool, internal) \
|
||||
|
@ -32,6 +32,9 @@ struct BackupSettings
|
||||
/// Whether the BACKUP command must return immediately without waiting until the backup has completed.
|
||||
bool async = false;
|
||||
|
||||
/// Whether the BACKUP will omit similar files (within one backup only).
|
||||
bool deduplicate_files = true;
|
||||
|
||||
/// 1-based shard index to store in the backup. 0 means all shards.
|
||||
/// Can only be used with BACKUP ON CLUSTER.
|
||||
size_t shard_num = 0;
|
||||
|
@ -286,6 +286,7 @@ void BackupsWorker::doBackup(
|
||||
backup_create_params.is_internal_backup = backup_settings.internal;
|
||||
backup_create_params.backup_coordination = backup_coordination;
|
||||
backup_create_params.backup_uuid = backup_settings.backup_uuid;
|
||||
backup_create_params.deduplicate_files = backup_settings.deduplicate_files;
|
||||
BackupMutablePtr backup = BackupFactory::instance().createBackup(backup_create_params);
|
||||
|
||||
/// Write the backup.
|
||||
|
@ -108,7 +108,6 @@ public:
|
||||
|
||||
virtual std::optional<FileInfo> getFileInfo(const String & file_name) const = 0;
|
||||
virtual std::optional<FileInfo> getFileInfo(const SizeAndChecksum & size_and_checksum) const = 0;
|
||||
virtual std::optional<SizeAndChecksum> getFileSizeAndChecksum(const String & file_name) const = 0;
|
||||
|
||||
/// Generates a new archive suffix, e.g. "001", "002", "003", ...
|
||||
virtual String getNextArchiveSuffix() = 0;
|
||||
|
@ -116,7 +116,16 @@ void registerBackupEngineS3(BackupFactory & factory)
|
||||
else
|
||||
{
|
||||
auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.context);
|
||||
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, writer, params.context, params.is_internal_backup, params.backup_coordination, params.backup_uuid);
|
||||
return std::make_unique<BackupImpl>(
|
||||
backup_name_for_logging,
|
||||
archive_params,
|
||||
params.base_backup_info,
|
||||
writer,
|
||||
params.context,
|
||||
params.is_internal_backup,
|
||||
params.backup_coordination,
|
||||
params.backup_uuid,
|
||||
params.deduplicate_files);
|
||||
}
|
||||
#else
|
||||
throw Exception("S3 support is disabled", ErrorCodes::SUPPORT_IS_DISABLED);
|
||||
|
@ -181,7 +181,16 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
|
||||
writer = std::make_shared<BackupWriterFile>(path);
|
||||
else
|
||||
writer = std::make_shared<BackupWriterDisk>(disk, path);
|
||||
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, writer, params.context, params.is_internal_backup, params.backup_coordination, params.backup_uuid);
|
||||
return std::make_unique<BackupImpl>(
|
||||
backup_name_for_logging,
|
||||
archive_params,
|
||||
params.base_backup_info,
|
||||
writer,
|
||||
params.context,
|
||||
params.is_internal_backup,
|
||||
params.backup_coordination,
|
||||
params.backup_uuid,
|
||||
params.deduplicate_files);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -61,14 +61,8 @@ namespace
|
||||
Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, Poco::Logger * log)
|
||||
{
|
||||
auto address = makeSocketAddress(host, port, log);
|
||||
#if POCO_VERSION < 0x01080000
|
||||
socket.bind(address, /* reuseAddress = */ true);
|
||||
#else
|
||||
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ false);
|
||||
#endif
|
||||
|
||||
socket.listen(/* backlog = */ 64);
|
||||
|
||||
return address;
|
||||
}
|
||||
}
|
||||
|
@ -2,11 +2,10 @@
|
||||
|
||||
#include <IO/ReadWriteBufferFromHTTP.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <Poco/Net/HTTPRequest.h>
|
||||
#include <Poco/URI.h>
|
||||
#include <filesystem>
|
||||
#include <thread>
|
||||
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
namespace DB
|
||||
@ -97,9 +96,13 @@ std::unique_ptr<ShellCommand> IBridgeHelper::startBridgeCommand()
|
||||
|
||||
LOG_TRACE(getLog(), "Starting {}", serviceAlias());
|
||||
|
||||
/// We will terminate it with the KILL signal instead of the TERM signal,
|
||||
/// because it's more reliable for arbitrary third-party ODBC drivers.
|
||||
/// The drivers can spawn threads, install their own signal handlers... we don't care.
|
||||
|
||||
ShellCommand::Config command_config(path.string());
|
||||
command_config.arguments = cmd_args;
|
||||
command_config.terminate_in_destructor_strategy = ShellCommand::DestructorStrategy(true);
|
||||
command_config.terminate_in_destructor_strategy = ShellCommand::DestructorStrategy(true, SIGKILL);
|
||||
|
||||
return ShellCommand::executeDirect(command_config);
|
||||
}
|
||||
|
@ -332,6 +332,12 @@ macro (dbms_target_link_libraries)
|
||||
endforeach ()
|
||||
endmacro ()
|
||||
|
||||
macro (dbms_target_include_directories)
|
||||
foreach (module ${all_modules})
|
||||
target_include_directories (${module} ${ARGN})
|
||||
endforeach ()
|
||||
endmacro ()
|
||||
|
||||
dbms_target_include_directories (PUBLIC "${ClickHouse_SOURCE_DIR}/src" "${ClickHouse_BINARY_DIR}/src")
|
||||
target_include_directories (clickhouse_common_io PUBLIC "${ClickHouse_SOURCE_DIR}/src" "${ClickHouse_BINARY_DIR}/src")
|
||||
|
||||
@ -390,6 +396,7 @@ if (TARGET ch_contrib::cpuid)
|
||||
endif()
|
||||
|
||||
dbms_target_link_libraries(PUBLIC ch_contrib::abseil_swiss_tables)
|
||||
target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::abseil_swiss_tables)
|
||||
|
||||
# Make dbms depend on roaring instead of clickhouse_common_io so that roaring itself can depend on clickhouse_common_io
|
||||
# That way we can redirect malloc/free functions, avoiding circular dependencies
|
||||
@ -589,6 +596,11 @@ if (TARGET ch_contrib::annoy)
|
||||
dbms_target_link_libraries(PUBLIC ch_contrib::annoy)
|
||||
endif()
|
||||
|
||||
if (TARGET ch_rust::skim)
|
||||
# Add only -I, library is needed only for clickhouse-client/clickhouse-local
|
||||
dbms_target_include_directories(PRIVATE $<TARGET_PROPERTY:ch_rust::skim,INTERFACE_INCLUDE_DIRECTORIES>)
|
||||
endif()
|
||||
|
||||
include ("${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake")
|
||||
|
||||
if (ENABLE_TESTS)
|
||||
|
@ -1,3 +1,3 @@
|
||||
if (ENABLE_EXAMPLES)
|
||||
add_subdirectory(examples)
|
||||
endif()
|
||||
endif()
|
||||
|
@ -1,42 +1,30 @@
|
||||
#include <Client/ClientBase.h>
|
||||
#include <Client/LineReader.h>
|
||||
#include <Client/ClientBaseHelpers.h>
|
||||
#include <Client/TestHint.h>
|
||||
#include <Client/InternalTextLogs.h>
|
||||
#include <Client/TestTags.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <filesystem>
|
||||
#include <map>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include <base/argsToConfig.h>
|
||||
#include <base/safeExit.h>
|
||||
#include <Core/Block.h>
|
||||
#include <Core/Protocol.h>
|
||||
#include <Common/DateLUT.h>
|
||||
#include <Common/MemoryTracker.h>
|
||||
#include <base/argsToConfig.h>
|
||||
#include <base/LineReader.h>
|
||||
#include <Common/scope_guard_safe.h>
|
||||
#include <base/safeExit.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/getNumberOfPhysicalCPUCores.h>
|
||||
#include <Common/tests/gtest_global_context.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <Core/Block.h>
|
||||
#include <Core/Protocol.h>
|
||||
#include <Formats/FormatFactory.h>
|
||||
#include <Access/AccessControl.h>
|
||||
|
||||
#include "config_version.h"
|
||||
|
||||
#include <Common/UTF8Helpers.h>
|
||||
#include <Common/TerminalSize.h>
|
||||
#include <Common/clearPasswordFromCommandLine.h>
|
||||
#include <Common/StringUtils/StringUtils.h>
|
||||
#include <Common/filesystemHelpers.h>
|
||||
#include <Common/NetException.h>
|
||||
#include <Storages/ColumnsDescription.h>
|
||||
|
||||
#include <Client/ClientBaseHelpers.h>
|
||||
#include <Client/TestHint.h>
|
||||
#include "TestTags.h"
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <Formats/FormatFactory.h>
|
||||
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <Parsers/ParserQuery.h>
|
||||
@ -53,26 +41,36 @@
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
#include <Parsers/ASTColumnDeclaration.h>
|
||||
#include <Parsers/Kusto/ParserKQLStatement.h>
|
||||
|
||||
#include <Processors/Formats/Impl/NullFormat.h>
|
||||
#include <Processors/Formats/IInputFormat.h>
|
||||
#include <Processors/Formats/IOutputFormat.h>
|
||||
#include <QueryPipeline/QueryPipeline.h>
|
||||
#include <QueryPipeline/QueryPipelineBuilder.h>
|
||||
#include <Processors/QueryPlan/QueryPlan.h>
|
||||
#include <Processors/QueryPlan/BuildQueryPipelineSettings.h>
|
||||
#include <Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h>
|
||||
#include <Processors/Executors/PullingAsyncPipelineExecutor.h>
|
||||
#include <Processors/Transforms/AddingDefaultsTransform.h>
|
||||
#include <QueryPipeline/QueryPipeline.h>
|
||||
#include <QueryPipeline/QueryPipelineBuilder.h>
|
||||
#include <Interpreters/ReplaceQueryParameterVisitor.h>
|
||||
#include <Interpreters/ProfileEventsExt.h>
|
||||
#include <IO/WriteBufferFromOStream.h>
|
||||
#include <IO/WriteBufferFromFileDescriptor.h>
|
||||
#include <IO/CompressionMethod.h>
|
||||
#include <Client/InternalTextLogs.h>
|
||||
#include <IO/ForkWriteBuffer.h>
|
||||
#include <Parsers/Kusto/ParserKQLStatement.h>
|
||||
|
||||
#include <Access/AccessControl.h>
|
||||
#include <Storages/ColumnsDescription.h>
|
||||
|
||||
#include <boost/algorithm/string/case_conv.hpp>
|
||||
#include <iostream>
|
||||
#include <filesystem>
|
||||
#include <map>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "config_version.h"
|
||||
#include "config.h"
|
||||
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
@ -1036,7 +1034,13 @@ void ClientBase::onEndOfStream()
|
||||
progress_indication.clearProgressOutput(*tty_buf);
|
||||
|
||||
if (output_format)
|
||||
{
|
||||
/// Do our best to estimate the start of the query so the output format matches the one reported by the server
|
||||
bool is_running = false;
|
||||
output_format->setStartTime(
|
||||
clock_gettime_ns(CLOCK_MONOTONIC) - static_cast<UInt64>(progress_indication.elapsedSeconds() * 1000000000), is_running);
|
||||
output_format->finalize();
|
||||
}
|
||||
|
||||
resetOutput();
|
||||
|
||||
@ -1112,6 +1116,8 @@ void ClientBase::onProfileEvents(Block & block)
|
||||
/// Flush all buffers.
|
||||
void ClientBase::resetOutput()
|
||||
{
|
||||
if (output_format)
|
||||
output_format->finalize();
|
||||
output_format.reset();
|
||||
logs_out_stream.reset();
|
||||
|
||||
|
@ -4,7 +4,7 @@
|
||||
#include "config.h"
|
||||
|
||||
#if USE_REPLXX
|
||||
# include <base/ReplxxLineReader.h>
|
||||
# include <Client/ReplxxLineReader.h>
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -9,7 +9,6 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/isLocalAddress.h>
|
||||
#include <Common/DNSResolver.h>
|
||||
#include <base/setTerminalEcho.h>
|
||||
#include <base/scope_guard.h>
|
||||
|
||||
#include <readpassphrase/readpassphrase.h>
|
||||
|
@ -1,4 +1,4 @@
|
||||
#include <base/LineReader.h>
|
||||
#include <Client/LineReader.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <string_view>
|
||||
@ -65,6 +65,9 @@ void addNewWords(Words & to, const Words & from, Compare comp)
|
||||
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
replxx::Replxx::completions_t LineReader::Suggest::getCompletions(const String & prefix, size_t prefix_length)
|
||||
{
|
||||
std::string_view last_word;
|
||||
@ -202,3 +205,5 @@ LineReader::InputStatus LineReader::readOneLine(const String & prompt)
|
||||
trim(input);
|
||||
return INPUT_LINE;
|
||||
}
|
||||
|
||||
}
|
@ -9,6 +9,9 @@
|
||||
#include <base/types.h>
|
||||
#include <base/defines.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class LineReader
|
||||
{
|
||||
public:
|
||||
@ -68,3 +71,5 @@ protected:
|
||||
virtual InputStatus readOneLine(const String & prompt);
|
||||
virtual void addToHistory(const String &) {}
|
||||
};
|
||||
|
||||
}
|
@ -327,9 +327,7 @@ void QueryFuzzer::fuzzOrderByList(IAST * ast)
|
||||
// Add element
|
||||
if (fuzz_rand() % 50 == 0)
|
||||
{
|
||||
auto pos = list->children.empty()
|
||||
? list->children.begin()
|
||||
: list->children.begin() + fuzz_rand() % list->children.size();
|
||||
auto * pos = list->children.empty() ? list->children.begin() : list->children.begin() + fuzz_rand() % list->children.size();
|
||||
auto col = getRandomColumnLike();
|
||||
if (col)
|
||||
{
|
||||
@ -373,9 +371,7 @@ void QueryFuzzer::fuzzColumnLikeExpressionList(IAST * ast)
|
||||
// Add element
|
||||
if (fuzz_rand() % 50 == 0)
|
||||
{
|
||||
auto pos = impl->children.empty()
|
||||
? impl->children.begin()
|
||||
: impl->children.begin() + fuzz_rand() % impl->children.size();
|
||||
auto * pos = impl->children.empty() ? impl->children.begin() : impl->children.begin() + fuzz_rand() % impl->children.size();
|
||||
auto col = getRandomColumnLike();
|
||||
if (col)
|
||||
impl->children.insert(pos, col);
|
||||
|
@ -1,6 +1,10 @@
|
||||
#include <base/ReplxxLineReader.h>
|
||||
#include <Client/ReplxxLineReader.h>
|
||||
#include <base/errnoToString.h>
|
||||
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <IO/copyData.h>
|
||||
|
||||
#include <stdexcept>
|
||||
#include <chrono>
|
||||
#include <cerrno>
|
||||
@ -108,13 +112,11 @@ void writeRetry(int fd, const std::string & data)
|
||||
}
|
||||
std::string readFile(const std::string & path)
|
||||
{
|
||||
std::ifstream t(path);
|
||||
std::string str;
|
||||
t.seekg(0, std::ios::end);
|
||||
str.reserve(t.tellg());
|
||||
t.seekg(0, std::ios::beg);
|
||||
str.assign((std::istreambuf_iterator<char>(t)), std::istreambuf_iterator<char>());
|
||||
return str;
|
||||
std::string out;
|
||||
DB::WriteBufferFromString out_buffer(out);
|
||||
DB::ReadBufferFromFile in_buffer(path);
|
||||
DB::copyData(in_buffer, out_buffer);
|
||||
return out;
|
||||
}
|
||||
|
||||
/// Simple wrapper for temporary files.
|
||||
@ -269,6 +271,9 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx)
|
||||
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
static bool replxx_last_is_delimiter = false;
|
||||
void ReplxxLineReader::setLastIsDelimiter(bool flag)
|
||||
{
|
||||
@ -508,3 +513,5 @@ void ReplxxLineReader::enableBracketedPaste()
|
||||
bracketed_paste_enabled = true;
|
||||
rx.enable_bracketed_paste();
|
||||
}
|
||||
|
||||
}
|
@ -1,9 +1,11 @@
|
||||
#pragma once
|
||||
|
||||
#include "LineReader.h"
|
||||
|
||||
#include <replxx.hxx>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class ReplxxLineReader : public LineReader
|
||||
{
|
||||
public:
|
||||
@ -36,3 +38,5 @@ private:
|
||||
|
||||
std::string editor;
|
||||
};
|
||||
|
||||
}
|
@ -5,8 +5,8 @@
|
||||
#include <Client/Connection.h>
|
||||
#include <Client/IServerConnection.h>
|
||||
#include <Client/LocalConnection.h>
|
||||
#include <Client/LineReader.h>
|
||||
#include <IO/ConnectionTimeouts.h>
|
||||
#include <base/LineReader.h>
|
||||
#include <thread>
|
||||
|
||||
|
||||
|
@ -1,16 +0,0 @@
|
||||
#pragma once
|
||||
#include <cstddef>
|
||||
|
||||
/// This is a structure which is returned by MemoryTracker.
|
||||
/// Methods onAlloc/onFree should be called after actual memory allocation if it succeeds.
|
||||
/// For now, it will only collect allocation trace with sample_probability.
|
||||
struct AllocationTrace
|
||||
{
|
||||
AllocationTrace() = default;
|
||||
explicit AllocationTrace(double sample_probability_);
|
||||
|
||||
void onAlloc(void * ptr, size_t size) const;
|
||||
void onFree(void * ptr, size_t size) const;
|
||||
|
||||
double sample_probability = 0;
|
||||
};
|
@ -92,10 +92,8 @@ public:
|
||||
void * alloc(size_t size, size_t alignment = 0)
|
||||
{
|
||||
checkSize(size);
|
||||
auto trace = CurrentMemoryTracker::alloc(size);
|
||||
void * ptr = allocNoTrack(size, alignment);
|
||||
trace.onAlloc(ptr, size);
|
||||
return ptr;
|
||||
CurrentMemoryTracker::alloc(size);
|
||||
return allocNoTrack(size, alignment);
|
||||
}
|
||||
|
||||
/// Free memory range.
|
||||
@ -105,8 +103,7 @@ public:
|
||||
{
|
||||
checkSize(size);
|
||||
freeNoTrack(buf, size);
|
||||
auto trace = CurrentMemoryTracker::free(size);
|
||||
trace.onFree(buf, size);
|
||||
CurrentMemoryTracker::free(size);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -132,16 +129,13 @@ public:
|
||||
&& alignment <= MALLOC_MIN_ALIGNMENT)
|
||||
{
|
||||
/// Resize malloc'd memory region with no special alignment requirement.
|
||||
auto trace = CurrentMemoryTracker::realloc(old_size, new_size);
|
||||
trace.onFree(buf, old_size);
|
||||
CurrentMemoryTracker::realloc(old_size, new_size);
|
||||
|
||||
void * new_buf = ::realloc(buf, new_size);
|
||||
if (nullptr == new_buf)
|
||||
DB::throwFromErrno(fmt::format("Allocator: Cannot realloc from {} to {}.", ReadableSize(old_size), ReadableSize(new_size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
|
||||
|
||||
buf = new_buf;
|
||||
trace.onAlloc(buf, new_size);
|
||||
|
||||
if constexpr (clear_memory)
|
||||
if (new_size > old_size)
|
||||
memset(reinterpret_cast<char *>(buf) + old_size, 0, new_size - old_size);
|
||||
@ -149,8 +143,7 @@ public:
|
||||
else if (old_size >= MMAP_THRESHOLD && new_size >= MMAP_THRESHOLD)
|
||||
{
|
||||
/// Resize mmap'd memory region.
|
||||
auto trace = CurrentMemoryTracker::realloc(old_size, new_size);
|
||||
trace.onFree(buf, old_size);
|
||||
CurrentMemoryTracker::realloc(old_size, new_size);
|
||||
|
||||
// On Apple and FreeBSD, a self-implemented mremap is used (common/mremap.h)
|
||||
buf = clickhouse_mremap(buf, old_size, new_size, MREMAP_MAYMOVE,
|
||||
@ -159,17 +152,14 @@ public:
|
||||
DB::throwFromErrno(fmt::format("Allocator: Cannot mremap memory chunk from {} to {}.",
|
||||
ReadableSize(old_size), ReadableSize(new_size)), DB::ErrorCodes::CANNOT_MREMAP);
|
||||
|
||||
trace.onAlloc(buf, new_size);
|
||||
/// No need for zero-fill, because mmap guarantees it.
|
||||
}
|
||||
else if (new_size < MMAP_THRESHOLD)
|
||||
{
|
||||
/// Small allocs that require a copy. Assume there's enough memory in the system. Call CurrentMemoryTracker once.
|
||||
auto trace = CurrentMemoryTracker::realloc(old_size, new_size);
|
||||
trace.onFree(buf, old_size);
|
||||
CurrentMemoryTracker::realloc(old_size, new_size);
|
||||
|
||||
void * new_buf = allocNoTrack(new_size, alignment);
|
||||
trace.onAlloc(new_buf, new_size);
|
||||
memcpy(new_buf, buf, std::min(old_size, new_size));
|
||||
freeNoTrack(buf, old_size);
|
||||
buf = new_buf;
|
||||
|
@ -30,24 +30,21 @@ struct AllocatorWithMemoryTracking
|
||||
throw std::bad_alloc();
|
||||
|
||||
size_t bytes = n * sizeof(T);
|
||||
auto trace = CurrentMemoryTracker::alloc(bytes);
|
||||
CurrentMemoryTracker::alloc(bytes);
|
||||
|
||||
T * p = static_cast<T *>(malloc(bytes));
|
||||
if (!p)
|
||||
throw std::bad_alloc();
|
||||
|
||||
trace.onAlloc(p, bytes);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
void deallocate(T * p, size_t n) noexcept
|
||||
{
|
||||
size_t bytes = n * sizeof(T);
|
||||
|
||||
free(p);
|
||||
auto trace = CurrentMemoryTracker::free(bytes);
|
||||
trace.onFree(p, bytes);
|
||||
|
||||
size_t bytes = n * sizeof(T);
|
||||
CurrentMemoryTracker::free(bytes);
|
||||
}
|
||||
};
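
AllocatorWithMemoryTracking is a standard-library-compatible allocator, so the usual way to use it is as the allocator parameter of a container; every allocation and deallocation then goes through CurrentMemoryTracker. A hedged usage sketch, assuming the full class in the repo also exposes the usual allocator typedefs (value_type etc.), which the excerpt above omits; the alias name is made up here:

#include <cstdint>
#include <vector>

/// Hypothetical alias: a vector whose memory is reported to the current thread's memory tracker.
template <typename T>
using TrackedVector = std::vector<T, AllocatorWithMemoryTracking<T>>;

void exampleUsage()
{
    TrackedVector<uint64_t> counters;
    counters.resize(1024);   /// allocate() reports 1024 * sizeof(uint64_t) bytes before malloc
}                            /// deallocate() reports the same amount back on destruction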
|
||||
|
||||
|
@ -37,7 +37,7 @@ MemoryTracker * getMemoryTracker()
|
||||
|
||||
using DB::current_thread;
|
||||
|
||||
AllocationTrace CurrentMemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded)
|
||||
void CurrentMemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded)
|
||||
{
|
||||
#ifdef MEMORY_TRACKER_DEBUG_CHECKS
|
||||
if (unlikely(memory_tracker_always_throw_logical_error_on_allocation))
|
||||
@ -55,9 +55,8 @@ AllocationTrace CurrentMemoryTracker::allocImpl(Int64 size, bool throw_if_memory
|
||||
|
||||
if (will_be > current_thread->untracked_memory_limit)
|
||||
{
|
||||
auto res = memory_tracker->allocImpl(will_be, throw_if_memory_exceeded);
|
||||
memory_tracker->allocImpl(will_be, throw_if_memory_exceeded);
|
||||
current_thread->untracked_memory = 0;
|
||||
return res;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -69,40 +68,36 @@ AllocationTrace CurrentMemoryTracker::allocImpl(Int64 size, bool throw_if_memory
|
||||
/// total_memory_tracker only, ignore untracked_memory
|
||||
else
|
||||
{
|
||||
return memory_tracker->allocImpl(size, throw_if_memory_exceeded);
|
||||
memory_tracker->allocImpl(size, throw_if_memory_exceeded);
|
||||
}
|
||||
|
||||
return AllocationTrace(memory_tracker->getSampleProbability());
|
||||
}
|
||||
|
||||
return AllocationTrace(0);
|
||||
}
|
||||
|
||||
void CurrentMemoryTracker::check()
|
||||
{
|
||||
if (auto * memory_tracker = getMemoryTracker())
|
||||
std::ignore = memory_tracker->allocImpl(0, true);
|
||||
memory_tracker->allocImpl(0, true);
|
||||
}
|
||||
|
||||
AllocationTrace CurrentMemoryTracker::alloc(Int64 size)
|
||||
void CurrentMemoryTracker::alloc(Int64 size)
|
||||
{
|
||||
bool throw_if_memory_exceeded = true;
|
||||
return allocImpl(size, throw_if_memory_exceeded);
|
||||
allocImpl(size, throw_if_memory_exceeded);
|
||||
}
|
||||
|
||||
AllocationTrace CurrentMemoryTracker::allocNoThrow(Int64 size)
|
||||
void CurrentMemoryTracker::allocNoThrow(Int64 size)
|
||||
{
|
||||
bool throw_if_memory_exceeded = false;
|
||||
return allocImpl(size, throw_if_memory_exceeded);
|
||||
allocImpl(size, throw_if_memory_exceeded);
|
||||
}
|
||||
|
||||
AllocationTrace CurrentMemoryTracker::realloc(Int64 old_size, Int64 new_size)
|
||||
void CurrentMemoryTracker::realloc(Int64 old_size, Int64 new_size)
|
||||
{
|
||||
Int64 addition = new_size - old_size;
|
||||
return addition > 0 ? alloc(addition) : free(-addition);
|
||||
addition > 0 ? alloc(addition) : free(-addition);
|
||||
}
|
||||
|
||||
AllocationTrace CurrentMemoryTracker::free(Int64 size)
|
||||
void CurrentMemoryTracker::free(Int64 size)
|
||||
{
|
||||
if (auto * memory_tracker = getMemoryTracker())
|
||||
{
|
||||
@ -111,20 +106,15 @@ AllocationTrace CurrentMemoryTracker::free(Int64 size)
|
||||
current_thread->untracked_memory -= size;
|
||||
if (current_thread->untracked_memory < -current_thread->untracked_memory_limit)
|
||||
{
|
||||
Int64 untracked_memory = current_thread->untracked_memory;
|
||||
memory_tracker->free(-current_thread->untracked_memory);
|
||||
current_thread->untracked_memory = 0;
|
||||
return memory_tracker->free(-untracked_memory);
|
||||
}
|
||||
}
|
||||
/// total_memory_tracker only, ignore untracked_memory
|
||||
else
|
||||
{
|
||||
return memory_tracker->free(size);
|
||||
memory_tracker->free(size);
|
||||
}
|
||||
|
||||
return AllocationTrace(memory_tracker->getSampleProbability());
|
||||
}
|
||||
|
||||
return AllocationTrace(0);
|
||||
}
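
The untracked_memory logic above batches per-thread accounting: small deltas accumulate in a thread-local counter and are pushed to the shared MemoryTracker only once they exceed a limit, which keeps the hot path free of atomic operations. A simplified sketch of that batching, independent of the ClickHouse classes; the limit value is illustrative:

#include <atomic>
#include <cstdint>

std::atomic<int64_t> total_tracked{0};                 /// shared tracker (one atomic per process)
thread_local int64_t untracked = 0;                    /// cheap per-thread accumulator
constexpr int64_t untracked_limit = 4 * 1024 * 1024;   /// illustrative flush threshold

void onAllocSketch(int64_t size)
{
    untracked += size;
    if (untracked > untracked_limit)
    {
        total_tracked.fetch_add(untracked, std::memory_order_relaxed);   /// flush the batch
        untracked = 0;
    }
}

void onFreeSketch(int64_t size)
{
    untracked -= size;
    if (untracked < -untracked_limit)
    {
        total_tracked.fetch_add(untracked, std::memory_order_relaxed);   /// flush the negative batch
        untracked = 0;
    }
}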
|
||||
|
||||
|
@ -1,20 +1,19 @@
|
||||
#pragma once
|
||||
|
||||
#include <base/types.h>
|
||||
#include <Common/AllocationTrace.h>
|
||||
|
||||
/// Convenience methods that use the current thread's memory_tracker if it is available.
|
||||
struct CurrentMemoryTracker
|
||||
{
|
||||
/// Call the following functions before calling of corresponding operations with memory allocators.
|
||||
[[nodiscard]] static AllocationTrace alloc(Int64 size);
|
||||
[[nodiscard]] static AllocationTrace allocNoThrow(Int64 size);
|
||||
[[nodiscard]] static AllocationTrace realloc(Int64 old_size, Int64 new_size);
|
||||
static void alloc(Int64 size);
|
||||
static void allocNoThrow(Int64 size);
|
||||
static void realloc(Int64 old_size, Int64 new_size);
|
||||
|
||||
/// This function should be called after memory deallocation.
|
||||
[[nodiscard]] static AllocationTrace free(Int64 size);
|
||||
static void free(Int64 size);
|
||||
static void check();
|
||||
|
||||
private:
|
||||
[[nodiscard]] static AllocationTrace allocImpl(Int64 size, bool throw_if_memory_exceeded);
|
||||
static void allocImpl(Int64 size, bool throw_if_memory_exceeded);
|
||||
};
|
||||
|
@ -1204,6 +1204,11 @@ public:
|
||||
return res;
|
||||
}
|
||||
|
||||
template <typename DateOrTime>
|
||||
inline DateTimeComponents toDateTimeComponents(DateOrTime v) const
|
||||
{
|
||||
return toDateTimeComponents(lut[toLUTIndex(v)].date);
|
||||
}
|
||||
|
||||
inline UInt64 toNumYYYYMMDDhhmmss(Time t) const
|
||||
{
|
||||
|
@ -57,8 +57,7 @@ public:
|
||||
}
|
||||
|
||||
/// Do not count guard page in memory usage.
|
||||
auto trace = CurrentMemoryTracker::alloc(num_pages * page_size);
|
||||
trace.onAlloc(vp, num_pages * page_size);
|
||||
CurrentMemoryTracker::alloc(num_pages * page_size);
|
||||
|
||||
boost::context::stack_context sctx;
|
||||
sctx.size = num_bytes;
|
||||
@ -78,7 +77,6 @@ public:
|
||||
::munmap(vp, sctx.size);
|
||||
|
||||
/// Do not count guard page in memory usage.
|
||||
auto trace = CurrentMemoryTracker::free(sctx.size - page_size);
|
||||
trace.onFree(vp, sctx.size - page_size);
|
||||
CurrentMemoryTracker::free(sctx.size - page_size);
|
||||
}
|
||||
};
|
||||
|
@ -31,7 +31,7 @@ private:
|
||||
|
||||
void init(time_t time, const DateLUTImpl & time_zone)
|
||||
{
|
||||
DateLUTImpl::DateTimeComponents components = time_zone.toDateTimeComponents(time);
|
||||
DateLUTImpl::DateTimeComponents components = time_zone.toDateTimeComponents(static_cast<DateLUTImpl::Time>(time));
|
||||
|
||||
m_year = components.date.year;
|
||||
m_month = components.date.month;
|
||||
|
@ -1,7 +1,6 @@
|
||||
#include "MemoryTracker.h"
|
||||
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <Common/SipHash.h>
|
||||
#include <Common/VariableContext.h>
|
||||
#include <Common/TraceSender.h>
|
||||
#include <Common/Exception.h>
|
||||
@ -83,53 +82,6 @@ inline std::string_view toDescription(OvercommitResult result)
|
||||
}
|
||||
}
|
||||
|
||||
bool shouldTrackAllocation(DB::Float64 probability, void * ptr)
|
||||
{
|
||||
return sipHash64(uintptr_t(ptr)) < std::numeric_limits<uint64_t>::max() * probability;
|
||||
}
|
||||
|
||||
AllocationTrace updateAllocationTrace(AllocationTrace trace, const std::optional<double> & sample_probability)
|
||||
{
|
||||
if (unlikely(sample_probability))
|
||||
return AllocationTrace(*sample_probability);
|
||||
|
||||
return trace;
|
||||
}
|
||||
|
||||
AllocationTrace getAllocationTrace(std::optional<double> & sample_probability)
|
||||
{
|
||||
if (unlikely(sample_probability))
|
||||
return AllocationTrace(*sample_probability);
|
||||
|
||||
return AllocationTrace(0);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
AllocationTrace::AllocationTrace(double sample_probability_) : sample_probability(sample_probability_) {}
|
||||
|
||||
void AllocationTrace::onAlloc(void * ptr, size_t size) const
|
||||
{
|
||||
if (likely(sample_probability == 0))
|
||||
return;
|
||||
|
||||
if (sample_probability < 1 && !shouldTrackAllocation(sample_probability, ptr))
|
||||
return;
|
||||
|
||||
MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
|
||||
DB::TraceSender::send(DB::TraceType::MemorySample, StackTrace(), {.size = Int64(size), .ptr = ptr});
|
||||
}
|
||||
|
||||
void AllocationTrace::onFree(void * ptr, size_t size) const
|
||||
{
|
||||
if (likely(sample_probability == 0))
|
||||
return;
|
||||
|
||||
if (sample_probability < 1 && !shouldTrackAllocation(sample_probability, ptr))
|
||||
return;
|
||||
|
||||
MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
|
||||
DB::TraceSender::send(DB::TraceType::MemorySample, StackTrace(), {.size = -Int64(size), .ptr = ptr});
|
||||
}
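
The removed helpers above decide whether to log an allocation by hashing the pointer and comparing the hash against the sampling probability; because the hash of a given pointer is stable, the matching free is sampled (or skipped) consistently with its allocation. A small sketch of that idea, with std::hash standing in for sipHash64:

#include <cstdint>
#include <functional>
#include <limits>

/// Deterministic per-pointer sampling: the same pointer always yields the same decision,
/// so a sampled allocation is always paired with a sampled deallocation.
bool shouldTrackAllocationSketch(double probability, void * ptr)
{
    uint64_t h = std::hash<void *>{}(ptr);
    return static_cast<double>(h) < static_cast<double>(std::numeric_limits<uint64_t>::max()) * probability;
}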
|
||||
|
||||
namespace ProfileEvents
|
||||
@ -183,7 +135,7 @@ void MemoryTracker::logMemoryUsage(Int64 current) const
|
||||
}
|
||||
|
||||
|
||||
AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryTracker * query_tracker)
|
||||
void MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryTracker * query_tracker)
|
||||
{
|
||||
if (size < 0)
|
||||
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Negative size ({}) is passed to MemoryTracker. It is a bug.", size);
|
||||
@ -202,14 +154,9 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
|
||||
|
||||
/// Since the MemoryTrackerBlockerInThread should respect the level, we should go to the next parent.
|
||||
if (auto * loaded_next = parent.load(std::memory_order_relaxed))
|
||||
{
|
||||
MemoryTracker * tracker = level == VariableContext::Process ? this : query_tracker;
|
||||
return updateAllocationTrace(
|
||||
loaded_next->allocImpl(size, throw_if_memory_exceeded, tracker),
|
||||
sample_probability);
|
||||
}
|
||||
|
||||
return getAllocationTrace(sample_probability);
|
||||
loaded_next->allocImpl(size, throw_if_memory_exceeded,
|
||||
level == VariableContext::Process ? this : query_tracker);
|
||||
return;
|
||||
}
|
||||
|
||||
/** Using memory_order_relaxed means that if allocations are done simultaneously,
|
||||
@ -236,6 +183,14 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
|
||||
allocation_traced = true;
|
||||
}
|
||||
|
||||
std::bernoulli_distribution sample(sample_probability);
|
||||
if (unlikely(sample_probability > 0.0 && sample(thread_local_rng)))
|
||||
{
|
||||
MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
|
||||
DB::TraceSender::send(DB::TraceType::MemorySample, StackTrace(), {.size = size});
|
||||
allocation_traced = true;
|
||||
}
|
||||
|
||||
std::bernoulli_distribution fault(fault_probability);
|
||||
if (unlikely(fault_probability > 0.0 && fault(thread_local_rng)))
|
||||
{
|
||||
@ -354,22 +309,16 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
|
||||
}
|
||||
|
||||
if (auto * loaded_next = parent.load(std::memory_order_relaxed))
|
||||
{
|
||||
MemoryTracker * tracker = level == VariableContext::Process ? this : query_tracker;
|
||||
return updateAllocationTrace(
|
||||
loaded_next->allocImpl(size, throw_if_memory_exceeded, tracker),
|
||||
sample_probability);
|
||||
}
|
||||
|
||||
return getAllocationTrace(sample_probability);
|
||||
loaded_next->allocImpl(size, throw_if_memory_exceeded,
|
||||
level == VariableContext::Process ? this : query_tracker);
|
||||
}
|
||||
|
||||
void MemoryTracker::adjustWithUntrackedMemory(Int64 untracked_memory)
|
||||
{
|
||||
if (untracked_memory > 0)
|
||||
std::ignore = allocImpl(untracked_memory, /*throw_if_memory_exceeded*/ false);
|
||||
allocImpl(untracked_memory, /*throw_if_memory_exceeded*/ false);
|
||||
else
|
||||
std::ignore = free(-untracked_memory);
|
||||
free(-untracked_memory);
|
||||
}
|
||||
|
||||
bool MemoryTracker::updatePeak(Int64 will_be, bool log_memory_usage)
|
||||
@ -388,7 +337,8 @@ bool MemoryTracker::updatePeak(Int64 will_be, bool log_memory_usage)
|
||||
return false;
|
||||
}
|
||||
|
||||
AllocationTrace MemoryTracker::free(Int64 size)
|
||||
|
||||
void MemoryTracker::free(Int64 size)
|
||||
{
|
||||
if (MemoryTrackerBlockerInThread::isBlocked(level))
|
||||
{
|
||||
@ -403,9 +353,15 @@ AllocationTrace MemoryTracker::free(Int64 size)
|
||||
|
||||
/// Since the MemoryTrackerBlockerInThread should respect the level, we should go to the next parent.
|
||||
if (auto * loaded_next = parent.load(std::memory_order_relaxed))
|
||||
return updateAllocationTrace(loaded_next->free(size), sample_probability);
|
||||
loaded_next->free(size);
|
||||
return;
|
||||
}
|
||||
|
||||
return getAllocationTrace(sample_probability);
|
||||
std::bernoulli_distribution sample(sample_probability);
|
||||
if (unlikely(sample_probability > 0.0 && sample(thread_local_rng)))
|
||||
{
|
||||
MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
|
||||
DB::TraceSender::send(DB::TraceType::MemorySample, StackTrace(), {.size = -size});
|
||||
}
|
||||
|
||||
Int64 accounted_size = size;
|
||||
@ -433,15 +389,12 @@ AllocationTrace MemoryTracker::free(Int64 size)
|
||||
if (auto * overcommit_tracker_ptr = overcommit_tracker.load(std::memory_order_relaxed))
|
||||
overcommit_tracker_ptr->tryContinueQueryExecutionAfterFree(accounted_size);
|
||||
|
||||
AllocationTrace res = getAllocationTrace(sample_probability);
|
||||
if (auto * loaded_next = parent.load(std::memory_order_relaxed))
|
||||
res = updateAllocationTrace(loaded_next->free(size), sample_probability);
|
||||
loaded_next->free(size);
|
||||
|
||||
auto metric_loaded = metric.load(std::memory_order_relaxed);
|
||||
if (metric_loaded != CurrentMetrics::end())
|
||||
CurrentMetrics::sub(metric_loaded, accounted_size);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
@ -525,14 +478,3 @@ void MemoryTracker::setOrRaiseProfilerLimit(Int64 value)
|
||||
while ((value == 0 || old_value < value) && !profiler_limit.compare_exchange_weak(old_value, value))
|
||||
;
|
||||
}
|
||||
|
||||
double MemoryTracker::getSampleProbability()
|
||||
{
|
||||
if (sample_probability)
|
||||
return *sample_probability;
|
||||
|
||||
if (auto * loaded_next = parent.load(std::memory_order_relaxed))
|
||||
return loaded_next->getSampleProbability();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2,11 +2,9 @@
|
||||
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <optional>
|
||||
#include <base/types.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/VariableContext.h>
|
||||
#include <Common/AllocationTrace.h>
|
||||
|
||||
#if !defined(NDEBUG)
|
||||
#define MEMORY_TRACKER_DEBUG_CHECKS
|
||||
@ -67,7 +65,7 @@ private:
|
||||
double fault_probability = 0;
|
||||
|
||||
/// To randomly sample allocations and deallocations in trace_log.
|
||||
std::optional<double> sample_probability;
|
||||
double sample_probability = 0;
|
||||
|
||||
/// Singly-linked list. All information will be passed to subsequent memory trackers also (it allows to implement trackers hierarchy).
|
||||
/// In terms of tree nodes it is the list of parents. Lifetime of these trackers should "include" lifetime of current tracker.
|
||||
@ -92,8 +90,8 @@ private:
|
||||
|
||||
/// allocImpl(...) and free(...) should not be used directly
|
||||
friend struct CurrentMemoryTracker;
|
||||
[[nodiscard]] AllocationTrace allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryTracker * query_tracker = nullptr);
|
||||
[[nodiscard]] AllocationTrace free(Int64 size);
|
||||
void allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryTracker * query_tracker = nullptr);
|
||||
void free(Int64 size);
|
||||
public:
|
||||
|
||||
static constexpr auto USAGE_EVENT_NAME = "MemoryTrackerUsage";
|
||||
@ -148,8 +146,6 @@ public:
|
||||
sample_probability = value;
|
||||
}
|
||||
|
||||
double getSampleProbability();
|
||||
|
||||
void setProfilerStep(Int64 value)
|
||||
{
|
||||
profiler_step = value;
|
||||
|
@ -28,5 +28,4 @@ public:
|
||||
}
|
||||
|
||||
friend class MemoryTracker;
|
||||
friend struct AllocationTrace;
|
||||
};
|
||||
|
@ -72,11 +72,11 @@ ShellCommand::~ShellCommand()
|
||||
if (process_terminated_normally)
|
||||
return;
|
||||
|
||||
LOG_TRACE(getLogger(), "Will kill shell command pid {} with SIGTERM", pid);
|
||||
LOG_TRACE(getLogger(), "Will kill shell command pid {} with signal {}", pid, config.terminate_in_destructor_strategy.termination_signal);
|
||||
|
||||
int retcode = kill(pid, SIGTERM);
|
||||
int retcode = kill(pid, config.terminate_in_destructor_strategy.termination_signal);
|
||||
if (retcode != 0)
|
||||
LOG_WARNING(getLogger(), "Cannot kill shell command pid {} errno '{}'", pid, errnoToString());
|
||||
LOG_WARNING(getLogger(), "Cannot kill shell command pid {}, error: '{}'", pid, errnoToString());
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -27,18 +27,18 @@ namespace DB
|
||||
class ShellCommand final
|
||||
{
|
||||
public:
|
||||
|
||||
~ShellCommand();
|
||||
|
||||
struct DestructorStrategy final
|
||||
{
|
||||
explicit DestructorStrategy(bool terminate_in_destructor_, size_t wait_for_normal_exit_before_termination_seconds_ = 0)
|
||||
: terminate_in_destructor(terminate_in_destructor_)
|
||||
explicit DestructorStrategy(bool terminate_in_destructor_, int termination_signal_, size_t wait_for_normal_exit_before_termination_seconds_ = 0)
|
||||
: terminate_in_destructor(terminate_in_destructor_), termination_signal(termination_signal_)
|
||||
, wait_for_normal_exit_before_termination_seconds(wait_for_normal_exit_before_termination_seconds_)
|
||||
{
|
||||
}
|
||||
|
||||
bool terminate_in_destructor;
|
||||
int termination_signal;
|
||||
|
||||
/// If terminate_in_destructor is true, the command waits this many seconds for a normal exit before the termination signal is sent to the created process
|
||||
size_t wait_for_normal_exit_before_termination_seconds = 0;
|
||||
@ -64,7 +64,7 @@ public:
|
||||
|
||||
bool pipe_stdin_only = false;
|
||||
|
||||
DestructorStrategy terminate_in_destructor_strategy = DestructorStrategy(false);
|
||||
DestructorStrategy terminate_in_destructor_strategy = DestructorStrategy(false, 0);
|
||||
};
|
||||
|
||||
/// Run the command using /bin/sh -c.
|
||||
|
@ -40,6 +40,10 @@ public:
|
||||
* Pass CLOCK_MONOTONIC_COARSE, if you need better performance with acceptable cost of several milliseconds of inaccuracy.
|
||||
*/
|
||||
explicit Stopwatch(clockid_t clock_type_ = CLOCK_MONOTONIC) : clock_type(clock_type_) { start(); }
|
||||
explicit Stopwatch(clockid_t clock_type_, UInt64 start_nanoseconds, bool is_running_)
|
||||
: start_ns(start_nanoseconds), clock_type(clock_type_), is_running(is_running_)
|
||||
{
|
||||
}
|
||||
|
||||
void start() { start_ns = nanoseconds(); is_running = true; }
|
||||
void stop() { stop_ns = nanoseconds(); is_running = false; }
|
||||
@ -51,6 +55,8 @@ public:
|
||||
UInt64 elapsedMilliseconds() const { return elapsedNanoseconds() / 1000000UL; }
|
||||
double elapsedSeconds() const { return static_cast<double>(elapsedNanoseconds()) / 1000000000ULL; }
|
||||
|
||||
UInt64 getStart() { return start_ns; }
|
||||
|
||||
private:
|
||||
UInt64 start_ns = 0;
|
||||
UInt64 stop_ns = 0;
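
The new Stopwatch constructor makes it possible to create a watch that appears to have started at an earlier instant, which is how the client lines up its output-format timing with what the server reports. A hedged usage sketch; Stopwatch is the class from this header, clock_gettime_ns is the repo's helper, and elapsed_seconds is a hypothetical value taken from the progress indication:

#include <ctime>

/// Fragment: reconstruct a stopwatch for a query that started elapsed_seconds ago.
Stopwatch makeBackdatedStopwatch(double elapsed_seconds)
{
    UInt64 start_ns = clock_gettime_ns(CLOCK_MONOTONIC)
        - static_cast<UInt64>(elapsed_seconds * 1000000000ULL);
    return Stopwatch(CLOCK_MONOTONIC, start_ns, /* is_running_ = */ true);
}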
|
||||
|
@ -188,13 +188,10 @@ void ThreadStatus::updatePerformanceCounters()
|
||||
}
|
||||
}
|
||||
|
||||
void ThreadStatus::assertState(const std::initializer_list<int> & permitted_states, const char * description) const
|
||||
void ThreadStatus::assertState(ThreadState permitted_state, const char * description) const
|
||||
{
|
||||
for (auto permitted_state : permitted_states)
|
||||
{
|
||||
if (getCurrentState() == permitted_state)
|
||||
return;
|
||||
}
|
||||
if (getCurrentState() == permitted_state)
|
||||
return;
|
||||
|
||||
if (description)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected thread state {}: {}", getCurrentState(), description);
|
||||
|
@ -87,10 +87,6 @@ public:
|
||||
LogsLevel client_logs_level = LogsLevel::none;
|
||||
|
||||
String query;
|
||||
/// Query without new lines (see toOneLineQuery())
|
||||
/// Used to print in case of fatal error
|
||||
/// (to avoid calling extra code in the fatal error handler)
|
||||
String one_line_query;
|
||||
UInt64 normalized_query_hash = 0;
|
||||
|
||||
std::vector<ProfileEventsCountersAndMemory> finished_threads_counters_memory;
|
||||
@ -296,7 +292,7 @@ protected:
|
||||
void logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database, std::chrono::time_point<std::chrono::system_clock> now);
|
||||
|
||||
|
||||
void assertState(const std::initializer_list<int> & permitted_states, const char * description = nullptr) const;
|
||||
void assertState(ThreadState permitted_state, const char * description = nullptr) const;
|
||||
|
||||
|
||||
private:
|
||||
|
@ -33,7 +33,6 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Ext
|
||||
+ sizeof(TraceType) /// trace type
|
||||
+ sizeof(UInt64) /// thread_id
|
||||
+ sizeof(Int64) /// size
|
||||
+ sizeof(void *) /// ptr
|
||||
+ sizeof(ProfileEvents::Event) /// event
|
||||
+ sizeof(ProfileEvents::Count); /// increment
|
||||
|
||||
@ -75,7 +74,6 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Ext
|
||||
writePODBinary(trace_type, out);
|
||||
writePODBinary(thread_id, out);
|
||||
writePODBinary(extras.size, out);
|
||||
writePODBinary(UInt64(extras.ptr), out);
|
||||
writePODBinary(extras.event, out);
|
||||
writePODBinary(extras.increment, out);
|
||||
|
||||
|
@ -28,9 +28,8 @@ class TraceSender
|
||||
public:
|
||||
struct Extras
|
||||
{
|
||||
/// size, ptr - for memory tracing is the amount of memory allocated; for other trace types it is 0.
|
||||
/// size - for memory tracing is the amount of memory allocated; for other trace types it is 0.
|
||||
Int64 size{};
|
||||
void * ptr = nullptr;
|
||||
/// Event type and increment for 'ProfileEvent' trace type; for other trace types defaults.
|
||||
ProfileEvents::Event event{ProfileEvents::end()};
|
||||
ProfileEvents::Count increment{};
|
||||
|
@ -342,7 +342,6 @@ ZooKeeper::ZooKeeper(
|
||||
default_acls.emplace_back(std::move(acl));
|
||||
}
|
||||
|
||||
|
||||
/// It makes sense (especially, for async requests) to inject a fault in two places:
|
||||
/// pushRequest (before request is sent) and receiveEvent (after request was executed).
|
||||
if (0 < args.send_fault_probability && args.send_fault_probability <= 1)
|
||||
|
@@ -9,11 +9,7 @@ extern "C" void * clickhouse_malloc(size_t size)
{
void * res = malloc(size);
if (res)
{
AllocationTrace trace;
size_t actual_size = Memory::trackMemory(size, trace);
trace.onAlloc(res, actual_size);
}
Memory::trackMemory(size);
return res;
}

@@ -21,29 +17,17 @@ extern "C" void * clickhouse_calloc(size_t number_of_members, size_t size)
{
void * res = calloc(number_of_members, size);
if (res)
{
AllocationTrace trace;
size_t actual_size = Memory::trackMemory(number_of_members * size, trace);
trace.onAlloc(res, actual_size);
}
Memory::trackMemory(number_of_members * size);
return res;
}

extern "C" void * clickhouse_realloc(void * ptr, size_t size)
{
if (ptr)
{
AllocationTrace trace;
size_t actual_size = Memory::untrackMemory(ptr, trace);
trace.onFree(ptr, actual_size);
}
Memory::untrackMemory(ptr);
void * res = realloc(ptr, size);
if (res)
{
AllocationTrace trace;
size_t actual_size = Memory::trackMemory(size, trace);
trace.onAlloc(res, actual_size);
}
Memory::trackMemory(size);
return res;
}

@@ -58,9 +42,7 @@ extern "C" void * clickhouse_reallocarray(void * ptr, size_t number_of_members,

extern "C" void clickhouse_free(void * ptr)
{
AllocationTrace trace;
size_t actual_size = Memory::untrackMemory(ptr, trace);
trace.onFree(ptr, actual_size);
Memory::untrackMemory(ptr);
free(ptr);
}

@@ -68,10 +50,6 @@ extern "C" int clickhouse_posix_memalign(void ** memptr, size_t alignment, size_
{
int res = posix_memalign(memptr, alignment, size);
if (res == 0)
{
AllocationTrace trace;
size_t actual_size = Memory::trackMemory(size, trace);
trace.onAlloc(*memptr, actual_size);
}
Memory::trackMemory(size);
return res;
}
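
All of these `clickhouse_*` wrappers follow the same shape: call the real allocator, then account for the bytes with `Memory::trackMemory`/`Memory::untrackMemory`. A hedged sketch of that wrap-and-account pattern reduced to a plain atomic byte counter (illustrative names only, not the real Memory API):

#include <atomic>
#include <cstdlib>

/// Hypothetical sketch of the wrap-and-account pattern used above:
/// every allocation/deallocation passes through a counter before or after
/// the real allocator is called. Names here are illustrative only.
namespace tracking
{
    inline std::atomic<long long> allocated_bytes{0};

    inline void track(size_t size) { allocated_bytes.fetch_add(static_cast<long long>(size), std::memory_order_relaxed); }
    inline void untrack(size_t size) { allocated_bytes.fetch_sub(static_cast<long long>(size), std::memory_order_relaxed); }
}

extern "C" void * tracked_malloc(size_t size)
{
    void * res = malloc(size);
    if (res)
        tracking::track(size); /// account only for successful allocations
    return res;
}

extern "C" void tracked_free(void * ptr, size_t size)
{
    if (ptr)
        tracking::untrack(size); /// this toy requires the caller to pass the allocated size
    free(ptr);
}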
@@ -112,19 +112,16 @@ inline ALWAYS_INLINE size_t getActualAllocationSize(size_t size, TAlign... align

template <std::same_as<std::align_val_t>... TAlign>
requires DB::OptionalArgument<TAlign...>
inline ALWAYS_INLINE size_t trackMemory(std::size_t size, AllocationTrace & trace, TAlign... align)
inline ALWAYS_INLINE void trackMemory(std::size_t size, TAlign... align)
{
std::size_t actual_size = getActualAllocationSize(size, align...);
trace = CurrentMemoryTracker::allocNoThrow(actual_size);
return actual_size;
CurrentMemoryTracker::allocNoThrow(actual_size);
}

template <std::same_as<std::align_val_t>... TAlign>
requires DB::OptionalArgument<TAlign...>
inline ALWAYS_INLINE size_t untrackMemory(void * ptr [[maybe_unused]], AllocationTrace & trace, std::size_t size [[maybe_unused]] = 0, TAlign... align [[maybe_unused]]) noexcept
inline ALWAYS_INLINE void untrackMemory(void * ptr [[maybe_unused]], std::size_t size [[maybe_unused]] = 0, TAlign... align [[maybe_unused]]) noexcept
{
std::size_t actual_size = 0;

try
{
#if USE_JEMALLOC

@@ -133,26 +130,23 @@ inline ALWAYS_INLINE size_t untrackMemory(void * ptr [[maybe_unused]], Allocatio
if (likely(ptr != nullptr))
{
if constexpr (sizeof...(TAlign) == 1)
actual_size = sallocx(ptr, MALLOCX_ALIGN(alignToSizeT(align...)));
CurrentMemoryTracker::free(sallocx(ptr, MALLOCX_ALIGN(alignToSizeT(align...))));
else
actual_size = sallocx(ptr, 0);
CurrentMemoryTracker::free(sallocx(ptr, 0));
}
#else
if (size)
actual_size = size;
CurrentMemoryTracker::free(size);
# if defined(_GNU_SOURCE)
/// It's inaccurate resource free for sanitizers. malloc_usable_size() result is greater or equal to allocated size.
else
actual_size = malloc_usable_size(ptr);
CurrentMemoryTracker::free(malloc_usable_size(ptr));
# endif
#endif
trace = CurrentMemoryTracker::free(actual_size);
}
catch (...)
{
}

return actual_size;
}

}
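
`untrackMemory` has to recover how many bytes a pointer actually owns: jemalloc's `sallocx` when available, the caller-supplied size otherwise, and glibc's `malloc_usable_size` as a last resort, which may report more than was requested. A small glibc-specific demo of that fallback, separate from the ClickHouse code:

#include <cstdio>
#include <cstdlib>
#include <malloc.h>  /// malloc_usable_size (glibc extension)

int main()
{
    void * p = malloc(100);
    if (!p)
        return 1;

    /// The allocator may round the request up, so the usable size is
    /// greater than or equal to the 100 bytes that were asked for.
    printf("requested: 100, usable: %zu\n", malloc_usable_size(p));

    free(p);
    return 0;
}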
@@ -50,74 +50,50 @@ static struct InitializeJemallocZoneAllocatorForOSX

void * operator new(std::size_t size)
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace);
void * ptr = Memory::newImpl(size);
trace.onAlloc(ptr, actual_size);
return ptr;
Memory::trackMemory(size);
return Memory::newImpl(size);
}

void * operator new(std::size_t size, std::align_val_t align)
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace, align);
void * ptr = Memory::newImpl(size, align);
trace.onAlloc(ptr, actual_size);
return ptr;
Memory::trackMemory(size, align);
return Memory::newImpl(size, align);
}

void * operator new[](std::size_t size)
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace);
void * ptr = Memory::newImpl(size);
trace.onAlloc(ptr, actual_size);
return ptr;
Memory::trackMemory(size);
return Memory::newImpl(size);
}

void * operator new[](std::size_t size, std::align_val_t align)
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace, align);
void * ptr = Memory::newImpl(size, align);
trace.onAlloc(ptr, actual_size);
return ptr;
Memory::trackMemory(size, align);
return Memory::newImpl(size, align);
}

void * operator new(std::size_t size, const std::nothrow_t &) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace);
void * ptr = Memory::newNoExept(size);
trace.onAlloc(ptr, actual_size);
return ptr;
Memory::trackMemory(size);
return Memory::newNoExept(size);
}

void * operator new[](std::size_t size, const std::nothrow_t &) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace);
void * ptr = Memory::newNoExept(size);
trace.onAlloc(ptr, actual_size);
return ptr;
Memory::trackMemory(size);
return Memory::newNoExept(size);
}

void * operator new(std::size_t size, std::align_val_t align, const std::nothrow_t &) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace, align);
void * ptr = Memory::newNoExept(size, align);
trace.onAlloc(ptr, actual_size);
return ptr;
Memory::trackMemory(size, align);
return Memory::newNoExept(size, align);
}

void * operator new[](std::size_t size, std::align_val_t align, const std::nothrow_t &) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace, align);
void * ptr = Memory::newNoExept(size, align);
trace.onAlloc(ptr, actual_size);
return ptr;
Memory::trackMemory(size, align);
return Memory::newNoExept(size, align);
}

/// delete
@@ -133,64 +109,48 @@ void * operator new[](std::size_t size, std::align_val_t align, const std::nothr

void operator delete(void * ptr) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::untrackMemory(ptr, trace);
trace.onFree(ptr, actual_size);
Memory::untrackMemory(ptr);
Memory::deleteImpl(ptr);
}

void operator delete(void * ptr, std::align_val_t align) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::untrackMemory(ptr, trace, 0, align);
trace.onFree(ptr, actual_size);
Memory::untrackMemory(ptr, 0, align);
Memory::deleteImpl(ptr);
}

void operator delete[](void * ptr) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::untrackMemory(ptr, trace);
trace.onFree(ptr, actual_size);
Memory::untrackMemory(ptr);
Memory::deleteImpl(ptr);
}

void operator delete[](void * ptr, std::align_val_t align) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::untrackMemory(ptr, trace, 0, align);
trace.onFree(ptr, actual_size);
Memory::untrackMemory(ptr, 0, align);
Memory::deleteImpl(ptr);
}

void operator delete(void * ptr, std::size_t size) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::untrackMemory(ptr, trace, size);
trace.onFree(ptr, actual_size);
Memory::untrackMemory(ptr, size);
Memory::deleteSized(ptr, size);
}

void operator delete(void * ptr, std::size_t size, std::align_val_t align) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::untrackMemory(ptr, trace, size, align);
trace.onFree(ptr, actual_size);
Memory::untrackMemory(ptr, size, align);
Memory::deleteSized(ptr, size, align);
}

void operator delete[](void * ptr, std::size_t size) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::untrackMemory(ptr, trace, size);
trace.onFree(ptr, actual_size);
Memory::untrackMemory(ptr, size);
Memory::deleteSized(ptr, size);
}

void operator delete[](void * ptr, std::size_t size, std::align_val_t align) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::untrackMemory(ptr, trace, size, align);
trace.onFree(ptr, actual_size);
Memory::untrackMemory(ptr, size, align);
Memory::deleteSized(ptr, size, align);
}
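
Each overload above follows the same discipline: account for the allocation before calling the allocator, and account for the free before releasing the memory. A minimal sketch of that discipline applied to the replaceable global `operator new`/`operator delete`, reduced to a single byte counter (an assumption-laden toy, not the ClickHouse implementation):

#include <atomic>
#include <cstdio>
#include <cstdlib>
#include <new>

/// Hypothetical sketch of "track, then allocate / untrack, then free".
static std::atomic<size_t> live_bytes{0};

void * operator new(std::size_t size)
{
    live_bytes.fetch_add(size, std::memory_order_relaxed); /// track before allocating
    if (void * ptr = std::malloc(size))
        return ptr;
    throw std::bad_alloc{};
}

void operator delete(void * ptr) noexcept
{
    /// Without a size argument the counter cannot be decremented precisely;
    /// the real code recovers the size from the allocator (see untrackMemory).
    std::free(ptr);
}

void operator delete(void * ptr, std::size_t size) noexcept
{
    live_bytes.fetch_sub(size, std::memory_order_relaxed); /// sized delete knows the size
    std::free(ptr);
}

int main()
{
    auto * p = new int(42);
    std::printf("live bytes after new: %zu\n", live_bytes.load());
    delete p; /// compilers typically emit a call to the sized delete here
    std::printf("live bytes after delete: %zu\n", live_bytes.load());
}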
@@ -148,31 +148,30 @@ std::vector<String> Settings::getAllRegisteredNames() const

void Settings::set(std::string_view name, const Field & value)
{
BaseSettings::set(name, value);

if (name == "compatibility")
applyCompatibilitySetting();
applyCompatibilitySetting(value.get<String>());
/// If we change a setting that was previously changed by the compatibility setting,
/// we should remove it from settings_changed_by_compatibility_setting,
/// otherwise the next time we change the compatibility setting
/// this setting will be changed too (and we don't want that).
else if (settings_changed_by_compatibility_setting.contains(name))
settings_changed_by_compatibility_setting.erase(name);

BaseSettings::set(name, value);
}

void Settings::applyCompatibilitySetting()
void Settings::applyCompatibilitySetting(const String & compatibility_value)
{
/// First, revert all changes applied by previous compatibility setting
for (const auto & setting_name : settings_changed_by_compatibility_setting)
resetToDefault(setting_name);

settings_changed_by_compatibility_setting.clear();
String compatibility = getString("compatibility");
/// If setting value is empty, we don't need to change settings
if (compatibility.empty())
if (compatibility_value.empty())
return;

ClickHouseVersion version(compatibility);
ClickHouseVersion version(compatibility_value);
/// Iterate through ClickHouse versions in descending order and apply reversed
/// changes for each version that is higher than the version from the compatibility setting
for (auto it = settings_changes_history.rbegin(); it != settings_changes_history.rend(); ++it)
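
`applyCompatibilitySetting` first reverts whatever the previous compatibility value changed, then walks the per-version change history from newest to oldest and restores the old defaults for every version newer than the requested one. A simplified standalone model of that revert-then-apply flow (hypothetical names and string-typed values; the real code compares parsed version components, not raw strings):

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

/// Simplified, hypothetical model of the compatibility mechanism above.
struct SettingChange { std::string name; std::string old_value; std::string new_value; };

/// Per-version list of defaults that changed in that version.
using ChangesHistory = std::map<std::string, std::vector<SettingChange>>;

struct SettingsModel
{
    std::map<std::string, std::string> values;       /// current effective values
    std::map<std::string, std::string> defaults;     /// built-in defaults
    std::set<std::string> changed_by_compatibility;  /// what the compatibility setting touched

    void applyCompatibility(const std::string & target_version, const ChangesHistory & history)
    {
        /// First, revert everything the previous compatibility value changed.
        for (const auto & name : changed_by_compatibility)
            values[name] = defaults[name];
        changed_by_compatibility.clear();

        if (target_version.empty())
            return;

        /// Walk versions from newest to oldest; for every version newer than the
        /// target, restore the old default that was in effect before that version.
        /// (String comparison is only adequate for this tiny demo.)
        for (auto it = history.rbegin(); it != history.rend(); ++it)
        {
            if (it->first <= target_version)
                break;
            for (const auto & change : it->second)
            {
                values[change.name] = change.old_value;
                changed_by_compatibility.insert(change.name);
            }
        }
    }
};

int main()
{
    ChangesHistory history = {{"23.1", {{"read_objects_as_strings", "0", "1"}}}};
    SettingsModel s;
    s.defaults["read_objects_as_strings"] = "1";
    s.values = s.defaults;

    s.applyCompatibility("22.12", history); /// pretend to be an older server
    std::cout << s.values["read_objects_as_strings"] << "\n"; /// prints 0
}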
@@ -771,7 +771,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, input_format_json_try_infer_numbers_from_strings, true, "Try to infer numbers from string fields while schema inference", 0) \
M(Bool, input_format_json_validate_types_from_metadata, true, "For JSON/JSONCompact/JSONColumnsWithMetadata input formats this controls whether format parser should check if data types from input metadata match data types of the corresponding columns from the table", 0) \
M(Bool, input_format_json_read_numbers_as_strings, false, "Allow to parse numbers as strings in JSON input formats", 0) \
M(Bool, input_format_json_read_objects_as_strings, false, "Allow to parse JSON objects as strings in JSON input formats", 0) \
M(Bool, input_format_json_read_objects_as_strings, true, "Allow to parse JSON objects as strings in JSON input formats", 0) \
M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \
M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \
M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \

@@ -928,7 +928,7 @@ struct Settings : public BaseSettings<SettingsTraits>, public IHints<2, Settings
void setDefaultValue(const String & name) { resetToDefault(name); }

private:
void applyCompatibilitySetting();
void applyCompatibilitySetting(const String & compatibility);

std::unordered_set<std::string_view> settings_changed_by_compatibility_setting;
};

@@ -3,6 +3,7 @@
#include <Core/Field.h>
#include <Core/Settings.h>
#include <IO/ReadHelpers.h>
#include <IO/ReadBufferFromString.h>
#include <boost/algorithm/string.hpp>
#include <map>
@@ -28,7 +29,8 @@ public:
for (const auto & split_element : split)
{
size_t component;
if (!tryParse(component, split_element))
ReadBufferFromString buf(split_element);
if (!tryReadIntText(component, buf) || !buf.eof())
throw Exception{ErrorCodes::BAD_ARGUMENTS, "Cannot parse ClickHouse version here: {}", version};
components.push_back(component);
}
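
The replacement check both parses the integer and requires the buffer to be fully consumed, so a version component such as "1x" is rejected rather than silently truncated to 1. A hedged standalone equivalent using std::from_chars instead of the ClickHouse readers:

#include <charconv>
#include <cstdio>
#include <string_view>

/// Hedged equivalent of "parse an integer and require full consumption":
/// std::from_chars must succeed and ptr must point past the last character.
static bool parseVersionComponent(std::string_view s, size_t & out)
{
    const char * begin = s.data();
    const char * end = s.data() + s.size();
    auto [ptr, ec] = std::from_chars(begin, end, out);
    return ec == std::errc() && ptr == end;
}

int main()
{
    size_t v = 0;
    std::printf("\"22\" -> %d\n", parseVersionComponent("22", v)); /// 1 (ok)
    std::printf("\"1x\" -> %d\n", parseVersionComponent("1x", v)); /// 0 (trailing garbage)
    std::printf("\"\"   -> %d\n", parseVersionComponent("", v));   /// 0 (empty)
}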
@@ -78,6 +80,7 @@ namespace SettingsChangesHistory
/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
{
{"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}}},
{"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"},
{"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"},
{"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}},

@@ -4,6 +4,7 @@

#include <Daemon/BaseDaemon.h>
#include <Daemon/SentryWriter.h>
#include <Parsers/toOneLineQuery.h>
#include <base/errnoToString.h>

#include <sys/stat.h>

@@ -303,7 +304,7 @@ private:

if (auto thread_group = thread_ptr->getThreadGroup())
{
query = thread_group->one_line_query;
query = DB::toOneLineQuery(thread_group->query);
}

if (auto logs_queue = thread_ptr->getInternalTextLogsQueue())

@@ -11,7 +11,7 @@ if (OS_DARWIN AND NOT USE_STATIC_LIBRARIES)
target_link_libraries (daemon PUBLIC -Wl,-undefined,dynamic_lookup)
endif()

target_link_libraries (daemon PUBLIC loggers common PRIVATE clickhouse_common_io clickhouse_common_config)
target_link_libraries (daemon PUBLIC loggers common PRIVATE clickhouse_parsers clickhouse_common_io clickhouse_common_config)

if (TARGET ch_contrib::sentry)
target_link_libraries (daemon PRIVATE ch_contrib::sentry dbms)

@@ -67,7 +67,7 @@ String DataTypeAggregateFunction::getNameImpl(bool with_version) const
if (!parameters.empty())
{
stream << '(';
for (size_t i = 0; i < parameters.size(); ++i)
for (size_t i = 0, size = parameters.size(); i < size; ++i)
{
if (i)
stream << ", ";

@@ -30,10 +30,26 @@ namespace ErrorCodes
void DataTypeCustomSimpleAggregateFunction::checkSupportedFunctions(const AggregateFunctionPtr & function)
{
/// TODO Make it sane.
static const std::vector<String> supported_functions{"any", "anyLast", "min",
"max", "sum", "sumWithOverflow", "groupBitAnd", "groupBitOr", "groupBitXor",
"sumMap", "minMap", "maxMap", "groupArrayArray", "groupUniqArrayArray",
"sumMappedArrays", "minMappedArrays", "maxMappedArrays"};
static const std::vector<String> supported_functions{
"any",
"anyLast",
"min",
"max",
"sum",
"sumWithOverflow",
"groupBitAnd",
"groupBitOr",
"groupBitXor",
"sumMap",
"minMap",
"maxMap",
"groupArrayArray",
"groupArrayLastArray",
"groupUniqArrayArray",
"sumMappedArrays",
"minMappedArrays",
"maxMappedArrays",
};

// check function
if (std::find(std::begin(supported_functions), std::end(supported_functions), function->getName()) == std::end(supported_functions))
@@ -8,12 +8,58 @@
namespace DB
{

void transformTypesRecursively(DataTypes & types, std::function<void(DataTypes &, const TypeIndexesSet &)> transform_simple_types, std::function<void(DataTypes &, const TypeIndexesSet &)> transform_complex_types)
TypeIndexesSet getTypesIndexes(const DataTypes & types)
{
TypeIndexesSet type_indexes;
for (const auto & type : types)
type_indexes.insert(type->getTypeId());
return type_indexes;
}

void transformTypesRecursively(DataTypes & types, std::function<void(DataTypes &, const TypeIndexesSet &)> transform_simple_types, std::function<void(DataTypes &, const TypeIndexesSet &)> transform_complex_types)
{
TypeIndexesSet type_indexes = getTypesIndexes(types);

/// Nullable
if (type_indexes.contains(TypeIndex::Nullable))
{
std::vector<UInt8> is_nullable;
is_nullable.reserve(types.size());
DataTypes nested_types;
nested_types.reserve(types.size());
for (const auto & type : types)
{
if (const DataTypeNullable * type_nullable = typeid_cast<const DataTypeNullable *>(type.get()))
{
is_nullable.push_back(1);
nested_types.push_back(type_nullable->getNestedType());
}
else
{
is_nullable.push_back(0);
nested_types.push_back(type);
}
}

transformTypesRecursively(nested_types, transform_simple_types, transform_complex_types);
for (size_t i = 0; i != types.size(); ++i)
{
/// Type could be changed so it cannot be inside Nullable anymore.
if (is_nullable[i] && nested_types[i]->canBeInsideNullable())
types[i] = makeNullable(nested_types[i]);
else
types[i] = nested_types[i];
}

if (transform_complex_types)
{
/// Some types could be changed.
type_indexes = getTypesIndexes(types);
transform_complex_types(types, type_indexes);
}

return;
}

/// Arrays
if (type_indexes.contains(TypeIndex::Array))

@@ -114,42 +160,6 @@ void transformTypesRecursively(DataTypes & types, std::function<void(DataTypes &
return;
}

/// Nullable
if (type_indexes.contains(TypeIndex::Nullable))
{
std::vector<UInt8> is_nullable;
is_nullable.reserve(types.size());
DataTypes nested_types;
nested_types.reserve(types.size());
for (const auto & type : types)
{
if (const DataTypeNullable * type_nullable = typeid_cast<const DataTypeNullable *>(type.get()))
{
is_nullable.push_back(1);
nested_types.push_back(type_nullable->getNestedType());
}
else
{
is_nullable.push_back(0);
nested_types.push_back(type);
}
}

transformTypesRecursively(nested_types, transform_simple_types, transform_complex_types);
for (size_t i = 0; i != types.size(); ++i)
{
if (is_nullable[i])
types[i] = makeNullable(nested_types[i]);
else
types[i] = nested_types[i];
}

if (transform_complex_types)
transform_complex_types(types, type_indexes);

return;
}

transform_simple_types(types, type_indexes);
}
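
The Nullable branch now unwraps the nested types, recurses, and rewraps only when the transformed type can still live inside Nullable, refreshing the type-index set before the complex-type callback runs. A heavily simplified toy model of that unwrap/recurse/rewrap pattern (hypothetical types, not the real DataTypes machinery):

#include <iostream>
#include <string>
#include <vector>

/// Hypothetical, heavily simplified model: a "type" is either a plain name or
/// Nullable(name), and the transform may replace a name with one that is not
/// allowed inside Nullable.
struct Type
{
    std::string name;
    bool nullable = false;
};

static bool canBeInsideNullable(const std::string & name) { return name != "Object"; }

static void transformTypes(std::vector<Type> & types)
{
    /// Unwrap Nullable, remembering which positions were wrapped.
    std::vector<bool> was_nullable;
    for (auto & t : types)
    {
        was_nullable.push_back(t.nullable);
        t.nullable = false;
    }

    /// Transform the nested types (here: a trivial rewrite standing in for the recursion).
    for (auto & t : types)
        if (t.name == "String")
            t.name = "Object";

    /// Rewrap only when the transformed type is still allowed inside Nullable.
    for (size_t i = 0; i != types.size(); ++i)
        types[i].nullable = was_nullable[i] && canBeInsideNullable(types[i].name);
}

int main()
{
    std::vector<Type> types = {{"String", true}, {"UInt64", true}};
    transformTypes(types);
    for (const auto & t : types)
        std::cout << (t.nullable ? "Nullable(" + t.name + ")" : t.name) << "\n";
    /// prints: Object, then Nullable(UInt64)
}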
@@ -401,7 +401,7 @@ void DatabaseReplicated::createEmptyLogEntry(const ZooKeeperPtr & current_zookee

bool DatabaseReplicated::waitForReplicaToProcessAllEntries(UInt64 timeout_ms)
{
if (!ddl_worker)
if (!ddl_worker || is_probably_dropped)
return false;
return ddl_worker->waitForReplicaToProcessAllEntries(timeout_ms);
}

@@ -473,9 +473,10 @@ void DatabaseReplicated::startupTables(ThreadPool & thread_pool, LoadingStrictne
chassert(!TSA_SUPPRESS_WARNING_FOR_READ(tables_metadata_digest));
TSA_SUPPRESS_WARNING_FOR_WRITE(tables_metadata_digest) = digest;

ddl_worker = std::make_unique<DatabaseReplicatedDDLWorker>(this, getContext());
if (is_probably_dropped)
return;

ddl_worker = std::make_unique<DatabaseReplicatedDDLWorker>(this, getContext());
ddl_worker->startup();
}

@@ -491,7 +492,7 @@ bool DatabaseReplicated::checkDigestValid(const ContextPtr & local_context, bool
LOG_TEST(log, "Current in-memory metadata digest: {}", tables_metadata_digest);

/// Database is probably being dropped
if (!local_context->getZooKeeperMetadataTransaction() && !ddl_worker->isCurrentlyActive())
if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker || !ddl_worker->isCurrentlyActive()))
return true;

UInt64 local_digest = 0;

@@ -1019,8 +1020,51 @@ ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node
return ast;
}

void DatabaseReplicated::dropReplica(
DatabaseReplicated * database, const String & database_zookeeper_path, const String & full_replica_name)
{
assert(!database || database_zookeeper_path == database->zookeeper_path);

if (full_replica_name.find('/') != std::string::npos)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid replica name: {}", full_replica_name);

auto zookeeper = Context::getGlobalContextInstance()->getZooKeeper();

String database_mark = zookeeper->get(database_zookeeper_path);
if (database_mark != REPLICATED_DATABASE_MARK)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} does not look like a path of Replicated database", database_zookeeper_path);

String database_replica_path = fs::path(database_zookeeper_path) / "replicas" / full_replica_name;
if (!zookeeper->exists(database_replica_path))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica {} does not exist (database path: {})",
full_replica_name, database_zookeeper_path);

if (zookeeper->exists(database_replica_path + "/active"))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica {} is active, cannot drop it (database path: {})",
full_replica_name, database_zookeeper_path);

zookeeper->set(database_replica_path, DROPPED_MARK, -1);
/// Notify other replicas that cluster configuration was changed (if we can)
if (database)
database->createEmptyLogEntry(zookeeper);

zookeeper->tryRemoveRecursive(database_replica_path);
if (zookeeper->tryRemove(database_zookeeper_path + "/replicas") == Coordination::Error::ZOK)
{
/// It was the last replica, remove all metadata
zookeeper->tryRemoveRecursive(database_zookeeper_path);
}
}

void DatabaseReplicated::drop(ContextPtr context_)
{
if (is_probably_dropped)
{
/// Don't need to drop anything from ZooKeeper
DatabaseAtomic::drop(context_);
return;
}

auto current_zookeeper = getZooKeeper();
current_zookeeper->set(replica_path, DROPPED_MARK, -1);
createEmptyLogEntry(current_zookeeper);

@@ -1038,8 +1082,6 @@ void DatabaseReplicated::drop(ContextPtr context_)

void DatabaseReplicated::stopReplication()
{
if (is_probably_dropped)
return;
if (ddl_worker)
ddl_worker->shutdown();
}

@@ -1055,7 +1097,7 @@ void DatabaseReplicated::shutdown()
void DatabaseReplicated::dropTable(ContextPtr local_context, const String & table_name, bool sync)
{
auto txn = local_context->getZooKeeperMetadataTransaction();
assert(!ddl_worker->isCurrentlyActive() || txn || startsWith(table_name, ".inner_id."));
assert(!ddl_worker || !ddl_worker->isCurrentlyActive() || txn || startsWith(table_name, ".inner_id."));
if (txn && txn->isInitialQuery() && !txn->isCreateOrReplaceQuery())
{
String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(table_name);