Merge branch 'master' of https://github.com/ClickHouse/ClickHouse into master

This commit is contained in:
Dmitriy 2020-10-31 16:56:30 +03:00
commit 8b64d7be56
239 changed files with 5156 additions and 1534 deletions


@ -409,7 +409,7 @@
## ClickHouse release 20.6
### ClickHouse release v20.6.3.28-stable
#### New Feature
@ -2362,7 +2362,7 @@ No changes compared to v20.4.3.16-stable.
* `Live View` table engine refactoring. [#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov))
* Add additional checks for external dictionaries created from DDL-queries. [#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin))
* Fix error `Column ... already exists` while using `FINAL` and `SAMPLE` together, e.g. `select count() from table final sample 1/2`. Fixes [#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
* Now the first argument of the `joinGet` function can be a table identifier. [#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird))
* Allow using `MaterializedView` with subqueries above `Kafka` tables. [#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
* Now background moves between disks run in a separate thread pool. [#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon))
* `SYSTEM RELOAD DICTIONARY` now executes synchronously. [#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar))


@ -59,25 +59,6 @@ set(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Generate debug library name with a pos
# For more info see https://cmake.org/cmake/help/latest/prop_gbl/USE_FOLDERS.html
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
# cmake 3.9+ needed.
# Usually impractical.
# See also ${ENABLE_THINLTO}
option(ENABLE_IPO "Full link time optimization")
if(ENABLE_IPO)
cmake_policy(SET CMP0069 NEW)
include(CheckIPOSupported)
check_ipo_supported(RESULT IPO_SUPPORTED OUTPUT IPO_NOT_SUPPORTED)
if(IPO_SUPPORTED)
message(STATUS "IPO/LTO is supported, enabling")
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
else()
message (${RECONFIGURE_MESSAGE_LEVEL} "IPO/LTO is not supported: <${IPO_NOT_SUPPORTED}>")
endif()
else()
message(STATUS "IPO/LTO not enabled.")
endif()
# Check that submodules are present only if source was downloaded with git
if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/boost/boost")
    message (FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")


@ -51,7 +51,7 @@ struct StringRef
};
/// Here constexpr doesn't imply inline, see https://www.viva64.com/en/w/v1043/
/// nullptr can't be used because the StringRef values are used in SipHash's pointer arithmetic
/// and UBSan thinks that something like nullptr + 8 is UB.
constexpr const inline char empty_string_ref_addr{};
constexpr const inline StringRef EMPTY_STRING_REF{&empty_string_ref_addr, 0};


@ -16,8 +16,4 @@ endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
    set (ARCH_PPC64LE 1)
# FIXME: move this check into tools.cmake
if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
endif ()
endif ()


@ -84,3 +84,9 @@ if (LINKER_NAME)
message(STATUS "Using custom linker by name: ${LINKER_NAME}")
endif ()
if (ARCH_PPC64LE)
if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
endif ()
endif ()


@ -11,11 +11,11 @@ CFLAGS (GLOBAL -DDBMS_VERSION_MAJOR=${VERSION_MAJOR})
CFLAGS (GLOBAL -DDBMS_VERSION_MINOR=${VERSION_MINOR})
CFLAGS (GLOBAL -DDBMS_VERSION_PATCH=${VERSION_PATCH})
CFLAGS (GLOBAL -DVERSION_FULL=\"\\\"${VERSION_FULL}\\\"\")
CFLAGS (GLOBAL -DVERSION_MAJOR=${VERSION_MAJOR})
CFLAGS (GLOBAL -DVERSION_MINOR=${VERSION_MINOR})
CFLAGS (GLOBAL -DVERSION_PATCH=${VERSION_PATCH})
# TODO: not supported yet, not sure if ya.make supports arithmetic.
CFLAGS (GLOBAL -DVERSION_INTEGER=0)
CFLAGS (GLOBAL -DVERSION_NAME=\"\\\"${VERSION_NAME}\\\"\")


@ -192,7 +192,7 @@ set(SRCS
${HDFS3_SOURCE_DIR}/common/FileWrapper.h
)
# old kernels (< 3.17) don't have SYS_getrandom. Always use the POSIX implementation to have better compatibility
set_source_files_properties(${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp PROPERTIES COMPILE_FLAGS "-DBOOST_UUID_RANDOM_PROVIDER_FORCE_POSIX=1")
# target

@ -1 +1 @@
Subproject commit f5638e954a79f50bac7c7a5deaa5a241e0ce8b5f Subproject commit 1485b0de3eaa1508dfe49a5ba1e4aa2a71fd8335


@ -0,0 +1,8 @@
# post / preinstall scripts (not needed, we do it in Dockerfile)
alpine-root/install/*
# docs (looks useless)
alpine-root/usr/share/doc/*
# packages, etc. (used by prepare.sh)
alpine-root/tgz-packages/*

docker/server/.gitignore

@ -0,0 +1 @@
alpine-root/*


@ -0,0 +1,26 @@
FROM alpine
ENV LANG=en_US.UTF-8 \
LANGUAGE=en_US:en \
LC_ALL=en_US.UTF-8 \
TZ=UTC \
CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
COPY alpine-root/ /
# from https://github.com/ClickHouse/ClickHouse/blob/master/debian/clickhouse-server.postinst
RUN addgroup clickhouse \
&& adduser -S -H -h /nonexistent -s /bin/false -G clickhouse -g "ClickHouse server" clickhouse \
&& chown clickhouse:clickhouse /var/lib/clickhouse \
&& chmod 700 /var/lib/clickhouse \
&& chown root:clickhouse /var/log/clickhouse-server \
&& chmod 775 /var/log/clickhouse-server \
&& chmod +x /entrypoint.sh \
&& apk add --no-cache su-exec
EXPOSE 9000 8123 9009
VOLUME /var/lib/clickhouse \
/var/log/clickhouse-server
ENTRYPOINT ["/entrypoint.sh"]

docker/server/alpine-build.sh

@ -0,0 +1,59 @@
#!/bin/bash
set -x
REPO_CHANNEL="${REPO_CHANNEL:-stable}" # lts / testing / prestable / etc
REPO_URL="${REPO_URL:-"https://repo.yandex.ru/clickhouse/tgz/${REPO_CHANNEL}"}"
VERSION="${VERSION:-20.9.3.45}"
# where original files live
DOCKER_BUILD_FOLDER="${BASH_SOURCE%/*}"
# we will create root for our image here
CONTAINER_ROOT_FOLDER="${DOCKER_BUILD_FOLDER}/alpine-root"
# where to put downloaded tgz
TGZ_PACKAGES_FOLDER="${CONTAINER_ROOT_FOLDER}/tgz-packages"
# clean up the root from old runs
rm -rf "$CONTAINER_ROOT_FOLDER"
mkdir -p "$TGZ_PACKAGES_FOLDER"
PACKAGES=( "clickhouse-client" "clickhouse-server" "clickhouse-common-static" )
# download tars from the repo
for package in "${PACKAGES[@]}"
do
wget -q --show-progress "${REPO_URL}/${package}-${VERSION}.tgz" -O "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz"
done
# unpack tars
for package in "${PACKAGES[@]}"
do
tar xvzf "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" --strip-components=2 -C "$CONTAINER_ROOT_FOLDER"
done
# prepare few more folders
mkdir -p "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/users.d" \
"${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d" \
"${CONTAINER_ROOT_FOLDER}/var/log/clickhouse-server" \
"${CONTAINER_ROOT_FOLDER}/var/lib/clickhouse" \
"${CONTAINER_ROOT_FOLDER}/docker-entrypoint-initdb.d" \
"${CONTAINER_ROOT_FOLDER}/lib64"
cp "${DOCKER_BUILD_FOLDER}/docker_related_config.xml" "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d/"
cp "${DOCKER_BUILD_FOLDER}/entrypoint.alpine.sh" "${CONTAINER_ROOT_FOLDER}/entrypoint.sh"
## get glibc components from ubuntu 20.04 and put them to expected place
docker pull ubuntu:20.04
ubuntu20image=$(docker create --rm ubuntu:20.04)
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64"
docker build "$DOCKER_BUILD_FOLDER" -f Dockerfile.alpine -t "yandex/clickhouse-server:${VERSION}-alpine" --pull


@ -0,0 +1,152 @@
#!/bin/sh
#set -x
DO_CHOWN=1
if [ "$CLICKHOUSE_DO_NOT_CHOWN" = 1 ]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# busybox has setuidgid & chpst built in
gosu="su-exec $USER:$GROUP"
else
USER="$(id -u)"
GROUP="$(id -g)"
gosu=""
DO_CHOWN=0
fi
# set some vars
CLICKHOUSE_CONFIG="${CLICKHOUSE_CONFIG:-/etc/clickhouse-server/config.xml}"
# port is needed to check if clickhouse-server is ready for connections
HTTP_PORT="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=http_port)"
# get CH directories locations
DATA_DIR="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=path || true)"
TMP_DIR="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=tmp_path || true)"
USER_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=user_files_path || true)"
LOG_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=logger.log || true)"
LOG_DIR="$(dirname $LOG_PATH || true)"
ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=logger.errorlog || true)"
ERROR_LOG_DIR="$(dirname $ERROR_LOG_PATH || true)"
FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=format_schema_path || true)"
CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"
CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-}"
CLICKHOUSE_DB="${CLICKHOUSE_DB:-}"
for dir in "$DATA_DIR" \
"$ERROR_LOG_DIR" \
"$LOG_DIR" \
"$TMP_DIR" \
"$USER_PATH" \
"$FORMAT_SCHEMA_PATH"
do
# check if variable not empty
[ -z "$dir" ] && continue
# ensure directories exist
if ! mkdir -p "$dir"; then
echo "Couldn't create necessary directory: $dir"
exit 1
fi
if [ "$DO_CHOWN" = "1" ]; then
# ensure proper directories permissions
chown -R "$USER:$GROUP" "$dir"
elif [ "$(stat -c %u "$dir")" != "$USER" ]; then
echo "Necessary directory '$dir' isn't owned by user with id '$USER'"
exit 1
fi
done
# if clickhouse user is defined - create it (user "default" already exists out of box)
if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ]; then
echo "$0: create new user '$CLICKHOUSE_USER' instead of 'default'"
cat <<EOT > /etc/clickhouse-server/users.d/default-user.xml
<yandex>
<!-- Docs: <https://clickhouse.tech/docs/en/operations/settings/settings_users/> -->
<users>
<!-- Remove default user -->
<default remove="remove">
</default>
<${CLICKHOUSE_USER}>
<profile>default</profile>
<networks>
<ip>::/0</ip>
</networks>
<password>${CLICKHOUSE_PASSWORD}</password>
<quota>default</quota>
</${CLICKHOUSE_USER}>
</users>
</yandex>
EOT
fi
if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
# Listen only on localhost until the initialization is done
$gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG -- --listen_host=127.0.0.1 &
pid="$!"
# check if clickhouse is ready to accept connections
# will try to send ping clickhouse via http_port (max 6 retries, with 1 sec timeout and 1 sec delay between retries)
tries=6
while ! wget --spider -T 1 -q "http://localhost:$HTTP_PORT/ping" 2>/dev/null; do
if [ "$tries" -le "0" ]; then
echo >&2 'ClickHouse init process failed.'
exit 1
fi
tries=$(( tries-1 ))
sleep 1
done
if [ ! -z "$CLICKHOUSE_PASSWORD" ]; then
printf -v WITH_PASSWORD '%s %q' "--password" "$CLICKHOUSE_PASSWORD"
fi
clickhouseclient="clickhouse-client --multiquery -u $CLICKHOUSE_USER $WITH_PASSWORD "
# create default database, if defined
if [ -n "$CLICKHOUSE_DB" ]; then
echo "$0: create database '$CLICKHOUSE_DB'"
"$clickhouseclient" -q "CREATE DATABASE IF NOT EXISTS $CLICKHOUSE_DB";
fi
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh)
if [ -x "$f" ]; then
echo "$0: running $f"
"$f"
else
echo "$0: sourcing $f"
. "$f"
fi
;;
*.sql) echo "$0: running $f"; cat "$f" | "$clickhouseclient" ; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "$clickhouseclient"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
if ! kill -s TERM "$pid" || ! wait "$pid"; then
echo >&2 'Finishing of ClickHouse init process failed.'
exit 1
fi
fi
# if no args passed to `docker run` or first argument start with `--`, then the user is passing clickhouse-server arguments
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
exec $gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image
exec "$@"


@ -63,7 +63,7 @@ function configure
# Make copies of the original db for both servers. Use hardlinks instead
# of copying to save space. Before that, remove preprocessed configs and
# system tables, because sharing them between servers with hardlinks may
# lead to weird effects.
rm -r left/db ||:
rm -r right/db ||:
rm -r db0/preprocessed_configs ||:
@ -82,7 +82,7 @@ function restart
export MALLOC_CONF="confirm_conf:true"
set -m # Spawn servers in their own process groups
left/clickhouse-server --config-file=left/config/config.xml \
    -- --path left/db --user_files_path left/db/user_files \
    &>> left-server-log.log &
@ -208,7 +208,7 @@ function run_tests
echo test "$test_name"
# Don't profile if we're past the time limit.
# Use awk because bash doesn't support floating point arithmetic.
profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 10 : 0) }")
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
@ -541,10 +541,10 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
as select
abs(diff) > report_threshold and abs(diff) > stat_threshold as changed_fail,
abs(diff) > report_threshold - 0.05 and abs(diff) > stat_threshold as changed_show,
not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail,
not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show,
left, right, diff, stat_threshold,
if(report_threshold > 0, report_threshold, 0.10) as report_threshold,
query_metric_stats.test test, query_metric_stats.query_index query_index,
@ -767,7 +767,7 @@ create table all_tests_report engine File(TSV, 'report/all-queries.tsv') as
-- The threshold for 2) is significantly larger than the threshold for 1), to
-- avoid jitter.
create view shortness
as select
(test, query_index) in
(select * from file('analyze/marked-short-queries.tsv', TSV,
'test text, query_index int'))


@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN pip3 install urllib3 testflows==1.6.59 docker-compose docker dicttoxml kazoo tzlocal
ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce


@ -51,7 +51,7 @@ Optional parameters:
- `rabbitmq_row_delimiter` Delimiter character, which ends the message.
- `rabbitmq_schema` Parameter that must be used if the format requires a schema definition. For example, [Capn Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
- `rabbitmq_num_consumers` The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient.
- `rabbitmq_num_queues` Total number of queues. Default: `1`. Increasing this number can significantly improve performance.
- `rabbitmq_queue_base` - Specify a hint for queue names. Use cases of this setting are described below.
- `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to dead letter exchange. By default dead letter exchange is not specified.
- `rabbitmq_persistent` - If set to 1 (true), in insert query delivery mode will be set to 2 (marks messages as 'persistent'). Default: `0`.
@ -148,4 +148,5 @@ Example:
- `_channel_id` - ChannelID, on which consumer, who received the message, was declared.
- `_delivery_tag` - DeliveryTag of the received message. Scoped per channel.
- `_redelivered` - `redelivered` flag of the message.
- `_message_id` - messageID of the received message; non-empty if it was set when the message was published.
- `_timestamp` - timestamp of the received message; non-empty if it was set when the message was published.
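For orientation, a minimal sketch of a RabbitMQ-backed table that uses `rabbitmq_num_queues`; the table name, exchange name and host are illustrative, not part of this commit:

``` sql
CREATE TABLE rabbitmq_queue
(
    key UInt64,
    value String
)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'localhost:5672',
         rabbitmq_exchange_name = 'exchange1',
         rabbitmq_format = 'JSONEachRow',
         rabbitmq_num_consumers = 2,
         rabbitmq_num_queues = 4;
```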


@ -2148,7 +2148,34 @@ Result:
└───────────────┘
```
## output_format_pretty_row_numbers {#output_format_pretty_row_numbers}
Adds row numbers to output in the [Pretty](../../interfaces/formats.md#pretty) format.
Possible values:
- 0 — Output without row numbers.
- 1 — Output with row numbers.
Default value: `0`.
**Example**
Query:
```sql
SET output_format_pretty_row_numbers = 1;
SELECT TOP 3 name, value FROM system.settings;
```
Result:
```text
┌─name────────────────────┬─value───┐
1. │ min_compress_block_size │ 65536 │
2. │ max_compress_block_size │ 1048576 │
3. │ max_block_size │ 65505 │
└─────────────────────────┴─────────┘
```
## allow_experimental_bigint_types {#allow_experimental_bigint_types}
@ -2160,3 +2187,5 @@ Possible values:
- 0 — The bigint data type is disabled.
Default value: `0`.
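As a quick, illustrative sketch (the table name is made up): with the setting enabled, the extended integer types become available in DDL.

``` sql
SET allow_experimental_bigint_types = 1;

CREATE TABLE big_numbers
(
    x Int256,
    y UInt256
)
ENGINE = Memory;
```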
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->


@ -80,4 +80,4 @@ Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argu
## See Also {#see-also}
- [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) operator
- [toInterval](../../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions


@ -23,8 +23,6 @@ SELECT
└─────────────────────┴────────────┴────────────┴─────────────────────┘
```
Only time zones that differ from UTC by a whole number of hours are supported.
## toTimeZone {#totimezone}
Convert time or date and time to the specified time zone.
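A small illustration (the time zone names below are only examples):

``` sql
SELECT
    toDateTime('2020-10-31 12:00:00', 'UTC') AS time_utc,
    toTimeZone(time_utc, 'Asia/Yekaterinburg') AS time_yekat;
```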


@ -551,7 +551,7 @@ formatReadableTimeDelta(column[, maximum_unit])
**Parameters**
- `column` — A column with numeric time delta.
- `maximum_unit` — Optional. Maximum unit to show. Acceptable values: seconds, minutes, hours, days, months, years.
Example:
@ -626,7 +626,12 @@ neighbor(column, offset[, default_value])
```
The result of the function depends on the affected data blocks and the order of data in the block.
If you make a subquery with ORDER BY and call the function from outside the subquery, you can get the expected result.
!!! warning "Warning"
    It can reach the neighbor rows only inside the currently processed data block.
    The order of rows used during the calculation of `neighbor` can differ from the order of rows returned to the user.
    To prevent that, you can make a subquery with ORDER BY and call the function from outside the subquery, as shown in the sketch below.
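A minimal, illustrative sketch of that pattern, using the `numbers` table function (not taken from the original docs):

``` sql
SELECT
    number,
    neighbor(number, 1) AS next_number
FROM
(
    SELECT number
    FROM numbers(5)
    ORDER BY number
);
```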
**Parameters**
@ -731,8 +736,13 @@ Result:
Calculates the difference between successive row values in the data block.
Returns 0 for the first row and the difference from the previous row for each subsequent row.
!!! warning "Warning"
    It can reach the previous row only inside the currently processed data block.
The result of the function depends on the affected data blocks and the order of data in the block.
If you make a subquery with ORDER BY and call the function from outside the subquery, you can get the expected result.
The order of rows used during the calculation of `runningDifference` can differ from the order of rows returned to the user.
To prevent that, you can make a subquery with ORDER BY and call the function from outside the subquery.
Example:
@ -1584,7 +1594,7 @@ isDecimalOverflow(d, [p])
**Parameters**
- `d` — value. [Decimal](../../sql-reference/data-types/decimal.md).
- `p` — precision. Optional. If omitted, the initial precision of the first argument is used. Using this parameter could be helpful for data extraction to another DBMS or file. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges).
**Returned values**


@ -169,7 +169,7 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL
**See Also**
- [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type
- [toInterval](../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions
## Logical Negation Operator {#logical-negation-operator}


@ -121,7 +121,7 @@ Defines storage time for values. Can be specified only for MergeTree-family tabl
## Column Compression Codecs {#codecs}
By default, ClickHouse applies the `lz4` compression method. For `MergeTree`-engine family you can change the default compression method in the [compression](../../../operations/server-configuration-parameters/settings.md#server-settings-compression) section of a server configuration.
You can also define the compression method for each individual column in the `CREATE TABLE` query.
@ -138,7 +138,7 @@ ENGINE = <Engine>
...
```
The `Default` codec can be specified to reference default compression which may depend on different settings (and properties of data) in runtime.
Example: `value UInt64 CODEC(Default)` — the same as lack of codec specification.
Also you can remove current CODEC from the column and use default compression from config.xml:
@ -149,7 +149,7 @@ ALTER TABLE codec_example MODIFY COLUMN float_value CODEC(Default);
Codecs can be combined in a pipeline, for example, `CODEC(Delta, Default)`.
To select the best codec combination for your project, run benchmarks similar to those described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article. One thing to note is that codecs can't be applied to ALIAS columns.
!!! warning "Warning"
    You can't decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utility.
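As a quick, illustrative sketch of combining codecs in a pipeline (table and column names are made up, not part of this commit):

``` sql
CREATE TABLE codec_pipeline_example
(
    ts DateTime CODEC(Delta, ZSTD),
    value Float64 CODEC(Gorilla)
)
ENGINE = MergeTree
ORDER BY ts;
```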


@ -13,12 +13,61 @@ Basic query format:
INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
```
You can specify a list of columns to insert using the `(c1, c2, c3)` or `COLUMNS(c1,c2,c3)` syntax.
Instead of listing all the required columns you can use the `(* EXCEPT(column_list))` syntax.
For example, consider the table:
``` sql
SHOW CREATE insert_select_testtable;
```
```
┌─statement────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE insert_select_testtable
(
`a` Int8,
`b` String,
`c` Int8
)
ENGINE = MergeTree()
ORDER BY a
SETTINGS index_granularity = 8192 │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
``` sql
INSERT INTO insert_select_testtable (*) VALUES (1, 'a', 1) ;
```
If you want to insert data into all the columns except `b`, you need to pass as many values as the number of columns you specified in parentheses:
``` sql
INSERT INTO insert_select_testtable (* EXCEPT(b)) Values (2, 2);
```
``` sql
SELECT * FROM insert_select_testtable;
```
```
┌─a─┬─b─┬─c─┐
│ 2 │ │ 2 │
└───┴───┴───┘
┌─a─┬─b─┬─c─┐
│ 1 │ a │ 1 │
└───┴───┴───┘
```
In this example, we see that the second inserted row has its `a` and `c` columns filled with the passed values, and `b` filled with its default value.
If a list of columns doesn't include all existing columns, the rest of the columns are filled with:
- The values calculated from the `DEFAULT` expressions specified in the table definition.
- Zeros and empty strings, if `DEFAULT` expressions are not defined.
If [strict_insert_defaults=1](../../operations/settings/settings.md), columns that do not have `DEFAULT` defined must be listed in the query.
Data can be passed to the INSERT in any [format](../../interfaces/formats.md#formats) supported by ClickHouse. The format must be specified explicitly in the query:
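For instance, a minimal sketch using the `insert_select_testtable` table from the example above and the CSV format (the data row is illustrative):

``` sql
INSERT INTO insert_select_testtable FORMAT CSV
3,"c",3
```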


@ -4,13 +4,17 @@ toc_title: WITH
# WITH Clause {#with-clause}
ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)): the results of a `WITH` clause can be used in the rest of the `SELECT` query. Named subqueries can be included in the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current-level CTEs from the `WITH` expression.
## Syntax
``` sql
WITH <expression> AS <identifier>
```
or
``` sql
WITH <identifier> AS <subquery expression>
```
## Examples {#examples}
@ -22,10 +26,10 @@ SELECT *
FROM hits
WHERE
    EventDate = toDate(ts_upper_bound) AND
    EventTime <= ts_upper_bound;
```
**Example 2:** Evicting a sum(bytes) expression result from the SELECT clause column list
``` sql
WITH sum(bytes) as s
@ -34,10 +38,10 @@ SELECT
    table
FROM system.parts
GROUP BY table
ORDER BY s;
```
**Example 3:** Using results of a scalar subquery
``` sql
/* this example would return TOP 10 of most huge tables */
@ -53,27 +57,14 @@ SELECT
FROM system.parts
GROUP BY table
ORDER BY table_disk_usage DESC
LIMIT 10;
```
**Example 4:** Reusing expression in a subquery
As a workaround for the current limitation on expression usage in subqueries, you may duplicate it.

``` sql
WITH ['hello'] AS hello
SELECT
    hello,
    *
FROM
(
    WITH ['hello'] AS hello
    SELECT hello
)
```

``` text
┌─hello─────┬─hello─────┐
│ ['hello'] │ ['hello'] │
└───────────┴───────────┘
```

``` sql
WITH test1 AS (SELECT i + 1, j + 1 FROM test1)
SELECT * FROM test1;
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/with/) <!--hide-->


@ -45,7 +45,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
- `rabbitmq_row_delimiter` Delimiter character, which ends the message.
- `rabbitmq_schema` An optional parameter, required if the format needs a schema definition. For example, [Capn Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
- `rabbitmq_num_consumers` The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient.
- `rabbitmq_num_queues` Total number of queues. Default: `1`. A larger number of queues can significantly increase throughput.
- `rabbitmq_queue_base` - A hint for queue names. Use cases for this setting are described below.
- `rabbitmq_persistent` - A flag that controls the 'durable' setting for messages in `INSERT` queries. Default: `0`.
- `rabbitmq_skip_broken_messages` The maximum number of broken messages per block. If `rabbitmq_skip_broken_messages = N`, the engine skips `N` messages that could not be parsed. One message corresponds to exactly one record (row). Default value: 0.
@ -140,4 +140,5 @@ Example:
- `_channel_id` - The `ChannelID` of the channel on which the message was received.
- `_delivery_tag` - The `DeliveryTag` of the received message. Unique within a single channel.
- `_redelivered` - The `redelivered` flag of the message. (Non-zero if there is a chance the message was received by more than one channel.)
- `_message_id` - The `messageID` field of the received message; non-empty if it was set when the message was published.
- `_timestamp` - The `timestamp` field of the received message; non-empty if it was set when the message was published.


@ -1977,6 +1977,48 @@ SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes;
└───────────────┘
```
## output_format_pretty_row_numbers {#output_format_pretty_row_numbers}
Enables the display of row numbers for queries output in the [Pretty](../../interfaces/formats.md#pretty) format.
Possible values:
- 0 — Row numbers are not output.
- 1 — Row numbers are output.
Default value: `0`.
**Example**
Query:
```sql
SET output_format_pretty_row_numbers = 1;
SELECT TOP 3 name, value FROM system.settings;
```
Result:
```text
┌─name────────────────────┬─value───┐
1. │ min_compress_block_size │ 65536 │
2. │ max_compress_block_size │ 1048576 │
3. │ max_block_size │ 65505 │
└─────────────────────────┴─────────┘
```
## allow_experimental_bigint_types {#allow_experimental_bigint_types}
Enables or disables support for integer values that exceed the maximum allowed for the `int` type.
Possible values:
- 1 — Big integer values are supported.
- 0 — Big integer values are not supported.
Default value: `0`.
## lock_acquire_timeout {#lock_acquire_timeout}
Sets the number of seconds the server waits to acquire a table lock.


@ -13,7 +13,55 @@ toc_title: INSERT INTO
INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
```
You can specify a list of columns to insert using the `(c1, c2, c3)` or `COLUMNS(c1,c2,c3)` syntax.
Instead of listing all the required columns you can use the `(* EXCEPT(column_list))` syntax.
As an example, consider the table:
``` sql
SHOW CREATE insert_select_testtable
```
```
┌─statement────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE insert_select_testtable
(
`a` Int8,
`b` String,
`c` Int8
)
ENGINE = MergeTree()
ORDER BY a
SETTINGS index_granularity = 8192 │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
``` sql
INSERT INTO insert_select_testtable (*) VALUES (1, 'a', 1)
```
If you want to insert data into all the columns except `b`, you need to pass as many values as the number of columns you specified in parentheses:
``` sql
INSERT INTO insert_select_testtable (* EXCEPT(b)) Values (2, 2)
```
``` sql
SELECT * FROM insert_select_testtable
```
```
┌─a─┬─b─┬─c─┐
│ 2 │ │ 2 │
└───┴───┴───┘
┌─a─┬─b─┬─c─┐
│ 1 │ a │ 1 │
└───┴───┴───┘
```
In this example, we see that the second inserted row has the `a` and `c` columns filled with the passed values, and `b` filled with its default value.
If the list of columns doesn't include all existing columns, the rest of the columns are filled with:
- The values calculated from the `DEFAULT` expressions specified in the table definition.
- Zeros and empty strings, if `DEFAULT` expressions are not defined.


@ -2,18 +2,21 @@
toc_title: WITH
---
# WITH Clause {#with-clause}
ClickHouse supports [Common Table Expressions](https://ru.wikipedia.org/wiki/Иерархические_и_рекурсивныеапросы_в_SQL), that is, it allows using the results of expressions from the `WITH` clause in the rest of the `SELECT` query. Named subqueries can be included in the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current-level common table expression from the `WITH` expression.
## Syntax
``` sql
WITH <expression> AS <identifier>
```
or
``` sql
WITH <identifier> AS <subquery expression>
```
## Examples
**Example 1:** Using a constant expression as a "variable"
@ -23,7 +26,7 @@ SELECT *
FROM hits
WHERE
    EventDate = toDate(ts_upper_bound) AND
    EventTime <= ts_upper_bound;
```
**Example 2:** Evicting a sum(bytes) expression result from the SELECT clause column list
@ -35,7 +38,7 @@ SELECT
    table
FROM system.parts
GROUP BY table
ORDER BY s;
```
**Example 3:** Using results of a scalar subquery
@ -54,27 +57,14 @@ SELECT
FROM system.parts
GROUP BY table
ORDER BY table_disk_usage DESC
LIMIT 10;
```
**Example 4:** Reusing an expression
At the moment, reusing an expression from the WITH clause inside a subquery is only possible via duplication.
``` sql
WITH ['hello'] AS hello
SELECT
    hello,
    *
FROM
(
    WITH ['hello'] AS hello
    SELECT hello
)
```

``` text
┌─hello─────┬─hello─────┐
│ ['hello'] │ ['hello'] │
└───────────┴───────────┘
```

``` sql
WITH test1 AS (SELECT i + 1, j + 1 FROM test1)
SELECT * FROM test1;
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/with/) <!--hide-->


@ -218,6 +218,8 @@ private:
QueryFuzzer fuzzer;
int query_fuzzer_runs = 0;
std::optional<Suggest> suggest;
/// We will format query_id in interactive mode in various ways, the default is just to print Query id: ...
std::vector<std::pair<String, String>> query_id_formats;
@ -577,10 +579,11 @@ private:
if (print_time_to_stderr)
    throw Exception("time option could be specified only in non-interactive mode", ErrorCodes::BAD_ARGUMENTS);
suggest.emplace();
if (server_revision >= Suggest::MIN_SERVER_REVISION && !config().getBool("disable_suggestion", false))
{
    /// Load suggestion data from the server.
    suggest->load(connection_parameters, config().getInt("suggestion_limit"));
}
/// Load command history if present.
@ -607,7 +610,7 @@ private:
highlight_callback = highlight;
ReplxxLineReader lr(
    *suggest,
    history_file,
    config().has("multiline"),
    query_extenders,
@ -615,7 +618,7 @@ private:
    highlight_callback);
#elif defined(USE_READLINE) && USE_READLINE
ReadlineLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters);
#else
LineReader lr(history_file, config().has("multiline"), query_extenders, query_delimiters);
#endif


@ -18,10 +18,11 @@ namespace ErrorCodes
class Suggest : public LineReader::Suggest, boost::noncopyable
{
public:
    Suggest();
    ~Suggest()
    {
        if (loading_thread.joinable())
            loading_thread.join();
    }
    void load(const ConnectionParameters & connection_parameters, size_t suggestion_limit);
@ -30,12 +31,6 @@ public:
static constexpr int MIN_SERVER_REVISION = 54406;
private:
Suggest();
~Suggest()
{
if (loading_thread.joinable())
loading_thread.join();
}
void loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit);
void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query);


@ -152,7 +152,7 @@ void LocalServer::tryInitPath()
default_path = parent_folder / fmt::format("clickhouse-local-{}-{}-{}", getpid(), time(nullptr), randomSeed());
if (exists(default_path))
    throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "Unsuccessful attempt to create working directory: {} exist!", default_path.string());
create_directory(default_path);
temporary_directory_to_delete = default_path;


@ -270,7 +270,7 @@
This parameter is mandatory and cannot be empty.
roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
If no roles are specified, user will not be able to perform any actions after authentication.
If any of the listed roles is not defined locally at the time of authentication, the authentication attempt
will fail as if the provided password was incorrect.
Example:
<ldap>


@ -100,7 +100,7 @@ namespace
if (res & alter_table)
    res |= alter_view;
/// CREATE TABLE (on any database/table) => CREATE_TEMPORARY_TABLE (global)
static const AccessFlags create_temporary_table = AccessType::CREATE_TEMPORARY_TABLE;
if ((level == 0) && (max_flags_with_children & create_table))
    res |= create_temporary_table;


@ -585,7 +585,7 @@ void IAccessStorage::throwInvalidPassword()
void IAccessStorage::throwCannotAuthenticate(const String & user_name)
{
    /// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons,
    /// only the log will show the exact reason.
    throw Exception(user_name + ": Authentication failed: password is incorrect or there is no user with such name", ErrorCodes::AUTHENTICATION_FAILED);
}


@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
AccessControlManager.cpp


@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | sed 's/^\.\// /' | sort ?>


@ -296,7 +296,7 @@ public:
{
    typename ColumnVector<T>::Container & data_to = assert_cast<ColumnVector<T> &>(arr_to.getData()).getData();
    if constexpr (is_big_int_v<T>)
        // is data_to empty? we should probably use std::vector::insert then
        for (auto it = this->data(place).value.begin(); it != this->data(place).value.end(); it++)
            data_to.push_back(*it);
    else


@ -20,9 +20,17 @@
namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
}
#if defined(OS_DARWIN)
extern "C"
{
double lgammal_r(double x, int * signgamp);
}
#endif
namespace DB
{
@ -98,7 +106,7 @@ struct AggregateFunctionStudentTTestData final
Float64 getSSquared() const
{
    /// The original formula looks like
    /// \frac{\sum_{i = 1}^{n_x}{(x_i - \bar{x}) ^ 2} + \sum_{i = 1}^{n_y}{(y_i - \bar{y}) ^ 2}}{n_x + n_y - 2}
    /// But we made some mathematical transformations not to store original sequences.
    /// Also we dropped sqrt, because it will be squared later.
@ -150,7 +158,8 @@ struct AggregateFunctionStudentTTestData final
const Float64 t = getTStatisticSquared();
auto f = [&v] (double x) { return std::pow(x, v/2 - 1) / std::sqrt(1 - x); };
Float64 numenator = integrateSimpson(0, v / (t + v), f);
int unused;
Float64 denominator = std::exp(lgammal_r(v / 2, &unused) + lgammal_r(0.5, &unused) - lgammal_r(v / 2 + 0.5, &unused));
return numenator / denominator;
}
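For context, the aggregate function implemented in this file is invoked from SQL roughly as below; the table and column names are illustrative, not part of this commit:

``` sql
SELECT studentTTest(sample_value, sample_index) AS t_test
FROM ab_test_results;
```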


@ -18,11 +18,20 @@
#include <type_traits>
namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
}
#if defined(OS_DARWIN)
extern "C"
{
double lgammal_r(double x, int * signgamp);
}
#endif
namespace DB namespace DB
{ {
@ -159,9 +168,10 @@ struct AggregateFunctionWelchTTestData final
{ {
const Float64 v = getDegreesOfFreedom(); const Float64 v = getDegreesOfFreedom();
const Float64 t = getTStatisticSquared(); const Float64 t = getTStatisticSquared();
auto f = [&v] (double x) { return std::pow(x, v/2 - 1) / std::sqrt(1 - x); }; auto f = [&v] (double x) { return std::pow(x, v / 2 - 1) / std::sqrt(1 - x); };
Float64 numenator = integrateSimpson(0, v / (t + v), f); Float64 numenator = integrateSimpson(0, v / (t + v), f);
Float64 denominator = std::exp(std::lgammal(v/2) + std::lgammal(0.5) - std::lgammal(v/2 + 0.5)); int unused;
Float64 denominator = std::exp(lgammal_r(v / 2, &unused) + lgammal_r(0.5, &unused) - lgammal_r(v / 2 + 0.5, &unused));
return numenator / denominator; return numenator / denominator;
} }

View File

@ -39,8 +39,8 @@ namespace ErrorCodes
namespace detail namespace detail
{ {
const size_t DEFAULT_SAMPLE_COUNT = 8192; const size_t DEFAULT_MAX_SAMPLE_SIZE = 8192;
const auto MAX_SKIP_DEGREE = sizeof(UInt32) * 8; const auto MAX_SKIP_DEGREE = sizeof(UInt32) * 8;
} }
/// What if there is not a single value - throw an exception, or return 0 or NaN in the case of double? /// What if there is not a single value - throw an exception, or return 0 or NaN in the case of double?
@ -50,6 +50,7 @@ enum class ReservoirSamplerDeterministicOnEmpty
RETURN_NAN_OR_ZERO, RETURN_NAN_OR_ZERO,
}; };
template <typename T, template <typename T,
ReservoirSamplerDeterministicOnEmpty OnEmpty = ReservoirSamplerDeterministicOnEmpty::THROW> ReservoirSamplerDeterministicOnEmpty OnEmpty = ReservoirSamplerDeterministicOnEmpty::THROW>
class ReservoirSamplerDeterministic class ReservoirSamplerDeterministic
@ -60,8 +61,8 @@ class ReservoirSamplerDeterministic
} }
public: public:
ReservoirSamplerDeterministic(const size_t sample_count_ = DEFAULT_SAMPLE_COUNT) ReservoirSamplerDeterministic(const size_t max_sample_size_ = detail::DEFAULT_MAX_SAMPLE_SIZE)
: sample_count{sample_count_} : max_sample_size{max_sample_size_}
{ {
} }
@ -131,8 +132,8 @@ public:
void merge(const ReservoirSamplerDeterministic & b) void merge(const ReservoirSamplerDeterministic & b)
{ {
if (sample_count != b.sample_count) if (max_sample_size != b.max_sample_size)
throw Poco::Exception("Cannot merge ReservoirSamplerDeterministic's with different sample_count"); throw Poco::Exception("Cannot merge ReservoirSamplerDeterministic's with different max sample size");
sorted = false; sorted = false;
if (b.skip_degree > skip_degree) if (b.skip_degree > skip_degree)
@ -150,11 +151,16 @@ public:
void read(DB::ReadBuffer & buf) void read(DB::ReadBuffer & buf)
{ {
DB::readIntBinary<size_t>(sample_count, buf); size_t size = 0;
DB::readIntBinary<size_t>(size, buf);
DB::readIntBinary<size_t>(total_values, buf); DB::readIntBinary<size_t>(total_values, buf);
samples.resize(std::min(total_values, sample_count));
for (size_t i = 0; i < samples.size(); ++i) /// Compatibility with old versions.
if (size > total_values)
size = total_values;
samples.resize(size);
for (size_t i = 0; i < size; ++i)
DB::readPODBinary(samples[i], buf); DB::readPODBinary(samples[i], buf);
sorted = false; sorted = false;
@ -162,10 +168,11 @@ public:
void write(DB::WriteBuffer & buf) const void write(DB::WriteBuffer & buf) const
{ {
DB::writeIntBinary<size_t>(sample_count, buf); size_t size = samples.size();
DB::writeIntBinary<size_t>(size, buf);
DB::writeIntBinary<size_t>(total_values, buf); DB::writeIntBinary<size_t>(total_values, buf);
for (size_t i = 0; i < std::min(sample_count, total_values); ++i) for (size_t i = 0; i < size; ++i)
DB::writePODBinary(samples[i], buf); DB::writePODBinary(samples[i], buf);
} }
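A hedged sketch of the new on-disk behaviour shown in these two hunks: the writer stores the actual number of serialized samples rather than the configured sample count, and the reader clamps the stored size against total_values so that states written by older versions still load. Field names and the raw-byte I/O below are simplifications, not the DB::ReadBuffer/WriteBuffer API.

    #include <algorithm>
    #include <cstdint>
    #include <istream>
    #include <ostream>
    #include <vector>

    struct Sample { double value; uint32_t hash; };

    void write(std::ostream & out, const std::vector<Sample> & samples, uint64_t total_values)
    {
        uint64_t size = samples.size();   // the real element count, not max_sample_size
        out.write(reinterpret_cast<const char *>(&size), sizeof(size));
        out.write(reinterpret_cast<const char *>(&total_values), sizeof(total_values));
        for (const auto & s : samples)
            out.write(reinterpret_cast<const char *>(&s), sizeof(s));
    }

    void read(std::istream & in, std::vector<Sample> & samples, uint64_t & total_values)
    {
        uint64_t size = 0;
        in.read(reinterpret_cast<char *>(&size), sizeof(size));
        in.read(reinterpret_cast<char *>(&total_values), sizeof(total_values));
        size = std::min(size, total_values);   // compatibility with old states that stored sample_count here
        samples.resize(size);
        for (auto & s : samples)
            in.read(reinterpret_cast<char *>(&s), sizeof(s));
    }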
@ -174,18 +181,19 @@ private:
using Element = std::pair<T, UInt32>; using Element = std::pair<T, UInt32>;
using Array = DB::PODArray<Element, 64>; using Array = DB::PODArray<Element, 64>;
size_t sample_count; const size_t max_sample_size; /// Maximum amount of stored values.
size_t total_values{}; size_t total_values = 0; /// How many values were inserted (regardless of whether they remain in the sample or not).
bool sorted{}; bool sorted = false;
Array samples; Array samples;
UInt8 skip_degree{}; UInt8 skip_degree = 0; /// The number N determining that we save only one per 2^N elements on average.
void insertImpl(const T & v, const UInt32 hash) void insertImpl(const T & v, const UInt32 hash)
{ {
/// @todo why + 1? I don't quite recall /// Make room for one more element.
while (samples.size() + 1 >= sample_count) while (samples.size() >= max_sample_size)
{ {
if (++skip_degree > detail::MAX_SKIP_DEGREE) ++skip_degree;
if (skip_degree > detail::MAX_SKIP_DEGREE)
throw DB::Exception{"skip_degree exceeds maximum value", DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED}; throw DB::Exception{"skip_degree exceeds maximum value", DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED};
thinOut(); thinOut();
} }
@ -195,35 +203,17 @@ private:
void thinOut() void thinOut()
{ {
auto size = samples.size(); samples.resize(std::distance(samples.begin(),
for (size_t i = 0; i < size;) std::remove_if(samples.begin(), samples.end(), [this](const auto & elem){ return !good(elem.second); })));
{ sorted = false;
if (!good(samples[i].second))
{
/// swap current element with the last one
std::swap(samples[size - 1], samples[i]);
--size;
}
else
++i;
}
if (size != samples.size())
{
samples.resize(size);
sorted = false;
}
} }
void sortIfNeeded() void sortIfNeeded()
{ {
if (sorted) if (sorted)
return; return;
std::sort(samples.begin(), samples.end(), [](const auto & lhs, const auto & rhs) { return lhs.first < rhs.first; });
sorted = true; sorted = true;
std::sort(samples.begin(), samples.end(), [] (const std::pair<T, UInt32> & lhs, const std::pair<T, UInt32> & rhs)
{
return lhs.first < rhs.first;
});
} }
template <typename ResultType> template <typename ResultType>
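The rewritten thinOut replaces the manual swap-with-last loop with a single erase/remove_if pass; a self-contained sketch of the same idea, assuming the usual predicate that keeps roughly one element per 2^skip_degree based on its hash (the exact predicate is not part of this hunk):

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using Element = std::pair<double, uint32_t>;   // (value, hash)

    void thinOut(std::vector<Element> & samples, uint8_t skip_degree, bool & sorted)
    {
        // Assumed filter: survive if the low skip_degree bits of the hash are zero
        // (about one element per 2^skip_degree); requires skip_degree < 32.
        auto good = [skip_degree](uint32_t hash) { return (hash & ((1u << skip_degree) - 1)) == 0; };

        samples.erase(
            std::remove_if(samples.begin(), samples.end(),
                           [&](const Element & e) { return !good(e.second); }),
            samples.end());

        sorted = false;   // the patch resets the flag unconditionally after thinning
    }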

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common clickhouse/src/Common
) )
CFLAGS(-g0)
SRCS( SRCS(
AggregateFunctionAggThrow.cpp AggregateFunctionAggThrow.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common clickhouse/src/Common
) )
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F GroupBitmap | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests | grep -v -F GroupBitmap | sed 's/^\.\// /' | sort ?>

View File

@ -6,7 +6,6 @@ PEERDIR(
contrib/libs/poco/NetSSL_OpenSSL contrib/libs/poco/NetSSL_OpenSSL
) )
CFLAGS(-g0)
SRCS( SRCS(
Connection.cpp Connection.cpp

View File

@ -5,7 +5,6 @@ PEERDIR(
contrib/libs/poco/NetSSL_OpenSSL contrib/libs/poco/NetSSL_OpenSSL
) )
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -344,7 +344,7 @@ void ColumnNullable::updatePermutation(bool reverse, size_t limit, int null_dire
/// Shift all NULL values to the end. /// Shift all NULL values to the end.
for (const auto & [first, last] : equal_ranges) for (const auto & [first, last] : equal_ranges)
{ {
/// The current interval starts beyond the limit. /// The current interval starts beyond the limit.
if (limit && first > limit) if (limit && first > limit)
break; break;

View File

@ -82,7 +82,7 @@ public:
* @see DB::ColumnUnique * @see DB::ColumnUnique
* *
* The most common example uses https://clickhouse.tech/docs/en/sql-reference/data-types/lowcardinality/ columns. * The most common example uses https://clickhouse.tech/docs/en/sql-reference/data-types/lowcardinality/ columns.
* Consider data type @e LC(String). The inner type here is @e String which is more or less a contigous memory * Consider data type @e LC(String). The inner type here is @e String which is more or less a contiguous memory
* region, so it can be easily represented as a @e StringRef. So we pass that ref to this function and get its * region, so it can be easily represented as a @e StringRef. So we pass that ref to this function and get its
* index in the dictionary, which can be used to operate with the indices column. * index in the dictionary, which can be used to operate with the indices column.
*/ */

View File

@ -13,7 +13,6 @@ PEERDIR(
contrib/libs/pdqsort contrib/libs/pdqsort
) )
CFLAGS(-g0)
SRCS( SRCS(
Collator.cpp Collator.cpp

View File

@ -54,6 +54,7 @@
M(LocalThread, "Number of threads in local thread pools. Should be similar to GlobalThreadActive.") \ M(LocalThread, "Number of threads in local thread pools. Should be similar to GlobalThreadActive.") \
M(LocalThreadActive, "Number of threads in local thread pools running a task.") \ M(LocalThreadActive, "Number of threads in local thread pools running a task.") \
M(DistributedFilesToInsert, "Number of pending files to process for asynchronous insertion into Distributed tables. Number of files for every shard is summed.") \ M(DistributedFilesToInsert, "Number of pending files to process for asynchronous insertion into Distributed tables. Number of files for every shard is summed.") \
M(TablesToDropQueueSize, "Number of dropped tables, that are waiting for background data removal.") \
namespace CurrentMetrics namespace CurrentMetrics
{ {

View File

@ -5,15 +5,15 @@
namespace DB namespace DB
{ {
/// Helper class, that recieves file descriptor and does fsync for it in destructor. /// Helper class, that receives file descriptor and does fsync for it in destructor.
/// It's used to keep descriptor open, while doing some operations with it, and do fsync at the end. /// It's used to keep descriptor open, while doing some operations with it, and do fsync at the end.
/// Guarantees of sequence 'close-reopen-fsync' may depend on kernel version. /// Guarantees of sequence 'close-reopen-fsync' may depend on kernel version.
/// Source: linux-fsdevel mailing-list https://marc.info/?l=linux-fsdevel&m=152535409207496 /// Source: linux-fsdevel mailing-list https://marc.info/?l=linux-fsdevel&m=152535409207496
class FileSyncGuard class FileSyncGuard
{ {
public: public:
/// NOTE: If you have already opened descriptor, it's preffered to use /// NOTE: If you have already opened descriptor, it's preferred to use
/// this constructor instead of construnctor with path. /// this constructor instead of constructor with path.
FileSyncGuard(const DiskPtr & disk_, int fd_) : disk(disk_), fd(fd_) {} FileSyncGuard(const DiskPtr & disk_, int fd_) : disk(disk_), fd(fd_) {}
FileSyncGuard(const DiskPtr & disk_, const String & path) FileSyncGuard(const DiskPtr & disk_, const String & path)
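For orientation, the same idea in plain POSIX terms, as a sketch that assumes nothing about the IDisk interface the real guard goes through: keep a descriptor open (or adopt an existing one) while working with a file or directory, and fsync it when the guard leaves scope.

    #include <fcntl.h>
    #include <string>
    #include <unistd.h>

    class PosixFileSyncGuard
    {
    public:
        explicit PosixFileSyncGuard(int fd_) : fd(fd_) {}
        explicit PosixFileSyncGuard(const std::string & path) : fd(::open(path.c_str(), O_RDONLY)) {}

        PosixFileSyncGuard(const PosixFileSyncGuard &) = delete;
        PosixFileSyncGuard & operator=(const PosixFileSyncGuard &) = delete;

        ~PosixFileSyncGuard()
        {
            if (fd >= 0)
            {
                ::fsync(fd);    // durability point; errors are deliberately ignored in a destructor
                ::close(fd);
            }
        }

    private:
        int fd = -1;
    };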

View File

@ -234,13 +234,13 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
std::is_same_v<Thread, std::thread> ? CurrentMetrics::GlobalThreadActive : CurrentMetrics::LocalThreadActive); std::is_same_v<Thread, std::thread> ? CurrentMetrics::GlobalThreadActive : CurrentMetrics::LocalThreadActive);
job(); job();
/// job should be reseted before decrementing scheduled_jobs to /// job should be reset before decrementing scheduled_jobs to
/// ensure that the Job is destroyed before wait() returns. /// ensure that the Job is destroyed before wait() returns.
job = {}; job = {};
} }
catch (...) catch (...)
{ {
/// job should be reseted before decrementing scheduled_jobs to /// job should be reset before decrementing scheduled_jobs to
/// ensure that the Job is destroyed before wait() returns. /// ensure that the Job is destroyed before wait() returns.
job = {}; job = {};
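The comment fixed here describes an ordering invariant: the finished job object must be destroyed before scheduled_jobs is decremented, because wait() may return as soon as the counter reaches zero and tear down state that the job's captures still reference. A simplified, self-contained illustration (member names are assumptions loosely modeled on the pool; a real pool would also report the swallowed exception):

    #include <condition_variable>
    #include <functional>
    #include <mutex>

    std::mutex mutex;
    std::condition_variable job_finished;
    size_t scheduled_jobs = 0;

    void runOne(std::function<void()> job)
    {
        try
        {
            job();
            job = {};   // destroy captured state while this job is still counted
        }
        catch (...)
        {
            job = {};   // same on the error path
        }

        std::lock_guard lock(mutex);
        --scheduled_jobs;           // only now may wait() observe completion
        job_finished.notify_all();
    }

    void wait()
    {
        std::unique_lock lock(mutex);
        job_finished.wait(lock, [] { return scheduled_jobs == 0; });
    }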

View File

@ -172,7 +172,7 @@ protected:
void finalizeQueryProfiler(); void finalizeQueryProfiler();
void logToQueryThreadLog(QueryThreadLog & thread_log); void logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database);
void assertState(const std::initializer_list<int> & permitted_states, const char * description = nullptr) const; void assertState(const std::initializer_list<int> & permitted_states, const char * description = nullptr) const;

View File

@ -152,7 +152,7 @@ void TraceCollector::run()
if (trace_log) if (trace_log)
{ {
// time and time_in_microseconds are both being constructed from the same timespec so that the // time and time_in_microseconds are both being constructed from the same timespec so that the
// times will be equal upto the precision of a second. // times will be equal up to the precision of a second.
struct timespec ts; struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts); clock_gettime(CLOCK_REALTIME, &ts);
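A small self-contained illustration of the point being documented: both timestamps are derived from a single clock_gettime call, so the seconds value and the whole-second part of the microseconds value can never disagree.

    #include <cstdint>
    #include <ctime>

    void captureTimestamps(uint64_t & time_seconds, uint64_t & time_in_microseconds)
    {
        struct timespec ts{};
        clock_gettime(CLOCK_REALTIME, &ts);                      // one reading of the realtime clock
        time_seconds = static_cast<uint64_t>(ts.tv_sec);
        time_in_microseconds = static_cast<uint64_t>(ts.tv_sec) * 1000000
                             + static_cast<uint64_t>(ts.tv_nsec) / 1000;
    }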

View File

@ -1288,13 +1288,13 @@ void ZooKeeper::receiveEvent()
response->removeRootPath(root_path); response->removeRootPath(root_path);
} }
/// Instead of setting the watch in sendEvent, set it in receiveEvent becuase need to check the response. /// Instead of setting the watch in sendEvent, set it in receiveEvent because need to check the response.
/// The watch shouldn't be set if the node does not exist and never will, as with sequential ephemeral nodes. /// The watch shouldn't be set if the node does not exist and never will, as with sequential ephemeral nodes.
/// By using getData() instead of exists(), a watch won't be set if the node doesn't exist. /// By using getData() instead of exists(), a watch won't be set if the node doesn't exist.
if (request_info.watch) if (request_info.watch)
{ {
bool add_watch = false; bool add_watch = false;
/// 3 indicates the ZooKeeperExistsRequest. /// 3 indicates the ZooKeeperExistsRequest.
// For exists, we set the watch whether or not the node exists. // For exists, we set the watch whether or not the node exists.
// For other cases like getData, we only set the watch when the node exists. // For other cases like getData, we only set the watch when the node exists.
if (request_info.request->getOpNum() == 3) if (request_info.request->getOpNum() == 3)
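The rule these comments describe, reduced to a predicate. The op-num constant comes from the comment above; treating error code 0 as success is a ZooKeeper-protocol assumption, not something quoted from this file.

    #include <cstdint>

    // op_num 3 is ZooKeeperExistsRequest: its watch is set whether or not the node exists.
    // For getData()-like requests, the watch is set only when the node exists (success reply).
    bool shouldAddWatch(int32_t op_num, int32_t response_error)
    {
        constexpr int32_t exists_op_num = 3;
        constexpr int32_t ok = 0;   // ZOK
        return op_num == exists_op_num || response_error == ok;
    }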

View File

@ -21,7 +21,6 @@ PEERDIR(
INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc) INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc)
CFLAGS(-g0)
SRCS( SRCS(
ActionLock.cpp ActionLock.cpp

View File

@ -20,7 +20,6 @@ PEERDIR(
INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc) INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc)
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -185,9 +185,9 @@ void CompressedReadBufferBase::decompress(char * to, size_t size_decompressed, s
} }
else else
{ {
throw Exception("Data compressed with different methods, given method byte " throw Exception("Data compressed with different methods, given method byte 0x"
+ getHexUIntLowercase(method) + getHexUIntLowercase(method)
+ ", previous method byte " + ", previous method byte 0x"
+ getHexUIntLowercase(codec->getMethodByte()), + getHexUIntLowercase(codec->getMethodByte()),
ErrorCodes::CANNOT_DECOMPRESS); ErrorCodes::CANNOT_DECOMPRESS);
} }

View File

@ -87,7 +87,7 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(const ASTPtr
else else
throw Exception("Unexpected AST element for compression codec", ErrorCodes::UNEXPECTED_AST_STRUCTURE); throw Exception("Unexpected AST element for compression codec", ErrorCodes::UNEXPECTED_AST_STRUCTURE);
/// Default codec replaced with current default codec which may dependend on different /// Default codec replaced with current default codec which may depend on different
/// settings (and properties of data) in runtime. /// settings (and properties of data) in runtime.
CompressionCodecPtr result_codec; CompressionCodecPtr result_codec;
if (codec_family_name == DEFAULT_CODEC_NAME) if (codec_family_name == DEFAULT_CODEC_NAME)

View File

@ -26,7 +26,7 @@ void ICompressionCodec::setCodecDescription(const String & codec_name, const AST
std::shared_ptr<ASTFunction> result = std::make_shared<ASTFunction>(); std::shared_ptr<ASTFunction> result = std::make_shared<ASTFunction>();
result->name = "CODEC"; result->name = "CODEC";
/// Special case for codec Multiple, which doens't have name. It's just list /// Special case for codec Multiple, which doesn't have name. It's just list
/// of other codecs. /// of other codecs.
if (codec_name.empty()) if (codec_name.empty())
{ {

View File

@ -12,7 +12,6 @@ PEERDIR(
contrib/libs/zstd contrib/libs/zstd
) )
CFLAGS(-g0)
SRCS( SRCS(
CachedCompressedReadBuffer.cpp CachedCompressedReadBuffer.cpp

View File

@ -11,7 +11,6 @@ PEERDIR(
contrib/libs/zstd contrib/libs/zstd
) )
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -85,6 +85,9 @@ void GTIDSets::update(const GTID & other)
ErrorCodes::LOGICAL_ERROR); ErrorCodes::LOGICAL_ERROR);
} }
/// Try to shrink the Sequence interval.
GTIDSet::tryShirnk(set, i, current);
/// Sequence, extend the interval. /// Sequence, extend the interval.
if (other.seq_no == current.end) if (other.seq_no == current.end)
{ {
@ -116,6 +119,16 @@ void GTIDSets::update(const GTID & other)
sets.emplace_back(set); sets.emplace_back(set);
} }
void GTIDSet::tryShirnk(GTIDSet & set, unsigned int i, GTIDSet::Interval & current)
{
if (i != set.intervals.size() - 1)
{
auto & next = set.intervals[i + 1];
if (current.end == next.start)
set.tryMerge(i);
}
}
String GTIDSets::toString() const String GTIDSets::toString() const
{ {
WriteBufferFromOwnString buffer; WriteBufferFromOwnString buffer;
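A standalone sketch of the new shrink step. The interval layout here is an assumption (end stored exclusively), chosen only so that the shown condition current.end == next.start means the two intervals have become contiguous after an update and can be merged.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Interval { uint64_t start; uint64_t end; };   // assumed half-open: [start, end)

    void tryShrink(std::vector<Interval> & intervals, size_t i)
    {
        if (i + 1 >= intervals.size())
            return;                                      // nothing to merge with
        if (intervals[i].end == intervals[i + 1].start)  // became adjacent after the update
        {
            intervals[i].end = intervals[i + 1].end;     // absorb the next interval
            intervals.erase(intervals.begin() + i + 1);
        }
    }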

View File

@ -26,6 +26,8 @@ public:
std::vector<Interval> intervals; std::vector<Interval> intervals;
void tryMerge(size_t i); void tryMerge(size_t i);
static void tryShirnk(GTIDSet & set, unsigned int i, Interval & current);
}; };
class GTIDSets class GTIDSets

View File

@ -705,7 +705,7 @@ namespace MySQLReplication
break; break;
} }
default: default:
throw ReplicationError("Position update with unsupport event", ErrorCodes::LOGICAL_ERROR); throw ReplicationError("Position update with unsupported event", ErrorCodes::LOGICAL_ERROR);
} }
} }

View File

@ -169,6 +169,8 @@ class IColumn;
M(Milliseconds, read_backoff_min_interval_between_events_ms, 1000, "Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time.", 0) \ M(Milliseconds, read_backoff_min_interval_between_events_ms, 1000, "Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time.", 0) \
M(UInt64, read_backoff_min_events, 2, "Settings to reduce the number of threads in case of slow reads. The number of events after which the number of threads will be reduced.", 0) \ M(UInt64, read_backoff_min_events, 2, "Settings to reduce the number of threads in case of slow reads. The number of events after which the number of threads will be reduced.", 0) \
\ \
M(UInt64, read_backoff_min_concurrency, 1, "Settings to try keeping the minimal number of threads in case of slow reads.", 0) \
\
M(Float, memory_tracker_fault_probability, 0., "For testing of `exception safety` - throw an exception every time you allocate memory with the specified probability.", 0) \ M(Float, memory_tracker_fault_probability, 0., "For testing of `exception safety` - throw an exception every time you allocate memory with the specified probability.", 0) \
\ \
M(Bool, enable_http_compression, 0, "Compress the result if the client over HTTP said that it understands data compressed by gzip or deflate.", 0) \ M(Bool, enable_http_compression, 0, "Compress the result if the client over HTTP said that it understands data compressed by gzip or deflate.", 0) \
@ -390,7 +392,7 @@ class IColumn;
M(Bool, alter_partition_verbose_result, false, "Output information about affected parts. Currently works only for FREEZE and ATTACH commands.", 0) \ M(Bool, alter_partition_verbose_result, false, "Output information about affected parts. Currently works only for FREEZE and ATTACH commands.", 0) \
M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \ M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \
M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \ M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \
M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precison are seen as String on ClickHouse's side.", 0) \ M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \
\ \
/** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \ /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
\ \

View File

@ -260,6 +260,17 @@ int main(int argc, char ** argv)
"10662d71-9d91-11ea-bbc2-0242ac110003:6-7", "10662d71-9d91-11ea-bbc2-0242ac110003:6-7",
"20662d71-9d91-11ea-bbc2-0242ac110003:9", "20662d71-9d91-11ea-bbc2-0242ac110003:9",
"10662d71-9d91-11ea-bbc2-0242ac110003:6-7,20662d71-9d91-11ea-bbc2-0242ac110003:9"}, "10662d71-9d91-11ea-bbc2-0242ac110003:6-7,20662d71-9d91-11ea-bbc2-0242ac110003:9"},
{"shirnk-sequence",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-3:4-5:7",
"10662d71-9d91-11ea-bbc2-0242ac110003:6",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-7"},
{"shirnk-sequence",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-3:4-5:10",
"10662d71-9d91-11ea-bbc2-0242ac110003:8",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-5:8:10"
}
}; };
for (auto & tc : cases) for (auto & tc : cases)

View File

@ -7,7 +7,6 @@ PEERDIR(
contrib/restricted/boost/libs contrib/restricted/boost/libs
) )
CFLAGS(-g0)
SRCS( SRCS(
BackgroundSchedulePool.cpp BackgroundSchedulePool.cpp

View File

@ -6,7 +6,6 @@ PEERDIR(
contrib/restricted/boost/libs contrib/restricted/boost/libs
) )
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -27,7 +27,7 @@ Block SquashingTransform::add(const Block & input_block)
/* /*
* To minimize copying, accept two types of argument: const reference for output * To minimize copying, accept two types of argument: const reference for output
* stream, and rvalue reference for input stream, and decide whether to copy * stream, and rvalue reference for input stream, and decide whether to copy
* inside this function. This allows us not to copy Block unless we absolutely * inside this function. This allows us not to copy Block unless we absolutely
* have to. * have to.
*/ */

View File

@ -8,7 +8,6 @@ PEERDIR(
NO_COMPILER_WARNINGS() NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS( SRCS(
AddingDefaultBlockOutputStream.cpp AddingDefaultBlockOutputStream.cpp

View File

@ -7,7 +7,6 @@ PEERDIR(
NO_COMPILER_WARNINGS() NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -29,7 +29,7 @@ constexpr size_t min(size_t x, size_t y)
} }
/// @note There's no auto scale to larger big integer, only for integral ones. /// @note There's no auto scale to larger big integer, only for integral ones.
/// It's because of (U)Int64 backward compatibilty and very big performance penalties. /// It's because of (U)Int64 backward compatibility and very big performance penalties.
constexpr size_t nextSize(size_t size) constexpr size_t nextSize(size_t size)
{ {
if (size < 8) if (size < 8)

View File

@ -6,7 +6,6 @@ PEERDIR(
clickhouse/src/Formats clickhouse/src/Formats
) )
CFLAGS(-g0)
SRCS( SRCS(
convertMySQLDataType.cpp convertMySQLDataType.cpp

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Formats clickhouse/src/Formats
) )
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -116,7 +116,7 @@ void DatabaseAtomic::dropTable(const Context &, const String & table_name, bool
} }
tryRemoveSymlink(table_name); tryRemoveSymlink(table_name);
/// Remove the inner table (if any) to avoid deadlock /// Remove the inner table (if any) to avoid deadlock
/// (due to attemp to execute DROP from the worker thread) /// (due to attempt to execute DROP from the worker thread)
if (auto * mv = dynamic_cast<StorageMaterializedView *>(table.get())) if (auto * mv = dynamic_cast<StorageMaterializedView *>(table.get()))
mv->dropInnerTable(no_delay); mv->dropInnerTable(no_delay);
/// Notify DatabaseCatalog that table was dropped. It will remove table data in background. /// Notify DatabaseCatalog that table was dropped. It will remove table data in background.
@ -261,21 +261,29 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora
{ {
DetachedTables not_in_use; DetachedTables not_in_use;
auto table_data_path = getTableDataPath(query); auto table_data_path = getTableDataPath(query);
bool locked_uuid = false;
try try
{ {
std::unique_lock lock{mutex}; std::unique_lock lock{mutex};
if (query.database != database_name) if (query.database != database_name)
throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed to `{}`, cannot create table in `{}`", throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed to `{}`, cannot create table in `{}`",
database_name, query.database); database_name, query.database);
/// Do some checks before renaming file from .tmp to .sql
not_in_use = cleanupDetachedTables(); not_in_use = cleanupDetachedTables();
assertDetachedTableNotInUse(query.uuid); assertDetachedTableNotInUse(query.uuid);
renameNoReplace(table_metadata_tmp_path, table_metadata_path); /// We will get en exception if some table with the same UUID exists (even if it's detached table or table from another database)
DatabaseCatalog::instance().addUUIDMapping(query.uuid);
locked_uuid = true;
/// It throws if `table_metadata_path` already exists (it's possible if table was detached)
renameNoReplace(table_metadata_tmp_path, table_metadata_path); /// Commit point (a sort of)
attachTableUnlocked(query.table, table, lock); /// Should never throw attachTableUnlocked(query.table, table, lock); /// Should never throw
table_name_to_path.emplace(query.table, table_data_path); table_name_to_path.emplace(query.table, table_data_path);
} }
catch (...) catch (...)
{ {
Poco::File(table_metadata_tmp_path).remove(); Poco::File(table_metadata_tmp_path).remove();
if (locked_uuid)
DatabaseCatalog::instance().removeUUIDMappingFinally(query.uuid);
throw; throw;
} }
tryCreateSymlink(query.table, table_data_path); tryCreateSymlink(query.table, table_data_path);
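The shape of the new commit sequence, as a hedged standalone sketch. The registry type stands in for DatabaseCatalog's UUID map, and std::filesystem::rename plus an existence check only approximates the atomic renameNoReplace the real code relies on as its commit point.

    #include <filesystem>
    #include <stdexcept>
    #include <string>
    #include <unordered_set>

    struct UuidRegistry
    {
        std::unordered_set<std::string> uuids;

        void add(const std::string & uuid)               // throws if the UUID is already mapped anywhere
        {
            if (!uuids.insert(uuid).second)
                throw std::runtime_error("mapping for table UUID already exists");
        }

        void removeFinally(const std::string & uuid) { uuids.erase(uuid); }
    };

    void commitCreateTable(UuidRegistry & catalog, const std::string & uuid,
                           const std::filesystem::path & tmp_metadata,
                           const std::filesystem::path & metadata)
    {
        bool locked_uuid = false;
        try
        {
            catalog.add(uuid);                            // reserve the UUID before the commit point
            locked_uuid = true;

            if (std::filesystem::exists(metadata))        // approximation of renameNoReplace
                throw std::runtime_error("table metadata already exists (table was detached?)");
            std::filesystem::rename(tmp_metadata, metadata);   // commit point (a sort of)

            // In the patch, attaching the table in memory happens here and must not throw.
        }
        catch (...)
        {
            std::filesystem::remove(tmp_metadata);
            if (locked_uuid)
                catalog.removeFinally(uuid);              // roll back the UUID reservation
            throw;
        }
    }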

View File

@ -329,10 +329,4 @@ const StoragePtr & DatabaseLazyIterator::table() const
return current_storage; return current_storage;
} }
void DatabaseLazyIterator::reset()
{
if (current_storage)
current_storage.reset();
}
} }

View File

@ -22,6 +22,10 @@ public:
String getEngineName() const override { return "Lazy"; } String getEngineName() const override { return "Lazy"; }
bool canContainMergeTreeTables() const override { return false; }
bool canContainDistributedTables() const override { return false; }
void loadStoredObjects( void loadStoredObjects(
Context & context, Context & context,
bool has_force_restore_data_flag, bool force_attach) override; bool has_force_restore_data_flag, bool force_attach) override;
@ -122,7 +126,6 @@ public:
bool isValid() const override; bool isValid() const override;
const String & name() const override; const String & name() const override;
const StoragePtr & table() const override; const StoragePtr & table() const override;
void reset() override;
private: private:
const DatabaseLazy & database; const DatabaseLazy & database;

View File

@ -53,6 +53,9 @@ void DatabaseMemory::dropTable(
} }
table->is_dropped = true; table->is_dropped = true;
create_queries.erase(table_name); create_queries.erase(table_name);
UUID table_uuid = table->getStorageID().uuid;
if (table_uuid != UUIDHelpers::Nil)
DatabaseCatalog::instance().removeUUIDMappingFinally(table_uuid);
} }
ASTPtr DatabaseMemory::getCreateDatabaseQuery() const ASTPtr DatabaseMemory::getCreateDatabaseQuery() const

View File

@ -223,6 +223,10 @@ void DatabaseWithDictionaries::removeDictionary(const Context &, const String &
attachDictionary(dictionary_name, attach_info); attachDictionary(dictionary_name, attach_info);
throw; throw;
} }
UUID dict_uuid = attach_info.create_query->as<ASTCreateQuery>()->uuid;
if (dict_uuid != UUIDHelpers::Nil)
DatabaseCatalog::instance().removeUUIDMappingFinally(dict_uuid);
} }
DatabaseDictionariesIteratorPtr DatabaseWithDictionaries::getDictionariesIterator(const FilterByNameFunction & filter_by_dictionary_name) DatabaseDictionariesIteratorPtr DatabaseWithDictionaries::getDictionariesIterator(const FilterByNameFunction & filter_by_dictionary_name)

View File

@ -44,8 +44,6 @@ public:
/// (a database with support for lazy tables loading /// (a database with support for lazy tables loading
/// - it maintains a list of tables but tables are loaded lazily). /// - it maintains a list of tables but tables are loaded lazily).
virtual const StoragePtr & table() const = 0; virtual const StoragePtr & table() const = 0;
/// Reset reference counter to the StoragePtr.
virtual void reset() = 0;
virtual ~IDatabaseTablesIterator() = default; virtual ~IDatabaseTablesIterator() = default;
@ -95,8 +93,6 @@ public:
const String & name() const override { return it->first; } const String & name() const override { return it->first; }
const StoragePtr & table() const override { return it->second; } const StoragePtr & table() const override { return it->second; }
void reset() override { it->second.reset(); }
}; };
/// Copies list of dictionaries and iterates through such snapshot. /// Copies list of dictionaries and iterates through such snapshot.
@ -151,6 +147,10 @@ public:
/// Get name of database engine. /// Get name of database engine.
virtual String getEngineName() const = 0; virtual String getEngineName() const = 0;
virtual bool canContainMergeTreeTables() const { return true; }
virtual bool canContainDistributedTables() const { return true; }
/// Load a set of existing tables. /// Load a set of existing tables.
/// You can call only once, right after the object is created. /// You can call only once, right after the object is created.
virtual void loadStoredObjects(Context & /*context*/, bool /*has_force_restore_data_flag*/, bool /*force_attach*/ = false) {} virtual void loadStoredObjects(Context & /*context*/, bool /*has_force_restore_data_flag*/, bool /*force_attach*/ = false) {}

View File

@ -11,7 +11,7 @@ class Context;
class ASTStorage; class ASTStorage;
#define LIST_OF_CONNECTION_MYSQL_SETTINGS(M) \ #define LIST_OF_CONNECTION_MYSQL_SETTINGS(M) \
M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precison are seen as String on ClickHouse's side.", 0) \ M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \
/// Settings that should not change after the creation of a database. /// Settings that should not change after the creation of a database.
#define APPLY_FOR_IMMUTABLE_CONNECTION_MYSQL_SETTINGS(M) \ #define APPLY_FOR_IMMUTABLE_CONNECTION_MYSQL_SETTINGS(M) \

View File

@ -42,6 +42,12 @@ public:
String getEngineName() const override { return "MySQL"; } String getEngineName() const override { return "MySQL"; }
bool canContainMergeTreeTables() const override { return false; }
bool canContainDistributedTables() const override { return false; }
bool shouldBeEmptyOnDetach() const override { return false; }
bool empty() const override; bool empty() const override;
DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name) override; DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name) override;

View File

@ -28,11 +28,6 @@ public:
return tables.emplace_back(storage); return tables.emplace_back(storage);
} }
void reset() override
{
tables.clear();
}
UUID uuid() const override { return nested_iterator->uuid(); } UUID uuid() const override { return nested_iterator->uuid(); }
DatabaseMaterializeTablesIterator(DatabaseTablesIteratorPtr nested_iterator_, DatabaseMaterializeMySQL * database_) DatabaseMaterializeTablesIterator(DatabaseTablesIteratorPtr nested_iterator_, DatabaseMaterializeMySQL * database_)

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common clickhouse/src/Common
) )
CFLAGS(-g0)
SRCS( SRCS(
DatabaseAtomic.cpp DatabaseAtomic.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common clickhouse/src/Common
) )
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | sed 's/^\.\// /' | sort ?>

View File

@ -1467,7 +1467,6 @@ void SSDComplexKeyCacheDictionary::getItemsNumberImpl(
{ {
assert(dict_struct.key); assert(dict_struct.key);
assert(key_columns.size() == key_types.size()); assert(key_columns.size() == key_types.size());
assert(key_columns.size() == dict_struct.key->size());
dict_struct.validateKeyTypes(key_types); dict_struct.validateKeyTypes(key_types);

View File

@ -12,7 +12,6 @@ PEERDIR(
NO_COMPILER_WARNINGS() NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS( SRCS(
CacheDictionary.cpp CacheDictionary.cpp

View File

@ -11,7 +11,6 @@ PEERDIR(
NO_COMPILER_WARNINGS() NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F Trie | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests | grep -v -F Trie | sed 's/^\.\// /' | sort ?>

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common clickhouse/src/Common
) )
CFLAGS(-g0)
SRCS( SRCS(
DiskS3.cpp DiskS3.cpp

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common clickhouse/src/Common
) )
CFLAGS(-g0)
SRCS( SRCS(
createVolume.cpp createVolume.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common clickhouse/src/Common
) )
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F S3 | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests | grep -v -F S3 | sed 's/^\.\// /' | sort ?>

View File

@ -7,7 +7,6 @@ PEERDIR(
contrib/libs/protoc contrib/libs/protoc
) )
CFLAGS(-g0)
SRCS( SRCS(
FormatFactory.cpp FormatFactory.cpp

View File

@ -6,7 +6,6 @@ PEERDIR(
contrib/libs/protoc contrib/libs/protoc
) )
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -326,7 +326,7 @@ struct DecimalBinaryOperation
} }
private: private:
/// there's implicit type convertion here /// there's implicit type conversion here
static NativeResultType apply(NativeResultType a, NativeResultType b) static NativeResultType apply(NativeResultType a, NativeResultType b)
{ {
if constexpr (can_overflow && check_overflow) if constexpr (can_overflow && check_overflow)
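For context on the can_overflow && check_overflow branch: when both flags are set, the arithmetic is expected to go through an overflow-checked primitive and throw instead of silently wrapping. A minimal sketch of such a primitive, using the GCC/Clang builtin (the dispatch on the template flags is as shown above; the function name here is illustrative):

    #include <cstdint>
    #include <stdexcept>

    int64_t addChecked(int64_t a, int64_t b)
    {
        int64_t result = 0;
        if (__builtin_add_overflow(a, b, &result))   // GCC/Clang overflow-checking builtin
            throw std::overflow_error("Decimal math overflow");
        return result;
    }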

View File

@ -577,7 +577,7 @@ private:
auto input_value = input_column->getDataAt(r); auto input_value = input_column->getDataAt(r);
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM) if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
{ {
// empty plaintext results in empty ciphertext + tag, means there should be atleast tag_size bytes. // empty plaintext results in empty ciphertext + tag, means there should be at least tag_size bytes.
if (input_value.size < tag_size) if (input_value.size < tag_size)
throw Exception(fmt::format("Encrypted data is too short: only {} bytes, " throw Exception(fmt::format("Encrypted data is too short: only {} bytes, "
"should contain at least {} bytes of a tag.", "should contain at least {} bytes of a tag.",

View File

@ -31,6 +31,7 @@ namespace ErrorCodes
{ {
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_COLUMN;
extern const int BAD_ARGUMENTS; extern const int BAD_ARGUMENTS;
} }
@ -84,6 +85,9 @@ enum class TieBreakingMode
Bankers, // use banker's rounding Bankers, // use banker's rounding
}; };
/// For N, no more than the number of digits in the largest type.
using Scale = Int16;
/** Rounding functions for integer values. /** Rounding functions for integer values.
*/ */
@ -416,7 +420,7 @@ private:
using Container = typename ColumnDecimal<T>::Container; using Container = typename ColumnDecimal<T>::Container;
public: public:
static NO_INLINE void apply(const Container & in, Container & out, Int64 scale_arg) static NO_INLINE void apply(const Container & in, Container & out, Scale scale_arg)
{ {
scale_arg = in.getScale() - scale_arg; scale_arg = in.getScale() - scale_arg;
if (scale_arg > 0) if (scale_arg > 0)
@ -458,7 +462,7 @@ class Dispatcher
FloatRoundingImpl<T, rounding_mode, scale_mode>, FloatRoundingImpl<T, rounding_mode, scale_mode>,
IntegerRoundingImpl<T, rounding_mode, scale_mode, tie_breaking_mode>>; IntegerRoundingImpl<T, rounding_mode, scale_mode, tie_breaking_mode>>;
static ColumnPtr apply(const ColumnVector<T> * col, Int64 scale_arg) static ColumnPtr apply(const ColumnVector<T> * col, Scale scale_arg)
{ {
auto col_res = ColumnVector<T>::create(); auto col_res = ColumnVector<T>::create();
@ -487,7 +491,7 @@ class Dispatcher
return col_res; return col_res;
} }
static ColumnPtr apply(const ColumnDecimal<T> * col, Int64 scale_arg) static ColumnPtr apply(const ColumnDecimal<T> * col, Scale scale_arg)
{ {
const typename ColumnDecimal<T>::Container & vec_src = col->getData(); const typename ColumnDecimal<T>::Container & vec_src = col->getData();
@ -501,7 +505,7 @@ class Dispatcher
} }
public: public:
static ColumnPtr apply(const IColumn * column, Int64 scale_arg) static ColumnPtr apply(const IColumn * column, Scale scale_arg)
{ {
if constexpr (IsNumber<T>) if constexpr (IsNumber<T>)
return apply(checkAndGetColumn<ColumnVector<T>>(column), scale_arg); return apply(checkAndGetColumn<ColumnVector<T>>(column), scale_arg);
@ -544,20 +548,25 @@ public:
return arguments[0]; return arguments[0];
} }
static Int64 getScaleArg(ColumnsWithTypeAndName & arguments) static Scale getScaleArg(ColumnsWithTypeAndName & arguments)
{ {
if (arguments.size() == 2) if (arguments.size() == 2)
{ {
const IColumn & scale_column = *arguments[1].column; const IColumn & scale_column = *arguments[1].column;
if (!isColumnConst(scale_column)) if (!isColumnConst(scale_column))
throw Exception("Scale argument for rounding functions must be constant.", ErrorCodes::ILLEGAL_COLUMN); throw Exception("Scale argument for rounding functions must be constant", ErrorCodes::ILLEGAL_COLUMN);
Field scale_field = assert_cast<const ColumnConst &>(scale_column).getField(); Field scale_field = assert_cast<const ColumnConst &>(scale_column).getField();
if (scale_field.getType() != Field::Types::UInt64 if (scale_field.getType() != Field::Types::UInt64
&& scale_field.getType() != Field::Types::Int64) && scale_field.getType() != Field::Types::Int64)
throw Exception("Scale argument for rounding functions must have integer type.", ErrorCodes::ILLEGAL_COLUMN); throw Exception("Scale argument for rounding functions must have integer type", ErrorCodes::ILLEGAL_COLUMN);
return scale_field.get<Int64>(); Int64 scale64 = scale_field.get<Int64>();
if (scale64 > std::numeric_limits<Scale>::max()
|| scale64 < std::numeric_limits<Scale>::min())
throw Exception("Scale argument for rounding function is too large", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
return scale64;
} }
return 0; return 0;
} }
@ -568,7 +577,7 @@ public:
ColumnPtr executeImpl(ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override ColumnPtr executeImpl(ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
{ {
const ColumnWithTypeAndName & column = arguments[0]; const ColumnWithTypeAndName & column = arguments[0];
Int64 scale_arg = getScaleArg(arguments); Scale scale_arg = getScaleArg(arguments);
ColumnPtr res; ColumnPtr res;
auto call = [&](const auto & types) -> bool auto call = [&](const auto & types) -> bool
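The new Scale type narrows the user-supplied rounding scale from Int64 to Int16, with an explicit range check before the cast. A self-contained version of that check (names simplified, and a standard exception type used in place of the ClickHouse one):

    #include <cstdint>
    #include <limits>
    #include <stdexcept>

    using Scale = int16_t;   // no supported numeric type has more decimal digits than this can express

    Scale checkedScale(int64_t scale64)
    {
        if (scale64 > std::numeric_limits<Scale>::max() || scale64 < std::numeric_limits<Scale>::min())
            throw std::out_of_range("Scale argument for rounding function is too large");
        return static_cast<Scale>(scale64);
    }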

View File

@ -131,7 +131,7 @@ public:
for (size_t i = 0; i < input_rows_count; ++i) for (size_t i = 0; i < input_rows_count; ++i)
{ {
/// Virtual call is Ok (neglible compared to the rest of calculations). /// Virtual call is Ok (negligible compared to the rest of calculations).
Float64 value = arguments[0].column->getFloat64(i); Float64 value = arguments[0].column->getFloat64(i);
bool is_negative = value < 0; bool is_negative = value < 0;

View File

@ -22,7 +22,7 @@ namespace
{ {
/// Returns 1 if a Decimal value has more digits than its precision allows, 0 otherwise. /// Returns 1 if a Decimal value has more digits than its precision allows, 0 otherwise.
/// Precision could be set as second argument or omitted. If ommited function uses Decimal presicion of the first argument. /// Precision could be set as second argument or omitted. If omitted function uses Decimal precision of the first argument.
class FunctionIsDecimalOverflow : public IFunction class FunctionIsDecimalOverflow : public IFunction
{ {
public: public:

View File

@ -4,7 +4,6 @@
#if defined(OS_DARWIN) #if defined(OS_DARWIN)
extern "C" extern "C"
{ {
/// Is defined in libglibc-compatibility.a
double lgamma_r(double x, int * signgamp); double lgamma_r(double x, int * signgamp);
} }
#endif #endif

View File

@ -32,7 +32,6 @@ PEERDIR(
) )
# "Arcadia" build is slightly deficient. It lacks many libraries that we need. # "Arcadia" build is slightly deficient. It lacks many libraries that we need.
CFLAGS(-g0)
SRCS( SRCS(
abs.cpp abs.cpp

View File

@ -31,7 +31,6 @@ PEERDIR(
) )
# "Arcadia" build is slightly deficient. It lacks many libraries that we need. # "Arcadia" build is slightly deficient. It lacks many libraries that we need.
CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -i -v -P 'tests|Bitmap|sumbur|abtesting' | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -i -v -P 'tests|Bitmap|sumbur|abtesting' | sed 's/^\.\// /' | sort ?>

Some files were not shown because too many files have changed in this diff.