Merge remote-tracking branch 'upstream/master' into HEAD

Anton Popov 2021-09-10 13:40:48 +03:00
commit 5cff615eca
126 changed files with 765 additions and 378 deletions

View File

@ -80,16 +80,16 @@ include (cmake/find/ccache.cmake)
# ccache ignore it.
option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling." OFF)
if (ENABLE_CHECK_HEAVY_BUILDS)
# set DATA (since RSS does not work since 2.6.x+) to 2G
# set DATA (since RSS does not work since 2.6.x+) to 5G
set (RLIMIT_DATA 5000000000)
# set VIRT (RLIMIT_AS) to 10G (DATA*10)
set (RLIMIT_AS 10000000000)
# set CPU time limit to 600 seconds
set (RLIMIT_CPU 600)
# set CPU time limit to 800 seconds
set (RLIMIT_CPU 800)
# gcc10/clang -fsanitize=memory is too heavy
if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
set (RLIMIT_DATA 10000000000)
set (RLIMIT_DATA 10000000000) # 10G
endif()
set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER})

View File

@ -189,7 +189,7 @@ public:
~Pool();
/// Allocates connection.
Entry get(uint64_t wait_timeout);
Entry get(uint64_t wait_timeout = UINT64_MAX);
/// Allocates connection.
/// If database is not accessible, returns empty Entry object.

View File

@ -1,8 +1,10 @@
if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "")
if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
else()
set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
endif()
option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ON)
option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})
if (NOT ENABLE_EMBEDDED_COMPILER)
set (USE_EMBEDDED_COMPILER 0)

View File

@ -206,12 +206,14 @@ elseif(GTEST_SRC_DIR)
target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0)
endif()
if (USE_EMBEDDED_COMPILER)
function(add_llvm)
# ld: unknown option: --color-diagnostics
if (APPLE)
set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "")
endif ()
# Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
set (CMAKE_INSTALL_RPATH "ON")
set (LLVM_ENABLE_EH 1 CACHE INTERNAL "")
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
@ -219,13 +221,12 @@ if (USE_EMBEDDED_COMPILER)
# Need to use C++17 since the compilation is not possible with C++20 currently, due to ambiguous operator != etc.
# LLVM project will set its default value for the -std=... but our global setting from CMake will override it.
set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
set (CMAKE_CXX_STANDARD 17)
add_subdirectory (llvm/llvm)
set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
unset (CMAKE_CXX_STANDARD_bak)
endfunction()
if (USE_EMBEDDED_COMPILER)
add_llvm()
endif ()
if (USE_INTERNAL_LIBGSASL_LIBRARY)

View File

@ -1,12 +1,12 @@
{
"docker/packager/deb": {
"name": "yandex/clickhouse-deb-builder",
"name": "clickhouse/deb-builder",
"dependent": [
"docker/packager/unbundled"
]
},
"docker/packager/binary": {
"name": "yandex/clickhouse-binary-builder",
"name": "clickhouse/binary-builder",
"dependent": [
"docker/test/split_build_smoke_test",
"docker/test/pvs",
@ -14,155 +14,150 @@
]
},
"docker/packager/unbundled": {
"name": "yandex/clickhouse-unbundled-builder",
"name": "clickhouse/unbundled-builder",
"dependent": []
},
"docker/test/compatibility/centos": {
"name": "yandex/clickhouse-test-old-centos",
"name": "clickhouse/test-old-centos",
"dependent": []
},
"docker/test/compatibility/ubuntu": {
"name": "yandex/clickhouse-test-old-ubuntu",
"name": "clickhouse/test-old-ubuntu",
"dependent": []
},
"docker/test/integration/base": {
"name": "yandex/clickhouse-integration-test",
"name": "clickhouse/integration-test",
"dependent": []
},
"docker/test/fuzzer": {
"name": "yandex/clickhouse-fuzzer",
"name": "clickhouse/fuzzer",
"dependent": []
},
"docker/test/performance-comparison": {
"name": "yandex/clickhouse-performance-comparison",
"name": "clickhouse/performance-comparison",
"dependent": []
},
"docker/test/pvs": {
"name": "yandex/clickhouse-pvs-test",
"name": "clickhouse/pvs-test",
"dependent": []
},
"docker/test/stateless": {
"name": "yandex/clickhouse-stateless-test",
"name": "clickhouse/stateless-test",
"dependent": [
"docker/test/stateful",
"docker/test/coverage",
"docker/test/unit"
]
},
"docker/test/stateless_pytest": {
"name": "yandex/clickhouse-stateless-pytest",
"dependent": []
},
"docker/test/stateful": {
"name": "yandex/clickhouse-stateful-test",
"name": "clickhouse/stateful-test",
"dependent": [
"docker/test/stress"
]
},
"docker/test/coverage": {
"name": "yandex/clickhouse-test-coverage",
"name": "clickhouse/test-coverage",
"dependent": []
},
"docker/test/unit": {
"name": "yandex/clickhouse-unit-test",
"name": "clickhouse/unit-test",
"dependent": []
},
"docker/test/stress": {
"name": "yandex/clickhouse-stress-test",
"name": "clickhouse/stress-test",
"dependent": []
},
"docker/test/split_build_smoke_test": {
"name": "yandex/clickhouse-split-build-smoke-test",
"name": "clickhouse/split-build-smoke-test",
"dependent": []
},
"docker/test/codebrowser": {
"name": "yandex/clickhouse-codebrowser",
"name": "clickhouse/codebrowser",
"dependent": []
},
"docker/test/integration/runner": {
"name": "yandex/clickhouse-integration-tests-runner",
"name": "clickhouse/integration-tests-runner",
"dependent": []
},
"docker/test/testflows/runner": {
"name": "yandex/clickhouse-testflows-runner",
"name": "clickhouse/testflows-runner",
"dependent": []
},
"docker/test/fasttest": {
"name": "yandex/clickhouse-fasttest",
"name": "clickhouse/fasttest",
"dependent": []
},
"docker/test/style": {
"name": "yandex/clickhouse-style-test",
"name": "clickhouse/style-test",
"dependent": []
},
"docker/test/integration/s3_proxy": {
"name": "yandex/clickhouse-s3-proxy",
"name": "clickhouse/s3-proxy",
"dependent": []
},
"docker/test/integration/resolver": {
"name": "yandex/clickhouse-python-bottle",
"name": "clickhouse/python-bottle",
"dependent": []
},
"docker/test/integration/helper_container": {
"name": "yandex/clickhouse-integration-helper",
"name": "clickhouse/integration-helper",
"dependent": []
},
"docker/test/integration/mysql_golang_client": {
"name": "yandex/clickhouse-mysql-golang-client",
"name": "clickhouse/mysql-golang-client",
"dependent": []
},
"docker/test/integration/mysql_java_client": {
"name": "yandex/clickhouse-mysql-java-client",
"name": "clickhouse/mysql-java-client",
"dependent": []
},
"docker/test/integration/mysql_js_client": {
"name": "yandex/clickhouse-mysql-js-client",
"name": "clickhouse/mysql-js-client",
"dependent": []
},
"docker/test/integration/mysql_php_client": {
"name": "yandex/clickhouse-mysql-php-client",
"name": "clickhouse/mysql-php-client",
"dependent": []
},
"docker/test/integration/postgresql_java_client": {
"name": "yandex/clickhouse-postgresql-java-client",
"name": "clickhouse/postgresql-java-client",
"dependent": []
},
"docker/test/integration/kerberos_kdc": {
"name": "yandex/clickhouse-kerberos-kdc",
"name": "clickhouse/kerberos-kdc",
"dependent": []
},
"docker/test/base": {
"name": "yandex/clickhouse-test-base",
"name": "clickhouse/test-base",
"dependent": [
"docker/test/stateless",
"docker/test/stateless_unbundled",
"docker/test/stateless_pytest",
"docker/test/integration/base",
"docker/test/fuzzer",
"docker/test/keeper-jepsen"
]
},
"docker/packager/unbundled": {
"name": "yandex/clickhouse-unbundled-builder",
"name": "clickhouse/unbundled-builder",
"dependent": [
"docker/test/stateless_unbundled"
]
},
"docker/test/stateless_unbundled": {
"name": "yandex/clickhouse-stateless-unbundled-test",
"name": "clickhouse/stateless-unbundled-test",
"dependent": [
]
},
"docker/test/integration/kerberized_hadoop": {
"name": "yandex/clickhouse-kerberized-hadoop",
"name": "clickhouse/kerberized-hadoop",
"dependent": []
},
"docker/test/sqlancer": {
"name": "yandex/clickhouse-sqlancer-test",
"name": "clickhouse/sqlancer-test",
"dependent": []
},
"docker/test/keeper-jepsen": {
"name": "yandex/clickhouse-keeper-jepsen-test",
"name": "clickhouse/keeper-jepsen-test",
"dependent": []
}
}

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-binary-builder .
# docker build -t clickhouse/binary-builder .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-deb-builder .
# docker build -t clickhouse/deb-builder .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

View File

@ -9,9 +9,9 @@ import sys
SCRIPT_PATH = os.path.realpath(__file__)
IMAGE_MAP = {
"deb": "yandex/clickhouse-deb-builder",
"binary": "yandex/clickhouse-binary-builder",
"unbundled": "yandex/clickhouse-unbundled-builder"
"deb": "clickhouse/deb-builder",
"binary": "clickhouse/binary-builder",
"unbundled": "clickhouse/unbundled-builder"
}
def check_image_exists_locally(image_name):

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-unbundled-builder .
FROM yandex/clickhouse-deb-builder
# docker build -t clickhouse/unbundled-builder .
FROM clickhouse/deb-builder
RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
&& wget -nv -O /tmp/arrow-keyring.deb "https://apache.jfrog.io/artifactory/arrow/ubuntu/apache-arrow-apt-source-latest-${CODENAME}.deb" \

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-test-base .
# docker build -t clickhouse/test-base .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

View File

@ -1,6 +1,6 @@
# docker build --network=host -t yandex/clickhouse-codebrowser .
# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output yandex/clickhouse-codebrowser
FROM yandex/clickhouse-binary-builder
# docker build --network=host -t clickhouse/codebrowser .
# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output clickhouse/codebrowser
FROM clickhouse/binary-builder
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-test-old-centos .
# docker build -t clickhouse/test-old-centos .
FROM centos:5
CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-test-old-ubuntu .
# docker build -t clickhouse/test-old-ubuntu .
FROM ubuntu:12.04
CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-test-coverage .
FROM yandex/clickhouse-stateless-test
# docker build -t clickhouse/test-coverage .
FROM clickhouse/stateless-test
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-fasttest .
# docker build -t clickhouse/fasttest .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-fuzzer .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/fuzzer .
FROM clickhouse/test-base
ENV LANG=C.UTF-8
ENV TZ=Europe/Moscow
@ -36,5 +36,5 @@ CMD set -o pipefail \
&& cd /workspace \
&& /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
# docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-fuzzer
# docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/fuzzer

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-integration-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/integration-test .
FROM clickhouse/test-base
SHELL ["/bin/bash", "-c"]

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-integration-helper .
# docker build -t clickhouse/integration-helper .
# Helper docker container to run iptables without sudo
FROM alpine

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-kerberized-hadoop .
# docker build -t clickhouse/kerberized-hadoop .
FROM sequenceiq/hadoop-docker:2.7.0
RUN sed -i -e 's/^\#baseurl/baseurl/' /etc/yum.repos.d/CentOS-Base.repo

View File

@ -1,9 +1,9 @@
# docker build -t yandex/clickhouse-kerberos-kdc .
# docker build -t clickhouse/kerberos-kdc .
FROM centos:6
FROM centos:6.6
# old OS to make it faster and smaller
RUN sed -i '/^mirrorlist/s/^/#/;/^#baseurl/{s/#//;s/mirror.centos.org\/centos\/$releasever/vault.centos.org\/6.10/}' /etc/yum.repos.d/*B*
RUN yum install -y krb5-server krb5-libs krb5-auth-dialog krb5-workstation
RUN yum install -y ca-certificates krb5-server krb5-libs krb5-auth-dialog krb5-workstation
EXPOSE 88 749

View File

@ -1,7 +1,7 @@
# docker build -t yandex/clickhouse-mysql-golang-client .
# docker build -t clickhouse/mysql-golang-client .
# MySQL golang client docker container
FROM golang:1.12.2
FROM golang:1.13
RUN go get "github.com/go-sql-driver/mysql"

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-mysql-java-client .
# docker build -t clickhouse/mysql-java-client .
# MySQL Java client docker container
FROM ubuntu:18.04

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-mysql-js-client .
# docker build -t clickhouse/mysql-js-client .
# MySQL JavaScript client docker container
FROM node:8

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-mysql-php-client .
# docker build -t clickhouse/mysql-php-client .
# MySQL PHP client docker container
FROM php:7.3-cli

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-postgresql-java-client .
# docker build -t clickhouse/postgresql-java-client .
# PostgreSQL Java client docker container
FROM ubuntu:18.04

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-python-bottle .
# docker build -t clickhouse/python-bottle .
# Helper docker container to run python bottle apps
FROM python:3

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-integration-tests-runner .
# docker build -t clickhouse/integration-tests-runner .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

View File

@ -1,7 +1,7 @@
version: '2.3'
services:
bridge1:
image: yandex/clickhouse-jdbc-bridge
image: clickhouse/jdbc-bridge
command: |
/bin/bash -c 'cat << EOF > config/datasources/self.json
{

View File

@ -1,7 +1,7 @@
version: '2.3'
services:
zoo1:
image: ${image:-yandex/clickhouse-integration-test}
image: ${image:-clickhouse/integration-test}
restart: always
user: ${user:-}
volumes:
@ -31,7 +31,7 @@ services:
- inet6
- rotate
zoo2:
image: ${image:-yandex/clickhouse-integration-test}
image: ${image:-clickhouse/integration-test}
restart: always
user: ${user:-}
volumes:
@ -61,7 +61,7 @@ services:
- inet6
- rotate
zoo3:
image: ${image:-yandex/clickhouse-integration-test}
image: ${image:-clickhouse/integration-test}
restart: always
user: ${user:-}
volumes:

View File

@ -4,7 +4,7 @@ services:
kerberizedhdfs1:
cap_add:
- DAC_READ_SEARCH
image: yandex/clickhouse-kerberized-hadoop:16621
image: clickhouse/kerberized-hadoop
hostname: kerberizedhdfs1
restart: always
volumes:
@ -22,7 +22,7 @@ services:
entrypoint: /etc/bootstrap.sh -d
hdfskerberos:
image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
hostname: hdfskerberos
volumes:
- ${KERBERIZED_HDFS_DIR}/secrets:/tmp/keytab

View File

@ -50,7 +50,7 @@ services:
- label:disable
kafka_kerberos:
image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
hostname: kafka_kerberos
volumes:
- ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab

View File

@ -19,14 +19,14 @@ services:
# HTTP proxies for Minio.
proxy1:
image: yandex/clickhouse-s3-proxy
image: clickhouse/s3-proxy
expose:
- "8080" # Redirect proxy port
- "80" # Reverse proxy port
- "443" # Reverse proxy port (secure)
proxy2:
image: yandex/clickhouse-s3-proxy
image: clickhouse/s3-proxy
expose:
- "8080"
- "80"
@ -34,7 +34,7 @@ services:
# Empty container to run proxy resolver.
resolver:
image: yandex/clickhouse-python-bottle
image: clickhouse/python-bottle
expose:
- "8080"
tty: true

View File

@ -1,6 +1,6 @@
version: '2.3'
services:
golang1:
image: yandex/clickhouse-mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
image: clickhouse/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity

View File

@ -1,6 +1,6 @@
version: '2.3'
services:
java1:
image: yandex/clickhouse-mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
image: clickhouse/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity

View File

@ -1,6 +1,6 @@
version: '2.3'
services:
mysqljs1:
image: yandex/clickhouse-mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
image: clickhouse/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity

View File

@ -1,6 +1,6 @@
version: '2.3'
services:
php1:
image: yandex/clickhouse-mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
image: clickhouse/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity

View File

@ -1,6 +1,6 @@
version: '2.2'
services:
java:
image: yandex/clickhouse-postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
image: clickhouse/postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-s3-proxy .
# docker build -t clickhouse/s3-proxy .
FROM nginx:alpine
COPY run.sh /run.sh

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-keeper-jepsen-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/keeper-jepsen-test .
FROM clickhouse/test-base
ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-performance-comparison .
# docker build -t clickhouse/performance-comparison .
FROM ubuntu:18.04
ENV LANG=C.UTF-8
@ -54,4 +54,4 @@ COPY * /
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\\(.*\\)nodes.*$/\\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"]
# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-performance-comparison
# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison

View File

@ -116,7 +116,7 @@ pull requests (0 for master) manually.
docker run --network=host --volume=$(pwd)/workspace:/workspace --volume=$(pwd)/output:/output
[-e REF_PR={} -e REF_SHA={}]
-e PR_TO_TEST={} -e SHA_TO_TEST={}
yandex/clickhouse-performance-comparison
clickhouse/performance-comparison
```
Then see the `report.html` in the `output` directory.

View File

@ -1,6 +1,6 @@
# docker build -t yandex/clickhouse-pvs-test .
# docker build -t clickhouse/pvs-test .
FROM yandex/clickhouse-binary-builder
FROM clickhouse/binary-builder
RUN apt-get update --yes \
&& apt-get install \

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-split-build-smoke-test .
FROM yandex/clickhouse-binary-builder
# docker build -t clickhouse/split-build-smoke-test .
FROM clickhouse/binary-builder
COPY run.sh /run.sh
COPY process_split_build_smoke_test_result.py /

View File

@ -1,9 +1,9 @@
# docker build -t yandex/clickhouse-sqlancer-test .
# docker build -t clickhouse/sqlancer-test .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git openjdk-14-jdk maven python3 --yes --no-install-recommends
RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git default-jdk maven python3 --yes --no-install-recommends
RUN wget https://github.com/sqlancer/sqlancer/archive/master.zip -O /sqlancer.zip
RUN mkdir /sqlancer && \
cd /sqlancer && \

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateful-test .
FROM yandex/clickhouse-stateless-test
# docker build -t clickhouse/stateful-test .
FROM clickhouse/stateless-test
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateless-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/stateless-test .
FROM clickhouse/test-base
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateless-pytest .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/stateless-pytest .
FROM clickhouse/test-base
RUN apt-get update -y && \
apt-get install -y --no-install-recommends \

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateless-unbundled-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/stateless-unbundled-test .
FROM clickhouse/test-base
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stress-test .
FROM yandex/clickhouse-stateful-test
# docker build -t clickhouse/stress-test .
FROM clickhouse/stateful-test
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \

View File

@ -6,7 +6,7 @@ Usage:
```
$ ls $HOME/someclickhouse
clickhouse-client_18.14.9_all.deb clickhouse-common-static_18.14.9_amd64.deb clickhouse-server_18.14.9_all.deb clickhouse-test_18.14.9_all.deb
$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output yandex/clickhouse-stress-test
$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output clickhouse/stress-test
Selecting previously unselected package clickhouse-common-static.
(Reading database ... 14442 files and directories currently installed.)
...

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-style-test .
# docker build -t clickhouse/style-test .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

View File

@ -49,7 +49,7 @@ fi
# Build server image (optional) from local packages
if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then
CLICKHOUSE_SERVER_IMAGE="yandex/clickhouse-server:local"
CLICKHOUSE_SERVER_IMAGE="clickhouse/server:local"
if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then
docker build --network=host \

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-testflows-runner .
# docker build -t clickhouse/testflows-runner .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-unit-test .
FROM yandex/clickhouse-stateless-test
# docker build -t clickhouse/unit-test .
FROM clickhouse/stateless-test
RUN apt-get install gdb

View File

@ -3499,6 +3499,30 @@ Possible values:
Default value: `0`.
## replication_alter_partitions_sync {#replication-alter-partitions-sync}
Allows setting up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
Possible values:
- 0 — Do not wait.
- 1 — Wait for own execution.
- 2 — Wait for execution on all replicas.
Default value: `1`.
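As a brief illustration (a sketch; the table and partition names here are hypothetical), the setting is applied before the query whose replication behavior it controls:

```sql
-- Wait until the ALTER has been executed on every replica.
SET replication_alter_partitions_sync = 2;
ALTER TABLE test_table DROP PARTITION '2021-09-01';
```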
## replication_wait_for_inactive_replica_timeout {#replication-wait-for-inactive-replica-timeout}
Specifies how long (in seconds) to wait for inactive replicas to execute [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
Possible values:
- 0 — Do not wait.
- Negative integer — Wait for unlimited time.
- Positive integer — The number of seconds to wait.
Default value: `120` seconds.
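A minimal sketch combining both settings (hypothetical table name):

```sql
-- Wait for all replicas, but give up on replicas that stay inactive for more than 300 seconds.
SET replication_alter_partitions_sync = 2, replication_wait_for_inactive_replica_timeout = 300;
OPTIMIZE TABLE test_table FINAL;
```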
## regexp_max_matches_per_row {#regexp-max-matches-per-row}
Sets the maximum number of matches for a single regular expression per row. Use it to protect against memory overload when using greedy regular expressions in the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md#extractallgroups-horizontal) function.
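For example (a sketch with illustrative values and input):

```sql
-- Cap the number of matches the greedy pattern may produce per row.
SET regexp_max_matches_per_row = 10;
SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333', '(\\w+)=(\\w+)');
```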

View File

@ -59,6 +59,10 @@ A lambda function that accepts multiple arguments can also be passed to a higher
For some functions the first argument (the lambda function) can be omitted. In this case, identical mapping is assumed.
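For example, `arrayMap` takes an explicit lambda, while `arraySort` is one of the functions where the lambda can be omitted and identical mapping is assumed:

```sql
SELECT arrayMap(x -> x * 2, [1, 2, 3]);  -- [2, 4, 6]
SELECT arraySort([3, 1, 2]);             -- [1, 2, 3], same as arraySort(x -> x, [3, 1, 2])
```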
## User Defined Functions {#user-defined-functions}
Custom functions can be created using the [CREATE FUNCTION](../statements/create/function.md) statement. To delete these functions, use the [DROP FUNCTION](../statements/drop.md#drop-function) statement.
## Error Handling {#error-handling}
Some functions might throw an exception if the data is invalid. In this case, the query is canceled and an error text is returned to the client. For distributed processing, when an exception occurs on one of the servers, the other servers also attempt to abort the query.
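For example, a parsing function throws on invalid input, while its `OrZero` counterpart returns a default value instead (a sketch):

```sql
SELECT toUInt8('not a number');        -- fails with a 'Cannot parse' exception, canceling the query
SELECT toUInt8OrZero('not a number');  -- returns 0 instead of throwing
```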

View File

@ -78,7 +78,7 @@ mapAdd(arg1, arg2 [, ...])
**Arguments**
Arguments are [maps](../../sql-reference/data-types/map.md) or [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for the each key. All key arrays should have same type, and all value arrays should contain items which are promote to the one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as a type for the result array.
Arguments are [maps](../../sql-reference/data-types/map.md) or [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for each key. All key arrays should have the same type, and all value arrays should contain items which are promoted to one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as the type for the result array.
**Returned value**
@ -86,7 +86,7 @@ Arguments are [maps](../../sql-reference/data-types/map.md) or [tuples](../../sq
**Example**
Query with a tuple map:
Query with a tuple:
```sql
SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res) as type;

View File

@ -43,7 +43,11 @@ Entries for finished mutations are not deleted right away (the number of preserv
For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas.
For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_partitions_sync` setting to set up waiting. Possible values: `0` do not wait; `1` only wait for own execution (default); `2` wait for all.
For all `ALTER` queries, you can use the [replication_alter_partitions_sync](../../../operations/settings/settings.md#replication-alter-partitions-sync) setting to set up waiting.
You can specify how long (in seconds) to wait for inactive replicas to execute all `ALTER` queries with the [replication_wait_for_inactive_replica_timeout](../../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
For all `ALTER` queries, if `replication_alter_partitions_sync = 2` and some replicas are not active for more than the time specified in the `replication_wait_for_inactive_replica_timeout` setting, an exception `UNFINISHED` is thrown.
For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting.
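A short sketch contrasting the two settings (hypothetical table and column names):

```sql
-- DDL part: wait for the metadata change on all replicas.
SET replication_alter_partitions_sync = 2;
ALTER TABLE test_table ADD COLUMN IF NOT EXISTS flag UInt8 DEFAULT 0;

-- Mutation part: wait until the data rewrite finishes on all replicas.
SET mutations_sync = 2;
ALTER TABLE test_table UPDATE flag = 1 WHERE flag = 0;
```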

View File

@ -0,0 +1,59 @@
---
toc_priority: 38
toc_title: FUNCTION
---
# CREATE FUNCTION {#create-function}
Creates a user defined function from a lambda expression. The expression must consist of function parameters, constants, operators, or other function calls.
**Syntax**
```sql
CREATE FUNCTION name AS (parameter0, ...) -> expression
```
A function can have an arbitrary number of parameters.
There are a few restrictions:
- The name of a function must be unique among user defined and system functions.
- Recursive functions are not allowed.
- All variables used by a function must be specified in its parameter list.
If any restriction is violated, an exception is raised.
**Example**
Query:
```sql
CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;
SELECT number, linear_equation(number, 2, 1) FROM numbers(3);
```
Result:
``` text
┌─number─┬─plus(multiply(2, number), 1)─┐
│ 0 │ 1 │
│ 1 │ 3 │
│ 2 │ 5 │
└────────┴──────────────────────────────┘
```
A [conditional function](../../../sql-reference/functions/conditional-functions.md) is called in a user defined function in the following query:
```sql
CREATE FUNCTION parity_str AS (n) -> if(n % 2, 'odd', 'even');
SELECT number, parity_str(number) FROM numbers(3);
```
Result:
``` text
┌─number─┬─if(modulo(number, 2), 'odd', 'even')─┐
│ 0 │ even │
│ 1 │ odd │
│ 2 │ even │
└────────┴──────────────────────────────────────┘
```

View File

@ -12,6 +12,7 @@ Create queries make a new entity of one of the following kinds:
- [TABLE](../../../sql-reference/statements/create/table.md)
- [VIEW](../../../sql-reference/statements/create/view.md)
- [DICTIONARY](../../../sql-reference/statements/create/dictionary.md)
- [FUNCTION](../../../sql-reference/statements/create/function.md)
- [USER](../../../sql-reference/statements/create/user.md)
- [ROLE](../../../sql-reference/statements/create/role.md)
- [ROW POLICY](../../../sql-reference/statements/create/row-policy.md)

View File

@ -97,4 +97,20 @@ Syntax:
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster]
```
[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/drop/) <!--hide-->
## DROP FUNCTION {#drop-function}
Deletes a user defined function created by [CREATE FUNCTION](./create/function.md).
System functions cannot be dropped.
**Syntax**
``` sql
DROP FUNCTION [IF EXISTS] function_name
```
**Example**
``` sql
CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;
DROP FUNCTION linear_equation;
```

View File

@ -107,11 +107,13 @@ Hierarchy of privileges:
- `CREATE TEMPORARY TABLE`
- `CREATE VIEW`
- `CREATE DICTIONARY`
- `CREATE FUNCTION`
- [DROP](#grant-drop)
- `DROP DATABASE`
- `DROP TABLE`
- `DROP VIEW`
- `DROP DICTIONARY`
- `DROP FUNCTION`
- [TRUNCATE](#grant-truncate)
- [OPTIMIZE](#grant-optimize)
- [SHOW](#grant-show)

View File

@ -18,13 +18,17 @@ OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION I
The `OPTIMIZE` query is supported for the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren't supported.
When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled).
When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all replicas (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `2`) or on the current replica (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `1`).
- If `OPTIMIZE` does not perform a merge for any reason, it does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](../../sql-reference/statements/alter/index.md#alter-how-to-specify-part-expr).
- If you specify `FINAL`, optimization is performed even when all the data is already in one part. Also, the merge is forced even if concurrent merges are performed.
- If you specify `DEDUPLICATE`, completely identical rows (unless a `BY` clause is specified) are deduplicated (all columns are compared); this makes sense only for the MergeTree engine.
You can specify how long (in seconds) to wait for inactive replicas to execute `OPTIMIZE` queries by the [replication_wait_for_inactive_replica_timeout](../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
If `replication_alter_partitions_sync` is set to `2` and some replicas are not active for more than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an exception `UNFINISHED` is thrown.
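Putting it together, a hedged example (hypothetical table and partition; `optimize_throw_if_noop` makes a silent no-op visible):

```sql
SET replication_alter_partitions_sync = 2,
    replication_wait_for_inactive_replica_timeout = 60,
    optimize_throw_if_noop = 1;
OPTIMIZE TABLE test_table PARTITION '2021-09-01' FINAL DEDUPLICATE;
```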
## BY expression {#by-expression}

View File

@ -6,7 +6,7 @@ toc_title: WHERE
The `WHERE` clause allows filtering the data that comes from the [FROM](../../../sql-reference/statements/select/from.md) clause of `SELECT`.
If there is a `WHERE` clause, it must contain an expression with the `UInt8` type. This is usually an expression with comparison and logical operators. Rows where this expression evaluates to 0 are expluded from further transformations or result.
If there is a `WHERE` clause, it must contain an expression with the `UInt8` type. This is usually an expression with comparison and logical operators. Rows where this expression evaluates to 0 are excluded from further transformations or result.
The `WHERE` expression is evaluated for the ability to use indexes and partition pruning, if the underlying table engine supports that.
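A minimal example of a `UInt8` filtering expression:

```sql
SELECT number FROM numbers(5) WHERE number % 2 = 1;  -- keeps rows where the expression is 1: numbers 1 and 3
```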

View File

@ -12,3 +12,10 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
Removes all data from a table. When the clause `IF EXISTS` is omitted, the query returns an error if the table does not exist.
The `TRUNCATE` query is not supported for [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md), [Buffer](../../engines/table-engines/special/buffer.md) and [Null](../../engines/table-engines/special/null.md) table engines.
You can use the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting to set up waiting for actions to be executed on replicas.
You can specify how long (in seconds) to wait for inactive replicas to execute `TRUNCATE` queries with the [replication_wait_for_inactive_replica_timeout](../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
If `replication_alter_partitions_sync` is set to `2` and some replicas are not active for more than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an exception `UNFINISHED` is thrown.
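A brief sketch tying the settings to `TRUNCATE` (hypothetical table name):

```sql
SET replication_alter_partitions_sync = 2, replication_wait_for_inactive_replica_timeout = 60;
TRUNCATE TABLE IF EXISTS test_table;
```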

View File

@ -3308,6 +3308,30 @@ SETTINGS index_granularity = 8192 │
Default value: `0`.
## replication_alter_partitions_sync {#replication-alter-partitions-sync}
Allows setting up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
Possible values:
- 0 — Do not wait.
- 1 — Wait for execution on the current replica.
- 2 — Wait for execution on all replicas.
Default value: `1`.
## replication_wait_for_inactive_replica_timeout {#replication-wait-for-inactive-replica-timeout}
Specifies how long (in seconds) to wait for inactive replicas to execute [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
Possible values:
- 0 — Do not wait.
- Negative integer — Wait for unlimited time.
- Positive integer — The number of seconds to wait.
Default value: `120` seconds.
## regexp_max_matches_per_row {#regexp-max-matches-per-row}
Sets the maximum number of matches for a single regular expression per row. The setting protects memory from overload when using greedy quantifiers in a regular expression for the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md#extractallgroups-horizontal) function.
@ -3316,4 +3340,4 @@ SETTINGS index_granularity = 8192 │
- Positive integer.
Default value: `1000`.

View File

@ -58,6 +58,10 @@ str -> str != Referer
For some functions, the first argument (the lambda function) can be omitted. In this case, identical mapping is assumed.
## User Defined Functions {#user-defined-functions}
Functions can be created with the [CREATE FUNCTION](../statements/create/function.md) statement. To delete these functions, use the [DROP FUNCTION](../statements/drop.md#drop-function) statement.
## Error Handling {#obrabotka-oshibok}
Some functions might throw an exception if the data is invalid. In this case, the query is canceled and an error text is returned to the client. For distributed query processing, when an exception occurs on one of the servers, the other servers are also asked to abort the query.

View File

@ -73,22 +73,22 @@ SELECT a['key2'] FROM table_map;
**Syntax**
``` sql
mapAdd(Tuple(Array, Array), Tuple(Array, Array) [, ...])
mapAdd(arg1, arg2 [, ...])
```
**Arguments**
Arguments are [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for each key.
Arguments are [Map](../../sql-reference/data-types/map.md) containers or [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for each key.
All key arrays should have the same type, and all value arrays should contain items that can be cast to one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)).
The common promoted type is used as the type for the result array.
**Returned value**
- Returns one [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), where the first array contains the sorted keys and the second array contains values.
- Depending on the arguments' types, returns one [Map](../../sql-reference/data-types/map.md) or [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), where the first array contains the sorted keys and the second array contains values.
**Example**
Query:
Query with a tuple:
``` sql
SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res) as type;
@ -102,6 +102,20 @@ SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTy
└───────────────┴────────────────────────────────────┘
```
Query with a `Map` container:
```sql
SELECT mapAdd(map(1,1), map(1,1));
```
Result:
```text
┌─mapAdd(map(1, 1), map(1, 1))─┐
│ {1:2} │
└──────────────────────────────┘
```
## mapSubtract {#function-mapsubtract}
Collects all keys and subtracts the corresponding values.

View File

@ -64,8 +64,11 @@ ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all replicas.
For `ALTER ... ATTACH|DETACH|DROP` queries, you can set up waiting with the `replication_alter_partitions_sync` setting.
Possible values: `0` — do not wait, `1` — wait only for own execution (default), `2` — wait for all.
For all `ALTER` queries, you can set up waiting with the [replication_alter_partitions_sync](../../../operations/settings/settings.md#replication-alter-partitions-sync) setting.
You can specify how long (in seconds) to wait for inactive replicas to execute all `ALTER` queries with the [replication_wait_for_inactive_replica_timeout](../../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
For all `ALTER` queries, if `replication_alter_partitions_sync = 2` and some replicas are not active for more than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an exception `UNFINISHED` is thrown.
For `ALTER TABLE ... UPDATE|DELETE` queries, the synchronicity is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting.

View File

@ -0,0 +1,59 @@
---
toc_priority: 38
toc_title: FUNCTION
---
# CREATE FUNCTION {#create-function}
Creates a user defined function from a lambda expression. The expression must consist of function parameters, constants, operators, or calls to other functions.
**Syntax**
```sql
CREATE FUNCTION name AS (parameter0, ...) -> expression
```
A function can have an arbitrary number of parameters.
There are a few restrictions on the functions you can create:
- The name of a function must be unique among user defined and system functions.
- Recursive functions are not allowed.
- All variables used by a function must be specified in its parameter list.
If any restriction is violated, an exception is raised when you try to create the function.
**Example**
Query:
```sql
CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;
SELECT number, linear_equation(number, 2, 1) FROM numbers(3);
```
Result:
``` text
┌─number─┬─plus(multiply(2, number), 1)─┐
│ 0 │ 1 │
│ 1 │ 3 │
│ 2 │ 5 │
└────────┴──────────────────────────────┘
```
In the following query, a user defined function calls a [conditional function](../../../sql-reference/functions/conditional-functions.md):
```sql
CREATE FUNCTION parity_str AS (n) -> if(n % 2, 'odd', 'even');
SELECT number, parity_str(number) FROM numbers(3);
```
Result:
``` text
┌─number─┬─if(modulo(number, 2), 'odd', 'even')─┐
│ 0 │ even │
│ 1 │ odd │
│ 2 │ even │
└────────┴──────────────────────────────────────┘
```

View File

@ -12,6 +12,7 @@ toc_title: "Обзор"
- [TABLE](../../../sql-reference/statements/create/table.md)
- [VIEW](../../../sql-reference/statements/create/view.md)
- [DICTIONARY](../../../sql-reference/statements/create/dictionary.md)
- [FUNCTION](../../../sql-reference/statements/create/function.md)
- [USER](../../../sql-reference/statements/create/user.md)
- [ROLE](../../../sql-reference/statements/create/role.md)
- [ROW POLICY](../../../sql-reference/statements/create/row-policy.md)

View File

@ -97,3 +97,20 @@ DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster]
```
## DROP FUNCTION {#drop-function}
Deletes a user defined function created with [CREATE FUNCTION](./create/function.md).
System functions cannot be dropped.
**Syntax**
``` sql
DROP FUNCTION [IF EXISTS] function_name
```
**Example**
``` sql
CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;
DROP FUNCTION linear_equation;
```

View File

@ -109,11 +109,13 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION
- `CREATE TEMPORARY TABLE`
- `CREATE VIEW`
- `CREATE DICTIONARY`
- `CREATE FUNCTION`
- [DROP](#grant-drop)
- `DROP DATABASE`
- `DROP TABLE`
- `DROP VIEW`
- `DROP DICTIONARY`
- `DROP FUNCTION`
- [TRUNCATE](#grant-truncate)
- [OPTIMIZE](#grant-optimize)
- [SHOW](#grant-show)

View File

@ -18,7 +18,7 @@ OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION I
The query can be applied to tables of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, [MaterializedView](../../engines/table-engines/special/materializedview.md) and [Buffer](../../engines/table-engines/special/buffer.md). Other table engines are not supported.
If the `OPTIMIZE` query is applied to tables of the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family, ClickHouse creates a task for merging and waits for its execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled).
If the `OPTIMIZE` query is applied to tables of the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family, ClickHouse creates a task for merging and waits for its execution on all replicas (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `2`) or on the current replica (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `1`).
- By default, if the `OPTIMIZE` query fails to perform a merge, ClickHouse does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
@ -26,6 +26,11 @@ ClickHouse не оповещает клиента. Чтобы включить
- If you specify `FINAL`, optimization is performed even when all the data is already in one data part. Moreover, the merge is forced even if concurrent merges are performed.
- If you specify `DEDUPLICATE`, completely identical rows are collapsed (values in all columns are compared); this makes sense only for the MergeTree engine.
You can specify how long (in seconds) to wait for inactive replicas to execute `OPTIMIZE` queries with the [replication_wait_for_inactive_replica_timeout](../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
If `replication_alter_partitions_sync` is set to `2` and some replicas are not active for more than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an exception `UNFINISHED` is thrown.
## BY expression {#by-expression}
To perform deduplication on a custom set of columns, you can explicitly specify a list of columns or use any combination of the [`*`](../../sql-reference/statements/select/index.md#asterisk) asterisk, [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) expressions and the [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier) modifier, as sketched below.
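A hedged sketch (table and column names are hypothetical) of restricting deduplication to a subset of columns:

```sql
OPTIMIZE TABLE test_table DEDUPLICATE BY col_a, col_b;          -- compare only these columns
OPTIMIZE TABLE test_table DEDUPLICATE BY * EXCEPT created_at;   -- compare all columns but one
```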

View File

@ -13,4 +13,9 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
The `TRUNCATE` query is not supported for the following engines: [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md), [Buffer](../../engines/table-engines/special/buffer.md) and [Null](../../engines/table-engines/special/null.md).
You can set up waiting for actions to be executed on replicas with the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting.
You can specify how long (in seconds) to wait for inactive replicas to execute `TRUNCATE` queries with the [replication_wait_for_inactive_replica_timeout](../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
If `replication_alter_partitions_sync` is set to `2` and some replicas are not active for more than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an exception `UNFINISHED` is thrown.

View File

@ -971,7 +971,7 @@ if (ThreadFuzzer::instance().isEffective())
global_context->setMMappedFileCache(mmap_cache_size);
#if USE_EMBEDDED_COMPILER
constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 1024;
constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 128;
size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", compiled_expression_cache_size_default);
CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size);
#endif

View File

@ -331,7 +331,7 @@
<mmap_cache_size>1000</mmap_cache_size>
<!-- Cache size for compiled expressions.-->
<compiled_expression_cache_size>1073741824</compiled_expression_cache_size>
<compiled_expression_cache_size>134217728</compiled_expression_cache_size>
<!-- Path to data directory, with trailing slash. -->
<path>/var/lib/clickhouse/</path>

View File

@ -280,7 +280,7 @@ mark_cache_size: 5368709120
mmap_cache_size: 1000
# Cache size for compiled expressions.
compiled_expression_cache_size: 1073741824
compiled_expression_cache_size: 134217728
# Path to data directory, with trailing slash.
path: /var/lib/clickhouse/

View File

@ -261,7 +261,7 @@ dbms_target_include_directories (PUBLIC "${ClickHouse_SOURCE_DIR}/src" "${ClickH
target_include_directories (clickhouse_common_io PUBLIC "${ClickHouse_SOURCE_DIR}/src" "${ClickHouse_BINARY_DIR}/src")
if (USE_EMBEDDED_COMPILER)
dbms_target_link_libraries (PRIVATE ${REQUIRED_LLVM_LIBRARIES})
dbms_target_link_libraries (PUBLIC ${REQUIRED_LLVM_LIBRARIES})
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${LLVM_INCLUDE_DIRS})
endif ()

View File

@ -417,11 +417,7 @@ void StackTrace::toStringEveryLine(std::function<void(const std::string &)> call
std::string StackTrace::toString() const
{
/// Calculation of stack trace text is extremely slow.
/// We use simple cache because otherwise the server could be overloaded by trash queries.
static SimpleCache<decltype(toStringImpl), &toStringImpl> func_cached;
return func_cached(frame_pointers, offset, size);
return toStringStatic(frame_pointers, offset, size);
}
std::string StackTrace::toString(void ** frame_pointers_, size_t offset, size_t size)
@ -432,6 +428,23 @@ std::string StackTrace::toString(void ** frame_pointers_, size_t offset, size_t
for (size_t i = 0; i < size; ++i)
frame_pointers_copy[i] = frame_pointers_[i];
static SimpleCache<decltype(toStringImpl), &toStringImpl> func_cached;
return func_cached(frame_pointers_copy, offset, size);
return toStringStatic(frame_pointers_copy, offset, size);
}
static SimpleCache<decltype(toStringImpl), &toStringImpl> & cacheInstance()
{
static SimpleCache<decltype(toStringImpl), &toStringImpl> cache;
return cache;
}
std::string StackTrace::toStringStatic(const StackTrace::FramePointers & frame_pointers, size_t offset, size_t size)
{
/// Calculation of stack trace text is extremely slow.
/// We use simple cache because otherwise the server could be overloaded by trash queries.
return cacheInstance()(frame_pointers, offset, size);
}
void StackTrace::dropCache()
{
cacheInstance().drop();
}

View File

@ -61,6 +61,8 @@ public:
std::string toString() const;
static std::string toString(void ** frame_pointers, size_t offset, size_t size);
static std::string toStringStatic(const FramePointers & frame_pointers, size_t offset, size_t size);
static void dropCache();
static void symbolize(const FramePointers & frame_pointers, size_t offset, size_t size, StackTrace::Frames & frames);
void toStringEveryLine(std::function<void(const std::string &)> callback) const;

View File

@ -462,12 +462,22 @@ String SymbolIndex::getBuildIDHex() const
return build_id_hex;
}
MultiVersion<SymbolIndex>::Version SymbolIndex::instance(bool reload)
MultiVersion<SymbolIndex> & SymbolIndex::instanceImpl()
{
static MultiVersion<SymbolIndex> instance(std::unique_ptr<SymbolIndex>(new SymbolIndex));
if (reload)
instance.set(std::unique_ptr<SymbolIndex>(new SymbolIndex));
return instance.get();
return instance;
}
MultiVersion<SymbolIndex>::Version SymbolIndex::instance()
{
return instanceImpl().get();
}
void SymbolIndex::reload()
{
instanceImpl().set(std::unique_ptr<SymbolIndex>(new SymbolIndex));
/// Also drop stacktrace cache.
StackTrace::dropCache();
}
}

View File

@ -22,7 +22,8 @@ protected:
SymbolIndex() { update(); }
public:
static MultiVersion<SymbolIndex>::Version instance(bool reload = false);
static MultiVersion<SymbolIndex>::Version instance();
static void reload();
struct Symbol
{
@ -60,6 +61,7 @@ private:
Data data;
void update();
static MultiVersion<SymbolIndex> & instanceImpl();
};
}

View File

@ -52,14 +52,12 @@ public:
const std::string & format,
const Block & sample_block,
std::unique_ptr<ShellCommand> && command_,
Poco::Logger * log_,
std::vector<SendDataTask> && send_data_tasks = {},
const ShellCommandSourceConfiguration & configuration_ = {},
std::shared_ptr<ProcessPool> process_pool_ = nullptr)
: SourceWithProgress(sample_block)
, command(std::move(command_))
, configuration(configuration_)
, log(log_)
, process_pool(process_pool_)
{
for (auto && send_data_task : send_data_tasks)
@ -133,7 +131,6 @@ protected:
}
catch (...)
{
tryLogCurrentException(log);
command = nullptr;
throw;
}
@ -176,8 +173,6 @@ private:
size_t current_read_rows = 0;
Poco::Logger * log;
std::shared_ptr<ProcessPool> process_pool;
QueryPipeline pipeline;

View File

@ -72,7 +72,7 @@ Pipe ExecutableDictionarySource::loadAll()
ShellCommand::Config config(configuration.command);
auto process = ShellCommand::execute(config);
Pipe pipe(std::make_unique<ShellCommandSource>(context, configuration.format, sample_block, std::move(process), log));
Pipe pipe(std::make_unique<ShellCommandSource>(context, configuration.format, sample_block, std::move(process)));
return pipe;
}
@ -91,7 +91,7 @@ Pipe ExecutableDictionarySource::loadUpdatedAll()
LOG_TRACE(log, "loadUpdatedAll {}", command_with_update_field);
ShellCommand::Config config(command_with_update_field);
auto process = ShellCommand::execute(config);
Pipe pipe(std::make_unique<ShellCommandSource>(context, configuration.format, sample_block, std::move(process), log));
Pipe pipe(std::make_unique<ShellCommandSource>(context, configuration.format, sample_block, std::move(process)));
return pipe;
}
@ -126,7 +126,7 @@ Pipe ExecutableDictionarySource::getStreamForBlock(const Block & block)
}};
std::vector<ShellCommandSource::SendDataTask> tasks = {std::move(task)};
Pipe pipe(std::make_unique<ShellCommandSource>(context, configuration.format, sample_block, std::move(process), log, std::move(tasks)));
Pipe pipe(std::make_unique<ShellCommandSource>(context, configuration.format, sample_block, std::move(process), std::move(tasks)));
if (configuration.implicit_key)
pipe.addTransform(std::make_shared<TransformWithAdditionalColumns>(block, pipe.getHeader()));

View File

@ -120,7 +120,7 @@ Pipe ExecutablePoolDictionarySource::getStreamForBlock(const Block & block)
ShellCommandSourceConfiguration command_configuration;
command_configuration.read_fixed_number_of_rows = true;
command_configuration.number_of_rows_to_read = rows_to_read;
Pipe pipe(std::make_unique<ShellCommandSource>(context, configuration.format, sample_block, std::move(process), log, std::move(tasks), command_configuration, process_pool));
Pipe pipe(std::make_unique<ShellCommandSource>(context, configuration.format, sample_block, std::move(process), std::move(tasks), command_configuration, process_pool));
if (configuration.implicit_key)
pipe.addTransform(std::make_shared<TransformWithAdditionalColumns>(block, pipe.getHeader()));

View File

@ -2,18 +2,17 @@
#include <IO/ReadBufferFromMemory.h>
namespace DB
{
/** Allows to read from std::string-like object.
*/
/// Allows to read from std::string-like object.
class ReadBufferFromString : public ReadBufferFromMemory
{
public:
/// std::string or something similar
template <typename S>
explicit ReadBufferFromString(const S & s) : ReadBufferFromMemory(s.data(), s.size()) {}
};
explicit ReadBufferFromString(std::string_view s) : ReadBufferFromMemory(s.data(), s.size()) {}
};
}
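
Replacing the template over "std::string-like" types with a std::string_view parameter shrinks the interface while keeping old call sites working. A small self-contained sketch (hypothetical *Like types, not the real buffer classes) showing what still binds to the new constructor:

/// Hypothetical sketch, not ClickHouse code.
#include <cstddef>
#include <string>
#include <string_view>

struct MemoryBufferLike
{
    const char * data;
    size_t size;
    MemoryBufferLike(const char * data_, size_t size_) : data(data_), size(size_) {}
};

struct StringBufferLike : MemoryBufferLike
{
    explicit StringBufferLike(std::string_view s) : MemoryBufferLike(s.data(), s.size()) {}
};

int main()
{
    std::string owned = "hello";
    StringBufferLike from_string(owned);      /// std::string converts implicitly
    StringBufferLike from_literal("literal"); /// so do string literals and string_views
    (void)from_string;
    (void)from_literal;
}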

View File

@ -786,7 +786,7 @@ void NO_INLINE Aggregator::executeWithoutKeyImpl(
AggregatedDataWithoutKey & res,
size_t rows,
AggregateFunctionInstruction * aggregate_instructions,
Arena * arena)
Arena * arena) const
{
#if USE_EMBEDDED_COMPILER
if constexpr (use_compiled_functions)
@ -865,7 +865,7 @@ void NO_INLINE Aggregator::executeOnIntervalWithoutKeyImpl(
void Aggregator::prepareAggregateInstructions(Columns columns, AggregateColumns & aggregate_columns, Columns & materialized_columns,
AggregateFunctionInstructions & aggregate_functions_instructions, NestedColumnsHolder & nested_columns_holder)
AggregateFunctionInstructions & aggregate_functions_instructions, NestedColumnsHolder & nested_columns_holder) const
{
for (size_t i = 0; i < params.aggregates_size; ++i)
aggregate_columns[i].resize(params.aggregates[i].arguments.size());
@ -917,7 +917,7 @@ void Aggregator::prepareAggregateInstructions(Columns columns, AggregateColumns
bool Aggregator::executeOnBlock(const Block & block, AggregatedDataVariants & result,
ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, bool & no_more_keys)
ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, bool & no_more_keys) const
{
UInt64 num_rows = block.rows();
return executeOnBlock(block.getColumns(), num_rows, result, key_columns, aggregate_columns, no_more_keys);
@ -925,7 +925,7 @@ bool Aggregator::executeOnBlock(const Block & block, AggregatedDataVariants & re
bool Aggregator::executeOnBlock(Columns columns, UInt64 num_rows, AggregatedDataVariants & result,
ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, bool & no_more_keys)
ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, bool & no_more_keys) const
{
/// `result` will destroy the states of aggregate functions in the destructor
result.aggregator = this;
@ -1058,7 +1058,7 @@ bool Aggregator::executeOnBlock(Columns columns, UInt64 num_rows, AggregatedData
}
void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, const String & tmp_path)
void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, const String & tmp_path) const
{
Stopwatch watch;
size_t rows = data_variants.size();
@ -1130,7 +1130,7 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co
}
void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants)
void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants) const
{
String tmp_path = params.tmp_volume->getDisk()->getPath();
return writeToTemporaryFile(data_variants, tmp_path);
@ -1192,7 +1192,7 @@ template <typename Method>
void Aggregator::writeToTemporaryFileImpl(
AggregatedDataVariants & data_variants,
Method & method,
IBlockOutputStream & out)
IBlockOutputStream & out) const
{
size_t max_temporary_block_size_rows = 0;
size_t max_temporary_block_size_bytes = 0;
@ -2311,7 +2311,7 @@ void NO_INLINE Aggregator::mergeWithoutKeyStreamsImpl(
block.clear();
}
bool Aggregator::mergeBlock(Block block, AggregatedDataVariants & result, bool & no_more_keys)
bool Aggregator::mergeOnBlock(Block block, AggregatedDataVariants & result, bool & no_more_keys) const
{
/// `result` will destroy the states of aggregate functions in the destructor
result.aggregator = this;
@ -2661,7 +2661,7 @@ void NO_INLINE Aggregator::convertBlockToTwoLevelImpl(
}
std::vector<Block> Aggregator::convertBlockToTwoLevel(const Block & block)
std::vector<Block> Aggregator::convertBlockToTwoLevel(const Block & block) const
{
if (!block)
return {};
@ -2753,7 +2753,7 @@ void Aggregator::destroyWithoutKey(AggregatedDataVariants & result) const
}
void Aggregator::destroyAllAggregateStates(AggregatedDataVariants & result)
void Aggregator::destroyAllAggregateStates(AggregatedDataVariants & result) const
{
if (result.empty())
return;
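
The recurring change in this file is const-qualification: execution and spill paths no longer need a mutable Aggregator, so AggregatedDataVariants can hold a const pointer (see the header diff below). A reduced sketch of the pattern with hypothetical *Like types, where the only state still written from const methods is marked mutable:

/// Hypothetical sketch, not ClickHouse code.
#include <string>
#include <vector>

class AggregatorLike
{
public:
    /// const: the aggregation result lives in the data-variants object, not here.
    void writeToTemporaryFile(const std::string & path) const
    {
        temporary_files.push_back(path); /// allowed because the member is mutable
    }

    bool hasTemporaryFiles() const { return !temporary_files.empty(); }

private:
    /// The only state still written from const methods.
    mutable std::vector<std::string> temporary_files;
};

/// With every execution path const, a const pointer is enough to destroy
/// aggregate-function states on exception paths.
struct DataVariantsLike
{
    const AggregatorLike * aggregator = nullptr;
};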

View File

@ -506,7 +506,7 @@ struct AggregatedDataVariants : private boost::noncopyable
* But this can hardly be done simply because it is planned to put variable-length strings into the same pool.
* In this case, the pool will not be able to know with what offsets objects are stored.
*/
Aggregator * aggregator = nullptr;
const Aggregator * aggregator = nullptr;
size_t keys_size{}; /// Number of keys. NOTE do we need this field?
Sizes key_sizes; /// Dimensions of keys, if keys of fixed length
@ -975,11 +975,14 @@ public:
/// Process one block. Return false if the processing should be aborted (with group_by_overflow_mode = 'break').
bool executeOnBlock(const Block & block, AggregatedDataVariants & result,
ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, /// Passed to not create them anew for each block
bool & no_more_keys);
bool & no_more_keys) const;
bool executeOnBlock(Columns columns, UInt64 num_rows, AggregatedDataVariants & result,
ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, /// Passed to not create them anew for each block
bool & no_more_keys);
bool & no_more_keys) const;
/// Used for aggregate projection.
bool mergeOnBlock(Block block, AggregatedDataVariants & result, bool & no_more_keys) const;
/** Convert the aggregation data structure into a block.
* If overflow_row = true, then aggregates for rows that are not included in max_rows_to_group_by are put in the first block.
@ -996,8 +999,6 @@ public:
/// Merge partially aggregated blocks separated to buckets into one data structure.
void mergeBlocks(BucketToBlocks bucket_to_blocks, AggregatedDataVariants & result, size_t max_threads);
bool mergeBlock(Block block, AggregatedDataVariants & result, bool & no_more_keys);
/// Merge several partially aggregated blocks into one.
/// Precondition: for all blocks block.info.is_overflows flag must be the same.
/// (either all blocks are from overflow data or none blocks are).
@ -1007,11 +1008,11 @@ public:
/** Split block with partially-aggregated data to many blocks, as if two-level method of aggregation was used.
* This is needed to simplify merging of that data with other results, that are already two-level.
*/
std::vector<Block> convertBlockToTwoLevel(const Block & block);
std::vector<Block> convertBlockToTwoLevel(const Block & block) const;
/// For external aggregation.
void writeToTemporaryFile(AggregatedDataVariants & data_variants, const String & tmp_path);
void writeToTemporaryFile(AggregatedDataVariants & data_variants);
void writeToTemporaryFile(AggregatedDataVariants & data_variants, const String & tmp_path) const;
void writeToTemporaryFile(AggregatedDataVariants & data_variants) const;
bool hasTemporaryFiles() const { return !temporary_files.empty(); }
@ -1083,7 +1084,7 @@ private:
Poco::Logger * log = &Poco::Logger::get("Aggregator");
/// For external aggregation.
TemporaryFiles temporary_files;
mutable TemporaryFiles temporary_files;
#if USE_EMBEDDED_COMPILER
std::shared_ptr<CompiledAggregateFunctionsHolder> compiled_aggregate_functions_holder;
@ -1106,7 +1107,7 @@ private:
/** Call `destroy` methods for states of aggregate functions.
* Used in the exception handler for aggregation, since RAII in this case is not applicable.
*/
void destroyAllAggregateStates(AggregatedDataVariants & result);
void destroyAllAggregateStates(AggregatedDataVariants & result) const;
/// Process one data block, aggregate the data into a hash table.
@ -1136,7 +1137,7 @@ private:
AggregatedDataWithoutKey & res,
size_t rows,
AggregateFunctionInstruction * aggregate_instructions,
Arena * arena);
Arena * arena) const;
static void executeOnIntervalWithoutKeyImpl(
AggregatedDataWithoutKey & res,
@ -1149,7 +1150,7 @@ private:
void writeToTemporaryFileImpl(
AggregatedDataVariants & data_variants,
Method & method,
IBlockOutputStream & out);
IBlockOutputStream & out) const;
/// Merge NULL key data from hash table `src` into `dst`.
template <typename Method, typename Table>
@ -1304,7 +1305,7 @@ private:
AggregateColumns & aggregate_columns,
Columns & materialized_columns,
AggregateFunctionInstructions & instructions,
NestedColumnsHolder & nested_columns_holder);
NestedColumnsHolder & nested_columns_holder) const;
void addSingleKeyToAggregateColumns(
const AggregatedDataVariants & data_variants,

View File

@ -353,7 +353,8 @@ static bool isCompilableFunction(const ActionsDAG::Node & node, const std::unord
static CompileDAG getCompilableDAG(
const ActionsDAG::Node * root,
ActionsDAG::NodeRawConstPtrs & children)
ActionsDAG::NodeRawConstPtrs & children,
const std::unordered_set<const ActionsDAG::Node *> & lazy_executed_nodes)
{
/// Extract CompileDAG from root actions dag node.
@ -376,29 +377,29 @@ static CompileDAG getCompilableDAG(
const auto * node = frame.node;
bool is_compilable_constant = isCompilableConstant(*node);
bool is_compilable_function = isCompilableFunction(*node, {});
bool is_compilable_function = isCompilableFunction(*node, lazy_executed_nodes);
if (!is_compilable_function || is_compilable_constant)
{
CompileDAG::Node compile_node;
compile_node.function = node->function_base;
compile_node.result_type = node->result_type;
CompileDAG::Node compile_node;
compile_node.function = node->function_base;
compile_node.result_type = node->result_type;
if (is_compilable_constant)
{
compile_node.type = CompileDAG::CompileType::CONSTANT;
compile_node.column = node->column;
}
else
{
if (is_compilable_constant)
{
compile_node.type = CompileDAG::CompileType::CONSTANT;
compile_node.column = node->column;
}
else
{
compile_node.type = CompileDAG::CompileType::INPUT;
children.emplace_back(node);
}
}
visited_node_to_compile_dag_position[node] = dag.getNodesCount();
dag.addNode(std::move(compile_node));
stack.pop();
continue;
visited_node_to_compile_dag_position[node] = dag.getNodesCount();
dag.addNode(std::move(compile_node));
stack.pop();
continue;
}
while (frame.next_child_to_visit < node->children.size())
@ -568,7 +569,7 @@ void ActionsDAG::compileFunctions(size_t min_count_to_compile_expression, const
for (auto & node : nodes_to_compile)
{
NodeRawConstPtrs new_children;
auto dag = getCompilableDAG(node, new_children);
auto dag = getCompilableDAG(node, new_children, lazy_executed_nodes);
if (dag.getInputNodesCount() == 0)
continue;
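
The bug fixed here is that getCompilableDAG ignored lazily executed nodes, which the outer isCompilableFunction check already knew about. A rough, self-contained sketch of the idea, not the real ActionsDAG traversal: nodes in the lazy set become inputs of the compiled fragment and are never descended into.

/// Hypothetical sketch, not ClickHouse code.
#include <stack>
#include <unordered_set>
#include <vector>

struct Node { std::vector<const Node *> children; };

std::vector<const Node *> collectCompilable(
    const Node * root,
    const std::unordered_set<const Node *> & lazy_executed_nodes)
{
    std::vector<const Node *> compilable;
    std::stack<const Node *> stack;
    std::unordered_set<const Node *> visited;

    stack.push(root);
    while (!stack.empty())
    {
        const Node * node = stack.top();
        stack.pop();

        if (!visited.insert(node).second)
            continue;

        /// A lazily executed node must stay an input of the compiled fragment;
        /// descending into it would inline a short-circuited branch.
        if (lazy_executed_nodes.contains(node))
            continue;

        compilable.push_back(node);
        for (const Node * child : node->children)
            stack.push(child);
    }
    return compilable;
}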

View File

@ -338,7 +338,7 @@ BlockIO InterpreterSystemQuery::execute()
{
#if defined(__ELF__) && !defined(__FreeBSD__)
getContext()->checkAccess(AccessType::SYSTEM_RELOAD_SYMBOLS);
(void)SymbolIndex::instance(true);
SymbolIndex::reload();
break;
#else
throw Exception("SYSTEM RELOAD SYMBOLS is not supported on current platform", ErrorCodes::NOT_IMPLEMENTED);

View File

@ -395,9 +395,14 @@ AggregatingTransform::AggregatingTransform(Block header, AggregatingTransformPar
}
AggregatingTransform::AggregatingTransform(
Block header, AggregatingTransformParamsPtr params_, ManyAggregatedDataPtr many_data_,
size_t current_variant, size_t max_threads_, size_t temporary_data_merge_threads_)
: IProcessor({std::move(header)}, {params_->getHeader()}), params(std::move(params_))
Block header,
AggregatingTransformParamsPtr params_,
ManyAggregatedDataPtr many_data_,
size_t current_variant,
size_t max_threads_,
size_t temporary_data_merge_threads_)
: IProcessor({std::move(header)}, {params_->getHeader()})
, params(std::move(params_))
, key_columns(params->params.keys_size)
, aggregate_columns(params->params.aggregates_size)
, many_data(std::move(many_data_))
@ -525,7 +530,7 @@ void AggregatingTransform::consume(Chunk chunk)
{
auto block = getInputs().front().getHeader().cloneWithColumns(chunk.detachColumns());
block = materializeBlock(block);
if (!params->aggregator.mergeBlock(block, variants, no_more_keys))
if (!params->aggregator.mergeOnBlock(block, variants, no_more_keys))
is_consume_finished = true;
}
else
@ -547,7 +552,7 @@ void AggregatingTransform::initGenerate()
if (variants.empty() && params->params.keys_size == 0 && !params->params.empty_result_for_aggregation_by_empty_set)
{
if (params->only_merge)
params->aggregator.mergeBlock(getInputs().front().getHeader(), variants, no_more_keys);
params->aggregator.mergeOnBlock(getInputs().front().getHeader(), variants, no_more_keys);
else
params->aggregator.executeOnBlock(getInputs().front().getHeader(), variants, key_columns, aggregate_columns, no_more_keys);
}

View File

@ -27,15 +27,38 @@ public:
class IBlockInputStream;
using BlockInputStreamPtr = std::shared_ptr<IBlockInputStream>;
using AggregatorList = std::list<Aggregator>;
using AggregatorListPtr = std::shared_ptr<AggregatorList>;
struct AggregatingTransformParams
{
Aggregator::Params params;
Aggregator aggregator;
/// Each params instance holds a list of the aggregators used in the query. It's needed because we need
/// a pointer to an aggregator to properly destroy complex aggregation states on exception
/// (see comments in AggregatedDataVariants). However, this pointer might not be valid, because
/// we can have two different aggregators at the same time due to a mixed pipeline of aggregate
/// projections, and one of them might get destroyed before it is used.
AggregatorListPtr aggregator_list_ptr;
Aggregator & aggregator;
bool final;
bool only_merge = false;
AggregatingTransformParams(const Aggregator::Params & params_, bool final_)
: params(params_), aggregator(params), final(final_) {}
: params(params_)
, aggregator_list_ptr(std::make_shared<AggregatorList>())
, aggregator(*aggregator_list_ptr->emplace(aggregator_list_ptr->end(), params))
, final(final_)
{
}
AggregatingTransformParams(const Aggregator::Params & params_, const AggregatorListPtr & aggregator_list_ptr_, bool final_)
: params(params_)
, aggregator_list_ptr(aggregator_list_ptr_)
, aggregator(*aggregator_list_ptr->emplace(aggregator_list_ptr->end(), params))
, final(final_)
{
}
Block getHeader() const { return aggregator.getHeader(final); }
@ -82,9 +105,13 @@ public:
AggregatingTransform(Block header, AggregatingTransformParamsPtr params_);
/// For Parallel aggregating.
AggregatingTransform(Block header, AggregatingTransformParamsPtr params_,
ManyAggregatedDataPtr many_data, size_t current_variant,
size_t max_threads, size_t temporary_data_merge_threads);
AggregatingTransform(
Block header,
AggregatingTransformParamsPtr params_,
ManyAggregatedDataPtr many_data,
size_t current_variant,
size_t max_threads,
size_t temporary_data_merge_threads);
~AggregatingTransform() override;
String getName() const override { return "AggregatingTransform"; }
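
A condensed sketch of the lifetime trick introduced above, with a hypothetical Aggregator stand-in: std::list never relocates its elements, so the Aggregator& (and any raw pointer stored in the data-variants object) stays valid for as long as any params object keeps the shared list alive.

/// Hypothetical sketch, not ClickHouse code.
#include <list>
#include <memory>

struct Aggregator { /* hashed aggregation state */ };

using AggregatorList = std::list<Aggregator>;
using AggregatorListPtr = std::shared_ptr<AggregatorList>;

struct Params
{
    AggregatorListPtr aggregator_list_ptr;
    Aggregator & aggregator;

    Params()
        : aggregator_list_ptr(std::make_shared<AggregatorList>())
        /// std::list never relocates elements, so this reference (and any raw
        /// pointer taken from it) survives later insertions into the list.
        , aggregator(*aggregator_list_ptr->emplace(aggregator_list_ptr->end()))
    {
    }

    /// A second params object for another pipeline (e.g. a projection) shares
    /// the list, so both aggregators live until the last params is destroyed.
    explicit Params(const AggregatorListPtr & shared)
        : aggregator_list_ptr(shared)
        , aggregator(*aggregator_list_ptr->emplace(aggregator_list_ptr->end()))
    {
    }
};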

View File

@ -908,6 +908,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
{
/// Check extra parts at different disks, in order to not allow to miss data parts at undefined disks.
std::unordered_set<String> defined_disk_names;
for (const auto & disk_ptr : disks)
defined_disk_names.insert(disk_ptr->getName());
@ -917,9 +918,10 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
{
for (const auto it = disk->iterateDirectory(relative_data_path); it->isValid(); it->next())
{
MergeTreePartInfo part_info;
if (MergeTreePartInfo::tryParsePartName(it->name(), &part_info, format_version))
throw Exception("Part " + backQuote(it->name()) + " was found on disk " + backQuote(disk_name) + " which is not defined in the storage policy", ErrorCodes::UNKNOWN_DISK);
if (MergeTreePartInfo::tryParsePartName(it->name(), format_version))
throw Exception(ErrorCodes::UNKNOWN_DISK,
"Part {} was found on disk {} which is not defined in the storage policy",
backQuote(it->name()), backQuote(disk_name));
}
}
}
@ -930,10 +932,13 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
for (auto disk_it = disks.rbegin(); disk_it != disks.rend(); ++disk_it)
{
auto disk_ptr = *disk_it;
for (auto it = disk_ptr->iterateDirectory(relative_data_path); it->isValid(); it->next())
{
/// Skip temporary directories, file 'format_version.txt' and directory 'detached'.
if (startsWith(it->name(), "tmp") || it->name() == MergeTreeData::FORMAT_VERSION_FILE_NAME || it->name() == MergeTreeData::DETACHED_DIR_NAME)
if (startsWith(it->name(), "tmp")
|| it->name() == MergeTreeData::FORMAT_VERSION_FILE_NAME
|| it->name() == MergeTreeData::DETACHED_DIR_NAME)
continue;
if (!startsWith(it->name(), MergeTreeWriteAheadLog::WAL_FILE_NAME))
@ -980,25 +985,34 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
{
pool.scheduleOrThrowOnError([&]
{
const auto & part_name = part_names_with_disk.first;
const auto part_disk_ptr = part_names_with_disk.second;
const auto & [part_name, part_disk_ptr] = part_names_with_disk;
MergeTreePartInfo part_info;
if (!MergeTreePartInfo::tryParsePartName(part_name, &part_info, format_version))
auto part_opt = MergeTreePartInfo::tryParsePartName(part_name, format_version);
if (!part_opt)
return;
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, part_disk_ptr, 0);
auto part = createPart(part_name, part_info, single_disk_volume, part_name);
auto part = createPart(part_name, *part_opt, single_disk_volume, part_name);
bool broken = false;
String part_path = fs::path(relative_data_path) / part_name;
String marker_path = fs::path(part_path) / IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME;
if (part_disk_ptr->exists(marker_path))
{
LOG_WARNING(log, "Detaching stale part {}{}, which should have been deleted after a move. That can only happen after unclean restart of ClickHouse after move of a part having an operation blocking that stale copy of part.", getFullPathOnDisk(part_disk_ptr), part_name);
LOG_WARNING(log,
"Detaching stale part {}{}, which should have been deleted after a move. That can only happen "
"after unclean restart of ClickHouse after move of a part having an operation blocking that "
"stale copy of part.",
getFullPathOnDisk(part_disk_ptr), part_name);
std::lock_guard loading_lock(mutex);
broken_parts_to_detach.push_back(part);
++suspicious_broken_parts;
return;
}
@ -1026,25 +1040,34 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
/// Ignore broken parts that can appear as a result of hard server restart.
if (broken)
{
LOG_ERROR(log, "Detaching broken part {}{}. If it happened after update, it is likely because of backward incompability. You need to resolve this manually", getFullPathOnDisk(part_disk_ptr), part_name);
LOG_ERROR(log,
"Detaching broken part {}{}. If it happened after update, it is likely because of backward "
"incompatibility. You need to resolve this manually",
getFullPathOnDisk(part_disk_ptr), part_name);
std::lock_guard loading_lock(mutex);
broken_parts_to_detach.push_back(part);
++suspicious_broken_parts;
return;
}
if (!part->index_granularity_info.is_adaptive)
has_non_adaptive_parts.store(true, std::memory_order_relaxed);
else
has_adaptive_parts.store(true, std::memory_order_relaxed);
part->modification_time = part_disk_ptr->getLastModified(fs::path(relative_data_path) / part_name).epochTime();
/// Assume that all parts are Committed, covered parts will be detected and marked as Outdated later
part->setState(DataPartState::Committed);
std::lock_guard loading_lock(mutex);
if (!data_parts_indexes.insert(part).second)
throw Exception("Part " + part->name + " already exists", ErrorCodes::DUPLICATE_DATA_PART);
throw Exception(ErrorCodes::DUPLICATE_DATA_PART, "Part {} already exists", part->name);
addPartContributionToDataVolume(part);
});
@ -3328,11 +3351,12 @@ RestoreDataTasks MergeTreeData::restoreDataPartsFromBackup(const BackupPtr & bac
Strings part_names = backup->list(data_path_in_backup);
for (const String & part_name : part_names)
{
MergeTreePartInfo part_info;
if (!MergeTreePartInfo::tryParsePartName(part_name, &part_info, format_version))
const auto part_info = MergeTreePartInfo::tryParsePartName(part_name, format_version);
if (!part_info)
continue;
if (!partition_ids.empty() && !partition_ids.contains(part_info.partition_id))
if (!partition_ids.empty() && !partition_ids.contains(part_info->partition_id))
continue;
UInt64 total_size_of_part = 0;
@ -3369,7 +3393,7 @@ RestoreDataTasks MergeTreeData::restoreDataPartsFromBackup(const BackupPtr & bac
}
auto single_disk_volume = std::make_shared<SingleDiskVolume>(disk->getName(), disk, 0);
auto part = createPart(part_name, part_info, single_disk_volume, relative_temp_part_dir);
auto part = createPart(part_name, *part_info, single_disk_volume, relative_temp_part_dir);
part->loadColumnsChecksumsIndexes(false, true);
renameTempPartAndAdd(part, increment);
};
@ -3581,11 +3605,10 @@ std::vector<DetachedPartInfo> MergeTreeData::getDetachedParts() const
{
for (auto it = disk->iterateDirectory(detached_path); it->isValid(); it->next())
{
res.emplace_back();
auto & part = res.back();
DetachedPartInfo::tryParseDetachedPartName(it->name(), part, format_version);
auto part = DetachedPartInfo::parseDetachedPartName(it->name(), format_version);
part.disk = disk->getName();
res.push_back(std::move(part));
}
}
}
@ -3656,7 +3679,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const
validateDetachedPartName(part_id);
renamed_parts.addPart(part_id, "attaching_" + part_id);
if (MergeTreePartInfo::tryParsePartName(part_id, nullptr, format_version))
if (MergeTreePartInfo::tryParsePartName(part_id, format_version))
name_to_disk[part_id] = getDiskForPart(part_id, source_dir);
}
else
@ -3672,12 +3695,11 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const
for (auto it = disk->iterateDirectory(relative_data_path + source_dir); it->isValid(); it->next())
{
const String & name = it->name();
MergeTreePartInfo part_info;
// TODO what if name contains "_tryN" suffix?
/// Parts with prefix in name (e.g. attaching_1_3_3_0, deleting_1_3_3_0) will be ignored
if (!MergeTreePartInfo::tryParsePartName(name, &part_info, format_version)
|| part_info.partition_id != partition_id)
if (auto part_opt = MergeTreePartInfo::tryParsePartName(name, format_version);
!part_opt || part_opt->partition_id != partition_id)
{
continue;
}
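
Throughout this file, the out-parameter form tryParsePartName(name, &part_info, format_version) gives way to an optional return, which composes naturally with structured bindings as in the loading loop above. A self-contained sketch of the call-site shape, with a hypothetical PartInfoLike and tryParse rather than the real parser:

/// Hypothetical sketch, not ClickHouse code.
#include <cstddef>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

struct PartInfoLike { std::string partition_id; };

std::optional<PartInfoLike> tryParse(std::string_view name)
{
    const size_t pos = name.find('_');
    if (pos == 0 || pos == std::string_view::npos)
        return std::nullopt; /// not a part name
    return PartInfoLike{std::string(name.substr(0, pos))};
}

void loadParts(const std::vector<std::pair<std::string, int>> & part_names_with_disk)
{
    for (const auto & name_with_disk : part_names_with_disk)
    {
        const auto & [part_name, disk] = name_with_disk; /// unpack like the loop above
        auto part_opt = tryParse(part_name);
        if (!part_opt)
            continue; /// skip non-part directories
        /// ... create the part from *part_opt ...
        (void)disk;
    }
}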

View File

@ -267,6 +267,8 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
auto many_data = std::make_shared<ManyAggregatedData>(projection_pipe.numOutputPorts() + ordinary_pipe.numOutputPorts());
size_t counter = 0;
AggregatorListPtr aggregator_list_ptr = std::make_shared<AggregatorList>();
// TODO apply in_order_optimization here
auto build_aggregate_pipe = [&](Pipe & pipe, bool projection)
{
@ -306,7 +308,8 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
settings.min_count_to_compile_aggregate_expression,
header_before_aggregation); // The source header is also an intermediate header
transform_params = std::make_shared<AggregatingTransformParams>(std::move(params), query_info.projection->aggregate_final);
transform_params = std::make_shared<AggregatingTransformParams>(
std::move(params), aggregator_list_ptr, query_info.projection->aggregate_final);
/// This part is hacky.
/// We want AggregatingTransform to work with aggregate states instead of normal columns.
@ -336,7 +339,8 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
settings.compile_aggregate_expressions,
settings.min_count_to_compile_aggregate_expression);
transform_params = std::make_shared<AggregatingTransformParams>(std::move(params), query_info.projection->aggregate_final);
transform_params = std::make_shared<AggregatingTransformParams>(
std::move(params), aggregator_list_ptr, query_info.projection->aggregate_final);
}
pipe.resize(pipe.numOutputPorts(), true, true);

View File

@ -15,13 +15,12 @@ namespace ErrorCodes
MergeTreePartInfo MergeTreePartInfo::fromPartName(const String & part_name, MergeTreeDataFormatVersion format_version)
{
MergeTreePartInfo part_info;
if (!tryParsePartName(part_name, &part_info, format_version))
throw Exception("Unexpected part name: " + part_name, ErrorCodes::BAD_DATA_PART_NAME);
return part_info;
if (auto part_opt = tryParsePartName(part_name, format_version))
return *part_opt;
else
throw Exception(ErrorCodes::BAD_DATA_PART_NAME, "Unexpected part name: {}", part_name);
}
void MergeTreePartInfo::validatePartitionID(const String & partition_id, MergeTreeDataFormatVersion format_version)
{
if (partition_id.empty())
@ -43,22 +42,26 @@ void MergeTreePartInfo::validatePartitionID(const String & partition_id, MergeTr
}
bool MergeTreePartInfo::tryParsePartName(const String & part_name, MergeTreePartInfo * part_info, MergeTreeDataFormatVersion format_version)
std::optional<MergeTreePartInfo> MergeTreePartInfo::tryParsePartName(
std::string_view part_name, MergeTreeDataFormatVersion format_version)
{
ReadBufferFromString in(part_name);
String partition_id;
if (format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING)
{
UInt32 min_yyyymmdd = 0;
UInt32 max_yyyymmdd = 0;
if (!tryReadIntText(min_yyyymmdd, in)
|| !checkChar('_', in)
|| !tryReadIntText(max_yyyymmdd, in)
|| !checkChar('_', in))
{
return false;
return std::nullopt;
}
partition_id = toString(min_yyyymmdd / 100);
}
else
@ -76,9 +79,7 @@ bool MergeTreePartInfo::tryParsePartName(const String & part_name, MergeTreePart
/// Sanity check
if (partition_id.empty())
{
return false;
}
return std::nullopt;
Int64 min_block_num = 0;
Int64 max_block_num = 0;
@ -91,14 +92,12 @@ bool MergeTreePartInfo::tryParsePartName(const String & part_name, MergeTreePart
|| !checkChar('_', in)
|| !tryReadIntText(level, in))
{
return false;
return std::nullopt;
}
/// Sanity check
if (min_block_num > max_block_num)
{
return false;
}
return std::nullopt;
if (!in.eof())
{
@ -106,29 +105,30 @@ bool MergeTreePartInfo::tryParsePartName(const String & part_name, MergeTreePart
|| !tryReadIntText(mutation, in)
|| !in.eof())
{
return false;
return std::nullopt;
}
}
if (part_info)
MergeTreePartInfo part_info;
part_info.partition_id = std::move(partition_id);
part_info.min_block = min_block_num;
part_info.max_block = max_block_num;
if (level == LEGACY_MAX_LEVEL)
{
part_info->partition_id = std::move(partition_id);
part_info->min_block = min_block_num;
part_info->max_block = max_block_num;
if (level == LEGACY_MAX_LEVEL)
{
/// We (accidentally) had two different max levels until 21.6 and it might cause logical errors like
/// "Part 20170601_20170630_0_2_999999999 intersects 201706_0_1_4294967295".
/// So we replace unexpected max level to make contains(...) method and comparison operators work
/// correctly with such virtual parts. On part name serialization we will use legacy max level to keep the name unchanged.
part_info->use_leagcy_max_level = true;
level = MAX_LEVEL;
}
part_info->level = level;
part_info->mutation = mutation;
/// We (accidentally) had two different max levels until 21.6 and it might cause logical errors like
/// "Part 20170601_20170630_0_2_999999999 intersects 201706_0_1_4294967295".
/// So we replace unexpected max level to make contains(...) method and comparison operators work
/// correctly with such virtual parts. On part name serialization we will use legacy max level to keep the name unchanged.
part_info.use_leagcy_max_level = true;
level = MAX_LEVEL;
}
return true;
part_info.level = level;
part_info.mutation = mutation;
return part_info;
}
@ -235,55 +235,75 @@ String MergeTreePartInfo::getPartNameV0(DayNum left_date, DayNum right_date) con
return wb.str();
}
const std::vector<String> DetachedPartInfo::DETACH_REASONS =
{
"broken",
"unexpected",
"noquorum",
"ignored",
"broken-on-start",
"clone",
"attaching",
"deleting",
"tmp-fetch",
};
bool DetachedPartInfo::tryParseDetachedPartName(const String & dir_name, DetachedPartInfo & part_info,
MergeTreeDataFormatVersion format_version)
DetachedPartInfo DetachedPartInfo::parseDetachedPartName(
std::string_view dir_name, MergeTreeDataFormatVersion format_version)
{
DetachedPartInfo part_info;
part_info.dir_name = dir_name;
/// First, try to find known prefix and parse dir_name as <prefix>_<partname>.
/// First, try to find a known prefix and parse dir_name as <prefix>_<part_name>.
/// Arbitrary strings are not allowed for partition_id, so known_prefix cannot be confused with partition_id.
for (const auto & known_prefix : DETACH_REASONS)
for (std::string_view known_prefix : DETACH_REASONS)
{
if (dir_name.starts_with(known_prefix) && known_prefix.size() < dir_name.size() && dir_name[known_prefix.size()] == '_')
if (dir_name.starts_with(known_prefix)
&& known_prefix.size() < dir_name.size()
&& dir_name[known_prefix.size()] == '_')
{
part_info.prefix = known_prefix;
String part_name = dir_name.substr(known_prefix.size() + 1);
bool parsed = MergeTreePartInfo::tryParsePartName(part_name, &part_info, format_version);
return part_info.valid_name = parsed;
const std::string_view part_name = dir_name.substr(known_prefix.size() + 1);
if (auto part_opt = MergeTreePartInfo::tryParsePartName(part_name, format_version))
{
part_info.valid_name = true;
part_info.addParsedPartInfo(*part_opt);
}
else
part_info.valid_name = false;
return part_info;
}
}
/// Next, try to parse dir_name as <part_name>.
if (MergeTreePartInfo::tryParsePartName(dir_name, &part_info, format_version))
return part_info.valid_name = true;
if (auto part_opt = MergeTreePartInfo::tryParsePartName(dir_name, format_version))
{
part_info.valid_name = true;
part_info.addParsedPartInfo(*part_opt);
return part_info;
}
/// Next, as <prefix>_<partname>. Use entire name as prefix if it fails.
part_info.prefix = dir_name;
const auto first_separator = dir_name.find_first_of('_');
const size_t first_separator = dir_name.find_first_of('_');
if (first_separator == String::npos)
return part_info.valid_name = false;
{
part_info.valid_name = false;
return part_info;
}
const auto part_name = dir_name.substr(first_separator + 1,
dir_name.size() - first_separator - 1);
if (!MergeTreePartInfo::tryParsePartName(part_name, &part_info, format_version))
return part_info.valid_name = false;
const std::string_view part_name = dir_name.substr(
first_separator + 1,
dir_name.size() - first_separator - 1);
part_info.prefix = dir_name.substr(0, first_separator);
return part_info.valid_name = true;
if (auto part_opt = MergeTreePartInfo::tryParsePartName(part_name, format_version))
{
part_info.valid_name = true;
part_info.prefix = dir_name.substr(0, first_separator);
part_info.addParsedPartInfo(*part_opt);
}
else
part_info.valid_name = false;
return part_info;
}
void DetachedPartInfo::addParsedPartInfo(const MergeTreePartInfo& part)
{
// Both classes are aggregates, so this is OK.
static_cast<MergeTreePartInfo &>(*this) = part;
}
}
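
parseDetachedPartName now always returns an object and records success in valid_name, trying, in order: a known <prefix>_<part_name>, a bare <part_name>, an arbitrary <prefix>_<part_name>, and finally the whole name as prefix with valid_name = false. A compact sketch of that resolution order, with a deliberately simplified stand-in for the part-name check:

/// Hypothetical sketch, not ClickHouse code.
#include <array>
#include <cstddef>
#include <string_view>

constexpr auto known_prefixes = std::to_array<std::string_view>({"broken", "ignored", "clone"});

/// Stand-in for MergeTreePartInfo::tryParsePartName: a real part name has at
/// least partition_min_max_level, i.e. three '_' separators.
bool looksLikePartName(std::string_view s)
{
    size_t underscores = 0;
    for (char c : s)
        underscores += (c == '_');
    return underscores >= 3;
}

struct Resolved { std::string_view prefix; std::string_view part_name; bool valid; };

Resolved resolve(std::string_view dir_name)
{
    /// 1. <known_prefix>_<part_name>
    for (std::string_view known : known_prefixes)
        if (dir_name.size() > known.size() && dir_name.starts_with(known) && dir_name[known.size()] == '_')
            return {known, dir_name.substr(known.size() + 1), looksLikePartName(dir_name.substr(known.size() + 1))};

    /// 2. bare <part_name>
    if (looksLikePartName(dir_name))
        return {{}, dir_name, true};

    /// 3. arbitrary <prefix>_<part_name>; 4. otherwise the whole name is the prefix.
    const size_t sep = dir_name.find('_');
    if (sep == std::string_view::npos)
        return {dir_name, {}, false};
    return {dir_name.substr(0, sep), dir_name.substr(sep + 1), looksLikePartName(dir_name.substr(sep + 1))};
}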

View File

@ -1,8 +1,10 @@
#pragma once
#include <limits>
#include <optional>
#include <tuple>
#include <vector>
#include <array>
#include <common/types.h>
#include <common/DayNum.h>
#include <Storages/MergeTree/MergeTreeDataFormatVersion.h>
@ -98,7 +100,8 @@ struct MergeTreePartInfo
static MergeTreePartInfo fromPartName(const String & part_name, MergeTreeDataFormatVersion format_version); // -V1071
static bool tryParsePartName(const String & part_name, MergeTreePartInfo * part_info, MergeTreeDataFormatVersion format_version);
static std::optional<MergeTreePartInfo> tryParsePartName(
std::string_view part_name, MergeTreeDataFormatVersion format_version);
static void parseMinMaxDatesFromPartName(const String & part_name, DayNum & min_date, DayNum & max_date);
@ -122,11 +125,27 @@ struct DetachedPartInfo : public MergeTreePartInfo
/// If false, MergeTreePartInfo is in invalid state (directory name was not successfully parsed).
bool valid_name;
static const std::vector<String> DETACH_REASONS;
static constexpr auto DETACH_REASONS = std::to_array<std::string_view>({
"broken",
"unexpected",
"noquorum",
"ignored",
"broken-on-start",
"clone",
"attaching",
"deleting",
"tmp-fetch"
});
/// NOTE: It may parse part info incorrectly.
/// For example, if prefix contain '_' or if DETACH_REASONS doesn't contain prefix.
static bool tryParseDetachedPartName(const String & dir_name, DetachedPartInfo & part_info, MergeTreeDataFormatVersion format_version);
/// For example, if prefix contains '_' or if DETACH_REASONS doesn't contain prefix.
// This method has different semantics from MergeTreePartInfo::tryParsePartName.
// Detached parts are always parsed regardless of their validity.
// DetachedPartInfo::valid_name field specifies whether parsing was successful or not.
static DetachedPartInfo parseDetachedPartName(std::string_view dir_name, MergeTreeDataFormatVersion format_version);
private:
void addParsedPartInfo(const MergeTreePartInfo& part);
};
using DetachedPartsInfo = std::vector<DetachedPartInfo>;
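
Swapping the static const std::vector<String> for std::to_array<std::string_view> removes dynamic initialization and makes the table usable in constant expressions. A tiny sketch with a hypothetical REASONS table:

/// Hypothetical sketch, not ClickHouse code.
#include <array>
#include <string_view>

/// No dynamic initialization and no heap allocation, unlike the old
/// static const std::vector<String>.
inline constexpr auto REASONS = std::to_array<std::string_view>({"broken", "unexpected", "noquorum"});

static_assert(REASONS.size() == 3);
static_assert(REASONS[1] == "unexpected");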

View File

@ -193,10 +193,6 @@ void StorageDictionary::startup()
void StorageDictionary::removeDictionaryConfigurationFromRepository()
{
if (remove_repository_callback_executed)
return;
remove_repository_callback_executed = true;
remove_repository_callback.reset();
}
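
The atomic "already executed" flag is dropped because resetting the guard is already idempotent: a second reset() finds no callback and does nothing. A simplified single-threaded sketch of such a guard (a hypothetical ScopeGuard, not the real scope_guard type):

/// Hypothetical sketch, not ClickHouse code; assumes single-threaded access.
#include <functional>
#include <utility>

class ScopeGuard
{
public:
    explicit ScopeGuard(std::function<void()> fn = {}) : callback(std::move(fn)) {}
    ~ScopeGuard() { reset(); }

    /// Safe to call any number of times: after the first call the stored
    /// callback is empty and subsequent calls are no-ops.
    void reset()
    {
        if (callback)
        {
            auto fn = std::move(callback);
            callback = nullptr;
            fn();
        }
    }

private:
    std::function<void()> callback;
};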

View File

@ -73,7 +73,6 @@ private:
Poco::Timestamp update_time;
LoadablesConfigurationPtr configuration;
std::atomic<bool> remove_repository_callback_executed = false;
scope_guard remove_repository_callback;
void removeDictionaryConfigurationFromRepository();

View File

@ -183,7 +183,7 @@ Pipe StorageExecutable::read(
configuration.read_number_of_rows_from_process_output = true;
}
Pipe pipe(std::make_unique<ShellCommandSource>(context, format, std::move(sample_block), std::move(process), log, std::move(tasks), configuration, process_pool));
Pipe pipe(std::make_unique<ShellCommandSource>(context, format, std::move(sample_block), std::move(process), std::move(tasks), configuration, process_pool));
return pipe;
}

Some files were not shown because too many files have changed in this diff.