Merge branch 'master' of github.com:ClickHouse/ClickHouse into rabbit-fix

This commit is contained in:
kssenii 2021-09-12 13:56:09 +03:00
commit 4e5958d721
184 changed files with 1373 additions and 582 deletions

View File

@ -80,16 +80,16 @@ include (cmake/find/ccache.cmake)
# ccache ignore it.
option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling." OFF)
if (ENABLE_CHECK_HEAVY_BUILDS)
# set DATA (since RSS does not work since 2.6.x+) to 2G
# set DATA (since RSS does not work since 2.6.x+) to 5G
set (RLIMIT_DATA 5000000000)
# set VIRT (RLIMIT_AS) to 10G (DATA*10)
set (RLIMIT_AS 10000000000)
# set CPU time limit to 600 seconds
set (RLIMIT_CPU 600)
# set CPU time limit to 1000 seconds
set (RLIMIT_CPU 1000)
# gcc10/gcc10/clang -fsanitize=memory is too heavy
if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
set (RLIMIT_DATA 10000000000)
set (RLIMIT_DATA 10000000000) # 10G
endif()
set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER})

View File

@ -189,7 +189,7 @@ public:
~Pool();
/// Allocates connection.
Entry get(uint64_t wait_timeout);
Entry get(uint64_t wait_timeout = UINT64_MAX);
/// Allocates connection.
/// If database is not accessible, returns empty Entry object.

View File

@ -1,8 +1,10 @@
if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "")
if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
else()
set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
endif()
option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ON)
option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})
if (NOT ENABLE_EMBEDDED_COMPILER)
set (USE_EMBEDDED_COMPILER 0)

View File

@ -206,12 +206,14 @@ elseif(GTEST_SRC_DIR)
target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0)
endif()
if (USE_EMBEDDED_COMPILER)
function(add_llvm)
# ld: unknown option: --color-diagnostics
if (APPLE)
set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "")
endif ()
# Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
set (CMAKE_INSTALL_RPATH "ON")
set (LLVM_ENABLE_EH 1 CACHE INTERNAL "")
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
@ -219,13 +221,12 @@ if (USE_EMBEDDED_COMPILER)
# Need to use C++17 since the compilation is not possible with C++20 currently, due to ambiguous operator != etc.
# LLVM project will set its default value for the -std=... but our global setting from CMake will override it.
set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
set (CMAKE_CXX_STANDARD 17)
add_subdirectory (llvm/llvm)
set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
unset (CMAKE_CXX_STANDARD_bak)
endfunction()
if (USE_EMBEDDED_COMPILER)
add_llvm()
endif ()
if (USE_INTERNAL_LIBGSASL_LIBRARY)

View File

@ -3,10 +3,17 @@
# Provides: clickhouse-server
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Required-Start: $network
# Required-Stop: $network
# Should-Start: $time $network
# Should-Stop: $network
# Short-Description: Yandex clickhouse-server daemon
### END INIT INFO
#
# NOTES:
# - Should-* -- script can start if the listed facilities are missing, unlike Required-*
#
# For the documentation [1]:
#
# [1]: https://wiki.debian.org/LSBInitScripts
CLICKHOUSE_USER=clickhouse
CLICKHOUSE_GROUP=${CLICKHOUSE_USER}

View File

@ -1,7 +1,12 @@
[Unit]
Description=ClickHouse Server (analytic DBMS for big data)
Requires=network-online.target
After=network-online.target
# NOTE: After/Wants=time-sync.target is not enough, you need to ensure
# that the time was adjusted already; if you use systemd-timesyncd you are
# safe, but if you use ntp or some other daemon, you should configure it
# additionally.
After=time-sync.target network-online.target
Wants=time-sync.target
[Service]
Type=simple
@ -16,4 +21,5 @@ LimitNOFILE=500000
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE
[Install]
# ClickHouse should not start from the rescue shell (rescue.target).
WantedBy=multi-user.target

View File

@ -1,12 +1,12 @@
{
"docker/packager/deb": {
"name": "yandex/clickhouse-deb-builder",
"name": "clickhouse/deb-builder",
"dependent": [
"docker/packager/unbundled"
]
},
"docker/packager/binary": {
"name": "yandex/clickhouse-binary-builder",
"name": "clickhouse/binary-builder",
"dependent": [
"docker/test/split_build_smoke_test",
"docker/test/pvs",
@ -14,155 +14,150 @@
]
},
"docker/packager/unbundled": {
"name": "yandex/clickhouse-unbundled-builder",
"name": "clickhouse/unbundled-builder",
"dependent": []
},
"docker/test/compatibility/centos": {
"name": "yandex/clickhouse-test-old-centos",
"name": "clickhouse/test-old-centos",
"dependent": []
},
"docker/test/compatibility/ubuntu": {
"name": "yandex/clickhouse-test-old-ubuntu",
"name": "clickhouse/test-old-ubuntu",
"dependent": []
},
"docker/test/integration/base": {
"name": "yandex/clickhouse-integration-test",
"name": "clickhouse/integration-test",
"dependent": []
},
"docker/test/fuzzer": {
"name": "yandex/clickhouse-fuzzer",
"name": "clickhouse/fuzzer",
"dependent": []
},
"docker/test/performance-comparison": {
"name": "yandex/clickhouse-performance-comparison",
"name": "clickhouse/performance-comparison",
"dependent": []
},
"docker/test/pvs": {
"name": "yandex/clickhouse-pvs-test",
"name": "clickhouse/pvs-test",
"dependent": []
},
"docker/test/stateless": {
"name": "yandex/clickhouse-stateless-test",
"name": "clickhouse/stateless-test",
"dependent": [
"docker/test/stateful",
"docker/test/coverage",
"docker/test/unit"
]
},
"docker/test/stateless_pytest": {
"name": "yandex/clickhouse-stateless-pytest",
"dependent": []
},
"docker/test/stateful": {
"name": "yandex/clickhouse-stateful-test",
"name": "clickhouse/stateful-test",
"dependent": [
"docker/test/stress"
]
},
"docker/test/coverage": {
"name": "yandex/clickhouse-test-coverage",
"name": "clickhouse/test-coverage",
"dependent": []
},
"docker/test/unit": {
"name": "yandex/clickhouse-unit-test",
"name": "clickhouse/unit-test",
"dependent": []
},
"docker/test/stress": {
"name": "yandex/clickhouse-stress-test",
"name": "clickhouse/stress-test",
"dependent": []
},
"docker/test/split_build_smoke_test": {
"name": "yandex/clickhouse-split-build-smoke-test",
"name": "clickhouse/split-build-smoke-test",
"dependent": []
},
"docker/test/codebrowser": {
"name": "yandex/clickhouse-codebrowser",
"name": "clickhouse/codebrowser",
"dependent": []
},
"docker/test/integration/runner": {
"name": "yandex/clickhouse-integration-tests-runner",
"name": "clickhouse/integration-tests-runner",
"dependent": []
},
"docker/test/testflows/runner": {
"name": "yandex/clickhouse-testflows-runner",
"name": "clickhouse/testflows-runner",
"dependent": []
},
"docker/test/fasttest": {
"name": "yandex/clickhouse-fasttest",
"name": "clickhouse/fasttest",
"dependent": []
},
"docker/test/style": {
"name": "yandex/clickhouse-style-test",
"name": "clickhouse/style-test",
"dependent": []
},
"docker/test/integration/s3_proxy": {
"name": "yandex/clickhouse-s3-proxy",
"name": "clickhouse/s3-proxy",
"dependent": []
},
"docker/test/integration/resolver": {
"name": "yandex/clickhouse-python-bottle",
"name": "clickhouse/python-bottle",
"dependent": []
},
"docker/test/integration/helper_container": {
"name": "yandex/clickhouse-integration-helper",
"name": "clickhouse/integration-helper",
"dependent": []
},
"docker/test/integration/mysql_golang_client": {
"name": "yandex/clickhouse-mysql-golang-client",
"name": "clickhouse/mysql-golang-client",
"dependent": []
},
"docker/test/integration/mysql_java_client": {
"name": "yandex/clickhouse-mysql-java-client",
"name": "clickhouse/mysql-java-client",
"dependent": []
},
"docker/test/integration/mysql_js_client": {
"name": "yandex/clickhouse-mysql-js-client",
"name": "clickhouse/mysql-js-client",
"dependent": []
},
"docker/test/integration/mysql_php_client": {
"name": "yandex/clickhouse-mysql-php-client",
"name": "clickhouse/mysql-php-client",
"dependent": []
},
"docker/test/integration/postgresql_java_client": {
"name": "yandex/clickhouse-postgresql-java-client",
"name": "clickhouse/postgresql-java-client",
"dependent": []
},
"docker/test/integration/kerberos_kdc": {
"name": "yandex/clickhouse-kerberos-kdc",
"name": "clickhouse/kerberos-kdc",
"dependent": []
},
"docker/test/base": {
"name": "yandex/clickhouse-test-base",
"name": "clickhouse/test-base",
"dependent": [
"docker/test/stateless",
"docker/test/stateless_unbundled",
"docker/test/stateless_pytest",
"docker/test/integration/base",
"docker/test/fuzzer",
"docker/test/keeper-jepsen"
]
},
"docker/packager/unbundled": {
"name": "yandex/clickhouse-unbundled-builder",
"name": "clickhouse/unbundled-builder",
"dependent": [
"docker/test/stateless_unbundled"
]
},
"docker/test/stateless_unbundled": {
"name": "yandex/clickhouse-stateless-unbundled-test",
"name": "clickhouse/stateless-unbundled-test",
"dependent": [
]
},
"docker/test/integration/kerberized_hadoop": {
"name": "yandex/clickhouse-kerberized-hadoop",
"name": "clickhouse/kerberized-hadoop",
"dependent": []
},
"docker/test/sqlancer": {
"name": "yandex/clickhouse-sqlancer-test",
"name": "clickhouse/sqlancer-test",
"dependent": []
},
"docker/test/keeper-jepsen": {
"name": "yandex/clickhouse-keeper-jepsen-test",
"name": "clickhouse/keeper-jepsen-test",
"dependent": []
}
}

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-binary-builder .
# docker build -t clickhouse/binary-builder .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-deb-builder .
# docker build -t clickhouse/deb-builder .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

View File

@ -9,9 +9,9 @@ import sys
SCRIPT_PATH = os.path.realpath(__file__)
IMAGE_MAP = {
"deb": "yandex/clickhouse-deb-builder",
"binary": "yandex/clickhouse-binary-builder",
"unbundled": "yandex/clickhouse-unbundled-builder"
"deb": "clickhouse/deb-builder",
"binary": "clickhouse/binary-builder",
"unbundled": "clickhouse/unbundled-builder"
}
def check_image_exists_locally(image_name):

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-unbundled-builder .
FROM yandex/clickhouse-deb-builder
# docker build -t clickhouse/unbundled-builder .
FROM clickhouse/deb-builder
RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
&& wget -nv -O /tmp/arrow-keyring.deb "https://apache.jfrog.io/artifactory/arrow/ubuntu/apache-arrow-apt-source-latest-${CODENAME}.deb" \

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-test-base .
# docker build -t clickhouse/test-base .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

View File

@ -1,6 +1,6 @@
# docker build --network=host -t yandex/clickhouse-codebrowser .
# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output yandex/clickhouse-codebrowser
FROM yandex/clickhouse-binary-builder
# docker build --network=host -t clickhouse/codebrowser .
# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output clickhouse/codebrowser
FROM clickhouse/binary-builder
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-test-old-centos .
# docker build -t clickhouse/test-old-centos .
FROM centos:5
CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-test-old-ubuntu .
# docker build -t clickhouse/test-old-ubuntu .
FROM ubuntu:12.04
CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-test-coverage .
FROM yandex/clickhouse-stateless-test
# docker build -t clickhouse/test-coverage .
FROM clickhouse/stateless-test
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-fasttest .
# docker build -t clickhouse/fasttest .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-fuzzer .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/fuzzer .
FROM clickhouse/test-base
ENV LANG=C.UTF-8
ENV TZ=Europe/Moscow
@ -36,5 +36,5 @@ CMD set -o pipefail \
&& cd /workspace \
&& /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
# docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-fuzzer
# docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/fuzzer

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-integration-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/integration-test .
FROM clickhouse/test-base
SHELL ["/bin/bash", "-c"]

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-integration-helper .
# docker build -t clickhouse/integration-helper .
# Helper docker container to run iptables without sudo
FROM alpine

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-kerberized-hadoop .
# docker build -t clickhouse/kerberized-hadoop .
FROM sequenceiq/hadoop-docker:2.7.0
RUN sed -i -e 's/^\#baseurl/baseurl/' /etc/yum.repos.d/CentOS-Base.repo

View File

@ -1,9 +1,9 @@
# docker build -t yandex/clickhouse-kerberos-kdc .
# docker build -t clickhouse/kerberos-kdc .
FROM centos:6
FROM centos:6.6
# old OS to make it faster and smaller
RUN sed -i '/^mirrorlist/s/^/#/;/^#baseurl/{s/#//;s/mirror.centos.org\/centos\/$releasever/vault.centos.org\/6.10/}' /etc/yum.repos.d/*B*
RUN yum install -y krb5-server krb5-libs krb5-auth-dialog krb5-workstation
RUN yum install -y ca-certificates krb5-server krb5-libs krb5-auth-dialog krb5-workstation
EXPOSE 88 749

View File

@ -1,7 +1,7 @@
# docker build -t yandex/clickhouse-mysql-golang-client .
# docker build -t clickhouse/mysql-golang-client .
# MySQL golang client docker container
FROM golang:1.12.2
FROM golang:1.13
RUN go get "github.com/go-sql-driver/mysql"

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-mysql-java-client .
# docker build -t clickhouse/mysql-java-client .
# MySQL Java client docker container
FROM ubuntu:18.04

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-mysql-js-client .
# docker build -t clickhouse/mysql-js-client .
# MySQL JavaScript client docker container
FROM node:8

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-mysql-php-client .
# docker build -t clickhouse/mysql-php-client .
# MySQL PHP client docker container
FROM php:7.3-cli

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-postgresql-java-client .
# docker build -t clickhouse/postgresql-java-client .
# PostgreSQL Java client docker container
FROM ubuntu:18.04

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-python-bottle .
# docker build -t clickhouse/python-bottle .
# Helper docker container to run python bottle apps
FROM python:3

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-integration-tests-runner .
# docker build -t clickhouse/integration-tests-runner .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

View File

@ -1,7 +1,7 @@
version: '2.3'
services:
bridge1:
image: yandex/clickhouse-jdbc-bridge
image: clickhouse/jdbc-bridge
command: |
/bin/bash -c 'cat << EOF > config/datasources/self.json
{

View File

@ -1,7 +1,7 @@
version: '2.3'
services:
zoo1:
image: ${image:-yandex/clickhouse-integration-test}
image: ${image:-clickhouse/integration-test}
restart: always
user: ${user:-}
volumes:
@ -31,7 +31,7 @@ services:
- inet6
- rotate
zoo2:
image: ${image:-yandex/clickhouse-integration-test}
image: ${image:-clickhouse/integration-test}
restart: always
user: ${user:-}
volumes:
@ -61,7 +61,7 @@ services:
- inet6
- rotate
zoo3:
image: ${image:-yandex/clickhouse-integration-test}
image: ${image:-clickhouse/integration-test}
restart: always
user: ${user:-}
volumes:

View File

@ -4,7 +4,7 @@ services:
kerberizedhdfs1:
cap_add:
- DAC_READ_SEARCH
image: yandex/clickhouse-kerberized-hadoop:16621
image: clickhouse/kerberized-hadoop
hostname: kerberizedhdfs1
restart: always
volumes:
@ -22,7 +22,7 @@ services:
entrypoint: /etc/bootstrap.sh -d
hdfskerberos:
image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
hostname: hdfskerberos
volumes:
- ${KERBERIZED_HDFS_DIR}/secrets:/tmp/keytab

View File

@ -50,7 +50,7 @@ services:
- label:disable
kafka_kerberos:
image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
hostname: kafka_kerberos
volumes:
- ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab

View File

@ -19,14 +19,14 @@ services:
# HTTP proxies for Minio.
proxy1:
image: yandex/clickhouse-s3-proxy
image: clickhouse/s3-proxy
expose:
- "8080" # Redirect proxy port
- "80" # Reverse proxy port
- "443" # Reverse proxy port (secure)
proxy2:
image: yandex/clickhouse-s3-proxy
image: clickhouse/s3-proxy
expose:
- "8080"
- "80"
@ -34,7 +34,7 @@ services:
# Empty container to run proxy resolver.
resolver:
image: yandex/clickhouse-python-bottle
image: clickhouse/python-bottle
expose:
- "8080"
tty: true

View File

@ -1,6 +1,6 @@
version: '2.3'
services:
golang1:
image: yandex/clickhouse-mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
image: clickhouse/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity

View File

@ -1,6 +1,6 @@
version: '2.3'
services:
java1:
image: yandex/clickhouse-mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
image: clickhouse/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity

View File

@ -1,6 +1,6 @@
version: '2.3'
services:
mysqljs1:
image: yandex/clickhouse-mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
image: clickhouse/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity

View File

@ -1,6 +1,6 @@
version: '2.3'
services:
php1:
image: yandex/clickhouse-mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
image: clickhouse/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity

View File

@ -1,6 +1,6 @@
version: '2.2'
services:
java:
image: yandex/clickhouse-postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
image: clickhouse/postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-s3-proxy .
# docker build -t clickhouse/s3-proxy .
FROM nginx:alpine
COPY run.sh /run.sh

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-keeper-jepsen-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/keeper-jepsen-test .
FROM clickhouse/test-base
ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-performance-comparison .
# docker build -t clickhouse/performance-comparison .
FROM ubuntu:18.04
ENV LANG=C.UTF-8
@ -54,4 +54,4 @@ COPY * /
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\\(.*\\)nodes.*$/\\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"]
# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-performance-comparison
# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison

View File

@ -116,7 +116,7 @@ pull requests (0 for master) manually.
docker run --network=host --volume=$(pwd)/workspace:/workspace --volume=$(pwd)/output:/output
[-e REF_PR={} -e REF_SHA={}]
-e PR_TO_TEST={} -e SHA_TO_TEST={}
yandex/clickhouse-performance-comparison
clickhouse/performance-comparison
```
Then see the `report.html` in the `output` directory.

View File

@ -1,6 +1,6 @@
# docker build -t yandex/clickhouse-pvs-test .
# docker build -t clickhouse/pvs-test .
FROM yandex/clickhouse-binary-builder
FROM clickhouse/binary-builder
RUN apt-get update --yes \
&& apt-get install \

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-split-build-smoke-test .
FROM yandex/clickhouse-binary-builder
# docker build -t clickhouse/split-build-smoke-test .
FROM clickhouse/binary-builder
COPY run.sh /run.sh
COPY process_split_build_smoke_test_result.py /

View File

@ -1,9 +1,9 @@
# docker build -t yandex/clickhouse-sqlancer-test .
# docker build -t clickhouse/sqlancer-test .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git openjdk-14-jdk maven python3 --yes --no-install-recommends
RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git default-jdk maven python3 --yes --no-install-recommends
RUN wget https://github.com/sqlancer/sqlancer/archive/master.zip -O /sqlancer.zip
RUN mkdir /sqlancer && \
cd /sqlancer && \

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateful-test .
FROM yandex/clickhouse-stateless-test
# docker build -t clickhouse/stateful-test .
FROM clickhouse/stateless-test
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateless-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/stateless-test .
FROM clickhouse/test-base
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateless-pytest .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/stateless-pytest .
FROM clickhouse/test-base
RUN apt-get update -y && \
apt-get install -y --no-install-recommends \

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateless-unbundled-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/stateless-unbundled-test .
FROM clickhouse/test-base
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stress-test .
FROM yandex/clickhouse-stateful-test
# docker build -t clickhouse/stress-test .
FROM clickhouse/stateful-test
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \

View File

@ -6,7 +6,7 @@ Usage:
```
$ ls $HOME/someclickhouse
clickhouse-client_18.14.9_all.deb clickhouse-common-static_18.14.9_amd64.deb clickhouse-server_18.14.9_all.deb clickhouse-test_18.14.9_all.deb
$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output yandex/clickhouse-stress-test
$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output clickhouse/stress-test
Selecting previously unselected package clickhouse-common-static.
(Reading database ... 14442 files and directories currently installed.)
...

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-style-test .
# docker build -t clickhouse/style-test .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

View File

@ -49,7 +49,7 @@ fi
# Build server image (optional) from local packages
if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then
CLICKHOUSE_SERVER_IMAGE="yandex/clickhouse-server:local"
CLICKHOUSE_SERVER_IMAGE="clickhouse/server:local"
if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then
docker build --network=host \

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-testflows-runner .
# docker build -t clickhouse/testflows-runner .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

View File

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-unit-test .
FROM yandex/clickhouse-stateless-test
# docker build -t clickhouse/unit-test .
FROM clickhouse/stateless-test
RUN apt-get install gdb

View File

@ -210,4 +210,4 @@ ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-
## See also
- [S3 table function](../../../sql-reference/table-functions/s3.md)
- [s3 table function](../../../sql-reference/table-functions/s3.md)

View File

@ -3499,6 +3499,30 @@ Possible values:
Default value: `0`.
## replication_alter_partitions_sync {#replication-alter-partitions-sync}
Allows you to set up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
Possible values:
- 0 — Do not wait.
- 1 — Wait for own execution.
- 2 — Wait for everyone.
Default value: `1`.
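A minimal illustrative sketch (the table name `test_table` is assumed, not taken from this commit):

``` sql
-- Wait until every replica has executed the merge scheduled by OPTIMIZE.
SET replication_alter_partitions_sync = 2;
OPTIMIZE TABLE test_table FINAL;
```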
## replication_wait_for_inactive_replica_timeout {#replication-wait-for-inactive-replica-timeout}
Specifies how long (in seconds) to wait for inactive replicas to execute [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
Possible values:
- 0 — Do not wait.
- Negative integer — Wait for unlimited time.
- Positive integer — The number of seconds to wait.
Default value: `120` seconds.
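A hedged sketch combining both settings (the table and partition names are illustrative):

``` sql
-- Wait for all replicas, but give inactive ones at most 300 seconds.
SET replication_alter_partitions_sync = 2;
SET replication_wait_for_inactive_replica_timeout = 300;
ALTER TABLE test_table DROP PARTITION '2021-09-01';
```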
## regexp_max_matches_per_row {#regexp-max-matches-per-row}
Sets the maximum number of matches for a single regular expression per row. Use it to protect against memory overload when using a greedy regular expression in the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md#extractallgroups-horizontal) function.
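As a sketch (the sample data is illustrative, not from this commit), capping the match count before calling the function:

``` sql
-- Protect against memory overload from a greedy pattern.
SET regexp_max_matches_per_row = 10;
SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333', '(\\w+)=(\\w+)');
```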

View File

@ -87,7 +87,23 @@ The function is using uppercase letters `A-F` and not using any prefixes (like `
For integer arguments, it prints hex digits (“nibbles”) from the most significant to least significant (big-endian or “human-readable” order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints both digits of every byte even if the leading digit is zero.
**Example**
Values of type [Date](../../sql-reference/data-types/date.md) and [DateTime](../../sql-reference/data-types/datetime.md) are formatted as corresponding integers (the number of days since Epoch for Date and the value of Unix Timestamp for DateTime).
For [String](../../sql-reference/data-types/string.md) and [FixedString](../../sql-reference/data-types/fixedstring.md), all bytes are simply encoded as two hexadecimal numbers. Zero bytes are not omitted.
Values of [Float](../../sql-reference/data-types/float.md) and [Decimal](../../sql-reference/data-types/decimal.md) types are encoded as their representation in memory. As we support little-endian architecture, they are encoded in little-endian. Zero leading/trailing bytes are not omitted.
**Arguments**
- `arg` — A value to convert to hexadecimal. Types: [String](../../sql-reference/data-types/string.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md), [Decimal](../../sql-reference/data-types/decimal.md), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value**
- A string with the hexadecimal representation of the argument.
Type: [String](../../sql-reference/data-types/string.md).
**Examples**
Query:
@ -101,28 +117,10 @@ Result:
01
```
Values of type `Date` and `DateTime` are formatted as corresponding integers (the number of days since Epoch for Date and the value of Unix Timestamp for DateTime).
For `String` and `FixedString`, all bytes are simply encoded as two hexadecimal numbers. Zero bytes are not omitted.
Values of floating point and Decimal types are encoded as their representation in memory. As we support little-endian architecture, they are encoded in little-endian. Zero leading/trailing bytes are not omitted.
**Arguments**
- `arg` — A value to convert to hexadecimal. Types: [String](../../sql-reference/data-types/string.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md), [Decimal](../../sql-reference/data-types/decimal.md), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value**
- A string with the hexadecimal representation of the argument.
Type: `String`.
**Example**
Query:
``` sql
SELECT hex(toFloat32(number)) as hex_presentation FROM numbers(15, 2);
SELECT hex(toFloat32(number)) AS hex_presentation FROM numbers(15, 2);
```
Result:
@ -137,7 +135,7 @@ Result:
Query:
``` sql
SELECT hex(toFloat64(number)) as hex_presentation FROM numbers(15, 2);
SELECT hex(toFloat64(number)) AS hex_presentation FROM numbers(15, 2);
```
Result:
@ -210,52 +208,52 @@ Result:
Returns a string containing the argument's binary representation.
Alias: `BIN`.
**Syntax**
``` sql
bin(arg)
```
Alias: `BIN`.
For integer arguments, it prints binary digits from the most significant to least significant (big-endian or “human-readable” order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints eight digits of every byte even if the leading digit is zero.
**Example**
Values of type [Date](../../sql-reference/data-types/date.md) and [DateTime](../../sql-reference/data-types/datetime.md) are formatted as corresponding integers (the number of days since Epoch for `Date` and the value of Unix Timestamp for `DateTime`).
Query:
For [String](../../sql-reference/data-types/string.md) and [FixedString](../../sql-reference/data-types/fixedstring.md), all bytes are simply encoded as eight binary numbers. Zero bytes are not omitted.
``` sql
SELECT bin(1);
```
Result:
``` text
00000001
```
Values of type `Date` and `DateTime` are formatted as corresponding integers (the number of days since Epoch for Date and the value of Unix Timestamp for DateTime).
For `String` and `FixedString`, all bytes are simply encoded as eight binary numbers. Zero bytes are not omitted.
Values of floating-point and Decimal types are encoded as their representation in memory. As we support little-endian architecture, they are encoded in little-endian. Zero leading/trailing bytes are not omitted.
Values of [Float](../../sql-reference/data-types/float.md) and [Decimal](../../sql-reference/data-types/decimal.md) types are encoded as their representation in memory. As we support little-endian architecture, they are encoded in little-endian. Zero leading/trailing bytes are not omitted.
**Arguments**
- `arg` — A value to convert to binary. Types: [String](../../sql-reference/data-types/string.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md), [Decimal](../../sql-reference/data-types/decimal.md), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
- `arg` — A value to convert to binary. [String](../../sql-reference/data-types/string.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md), [Decimal](../../sql-reference/data-types/decimal.md), [Date](../../sql-reference/data-types/date.md), or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value**
- A string with the binary representation of the argument.
Type: `String`.
Type: [String](../../sql-reference/data-types/string.md).
**Example**
**Examples**
Query:
``` sql
SELECT bin(toFloat32(number)) as bin_presentation FROM numbers(15, 2);
SELECT bin(14);
```
Result:
``` text
┌─bin(14)──┐
│ 00001110 │
└──────────┘
```
Query:
``` sql
SELECT bin(toFloat32(number)) AS bin_presentation FROM numbers(15, 2);
```
Result:
@ -270,7 +268,7 @@ Result:
Query:
``` sql
SELECT bin(toFloat64(number)) as bin_presentation FROM numbers(15, 2);
SELECT bin(toFloat64(number)) AS bin_presentation FROM numbers(15, 2);
```
Result:
@ -284,14 +282,7 @@ Result:
## unbin {#unbinstr}
Performs the opposite operation of [bin](#bin). It interprets each pair of binary digits (in the argument) as a number and converts it to the byte represented by the number. The return value is a binary string (BLOB).
If you want to convert the result to a number, you can use the [reverse](../../sql-reference/functions/string-functions.md#reverse) and [reinterpretAs<Type>](../../sql-reference/functions/type-conversion-functions.md#type-conversion-functions) functions.
!!! note "Note"
If `unbin` is invoked from within the `clickhouse-client`, binary strings display using UTF-8.
Alias: `UNBIN`.
Interprets each pair of binary digits (in the argument) as a number and converts it to the byte represented by the number. The function performs the opposite operation to [bin](#bin).
**Syntax**
@ -299,11 +290,18 @@ Alias: `UNBIN`.
unbin(arg)
```
Alias: `UNBIN`.
For a numeric argument `unbin()` does not return the inverse of `bin()`. If you want to convert the result to a number, you can use the [reverse](../../sql-reference/functions/string-functions.md#reverse) and [reinterpretAs<Type>](../../sql-reference/functions/type-conversion-functions.md#reinterpretasuint8163264) functions.
!!! note "Note"
If `unbin` is invoked from within the `clickhouse-client`, binary strings are displayed using UTF-8.
Supports binary digits `0` and `1`. The number of binary digits does not have to be a multiple of eight. If the argument string contains anything other than binary digits, some implementation-defined result is returned (an exception isn't thrown).
**Arguments**
- `arg` — A string containing any number of binary digits. Type: [String](../../sql-reference/data-types/string.md).
Supports binary digits `0-1`. The number of binary digits does not have to be a multiple of eight. If the argument string contains anything other than binary digits, some implementation-defined result is returned (an exception isn't thrown). For a numeric argument the inverse of bin(N) is not performed by unbin().
- `arg` — A string containing any number of binary digits. [String](../../sql-reference/data-types/string.md).
**Returned value**
@ -311,7 +309,7 @@ Supports binary digits `0-1`. The number of binary digits does not have to be mu
Type: [String](../../sql-reference/data-types/string.md).
**Example**
**Examples**
Query:
@ -330,14 +328,14 @@ Result:
Query:
``` sql
SELECT reinterpretAsUInt64(reverse(unbin('1010'))) AS num;
SELECT reinterpretAsUInt64(reverse(unbin('1110'))) AS num;
```
Result:
``` text
┌─num─┐
│ 10
│ 14
└─────┘
```
@ -396,7 +394,7 @@ Result:
Query:
``` sql
select bitPositionsToArray(toInt8(-1)) as bit_positions;
SELECT bitPositionsToArray(toInt8(-1)) AS bit_positions;
```
Result:

View File

@ -59,6 +59,10 @@ A lambda function that accepts multiple arguments can also be passed to a higher
For some functions the first argument (the lambda function) can be omitted. In this case, identical mapping is assumed.
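An illustrative sketch (not part of this commit) of both forms:

``` sql
-- Explicit lambda:
SELECT arrayMap(x -> x * 2, [1, 2, 3]) AS doubled;
-- Lambda omitted: identity mapping is assumed.
SELECT arraySort([3, 1, 2]) AS sorted;
```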
## User Defined Functions {#user-defined-functions}
Custom functions can be created using the [CREATE FUNCTION](../statements/create/function.md) statement. To delete these functions use the [DROP FUNCTION](../statements/drop.md#drop-function) statement.
## Error Handling {#error-handling}
Some functions might throw an exception if the data is invalid. In this case, the query is canceled and an error text is returned to the client. For distributed processing, when an exception occurs on one of the servers, the other servers also attempt to abort the query.

View File

@ -78,7 +78,7 @@ mapAdd(arg1, arg2 [, ...])
**Arguments**
Arguments are [maps](../../sql-reference/data-types/map.md) or [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for each key. All key arrays should have the same type, and all value arrays should contain items which are promote to the one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as a type for the result array.
Arguments are [maps](../../sql-reference/data-types/map.md) or [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for each key. All key arrays should have the same type, and all value arrays should contain items which are promoted to one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as a type for the result array.
**Returned value**
@ -86,7 +86,7 @@ Arguments are [maps](../../sql-reference/data-types/map.md) or [tuples](../../sq
**Example**
Query with a tuple map:
Query with a tuple:
```sql
SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res) as type;

View File

@ -43,7 +43,11 @@ Entries for finished mutations are not deleted right away (the number of preserv
For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas.
For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_partitions_sync` setting to set up waiting. Possible values: `0` do not wait; `1` only wait for own execution (default); `2` wait for all.
For all `ALTER` queries, you can use the [replication_alter_partitions_sync](../../../operations/settings/settings.md#replication-alter-partitions-sync) setting to set up waiting.
You can specify how long (in seconds) to wait for inactive replicas to execute all `ALTER` queries with the [replication_wait_for_inactive_replica_timeout](../../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
For all `ALTER` queries, if `replication_alter_partitions_sync = 2` and some replicas are not active for longer than the time specified in the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown.
For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting.
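A minimal sketch (the table and condition are assumptions) of a synchronous mutation:

``` sql
-- Wait until the mutation finishes on all replicas.
SET mutations_sync = 2;
ALTER TABLE test_table DELETE WHERE value = 0;
```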

View File

@ -0,0 +1,59 @@
---
toc_priority: 38
toc_title: FUNCTION
---
# CREATE FUNCTION {#create-function}
Creates a user defined function from a lambda expression. The expression must consist of function parameters, constants, operators, or other function calls.
**Syntax**
```sql
CREATE FUNCTION name AS (parameter0, ...) -> expression
```
A function can have an arbitrary number of parameters.
There are a few restrictions:
- The name of a function must be unique among user defined and system functions.
- Recursive functions are not allowed.
- All variables used by a function must be specified in its parameter list.
If any restriction is violated, an exception is raised.
**Example**
Query:
```sql
CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;
SELECT number, linear_equation(number, 2, 1) FROM numbers(3);
```
Result:
``` text
┌─number─┬─plus(multiply(2, number), 1)─┐
│ 0 │ 1 │
│ 1 │ 3 │
│ 2 │ 5 │
└────────┴──────────────────────────────┘
```
A [conditional function](../../../sql-reference/functions/conditional-functions.md) is called in a user defined function in the following query:
```sql
CREATE FUNCTION parity_str AS (n) -> if(n % 2, 'odd', 'even');
SELECT number, parity_str(number) FROM numbers(3);
```
Result:
``` text
┌─number─┬─if(modulo(number, 2), 'odd', 'even')─┐
│ 0 │ even │
│ 1 │ odd │
│ 2 │ even │
└────────┴──────────────────────────────────────┘
```

View File

@ -12,6 +12,7 @@ Create queries make a new entity of one of the following kinds:
- [TABLE](../../../sql-reference/statements/create/table.md)
- [VIEW](../../../sql-reference/statements/create/view.md)
- [DICTIONARY](../../../sql-reference/statements/create/dictionary.md)
- [FUNCTION](../../../sql-reference/statements/create/function.md)
- [USER](../../../sql-reference/statements/create/user.md)
- [ROLE](../../../sql-reference/statements/create/role.md)
- [ROW POLICY](../../../sql-reference/statements/create/row-policy.md)

View File

@ -97,4 +97,20 @@ Syntax:
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster]
```
[Оriginal article](https://clickhouse.tech/docs/en/sql-reference/statements/drop/) <!--hide-->
## DROP FUNCTION {#drop-function}
Deletes a user defined function created by [CREATE FUNCTION](./create/function.md).
System functions cannot be dropped.
**Syntax**
``` sql
DROP FUNCTION [IF EXISTS] function_name
```
**Example**
``` sql
CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;
DROP FUNCTION linear_equation;
```

View File

@ -107,11 +107,13 @@ Hierarchy of privileges:
- `CREATE TEMPORARY TABLE`
- `CREATE VIEW`
- `CREATE DICTIONARY`
- `CREATE FUNCTION`
- [DROP](#grant-drop)
- `DROP DATABASE`
- `DROP TABLE`
- `DROP VIEW`
- `DROP DICTIONARY`
- `DROP FUNCTION`
- [TRUNCATE](#grant-truncate)
- [OPTIMIZE](#grant-optimize)
- [SHOW](#grant-show)

View File

@ -18,13 +18,17 @@ OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION I
The `OPTIMIZE` query is supported for the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren't supported.
When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled).
When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all replicas (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `2`) or on the current replica (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `1`).
- If `OPTIMIZE` does not perform a merge for any reason, it does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](../../sql-reference/statements/alter/index.md#alter-how-to-specify-part-expr).
- If you specify `FINAL`, optimization is performed even when all the data is already in one part. Also merge is forced even if concurrent merges are performed.
- If you specify `DEDUPLICATE`, then completely identical rows (unless by-clause is specified) will be deduplicated (all columns are compared), it makes sense only for the MergeTree engine.
You can specify how long (in seconds) to wait for inactive replicas to execute `OPTIMIZE` queries with the [replication_wait_for_inactive_replica_timeout](../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
If `replication_alter_partitions_sync` is set to `2` and some replicas are not active for longer than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown.
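For example, a minimal sketch with an assumed table name:

``` sql
-- Force a merge even if all data is already in one part, and remove fully identical rows.
OPTIMIZE TABLE test_table FINAL DEDUPLICATE;
```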
## BY expression {#by-expression}

View File

@ -6,7 +6,7 @@ toc_title: WHERE
The `WHERE` clause allows you to filter the data coming from the [FROM](../../../sql-reference/statements/select/from.md) clause of `SELECT`.
If there is a `WHERE` clause, it must contain an expression with the `UInt8` type. This is usually an expression with comparison and logical operators. Rows where this expression evaluates to 0 are expluded from further transformations or result.
If there is a `WHERE` clause, it must contain an expression with the `UInt8` type. This is usually an expression with comparison and logical operators. Rows where this expression evaluates to 0 are excluded from further transformations or result.
The `WHERE` expression is evaluated for the ability to use indexes and partition pruning, if the underlying table engine supports that.

View File

@ -12,3 +12,10 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
Removes all data from a table. When the clause `IF EXISTS` is omitted, the query returns an error if the table does not exist.
The `TRUNCATE` query is not supported for [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md), [Buffer](../../engines/table-engines/special/buffer.md) and [Null](../../engines/table-engines/special/null.md) table engines.
You can use the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting to set up waiting for actions to be executed on replicas.
You can specify how long (in seconds) to wait for inactive replicas to execute `TRUNCATE` queries with the [replication_wait_for_inactive_replica_timeout](../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
If `replication_alter_partitions_sync` is set to `2` and some replicas are not active for longer than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown.
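A hedged sketch of a waiting `TRUNCATE` (the table name is illustrative):

``` sql
SET replication_alter_partitions_sync = 2;                -- wait for all replicas
SET replication_wait_for_inactive_replica_timeout = 120;  -- seconds to wait for inactive ones
TRUNCATE TABLE test_table;
```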

View File

@ -3,7 +3,7 @@ toc_priority: 45
toc_title: s3
---
# S3 Table Function {#s3-table-function}
# s3 Table Function {#s3-table-function}
Provides a table-like interface to select/insert files in [Amazon S3](https://aws.amazon.com/s3/). This table function is similar to [hdfs](../../sql-reference/table-functions/hdfs.md), but provides S3-specific features.
@ -125,6 +125,30 @@ INSERT INTO FUNCTION s3('https://storage.yandexcloud.net/my-test-bucket-768/test
SELECT name, value FROM existing_table;
```
## Partitioned Write {#partitioned-write}
If you specify a `PARTITION BY` expression when inserting data into an `S3` table, a separate file is created for each partition value. Splitting the data into separate files helps to improve read efficiency.
**Examples**
1. Using partition ID in a key creates separate files:
```sql
INSERT INTO TABLE FUNCTION
s3('http://bucket.amazonaws.com/my_bucket/file_{_partition_id}.csv', 'CSV', 'a String, b UInt32, c UInt32')
PARTITION BY a VALUES ('x', 2, 3), ('x', 4, 5), ('y', 11, 12), ('y', 13, 14), ('z', 21, 22), ('z', 23, 24);
```
As a result, the data is written into three files: `file_x.csv`, `file_y.csv`, and `file_z.csv`.
2. Using partition ID in a bucket name creates files in different buckets:
```sql
INSERT INTO TABLE FUNCTION
s3('http://bucket.amazonaws.com/my_bucket_{_partition_id}/file.csv', 'CSV', 'a UInt32, b UInt32, c UInt32')
PARTITION BY a VALUES (1, 2, 3), (1, 4, 5), (10, 11, 12), (10, 13, 14), (20, 21, 22), (20, 23, 24);
```
As a result, the data is written into three files in different buckets: `my_bucket_1/file.csv`, `my_bucket_10/file.csv`, and `my_bucket_20/file.csv`.
**See Also**
- [S3 engine](../../engines/table-engines/integrations/s3.md)

View File

@ -151,4 +151,4 @@ ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-
**See also**
- [S3 table function](../../../sql-reference/table-functions/s3.md)
- [s3 table function](../../../sql-reference/table-functions/s3.md)

View File

@ -3308,6 +3308,30 @@ SETTINGS index_granularity = 8192 │
Default value: `0`.
## replication_alter_partitions_sync {#replication-alter-partitions-sync}
Allows you to set up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
Possible values:
- 0 — Do not wait.
- 1 — Wait for execution on own replica.
- 2 — Wait for execution on all replicas.
Default value: `1`.
## replication_wait_for_inactive_replica_timeout {#replication-wait-for-inactive-replica-timeout}
Specifies how long (in seconds) to wait for inactive replicas to execute [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
Possible values:
- 0 — Do not wait.
- Negative integer — Wait for unlimited time.
- Positive integer — The number of seconds to wait.
Default value: `120` seconds.
## regexp_max_matches_per_row {#regexp-max-matches-per-row}
Sets the maximum number of matches for a single regular expression per row. Use it to protect against memory overload when using greedy quantifiers in a regular expression for the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md#extractallgroups-horizontal) function.
@ -3316,4 +3340,4 @@ SETTINGS index_granularity = 8192 │
- Positive integer.
Default value: `1000`.
Default value: `1000`.

View File

@ -17,13 +17,13 @@ char(number_1, [number_2, ..., number_n]);
**Arguments**
- `number_1, number_2, ..., number_n` — Numeric arguments interpreted as integers. Types: [Int](../../sql-reference/functions/encoding-functions.md), [Float](../../sql-reference/functions/encoding-functions.md).
- `number_1, number_2, ..., number_n` — Numeric arguments interpreted as integers. Types: [Int](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md).
**Returned value**
- A string of the corresponding bytes.
Type: `String`.
Type: [String](../../sql-reference/data-types/string.md).
**Example**
@ -73,61 +73,57 @@ SELECT char(0xE4, 0xBD, 0xA0, 0xE5, 0xA5, 0xBD) AS hello;
## hex {#hex}
Returns a string containing the argument's hexadecimal representation.
Returns a string containing the hexadecimal representation of the argument.
Alias: `HEX`.
**Syntax**
**Syntax**
``` sql
hex(arg)
```
The function is using uppercase letters `A-F` and not using any prefixes (like `0x`) or suffixes (like `h`).
The function uses uppercase letters `A-F` and does not use any prefixes (such as `0x`) or suffixes (such as `h`).
For integer arguments, it prints hex digits («nibbles») from the most significant to least significant (big endian or «human readable» order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints both digits of every byte even if leading digit is zero.
For integer arguments, it returns hexadecimal digits from the most significant to the least significant (`big endian`, human-readable order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints both digits of every byte, even if the leading digit is zero.
Example:
Values of type [Date](../../sql-reference/data-types/date.md) and [DateTime](../../sql-reference/data-types/datetime.md) are formatted as the corresponding integers (the number of days since the Unix epoch for `Date` and the value of the Unix timestamp for `DateTime`).
**Example**
For [String](../../sql-reference/data-types/string.md) and [FixedString](../../sql-reference/data-types/fixedstring.md), all bytes are simply encoded as two hexadecimal numbers. Zero bytes are not omitted.
Query:
Values of [Float](../../sql-reference/data-types/float.md) and [Decimal](../../sql-reference/data-types/decimal.md) types are encoded as their in-memory representation. Since ClickHouse supports the `little-endian` architecture, they are encoded from the least significant byte to the most significant. Leading/trailing zero bytes are not omitted.
**Arguments**
- `arg` — A value to convert to hexadecimal. [String](../../sql-reference/data-types/string.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md), [Decimal](../../sql-reference/data-types/decimal.md), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value**
- A string with the hexadecimal representation of the argument.
Type: [String](../../sql-reference/data-types/string.md).
**Examples**
Query:
``` sql
SELECT hex(1);
```
Result:
Result:
``` text
01
```
Values of type `Date` and `DateTime` are formatted as corresponding integers (the number of days since Epoch for Date and the value of Unix Timestamp for DateTime).
For `String` and `FixedString`, all bytes are simply encoded as two hexadecimal numbers. Zero bytes are not omitted.
Values of floating point and Decimal types are encoded as their representation in memory. As we support little endian architecture, they are encoded in little endian. Zero leading/trailing bytes are not omitted.
**Parameters**
- `arg` — A value to convert to hexadecimal. Types: [String](../../sql-reference/functions/encoding-functions.md), [UInt](../../sql-reference/functions/encoding-functions.md), [Float](../../sql-reference/functions/encoding-functions.md), [Decimal](../../sql-reference/functions/encoding-functions.md), [Date](../../sql-reference/functions/encoding-functions.md) or [DateTime](../../sql-reference/functions/encoding-functions.md).
**Returned value**
- A string with the hexadecimal representation of the argument.
Type: `String`.
**Example**
Query:
Query:
``` sql
SELECT hex(toFloat32(number)) as hex_presentation FROM numbers(15, 2);
SELECT hex(toFloat32(number)) AS hex_presentation FROM numbers(15, 2);
```
Result:
Result:
``` text
┌─hex_presentation─┐
@ -136,13 +132,13 @@ Result:
└──────────────────┘
```
Query:
Query:
``` sql
SELECT hex(toFloat64(number)) as hex_presentation FROM numbers(15, 2);
SELECT hex(toFloat64(number)) AS hex_presentation FROM numbers(15, 2);
```
Result:
Result:
``` text
┌─hex_presentation─┐
@ -208,6 +204,141 @@ SELECT reinterpretAsUInt64(reverse(unhex('FFF'))) AS num;
└──────┘
```
## bin {#bin}
Returns a string containing the binary representation of the argument.
**Syntax**
``` sql
bin(arg)
```
Alias: `BIN`.
For integer arguments, it returns binary digits from the most significant to the least significant (`big-endian`, human-readable order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always returns eight digits of every byte, even if the leading digit is zero.
Values of type [Date](../../sql-reference/data-types/date.md) and [DateTime](../../sql-reference/data-types/datetime.md) are formatted as the corresponding integers (the number of days since the Unix epoch for `Date` and the value of the Unix timestamp for `DateTime`).
For [String](../../sql-reference/data-types/string.md) and [FixedString](../../sql-reference/data-types/fixedstring.md), all bytes are encoded as eight binary numbers. Zero bytes are not omitted.
Values of [Float](../../sql-reference/data-types/float.md) and [Decimal](../../sql-reference/data-types/decimal.md) types are encoded as their in-memory representation. Since ClickHouse supports the `little-endian` architecture, they are encoded from the least significant byte to the most significant. Leading/trailing zero bytes are not omitted.
**Arguments**
- `arg` — A value to convert to binary. [String](../../sql-reference/data-types/string.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md), [Decimal](../../sql-reference/data-types/decimal.md), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value**
- A binary string (BLOB) with the binary representation of the argument.
Type: [String](../../sql-reference/data-types/string.md).
**Examples**
Query:
``` sql
SELECT bin(14);
```
Result:
``` text
┌─bin(14)──┐
│ 00001110 │
└──────────┘
```
Query:
``` sql
SELECT bin(toFloat32(number)) AS bin_presentation FROM numbers(15, 2);
```
Result:
``` text
┌─bin_presentation─────────────────┐
│ 00000000000000000111000001000001 │
│ 00000000000000001000000001000001 │
└──────────────────────────────────┘
```
Query:
``` sql
SELECT bin(toFloat64(number)) AS bin_presentation FROM numbers(15, 2);
```
Result:
``` text
┌─bin_presentation─────────────────────────────────────────────────┐
│ 0000000000000000000000000000000000000000000000000010111001000000 │
│ 0000000000000000000000000000000000000000000000000011000001000000 │
└──────────────────────────────────────────────────────────────────┘
```
## unbin {#unbinstr}
Interprets each pair of binary digits in the argument as a number and converts it to the byte represented by that number. The function performs the opposite operation to [bin](#bin).
**Syntax**
``` sql
unbin(arg)
```
Alias: `UNBIN`.
For a numeric argument, `unbin()` does not return the inverse of `bin()`. To convert the result to a number, use the [reverse](../../sql-reference/functions/string-functions.md#reverse) and [reinterpretAs<Type>](../../sql-reference/functions/type-conversion-functions.md#reinterpretasuint8163264) functions.
!!! note "Note"
    If `unbin` is invoked from within the `clickhouse-client`, binary strings are displayed using UTF-8.
Supports the binary digits `0` and `1`. The number of binary digits does not have to be a multiple of eight. If the argument string contains anything other than binary digits, some implementation-defined result is returned (no exception is thrown).
**Arguments**
- `arg` — a string containing any number of binary digits. [String](../../sql-reference/data-types/string.md).
**Returned value**
- A binary string (BLOB).
Type: [String](../../sql-reference/data-types/string.md).
**Examples**
Query:
``` sql
SELECT UNBIN('001100000011000100110010'), UNBIN('0100110101111001010100110101000101001100');
```
Result:
``` text
┌─unbin('001100000011000100110010')─┬─unbin('0100110101111001010100110101000101001100')─┐
│ 012 │ MySQL │
└───────────────────────────────────┴───────────────────────────────────────────────────┘
```
Query:
``` sql
SELECT reinterpretAsUInt64(reverse(unbin('1110'))) AS num;
```
Result:
``` text
┌─num─┐
│ 14 │
└─────┘
```
## UUIDStringToNum(str) {#uuidstringtonumstr}
Accepts a string containing 36 characters in the format `123e4567-e89b-12d3-a456-426655440000`, and returns it as a set of bytes in a FixedString(16).
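A hedged sketch using the format example above (the raw 16-byte result is not printable as text):

``` sql
-- Returns the UUID packed into 16 raw bytes (FixedString(16)).
SELECT UUIDStringToNum('123e4567-e89b-12d3-a456-426655440000') AS bytes;
```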
@ -263,7 +394,7 @@ SELECT bitPositionsToArray(toInt8(1)) AS bit_positions;
Query:
``` sql
SELECT bitPositionsToArray(toInt8(-1)) AS bit_positions;
```
Result:


@ -58,6 +58,10 @@ str -> str != Referer
For some functions the first argument (the lambda function) can be omitted. In this case, the identity mapping is assumed.
## User Defined Functions {#user-defined-functions}
Functions can be created with the [CREATE FUNCTION](../statements/create/function.md) statement. To delete these functions, use the [DROP FUNCTION](../statements/drop.md#drop-function) statement.
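As a minimal sketch (the function name `plus_one` is illustrative):

``` sql
CREATE FUNCTION plus_one AS (x) -> x + 1;
SELECT plus_one(41); -- returns 42
DROP FUNCTION plus_one;
```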
## Error Handling {#obrabotka-oshibok}
Some functions can throw an exception if the data is invalid. In this case, the query is cancelled and an error text is returned to the client. In distributed query processing, when an exception occurs on one of the servers, the other servers are also asked to abort the query.


@ -73,22 +73,22 @@ SELECT a['key2'] FROM table_map;
**Syntax**
``` sql
mapAdd(arg1, arg2 [, ...])
```
**Arguments**
Arguments are [maps](../../sql-reference/data-types/map.md) or [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where the items in the first array represent the keys and the second array contains the values for each key.
All key arrays must have the same type, and all value arrays must contain items that can be cast to a common type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)).
The common promoted type is used as the type of the resulting array.
**Returned value**
- Depending on the argument types, returns one [map](../../sql-reference/data-types/map.md) or [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), where the first array contains the sorted keys and the second array contains the values.
**Example**
Query with a tuple:
``` sql
SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res) as type;
@ -102,6 +102,20 @@ SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTy
└───────────────┴────────────────────────────────────┘
```
Query with a `Map`:
```sql
SELECT mapAdd(map(1,1), map(1,1));
```
Result:
```text
┌─mapAdd(map(1, 1), map(1, 1))─┐
│ {1:2} │
└──────────────────────────────┘
```
## mapSubtract {#function-mapsubtract}
Collects all the keys and subtracts the corresponding values.
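Its signature mirrors `mapAdd`; a hedged sketch of the tuple form (the result shown assumes element-wise subtraction with the common promoted value type):

``` sql
SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) AS res, toTypeName(res) AS type;
```

``` text
┌─res────────────┬─type──────────────────────────────┐
│ ([1,2],[-1,0]) │ Tuple(Array(UInt8), Array(Int64)) │
└────────────────┴───────────────────────────────────┘
```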


@ -64,8 +64,11 @@ ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas.
For all `ALTER` queries, you can configure waiting with the [replication_alter_partitions_sync](../../../operations/settings/settings.md#replication-alter-partitions-sync) setting.
You can specify how long (in seconds) to wait for inactive replicas to execute all `ALTER` queries with the [replication_wait_for_inactive_replica_timeout](../../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
    For all `ALTER` queries, if `replication_alter_partitions_sync = 2` and some replicas are not active for more than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an `UNFINISHED` exception is thrown.
For `ALTER TABLE ... UPDATE|DELETE` queries, the synchronicity is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting.
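A hedged sketch of combining these settings (the table and partition names are hypothetical):

``` sql
-- Wait until the DROP PARTITION has been executed on every replica,
-- giving inactive replicas at most 120 seconds before UNFINISHED is thrown.
SET replication_alter_partitions_sync = 2;
SET replication_wait_for_inactive_replica_timeout = 120;
ALTER TABLE test_table DROP PARTITION '2021-09-01';
```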


@ -0,0 +1,59 @@
---
toc_priority: 38
toc_title: FUNCTION
---
# CREATE FUNCTION {#create-function}
Creates a user defined function from a lambda expression. The expression must consist of function parameters, constants, operators and calls to other functions.
**Syntax**
```sql
CREATE FUNCTION name AS (parameter0, ...) -> expression
```
A function can have an arbitrary number of parameters.
There are a few restrictions on the functions that can be created:
- The function name must be unique among all user defined and system functions.
- Recursive functions are not allowed.
- All variables used by a function must be specified in its parameter list.
If any restriction is violated, an exception is thrown when attempting to create the function.
**Example**
Query:
```sql
CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;
SELECT number, linear_equation(number, 2, 1) FROM numbers(3);
```
Result:
``` text
┌─number─┬─plus(multiply(2, number), 1)─┐
│ 0 │ 1 │
│ 1 │ 3 │
│ 2 │ 5 │
└────────┴──────────────────────────────┘
```
In the following query, the user defined function calls a [conditional function](../../../sql-reference/functions/conditional-functions.md):
```sql
CREATE FUNCTION parity_str AS (n) -> if(n % 2, 'odd', 'even');
SELECT number, parity_str(number) FROM numbers(3);
```
Result:
``` text
┌─number─┬─if(modulo(number, 2), 'odd', 'even')─┐
│ 0 │ even │
│ 1 │ odd │
│ 2 │ even │
└────────┴──────────────────────────────────────┘
```


@ -12,6 +12,7 @@ toc_title: "Обзор"
- [TABLE](../../../sql-reference/statements/create/table.md)
- [VIEW](../../../sql-reference/statements/create/view.md)
- [DICTIONARY](../../../sql-reference/statements/create/dictionary.md)
- [FUNCTION](../../../sql-reference/statements/create/function.md)
- [USER](../../../sql-reference/statements/create/user.md)
- [ROLE](../../../sql-reference/statements/create/role.md)
- [ROW POLICY](../../../sql-reference/statements/create/row-policy.md)


@ -97,3 +97,20 @@ DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster]
```
## DROP FUNCTION {#drop-function}
Deletes a user defined function created by [CREATE FUNCTION](./create/function.md).
System functions cannot be deleted.
**Syntax**
``` sql
DROP FUNCTION [IF EXISTS] function_name
```
**Example**
``` sql
CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;
DROP FUNCTION linear_equation;
```


@ -109,11 +109,13 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION
- `CREATE TEMPORARY TABLE`
- `CREATE VIEW`
- `CREATE DICTIONARY`
- `CREATE FUNCTION`
- [DROP](#grant-drop)
- `DROP DATABASE`
- `DROP TABLE`
- `DROP VIEW`
- `DROP DICTIONARY`
- `DROP FUNCTION`
- [TRUNCATE](#grant-truncate)
- [OPTIMIZE](#grant-optimize)
- [SHOW](#grant-show)


@ -18,7 +18,7 @@ OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION I
It can be applied to tables of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, to [MaterializedView](../../engines/table-engines/special/materializedview.md) and to [Buffer](../../engines/table-engines/special/buffer.md). Other table engines are not supported.
When `OPTIMIZE` is used with tables of the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family, ClickHouse creates a task for merging and waits for its execution on all replicas (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `2`) or on the current replica (if it is set to `1`).
- By default, if an `OPTIMIZE` query did not perform a merge, ClickHouse does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
@ -26,6 +26,11 @@ ClickHouse не оповещает клиента. Чтобы включить
- If you specify `FINAL`, optimization is performed even when all the data is already in one part. Also, the merge is forced even if concurrent merges are being performed.
- If you specify `DEDUPLICATE`, then completely identical rows are collapsed (all columns are compared); this makes sense only for the MergeTree engine.
You can specify how long (in seconds) to wait for inactive replicas to execute `OPTIMIZE` queries with the [replication_wait_for_inactive_replica_timeout](../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
    If `replication_alter_partitions_sync` is set to `2` and some replicas are not active for more than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an `UNFINISHED` exception is thrown.
## BY expression {#by-expression}
To perform deduplication on a custom set of columns, you can explicitly specify the list of columns or use any combination of the [`*`](../../sql-reference/statements/select/index.md#asterisk) asterisk, the [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) expression and the [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier) modifier.
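A hedged illustration (`example_table` and `updated_at` are hypothetical names):

``` sql
-- Deduplicate by every column matched by the regular expression,
-- excluding the bookkeeping column `updated_at`.
OPTIMIZE TABLE example_table FINAL DEDUPLICATE BY COLUMNS('.*') EXCEPT updated_at;
```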


@ -13,4 +13,9 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
The `TRUNCATE` query is not supported for the [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md), [Buffer](../../engines/table-engines/special/buffer.md) and [Null](../../engines/table-engines/special/null.md) table engines.
You can configure waiting for the actions to be executed on the replicas with the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting.
You can specify how long (in seconds) to wait for inactive replicas to execute `TRUNCATE` queries with the [replication_wait_for_inactive_replica_timeout](../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
!!! info "Note"
    If `replication_alter_partitions_sync` is set to `2` and some replicas are not active for more than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an `UNFINISHED` exception is thrown.
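A hedged sketch (the table name is hypothetical):

``` sql
-- Wait for TRUNCATE to finish on every replica, giving inactive
-- replicas at most 60 seconds before UNFINISHED is thrown.
SET replication_alter_partitions_sync = 2;
SET replication_wait_for_inactive_replica_timeout = 60;
TRUNCATE TABLE test_table;
```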


@ -133,6 +133,30 @@ INSERT INTO FUNCTION s3('https://storage.yandexcloud.net/my-test-bucket-768/test
SELECT name, value FROM existing_table;
```
## Partitioned write {#partitioned-write}
If you specify a `PARTITION BY` expression when inserting data into an S3 table, a separate file is created for each value of the partitioning key. Splitting the data into separate files improves the efficiency of read operations.
**Examples**
1. Using the partition ID in the key name creates separate files:
```sql
INSERT INTO TABLE FUNCTION
s3('http://bucket.amazonaws.com/my_bucket/file_{_partition_id}.csv', 'CSV', 'a UInt32, b UInt32, c UInt32')
PARTITION BY a VALUES ('x', 2, 3), ('x', 4, 5), ('y', 11, 12), ('y', 13, 14), ('z', 21, 22), ('z', 23, 24);
```
As a result, the data is written into three files: `file_x.csv`, `file_y.csv` and `file_z.csv`.
2. Using the partition ID in the bucket name creates files in different buckets:
```sql
INSERT INTO TABLE FUNCTION
s3('http://bucket.amazonaws.com/my_bucket_{_partition_id}/file.csv', 'CSV', 'a UInt32, b UInt32, c UInt32')
PARTITION BY a VALUES (1, 2, 3), (1, 4, 5), (10, 11, 12), (10, 13, 14), (20, 21, 22), (20, 23, 24);
```
As a result, three files are created in different buckets: `my_bucket_1/file.csv`, `my_bucket_10/file.csv` and `my_bucket_20/file.csv`.
**See also**
- [S3 table engine](../../engines/table-engines/integrations/s3.md)


@ -1031,19 +1031,30 @@ private:
if (server_exception)
{
bool print_stack_trace = config().getBool("stacktrace", false);
std::cerr << "Received exception from server (version " << server_version << "):" << std::endl
<< getExceptionMessage(*server_exception, print_stack_trace, true) << std::endl;
fmt::print(stderr, "Received exception from server (version {}):\n{}\n",
server_version,
getExceptionMessage(*server_exception, print_stack_trace, true));
if (is_interactive)
std::cerr << std::endl;
{
fmt::print(stderr, "\n");
}
else
{
fmt::print(stderr, "(query: {})\n", full_query);
}
}
if (client_exception)
{
fmt::print(stderr, "Error on processing query '{}':\n{}\n", full_query, client_exception->message());
fmt::print(stderr, "Error on processing query: {}\n", client_exception->message());
if (is_interactive)
{
fmt::print(stderr, "\n");
}
else
{
fmt::print(stderr, "(query: {})\n", full_query);
}
}
// A debug check -- at least some exception must be set, if the error
@ -1244,13 +1255,17 @@ private:
if (!server_exception)
{
error_matches_hint = false;
fmt::print(stderr, "Expected server error code '{}' but got no server error.\n", test_hint.serverError());
fmt::print(stderr, "Expected server error code '{}' but got no server error (query: {}).\n",
test_hint.serverError(),
full_query);
}
else if (server_exception->code() != test_hint.serverError())
{
error_matches_hint = false;
std::cerr << "Expected server error code: " << test_hint.serverError() << " but got: " << server_exception->code()
<< "." << std::endl;
fmt::print(stderr, "Expected server error code: {} but got: {} (query: {}).\n",
test_hint.serverError(),
server_exception->code(),
full_query);
}
}
@ -1259,13 +1274,17 @@ private:
if (!client_exception)
{
error_matches_hint = false;
fmt::print(stderr, "Expected client error code '{}' but got no client error.\n", test_hint.clientError());
fmt::print(stderr, "Expected client error code '{}' but got no client error (query: {}).\n",
test_hint.clientError(),
full_query);
}
else if (client_exception->code() != test_hint.clientError())
{
error_matches_hint = false;
fmt::print(
stderr, "Expected client error code '{}' but got '{}'.\n", test_hint.clientError(), client_exception->code());
fmt::print(stderr, "Expected client error code '{}' but got '{}' (query: {}).\n",
test_hint.clientError(),
client_exception->code(),
full_query);
}
}
@ -1281,13 +1300,17 @@ private:
{
if (test_hint.clientError())
{
fmt::print(stderr, "The query succeeded but the client error '{}' was expected.\n", test_hint.clientError());
fmt::print(stderr, "The query succeeded but the client error '{}' was expected (query: {}).\n",
test_hint.clientError(),
full_query);
error_matches_hint = false;
}
if (test_hint.serverError())
{
fmt::print(stderr, "The query succeeded but the server error '{}' was expected.\n", test_hint.serverError());
fmt::print(stderr, "The query succeeded but the server error '{}' was expected (query: {}).\n",
test_hint.serverError(),
full_query);
error_matches_hint = false;
}
}


@ -962,7 +962,7 @@ if (ThreadFuzzer::instance().isEffective())
global_context->setMMappedFileCache(mmap_cache_size);
#if USE_EMBEDDED_COMPILER
constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 1024;
constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 128;
size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", compiled_expression_cache_size_default);
CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size);
#endif


@ -331,7 +331,7 @@
<mmap_cache_size>1000</mmap_cache_size>
<!-- Cache size for compiled expressions.-->
<compiled_expression_cache_size>1073741824</compiled_expression_cache_size>
<compiled_expression_cache_size>134217728</compiled_expression_cache_size>
<!-- Path to data directory, with trailing slash. -->
<path>/var/lib/clickhouse/</path>


@ -280,7 +280,7 @@ mark_cache_size: 5368709120
mmap_cache_size: 1000
# Cache size for compiled expressions.
compiled_expression_cache_size: 1073741824
compiled_expression_cache_size: 134217728
# Path to data directory, with trailing slash.
path: /var/lib/clickhouse/


@ -21,6 +21,8 @@ class AggregateFunctionCombinatorArray final : public IAggregateFunctionCombinat
public:
String getName() const override { return "Array"; }
bool supportsNesting() const override { return true; }
DataTypes transformArguments(const DataTypes & arguments) const override
{
if (arguments.empty())


@ -29,6 +29,7 @@ namespace ErrorCodes
{
extern const int UNKNOWN_AGGREGATE_FUNCTION;
extern const int LOGICAL_ERROR;
extern const int ILLEGAL_AGGREGATION;
}
const String & getAggregateFunctionCanonicalNameIfAny(const String & name)
@ -159,13 +160,32 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(
if (AggregateFunctionCombinatorPtr combinator = AggregateFunctionCombinatorFactory::instance().tryFindSuffix(name))
{
const std::string & combinator_name = combinator->getName();
if (combinator->isForInternalUsageOnly())
throw Exception("Aggregate function combinator '" + combinator->getName() + "' is only for internal usage", ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION);
throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION,
"Aggregate function combinator '{}' is only for internal usage",
combinator_name);
if (query_context && query_context->getSettingsRef().log_queries)
query_context->addQueryFactoriesInfo(Context::QueryLogFactories::AggregateFunctionCombinator, combinator->getName());
query_context->addQueryFactoriesInfo(Context::QueryLogFactories::AggregateFunctionCombinator, combinator_name);
String nested_name = name.substr(0, name.size() - combinator_name.size());
/// Nested identical combinators (i.e. uniqCombinedIfIf) are not
/// supported (they do not even work -- they fail silently).
///
/// But non-identical ones are supported and work. For example,
/// uniqCombinedIfMergeIf is useful when the underlying storage
/// stores AggregateFunction(uniqCombinedIf) and in SELECT you need
/// to filter the aggregation result based on another column.
if (!combinator->supportsNesting() && nested_name.ends_with(combinator_name))
{
throw Exception(ErrorCodes::ILLEGAL_AGGREGATION,
"Nested identical combinator '{}' is not supported",
combinator_name);
}
String nested_name = name.substr(0, name.size() - combinator->getName().size());
DataTypes nested_types = combinator->transformArguments(argument_types);
Array nested_parameters = combinator->transformParameters(parameters);


@ -10,7 +10,6 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ILLEGAL_AGGREGATION;
}
class AggregateFunctionCombinatorIf final : public IAggregateFunctionCombinator
@ -37,10 +36,6 @@ public:
const DataTypes & arguments,
const Array & params) const override
{
if (nested_function->getName().find(getName()) != String::npos)
{
throw Exception(ErrorCodes::ILLEGAL_AGGREGATION, "nested function for {0}-combinator must not have {0}-combinator", getName());
}
return std::make_shared<AggregateFunctionIf>(nested_function, arguments, params);
}
};


@ -23,6 +23,9 @@ private:
public:
explicit AggregateFunctionCombinatorOrFill(Kind kind_) : kind(kind_) {}
/// Due to aggregate_functions_null_for_empty
bool supportsNesting() const override { return true; }
String getName() const override
{
return kind == Kind::OrNull ? "OrNull" : "OrDefault";


@ -35,6 +35,10 @@ public:
virtual bool isForInternalUsageOnly() const { return false; }
/** Whether the combinator supports nesting (of itself, i.e. ArrayArray or IfIf).
*/
virtual bool supportsNesting() const { return false; }
/** From the arguments for combined function (ex: UInt64, UInt8 for sumIf),
* get the arguments for nested function (ex: UInt64 for sum).
* If arguments are not suitable for combined function, throw an exception.


@ -261,7 +261,7 @@ dbms_target_include_directories (PUBLIC "${ClickHouse_SOURCE_DIR}/src" "${ClickH
target_include_directories (clickhouse_common_io PUBLIC "${ClickHouse_SOURCE_DIR}/src" "${ClickHouse_BINARY_DIR}/src")
if (USE_EMBEDDED_COMPILER)
dbms_target_link_libraries (PRIVATE ${REQUIRED_LLVM_LIBRARIES})
dbms_target_link_libraries (PUBLIC ${REQUIRED_LLVM_LIBRARIES})
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${LLVM_INCLUDE_DIRS})
endif ()


@ -4,6 +4,7 @@
#include <Columns/ColumnNothing.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnLowCardinality.h>
#include <algorithm>
namespace DB
@ -177,19 +178,21 @@ MaskInfo extractMaskFromConstOrNull(
template <bool inverted>
MaskInfo extractMaskImpl(
PaddedPODArray<UInt8> & mask,
const ColumnPtr & column,
const ColumnPtr & col,
UInt8 null_value,
const PaddedPODArray<UInt8> * null_bytemap,
PaddedPODArray<UInt8> * nulls = nullptr)
{
auto column = col->convertToFullColumnIfLowCardinality();
/// Special implementation for Null and Const columns.
if (column->onlyNull() || checkAndGetColumn<ColumnConst>(*column))
return extractMaskFromConstOrNull<inverted>(mask, column, null_value, nulls);
if (const auto * col = checkAndGetColumn<ColumnNullable>(*column))
if (const auto * nullable_column = checkAndGetColumn<ColumnNullable>(*column))
{
const PaddedPODArray<UInt8> & null_map = col->getNullMapData();
return extractMaskImpl<inverted>(mask, col->getNestedColumnPtr(), null_value, &null_map, nulls);
const PaddedPODArray<UInt8> & null_map = nullable_column->getNullMapData();
return extractMaskImpl<inverted>(mask, nullable_column->getNestedColumnPtr(), null_value, &null_map, nulls);
}
MaskInfo mask_info;
@ -314,3 +317,4 @@ void copyMask(const PaddedPODArray<UInt8> & from, PaddedPODArray<UInt8> & to)
}
}


@ -417,11 +417,7 @@ void StackTrace::toStringEveryLine(std::function<void(const std::string &)> call
std::string StackTrace::toString() const
{
/// Calculation of stack trace text is extremely slow.
/// We use simple cache because otherwise the server could be overloaded by trash queries.
static SimpleCache<decltype(toStringImpl), &toStringImpl> func_cached;
return func_cached(frame_pointers, offset, size);
return toStringStatic(frame_pointers, offset, size);
}
std::string StackTrace::toString(void ** frame_pointers_, size_t offset, size_t size)
@ -432,6 +428,23 @@ std::string StackTrace::toString(void ** frame_pointers_, size_t offset, size_t
for (size_t i = 0; i < size; ++i)
frame_pointers_copy[i] = frame_pointers_[i];
static SimpleCache<decltype(toStringImpl), &toStringImpl> func_cached;
return func_cached(frame_pointers_copy, offset, size);
return toStringStatic(frame_pointers_copy, offset, size);
}
static SimpleCache<decltype(toStringImpl), &toStringImpl> & cacheInstance()
{
static SimpleCache<decltype(toStringImpl), &toStringImpl> cache;
return cache;
}
std::string StackTrace::toStringStatic(const StackTrace::FramePointers & frame_pointers, size_t offset, size_t size)
{
/// Calculation of stack trace text is extremely slow.
/// We use simple cache because otherwise the server could be overloaded by trash queries.
return cacheInstance()(frame_pointers, offset, size);
}
void StackTrace::dropCache()
{
cacheInstance().drop();
}


@ -61,6 +61,8 @@ public:
std::string toString() const;
static std::string toString(void ** frame_pointers, size_t offset, size_t size);
static std::string toStringStatic(const FramePointers & frame_pointers, size_t offset, size_t size);
static void dropCache();
static void symbolize(const FramePointers & frame_pointers, size_t offset, size_t size, StackTrace::Frames & frames);
void toStringEveryLine(std::function<void(const std::string &)> callback) const;


@ -462,12 +462,22 @@ String SymbolIndex::getBuildIDHex() const
return build_id_hex;
}
MultiVersion<SymbolIndex>::Version SymbolIndex::instance(bool reload)
MultiVersion<SymbolIndex> & SymbolIndex::instanceImpl()
{
static MultiVersion<SymbolIndex> instance(std::unique_ptr<SymbolIndex>(new SymbolIndex));
if (reload)
instance.set(std::unique_ptr<SymbolIndex>(new SymbolIndex));
return instance.get();
return instance;
}
MultiVersion<SymbolIndex>::Version SymbolIndex::instance()
{
return instanceImpl().get();
}
void SymbolIndex::reload()
{
instanceImpl().set(std::unique_ptr<SymbolIndex>(new SymbolIndex));
/// Also drop stacktrace cache.
StackTrace::dropCache();
}
}


@ -22,7 +22,8 @@ protected:
SymbolIndex() { update(); }
public:
static MultiVersion<SymbolIndex>::Version instance(bool reload = false);
static MultiVersion<SymbolIndex>::Version instance();
static void reload();
struct Symbol
{
@ -60,6 +61,7 @@ private:
Data data;
void update();
static MultiVersion<SymbolIndex> & instanceImpl();
};
}


@ -8,6 +8,7 @@
#include <queue>
#include <list>
#include <optional>
#include <atomic>
#include <boost/heap/priority_queue.hpp>
@ -157,14 +158,16 @@ public:
class ThreadFromGlobalPool
{
public:
ThreadFromGlobalPool() {}
ThreadFromGlobalPool() = default;
template <typename Function, typename... Args>
explicit ThreadFromGlobalPool(Function && func, Args &&... args)
: state(std::make_shared<Poco::Event>())
, thread_id(std::make_shared<std::thread::id>())
{
/// NOTE: If this throws an exception, the destructor won't be called.
GlobalThreadPool::instance().scheduleOrThrow([
thread_id = thread_id,
state = state,
func = std::forward<Function>(func),
args = std::make_tuple(std::forward<Args>(args)...)]() mutable /// mutable is needed to destroy capture
@ -172,6 +175,8 @@ public:
auto event = std::move(state);
SCOPE_EXIT(event->set());
thread_id = std::make_shared<std::thread::id>(std::this_thread::get_id());
/// These moves are needed to destroy the function and arguments before exit.
/// They guarantee that after ThreadFromGlobalPool::join() all captured params are destroyed.
auto function = std::move(func);
@ -194,6 +199,7 @@ public:
if (joinable())
abort();
state = std::move(rhs.state);
thread_id = std::move(rhs.thread_id);
return *this;
}
@ -221,12 +227,18 @@ public:
bool joinable() const
{
return state != nullptr;
if (!state)
return false;
/// Thread cannot join itself.
if (*thread_id == std::this_thread::get_id())
return false;
return true;
}
private:
/// The state used in this object and inside the thread job.
std::shared_ptr<Poco::Event> state;
std::shared_ptr<std::thread::id> thread_id;
};


@ -499,7 +499,7 @@ class IColumn;
M(UInt64, offset, 0, "Offset on read rows from the most 'end' result for select query", 0) \
\
M(UInt64, function_range_max_elements_in_block, 500000000, "Maximum number of values generated by function 'range' per block of data (sum of array sizes for every row in a block, see also 'max_block_size' and 'min_insert_block_size_rows'). It is a safety threshold.", 0) \
M(ShortCircuitFunctionEvaluation, short_circuit_function_evaluation, ShortCircuitFunctionEvaluation::ENABLE, "Setting for short-circuit function evaluation configuration. Possible values: 'enable', 'disable', 'force_enable'", 0) \
M(ShortCircuitFunctionEvaluation, short_circuit_function_evaluation, ShortCircuitFunctionEvaluation::ENABLE, "Setting for short-circuit function evaluation configuration. Possible values: 'enable' - use short-circuit function evaluation for functions that are suitable for it, 'disable' - disable short-circuit function evaluation, 'force_enable' - use short-circuit function evaluation for all functions.", 0) \
\
M(String, local_filesystem_read_method, "pread", "Method of reading data from local filesystem, one of: read, pread, mmap, pread_threadpool.", 0) \
M(Bool, local_filesystem_read_prefetch, false, "Should use prefetching when reading data from local filesystem.", 0) \
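The expanded description of `short_circuit_function_evaluation` above can be illustrated with a hedged SQL sketch (table-free, using `numbers`):

``` sql
-- With short-circuit evaluation, intDiv(10, number) is computed only for
-- rows where number != 0, so the query succeeds; with
-- short_circuit_function_evaluation = 'disable' it would fail with a
-- division-by-zero error on the first row.
SELECT number, number != 0 AND intDiv(10, number) > 1 FROM numbers(3)
SETTINGS short_circuit_function_evaluation = 'enable';
```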

Some files were not shown because too many files have changed in this diff Show More