Merge branch 'ClickHouse:master' into master

michael1589 2021-11-30 15:05:24 +08:00 committed by GitHub
commit 93efc75dc7
156 changed files with 1931 additions and 1147 deletions


@ -211,12 +211,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 0
BUILD_NAME: 'package_release'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -250,12 +250,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 8
BUILD_NAME: 'binary_release'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -289,12 +289,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 3
BUILD_NAME: 'package_asan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -328,12 +328,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 4
BUILD_NAME: 'package_ubsan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -367,12 +367,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 5
BUILD_NAME: 'package_tsan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -406,12 +406,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 6
BUILD_NAME: 'package_msan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -445,12 +445,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 7
BUILD_NAME: 'package_debug'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -486,13 +486,13 @@ jobs:
IMAGES_PATH: ${{runner.temp}}/images_path
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse special build check (actions)'
BUILD_NUMBER: 1
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NAME: 'binary_splitted'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:


@ -84,7 +84,7 @@ jobs:
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 compatibility_check.py 0
cd $REPO_COPY/tests/ci && python3 compatibility_check.py
- name: Cleanup
if: always()
run: |
@ -142,12 +142,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 0
BUILD_NAME: 'package_release'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -181,12 +181,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 8
BUILD_NAME: 'binary_release'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -219,12 +219,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 3
BUILD_NAME: 'package_asan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -257,12 +257,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 4
BUILD_NAME: 'package_ubsan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -295,12 +295,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 5
BUILD_NAME: 'package_tsan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -333,12 +333,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 6
BUILD_NAME: 'package_msan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -371,12 +371,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 7
BUILD_NAME: 'package_debug'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -412,13 +412,13 @@ jobs:
IMAGES_PATH: ${{runner.temp}}/images_path
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse special build check (actions)'
BUILD_NUMBER: 1
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NAME: 'binary_splitted'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:


@ -41,7 +41,7 @@ jobs:
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 compatibility_check.py 0
cd $REPO_COPY/tests/ci && python3 compatibility_check.py
- name: Cleanup
if: always()
run: |
@ -72,12 +72,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 0
BUILD_NAME: 'package_release'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -110,12 +110,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 3
BUILD_NAME: 'package_asan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -148,12 +148,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 4
BUILD_NAME: 'package_ubsan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -186,12 +186,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 5
BUILD_NAME: 'package_tsan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -224,12 +224,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 6
BUILD_NAME: 'package_msan'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
@ -262,12 +262,12 @@ jobs:
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
CACHES_PATH: ${{runner.temp}}/../ccaches
CHECK_NAME: 'ClickHouse build check (actions)'
BUILD_NUMBER: 7
BUILD_NAME: 'package_debug'
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:

.gitmodules vendored

@ -17,7 +17,7 @@
[submodule "contrib/zlib-ng"]
path = contrib/zlib-ng
url = https://github.com/ClickHouse-Extras/zlib-ng.git
branch = clickhouse-new
branch = clickhouse-2.0.x
[submodule "contrib/googletest"]
path = contrib/googletest
url = https://github.com/google/googletest.git
@ -135,9 +135,6 @@
[submodule "contrib/flatbuffers"]
path = contrib/flatbuffers
url = https://github.com/ClickHouse-Extras/flatbuffers.git
[submodule "contrib/libc-headers"]
path = contrib/libc-headers
url = https://github.com/ClickHouse-Extras/libc-headers.git
[submodule "contrib/replxx"]
path = contrib/replxx
url = https://github.com/ClickHouse-Extras/replxx.git


@ -223,7 +223,7 @@ if (OS_DARWIN)
# from a _specific_ library, which is what we need.
set(WHOLE_ARCHIVE -force_load)
# The `-noall_load` flag is the default and now obsolete.
set(NO_WHOLE_ARCHIVE "")
set(NO_WHOLE_ARCHIVE "-undefined,error") # Effectively, a no-op. Here to avoid empty "-Wl, " sequence to be generated in the command line.
else ()
set(WHOLE_ARCHIVE --whole-archive)
set(NO_WHOLE_ARCHIVE --no-whole-archive)


@ -29,14 +29,6 @@ message(STATUS "Default libraries: ${DEFAULT_LIBS}")
set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
# glibc-compatibility library relies to constant version of libc headers
# (because minor changes in function attributes between different glibc versions will introduce incompatibilities)
# This is for x86_64. For other architectures we have separate toolchains.
if (ARCH_AMD64 AND NOT CMAKE_CROSSCOMPILING)
set(CMAKE_C_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers)
set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers)
endif ()
# Unfortunately '-pthread' doesn't work with '-nodefaultlibs'.
# Just make sure we have pthreads at all.
set(THREADS_PREFER_PTHREAD_FLAG ON)


@ -22,9 +22,10 @@ if (COMPILER_GCC)
elseif (COMPILER_CLANG)
# Require minimum version of clang/apple-clang
if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
# If you are developer you can figure out what exact versions of AppleClang are Ok,
# simply remove the following line.
# (Experimental!) Specify "-DALLOW_APPLECLANG=ON" when running CMake configuration step, if you want to experiment with using it.
if (NOT ALLOW_APPLECLANG AND NOT DEFINED ENV{ALLOW_APPLECLANG})
message (FATAL_ERROR "AppleClang is not supported, you should install clang from brew. See the instruction: https://clickhouse.com/docs/en/development/build-osx/")
endif ()
# AppleClang 10.0.1 (Xcode 10.2) corresponds to LLVM/Clang upstream version 7.0.0
# AppleClang 11.0.0 (Xcode 11.0) corresponds to LLVM/Clang upstream version 8.0.0


@ -301,9 +301,10 @@ endif()
# instead of controlling it via CMAKE_FOLDER.
function (ensure_target_rooted_in _target _folder)
# Skip INTERFACE library targets, since FOLDER property is not available for them.
# Skip aliases and INTERFACE library targets, since FOLDER property is not available/writable for them.
get_target_property (_target_aliased "${_target}" ALIASED_TARGET)
get_target_property (_target_type "${_target}" TYPE)
if (_target_type STREQUAL "INTERFACE_LIBRARY")
if (_target_aliased OR _target_type STREQUAL "INTERFACE_LIBRARY")
return ()
endif ()

@ -1 +0,0 @@
Subproject commit aa5429bf67a346e48ad60efd88bcefc286644bf3


@ -73,6 +73,11 @@ target_compile_options(cxx PRIVATE -w)
target_link_libraries(cxx PUBLIC cxxabi)
# For __udivmodti4, __divmodti4.
if (OS_DARWIN AND COMPILER_GCC)
target_link_libraries(cxx PRIVATE gcc)
endif ()
install(
TARGETS cxx
EXPORT global


@ -28,11 +28,16 @@ set (SRCS
${SRC_DIR}/src/sentry_unix_pageallocator.c
${SRC_DIR}/src/path/sentry_path_unix.c
${SRC_DIR}/src/symbolizer/sentry_symbolizer_unix.c
${SRC_DIR}/src/modulefinder/sentry_modulefinder_linux.c
${SRC_DIR}/src/transports/sentry_transport_curl.c
${SRC_DIR}/src/backends/sentry_backend_none.c
)
if(APPLE)
list(APPEND SRCS ${SRC_DIR}/src/modulefinder/sentry_modulefinder_apple.c)
else()
list(APPEND SRCS ${SRC_DIR}/src/modulefinder/sentry_modulefinder_linux.c)
endif()
add_library(sentry ${SRCS})
add_library(sentry::sentry ALIAS sentry)


@ -6,4 +6,6 @@ add_library(simdjson ${SIMDJSON_SRC})
target_include_directories(simdjson SYSTEM PUBLIC "${SIMDJSON_INCLUDE_DIR}" PRIVATE "${SIMDJSON_SRC_DIR}")
# simdjson is using its own CPU dispatching and get confused if we enable AVX/AVX2 flags.
target_compile_options(simdjson PRIVATE -mno-avx -mno-avx2)
if(ARCH_AMD64)
target_compile_options(simdjson PRIVATE -mno-avx -mno-avx2)
endif()

contrib/zlib-ng vendored

@ -1 +1 @@
Subproject commit 6a5e93b9007782115f7f7e5235dedc81c4f1facb
Subproject commit bffad6f6fe74d6a2f92e2668390664a926c68733


@ -1,8 +1,10 @@
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
RUN apt-get update \
&& apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \


@ -1,10 +1,12 @@
FROM ubuntu:18.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
ARG version=21.12.1.*
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
RUN apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \


@ -1,9 +1,11 @@
# docker build -t clickhouse/docs-build .
FROM ubuntu:20.04
ENV LANG=C.UTF-8
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
ENV LANG=C.UTF-8
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \


@ -1,9 +1,11 @@
# docker build -t clickhouse/binary-builder .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
RUN apt-get update \
&& apt-get install \


@ -1,9 +1,11 @@
# docker build -t clickhouse/deb-builder .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
RUN apt-get update \
&& apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \


@ -1,5 +1,9 @@
FROM ubuntu:20.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
ARG version=21.12.1.*
ARG gosu_ver=1.10
@ -26,8 +30,6 @@ ARG DEBIAN_FRONTEND=noninteractive
# installed to prevent picking those uid / gid by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
RUN groupadd -r clickhouse --gid=101 \
&& useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
&& apt-get update \


@ -1,9 +1,11 @@
# docker build -t clickhouse/test-base .
FROM clickhouse/test-util
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
RUN apt-get update \
&& apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \


@ -2,7 +2,9 @@
# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output clickhouse/codebrowser
FROM clickhouse/binary-builder
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-9 libllvm9 libclang-9-dev


@ -1,9 +1,11 @@
# docker build -t clickhouse/fasttest .
FROM clickhouse/test-util
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
RUN apt-get update \
&& apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \


@ -174,7 +174,6 @@ function clone_submodules
contrib/double-conversion
contrib/libcxx
contrib/libcxxabi
contrib/libc-headers
contrib/lz4
contrib/zstd
contrib/fastops


@ -1,12 +1,14 @@
# docker build -t clickhouse/fuzzer .
FROM clickhouse/test-base
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ENV LANG=C.UTF-8
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
ca-certificates \


@ -1,7 +1,9 @@
# docker build -t clickhouse/integration-tests-runner .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN apt-get update \
&& env DEBIAN_FRONTEND=noninteractive apt-get install --yes \


@ -1,12 +1,14 @@
# docker build -t clickhouse/performance-comparison .
FROM ubuntu:18.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ENV LANG=C.UTF-8
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
bash \


@ -1,7 +1,9 @@
# docker build -t clickhouse/sqlancer-test .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git default-jdk maven python3 --yes --no-install-recommends
RUN wget https://github.com/sqlancer/sqlancer/archive/master.zip -O /sqlancer.zip


@ -1,7 +1,9 @@
# docker build -t clickhouse/style-test .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
shellcheck \


@ -1,7 +1,9 @@
# docker build -t clickhouse/testflows-runner .
FROM ubuntu:20.04
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN apt-get update \
&& env DEBIAN_FRONTEND=noninteractive apt-get install --yes \


@ -3,15 +3,14 @@ toc_priority: 65
toc_title: Build on Mac OS X
---
# You don't have to build ClickHouse
You can install ClickHouse as follows: https://clickhouse.com/#quick-start
Choose Mac x86 or M1.
# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
Build should work on x86_64 (Intel) and arm64 (Apple Silicon) based macOS 10.15 (Catalina) and higher with Homebrew's vanilla Clang.
It is always recommended to use `clang` compiler. It is possible to use XCode's `AppleClang` or `gcc` but it's strongly discouraged.
!!! info "You don't have to build ClickHouse yourself!"
You can install pre-built ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start).
Follow `macOS (Intel)` or `macOS (Apple silicon)` installation instructions.
Build should work on x86_64 (Intel) and arm64 (Apple silicon) based macOS 10.15 (Catalina) and higher with Homebrew's vanilla Clang.
It is always recommended to use vanilla `clang` compiler. It is possible to use XCode's `apple-clang` or `gcc` but it's strongly discouraged.
## Install Homebrew {#install-homebrew}
@ -33,8 +32,6 @@ sudo rm -rf /Library/Developer/CommandLineTools
sudo xcode-select --install
```
Reboot.
## Install Required Compilers, Tools, and Libraries {#install-required-compilers-tools-and-libraries}
``` bash
@ -51,40 +48,41 @@ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
## Build ClickHouse {#build-clickhouse}
To build using Homebrew's vanilla Clang compiler:
To build using Homebrew's vanilla Clang compiler (the only **recommended** way):
``` bash
cd ClickHouse
rm -rf build
mkdir build
cd build
cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_AR=$(brew --prefix llvm)/bin/llvm-ar -DCMAKE_RANLIB=$(brew --prefix llvm)/bin/llvm-ranlib -DOBJCOPY_PATH=$(brew --prefix llvm)/bin/llvm-objcopy -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake --build . --config RelWithDebInfo
cd ..
# The resulting binary will be created at: ./programs/clickhouse
```
To build using Xcode's native AppleClang compiler (this option is strongly not recommended; use the option above):
To build using Xcode's native AppleClang compiler in Xcode IDE (this option is only for development builds and workflows, and is **not recommended** unless you know what you are doing):
``` bash
cd ClickHouse
rm -rf build
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake --build . --config RelWithDebInfo
cd ..
XCODE_IDE=1 ALLOW_APPLECLANG=1 cmake -G Xcode -DCMAKE_BUILD_TYPE=Debug -DENABLE_JEMALLOC=OFF ..
cmake --open .
# ...then, in Xcode IDE select ALL_BUILD scheme and start the building process.
# The resulting binary will be created at: ./programs/Debug/clickhouse
```
To build using Homebrew's vanilla GCC compiler (this option is absolutely not recommended, I'm wondering why do we ever have it):
To build using Homebrew's vanilla GCC compiler (this option is only for development experiments, and is **absolutely not recommended** unless you really know what you are doing):
``` bash
cd ClickHouse
rm -rf build
mkdir build
cd build
cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-11 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-11 -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-11 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-11 -DCMAKE_AR=$(brew --prefix gcc)/bin/gcc-ar-11 -DCMAKE_RANLIB=$(brew --prefix gcc)/bin/gcc-ranlib-11 -DOBJCOPY_PATH=$(brew --prefix binutils)/bin/objcopy -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake --build . --config RelWithDebInfo
cd ..
# The resulting binary will be created at: ./programs/clickhouse
```
## Caveats {#caveats}
@ -140,9 +138,9 @@ sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist
To check if it's working, use the `ulimit -n` or `launchctl limit maxfiles` commands.
## Run ClickHouse server:
## Running ClickHouse server
```
``` bash
cd ClickHouse
./build/programs/clickhouse-server --config-file ./programs/server/config.xml
```


@ -133,8 +133,7 @@ Example:
SELECT level, sum(total) FROM daily GROUP BY level;
```
To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/server-configuration-parameters/settings.md#settings-max_insert_block_size). If the block wasn't formed within [stream_flush_interval_ms](../../../operations/server-configuration-parameters/settings.md) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings/#settings-max_insert_block_size). If the block wasn't formed within [stream_flush_interval_ms](../../../operations/settings/settings/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
To stop receiving topic data or to change the conversion logic, detach the materialized view:
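The code block that follows this sentence is elided by the diff view; for context, it is the usual detach/re-attach pair (assuming the materialized view is named `consumer`, as in the page's earlier examples):

``` sql
DETACH TABLE consumer;
ATTACH TABLE consumer;
```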
@ -192,6 +191,6 @@ Example:
**See Also**
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
- [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size)
- [background_message_broker_schedule_pool_size](../../../operations/settings/settings.md#background_message_broker_schedule_pool_size)
[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka/) <!--hide-->


@ -24,6 +24,8 @@ The supported formats are:
| [CSVWithNames](#csvwithnames) | ✔ | ✔ |
| [CSVWithNamesAndTypes](#csvwithnamesandtypes) | ✔ | ✔ |
| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
| [CustomSeparatedWithNames](#customseparatedwithnames) | ✔ | ✔ |
| [CustomSeparatedWithNamesAndTypes](#customseparatedwithnamesandtypes) | ✔ | ✔ |
| [Values](#data-format-values) | ✔ | ✔ |
| [Vertical](#vertical) | ✗ | ✔ |
| [JSON](#json) | ✗ | ✔ |
@ -429,8 +431,17 @@ Also prints two header rows with column names and types, similar to [TabSeparate
## CustomSeparated {#format-customseparated}
Similar to [Template](#format-template), but it prints or reads all columns and uses escaping rule from setting `format_custom_escaping_rule` and delimiters from settings `format_custom_field_delimiter`, `format_custom_row_before_delimiter`, `format_custom_row_after_delimiter`, `format_custom_row_between_delimiter`, `format_custom_result_before_delimiter` and `format_custom_result_after_delimiter`, not from format strings.
There is also `CustomSeparatedIgnoreSpaces` format, which is similar to `TemplateIgnoreSpaces`.
Similar to [Template](#format-template), but it prints or reads all names and types of columns and uses escaping rule from [format_custom_escaping_rule](../operations/settings/settings.md#format-custom-escaping-rule) setting and delimiters from [format_custom_field_delimiter](../operations/settings/settings.md#format-custom-field-delimiter), [format_custom_row_before_delimiter](../operations/settings/settings.md#format-custom-row-before-delimiter), [format_custom_row_after_delimiter](../operations/settings/settings.md#format-custom-row-after-delimiter), [format_custom_row_between_delimiter](../operations/settings/settings.md#format-custom-row-between-delimiter), [format_custom_result_before_delimiter](../operations/settings/settings.md#format-custom-result-before-delimiter) and [format_custom_result_after_delimiter](../operations/settings/settings.md#format-custom-result-after-delimiter) settings, not from format strings.
There is also `CustomSeparatedIgnoreSpaces` format, which is similar to [TemplateIgnoreSpaces](#templateignorespaces).
## CustomSeparatedWithNames {#customseparatedwithnames}
Also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
## CustomSeparatedWithNamesAndTypes {#customseparatedwithnamesandtypes}
Also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
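As a hedged illustration of how these formats behave (the query, data, and delimiter choices here are invented for this sketch; the `format_custom_*` settings are the ones referenced above):

``` sql
-- Sketch: print two columns with a header row, using ';' between fields.
SELECT
    number AS id,
    concat('user_', toString(number)) AS name
FROM numbers(2)
SETTINGS
    format_custom_escaping_rule = 'CSV',
    format_custom_field_delimiter = ';'
FORMAT CustomSeparatedWithNames

-- Expected output under these assumptions:
-- id;name
-- 0;user_0
-- 1;user_1
```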
## JSON {#json}
@ -1537,12 +1548,15 @@ Each line of imported data is parsed according to the regular expression.
When working with the `Regexp` format, you can use the following settings:
- `format_regexp` — [String](../sql-reference/data-types/string.md). Contains regular expression in the [re2](https://github.com/google/re2/wiki/Syntax) format.
- `format_regexp_escaping_rule` — [String](../sql-reference/data-types/string.md). The following escaping rules are supported:
- CSV (similarly to [CSV](#csv))
- JSON (similarly to [JSONEachRow](#jsoneachrow))
- Escaped (similarly to [TSV](#tabseparated))
- Quoted (similarly to [Values](#data-format-values))
- Raw (extracts subpatterns as a whole, no escaping rules)
- Raw (extracts subpatterns as a whole, no escaping rules, similarly to [TSVRaw](#tabseparatedraw))
- `format_regexp_skip_unmatched` — [UInt8](../sql-reference/data-types/int-uint.md). Defines the need to throw an exception in case the `format_regexp` expression does not match the imported data. Can be set to `0` or `1`.
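A minimal sketch of these settings in use (the table `logins` and the regular expression are invented for illustration):

``` sql
-- Assumes: CREATE TABLE logins (id UInt32, name String) ENGINE = Memory;
SET format_regexp = 'id: (\\d+), name: (\\w+)';
SET format_regexp_escaping_rule = 'Escaped';
SET format_regexp_skip_unmatched = 0; -- throw if a line does not match

INSERT INTO logins FORMAT Regexp
id: 1, name: foo
id: 2, name: bar
```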
**Usage**


@ -69,8 +69,6 @@ If no conditions met for a data part, ClickHouse uses the `lz4` compression.
</compression>
```
<!--
## encryption {#server-settings-encryption}
Configures a command to obtain a key to be used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). Key (or keys) should be written in environment variables or set in the configuration file.
@ -150,7 +148,6 @@ Or it can be set in hex:
Everything mentioned above can be applied for `aes_256_gcm_siv` (but the key must be 32 bytes long).
-->
## custom_settings_prefixes {#custom_settings_prefixes}


@ -4071,3 +4071,54 @@ Possible values:
- 0 — Big files read with only copying data from kernel to userspace.
Default value: `0`.
## format_custom_escaping_rule {#format-custom-escaping-rule}
Sets the field escaping rule for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
Possible values:
- `'Escaped'` — Similarly to [TSV](../../interfaces/formats.md#tabseparated).
- `'Quoted'` — Similarly to [Values](../../interfaces/formats.md#data-format-values).
- `'CSV'` — Similarly to [CSV](../../interfaces/formats.md#csv).
- `'JSON'` — Similarly to [JSONEachRow](../../interfaces/formats.md#jsoneachrow).
- `'XML'` — Similarly to [XML](../../interfaces/formats.md#xml).
- `'Raw'` — Extracts subpatterns as a whole, no escaping rules, similarly to [TSVRaw](../../interfaces/formats.md#tabseparatedraw).
Default value: `'Escaped'`.
## format_custom_field_delimiter {#format-custom-field-delimiter}
Sets the character that is interpreted as a delimiter between the fields for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
Default value: `'\t'`.
## format_custom_row_before_delimiter {#format-custom-row-before-delimiter}
Sets the character that is interpreted as a delimiter before the field of the first column for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
Default value: `''`.
## format_custom_row_after_delimiter {#format-custom-row-after-delimiter}
Sets the character that is interpreted as a delimiter after the field of the last column for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
Default value: `'\n'`.
## format_custom_row_between_delimiter {#format-custom-row-between-delimiter}
Sets the character that is interpreted as a delimiter between the rows for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
Default value: `''`.
## format_custom_result_before_delimiter {#format-custom-result-before-delimiter}
Sets the character that is interpreted as a prefix before the result set for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
Default value: `''`.
## format_custom_result_after_delimiter {#format-custom-result-after-delimiter}
Sets the character that is interpreted as a suffix after the result set for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
Default value: `''`.
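Taken together, a hedged sketch of how the row and result delimiters compose (all delimiter values below are arbitrary choices for illustration, not defaults):

``` sql
-- Sketch: wrap every row in [...] and the whole result set in <<< >>>.
SELECT number FROM numbers(2)
SETTINGS
    format_custom_result_before_delimiter = '<<<\n',
    format_custom_result_after_delimiter = '>>>\n',
    format_custom_row_before_delimiter = '[',
    format_custom_row_after_delimiter = ']\n'
FORMAT CustomSeparated

-- Expected output under these assumptions:
-- <<<
-- [0]
-- [1]
-- >>>
```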


@ -10,6 +10,7 @@ Allows formatting input queries.
Keys:
- `--help` or `-h` — Produce help message.
- `--query` — Format queries of any length and complexity.
- `--hilite` — Add syntax highlighting with ANSI terminal escape sequences.
- `--oneline` — Format in single line.
- `--quiet` or `-q` — Just check syntax, no output on success.
@ -20,7 +21,22 @@ Keys:
## Examples {#examples}
1. Highlighting and single line:
1. Formatting a query:
```bash
$ clickhouse-format --query "select number from numbers(10) where number%2 order by number desc;"
```
Result:
```text
SELECT number
FROM numbers(10)
WHERE number % 2
ORDER BY number DESC
```
2. Highlighting and single line:
```bash
$ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);"
@ -32,7 +48,7 @@ Result:
SELECT sum(number) FROM numbers(5)
```
2. Multiqueries:
3. Multiqueries:
```bash
$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
@ -53,7 +69,7 @@ FROM
;
```
3. Obfuscating:
4. Obfuscating:
```bash
$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
@ -77,7 +93,7 @@ Result:
SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END;
```
4. Adding backslash:
5. Adding backslash:
```bash
$ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"


@ -0,0 +1,148 @@
---
toc_priority: 108
---
## exponentialMovingAverage {#exponential-moving-average}
Calculates the exponential moving average of values over a given period of time.
**Syntax**
```sql
exponentialMovingAverage(x)(value, timestamp)
```
Each `value` corresponds to a particular `timestamp`. The half-life `x` is the time lag at which the exponential weights decay by one-half. The function returns a weighted average: the older the time point, the less weight the corresponding value carries.
**Arguments**
- `value` — Value. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
- `timestamp` — Timestamp. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
**Parameters**
- `x` — Half-life period. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
**Returned values**
- Returns an [exponentially smoothed moving average](https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) of the values over the past `x` time units, evaluated at the latest point in time.
Type: [Float64](../../../sql-reference/data-types/float.md#float32-float64).
**Examples**
Input table:
``` text
┌──temperature─┬─timestamp──┐
│ 95 │ 1 │
│ 95 │ 2 │
│ 95 │ 3 │
│ 96 │ 4 │
│ 96 │ 5 │
│ 96 │ 6 │
│ 96 │ 7 │
│ 97 │ 8 │
│ 97 │ 9 │
│ 97 │ 10 │
│ 97 │ 11 │
│ 98 │ 12 │
│ 98 │ 13 │
│ 98 │ 14 │
│ 98 │ 15 │
│ 99 │ 16 │
│ 99 │ 17 │
│ 99 │ 18 │
│ 100 │ 19 │
│ 100 │ 20 │
└──────────────┴────────────┘
```
Query:
```sql
SELECT exponentialMovingAverage(5)(temperature, timestamp);
```
Result:
``` text
┌──exponentialMovingAverage(5)(temperature, timestamp)──┐
│ 92.25779635374204 │
└───────────────────────────────────────────────────────┘
```
Query:
```sql
SELECT
value,
time,
round(exp_smooth, 3),
bar(exp_smooth, 0, 1, 50) AS bar
FROM
(
SELECT
(number = 0) OR (number >= 25) AS value,
number AS time,
exponentialMovingAverage(10)(value, time) OVER (Rows BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth
FROM numbers(50)
)
```
Result:
``` text
┌─value─┬─time─┬─round(exp_smooth, 3)─┬─bar────────────────────────────────────────┐
│ 1 │ 0 │ 0.067 │ ███▎ │
│ 0 │ 1 │ 0.062 │ ███ │
│ 0 │ 2 │ 0.058 │ ██▊ │
│ 0 │ 3 │ 0.054 │ ██▋ │
│ 0 │ 4 │ 0.051 │ ██▌ │
│ 0 │ 5 │ 0.047 │ ██▎ │
│ 0 │ 6 │ 0.044 │ ██▏ │
│ 0 │ 7 │ 0.041 │ ██ │
│ 0 │ 8 │ 0.038 │ █▊ │
│ 0 │ 9 │ 0.036 │ █▋ │
│ 0 │ 10 │ 0.033 │ █▋ │
│ 0 │ 11 │ 0.031 │ █▌ │
│ 0 │ 12 │ 0.029 │ █▍ │
│ 0 │ 13 │ 0.027 │ █▎ │
│ 0 │ 14 │ 0.025 │ █▎ │
│ 0 │ 15 │ 0.024 │ █▏ │
│ 0 │ 16 │ 0.022 │ █ │
│ 0 │ 17 │ 0.021 │ █ │
│ 0 │ 18 │ 0.019 │ ▊ │
│ 0 │ 19 │ 0.018 │ ▊ │
│ 0 │ 20 │ 0.017 │ ▋ │
│ 0 │ 21 │ 0.016 │ ▋ │
│ 0 │ 22 │ 0.015 │ ▋ │
│ 0 │ 23 │ 0.014 │ ▋ │
│ 0 │ 24 │ 0.013 │ ▋ │
│ 1 │ 25 │ 0.079 │ ███▊ │
│ 1 │ 26 │ 0.14 │ ███████ │
│ 1 │ 27 │ 0.198 │ █████████▊ │
│ 1 │ 28 │ 0.252 │ ████████████▌ │
│ 1 │ 29 │ 0.302 │ ███████████████ │
│ 1 │ 30 │ 0.349 │ █████████████████▍ │
│ 1 │ 31 │ 0.392 │ ███████████████████▌ │
│ 1 │ 32 │ 0.433 │ █████████████████████▋ │
│ 1 │ 33 │ 0.471 │ ███████████████████████▌ │
│ 1 │ 34 │ 0.506 │ █████████████████████████▎ │
│ 1 │ 35 │ 0.539 │ ██████████████████████████▊ │
│ 1 │ 36 │ 0.57 │ ████████████████████████████▌ │
│ 1 │ 37 │ 0.599 │ █████████████████████████████▊ │
│ 1 │ 38 │ 0.626 │ ███████████████████████████████▎ │
│ 1 │ 39 │ 0.651 │ ████████████████████████████████▌ │
│ 1 │ 40 │ 0.674 │ █████████████████████████████████▋ │
│ 1 │ 41 │ 0.696 │ ██████████████████████████████████▋ │
│ 1 │ 42 │ 0.716 │ ███████████████████████████████████▋ │
│ 1 │ 43 │ 0.735 │ ████████████████████████████████████▋ │
│ 1 │ 44 │ 0.753 │ █████████████████████████████████████▋ │
│ 1 │ 45 │ 0.77 │ ██████████████████████████████████████▍ │
│ 1 │ 46 │ 0.785 │ ███████████████████████████████████████▎ │
│ 1 │ 47 │ 0.8 │ ███████████████████████████████████████▊ │
│ 1 │ 48 │ 0.813 │ ████████████████████████████████████████▋ │
│ 1 │ 49 │ 0.825 │ █████████████████████████████████████████▎│
└───────┴──────┴──────────────────────┴────────────────────────────────────────────┘
```


@ -89,9 +89,39 @@ SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00
## sipHash128 {#hash_functions-siphash128}
Calculates SipHash from a string.
Accepts a String-type argument. Returns FixedString(16).
Differs from sipHash64 in that the final xor-folding state is only done up to 128 bits.
Produces a 128-bit [SipHash](https://131002.net/siphash/) hash value. Differs from [sipHash64](#hash_functions-siphash64) in that the final xor-folding state is done up to 128 bits.
**Syntax**
``` sql
sipHash128(par1,...)
```
**Arguments**
The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md).
**Returned value**
A 128-bit `SipHash` hash value.
Type: [FixedString(16)](../../sql-reference/data-types/fixedstring.md).
**Example**
Query:
``` sql
SELECT hex(sipHash128('foo', '\x01', 3));
```
Result:
``` text
┌─hex(sipHash128('foo', '', 3))────┐
│ 9DE516A64A414D4B1B609415E4523F24 │
└──────────────────────────────────┘
```
## cityHash64 {#cityhash64}
@ -459,28 +489,36 @@ SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:
Produces a 128-bit [MurmurHash3](https://github.com/aappleby/smhasher) hash value.
**Syntax**
``` sql
murmurHash3_128( expr )
murmurHash3_128(expr)
```
**Arguments**
- `expr`[Expressions](../../sql-reference/syntax.md#syntax-expressions) returning a [String](../../sql-reference/data-types/string.md)-type value.
- `expr`A list of [expressions](../../sql-reference/syntax.md#syntax-expressions). [String](../../sql-reference/data-types/string.md).
**Returned Value**
**Returned value**
A [FixedString(16)](../../sql-reference/data-types/fixedstring.md) data type hash value.
A 128-bit `MurmurHash3` hash value.
Type: [FixedString(16)](../../sql-reference/data-types/fixedstring.md).
**Example**
Query:
``` sql
SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type;
SELECT hex(murmurHash3_128('foo', 'foo', 'foo'));
```
Result:
``` text
┌─MurmurHash3──────────────────────┬─type───┐
│ 368A1A311CB7342253354B548E7E7E71 │ String │
└──────────────────────────────────┴────────┘
┌─hex(murmurHash3_128('foo', 'foo', 'foo'))─┐
│ F8F7AD9B6CD4CF117A71E277E2EC2931          │
└───────────────────────────────────────────┘
```
## xxHash32, xxHash64 {#hash-functions-xxhash32}


@ -20,6 +20,8 @@ ClickHouse может принимать (`INSERT`) и отдавать (`SELECT
| [CSV](#csv) | ✔ | ✔ |
| [CSVWithNames](#csvwithnames) | ✔ | ✔ |
| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
| [CustomSeparatedWithNames](#customseparatedwithnames) | ✔ | ✔ |
| [CustomSeparatedWithNamesAndTypes](#customseparatedwithnamesandtypes) | ✔ | ✔ |
| [Values](#data-format-values) | ✔ | ✔ |
| [Vertical](#vertical) | ✗ | ✔ |
| [JSON](#json) | ✗ | ✔ |
@ -368,8 +370,17 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR
## CustomSeparated {#format-customseparated}
Аналогичен [Template](#format-template), но выводит (или считывает) все столбцы, используя для них правило экранирования из настройки `format_custom_escaping_rule` и разделители из настроек `format_custom_field_delimiter`, `format_custom_row_before_delimiter`, `format_custom_row_after_delimiter`, `format_custom_row_between_delimiter`, `format_custom_result_before_delimiter` и `format_custom_result_after_delimiter`, а не из форматных строк.
Также существует формат `CustomSeparatedIgnoreSpaces`, аналогичный `TemplateIgnoreSpaces`.
Аналогичен [Template](#format-template), но выводит (или считывает) все имена и типы столбцов, используя для них правило экранирования из настройки [format_custom_escaping_rule](../operations/settings/settings.md#format-custom-escaping-rule) и разделители из настроек [format_custom_field_delimiter](../operations/settings/settings.md#format-custom-field-delimiter), [format_custom_row_before_delimiter](../operations/settings/settings.md#format-custom-row-before-delimiter), [format_custom_row_after_delimiter](../operations/settings/settings.md#format-custom-row-after-delimiter), [format_custom_row_between_delimiter](../operations/settings/settings.md#format-custom-row-between-delimiter), [format_custom_result_before_delimiter](../operations/settings/settings.md#format-custom-result-before-delimiter) и [format_custom_result_after_delimiter](../operations/settings/settings.md#format-custom-result-after-delimiter), а не из форматных строк.
Также существует формат `CustomSeparatedIgnoreSpaces`, аналогичный формату [TemplateIgnoreSpaces](#templateignorespaces).
## CustomSeparatedWithNames {#customseparatedwithnames}
Выводит также заголовок с именами столбцов, аналогичен формату [TabSeparatedWithNames](#tabseparatedwithnames).
## CustomSeparatedWithNamesAndTypes {#customseparatedwithnamesandtypes}
Выводит также два заголовка с именами и типами столбцов, аналогичен формату [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
## JSON {#json}
@ -1400,12 +1411,15 @@ SELECT * FROM line_as_string;
При работе с форматом `Regexp` можно использовать следующие параметры:
- `format_regexp` — [String](../sql-reference/data-types/string.md). Строка с регулярным выражением в формате [re2](https://github.com/google/re2/wiki/Syntax).
- `format_regexp_escaping_rule` — [String](../sql-reference/data-types/string.md). Правило сериализации. Поддерживаются следующие правила:
- CSV (как в [CSV](#csv))
- JSON (как в [JSONEachRow](#jsoneachrow))
- Escaped (как в [TSV](#tabseparated))
- Quoted (как в [Values](#data-format-values))
- Raw (данные импортируются как есть, без сериализации)
- `format_regexp_escaping_rule` — [String](../sql-reference/data-types/string.md). Правило экранирования. Поддерживаются следующие правила:
- CSV (как в формате [CSV](#csv))
- JSON (как в формате [JSONEachRow](#jsoneachrow))
- Escaped (как в формате [TSV](#tabseparated))
- Quoted (как в формате [Values](#data-format-values))
- Raw (данные импортируются как есть, без экранирования, как в формате [TSVRaw](#tabseparatedraw))
- `format_regexp_skip_unmatched` — [UInt8](../sql-reference/data-types/int-uint.md). Признак, будет ли генерироваться исключение в случае, если импортируемые данные не соответствуют регулярному выражению `format_regexp`. Может принимать значение `0` или `1`.
**Использование**


@ -131,7 +131,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
```xml
<encryption_codecs>
<aes_128_gcm_siv>
<nonce>0123456789101</nonce>
<nonce>012345678910</nonce>
</aes_128_gcm_siv>
</encryption_codecs>
```


@ -3830,3 +3830,54 @@ SELECT * FROM positional_arguments ORDER BY 2,3;
- 0 — большие файлы считываются только с копированием данных из ядра в пространство пользователей.
Значение по умолчанию: `0`.
## format_custom_escaping_rule {#format-custom-escaping-rule}
Устанавливает правило экранирования данных формата [CustomSeparated](../../interfaces/formats.md#format-customseparated).
Возможные значения:
- `'Escaped'` — как в формате [TSV](../../interfaces/formats.md#tabseparated).
- `'Quoted'` — как в формате [Values](../../interfaces/formats.md#data-format-values).
- `'CSV'` — как в формате [CSV](../../interfaces/formats.md#csv).
- `'JSON'` — как в формате [JSONEachRow](../../interfaces/formats.md#jsoneachrow).
- `'XML'` — как в формате [XML](../../interfaces/formats.md#xml).
- `'Raw'` — данные импортируются как есть, без экранирования, как в формате [TSVRaw](../../interfaces/formats.md#tabseparatedraw).
Значение по умолчанию: `'Escaped'`.
## format_custom_field_delimiter {#format-custom-field-delimiter}
Задает символ, который интерпретируется как разделитель между полями данных формата [CustomSeparated](../../interfaces/formats.md#format-customseparated).
Значение по умолчанию: `'\t'`.
## format_custom_row_before_delimiter {#format-custom-row-before-delimiter}
Задает символ, который интерпретируется как разделитель перед полем первого столбца данных формата [CustomSeparated](../../interfaces/formats.md#format-customseparated).
Значение по умолчанию: `''`.
## format_custom_row_after_delimiter {#format-custom-row-after-delimiter}
Задает символ, который интерпретируется как разделитель после поля последнего столбца данных формата [CustomSeparated](../../interfaces/formats.md#format-customseparated).
Значение по умолчанию: `'\n'`.
## format_custom_row_between_delimiter {#format-custom-row-between-delimiter}
Задает символ, который интерпретируется как разделитель между строками данных формата [CustomSeparated](../../interfaces/formats.md#format-customseparated).
Значение по умолчанию: `''`.
## format_custom_result_before_delimiter {#format-custom-result-before-delimiter}
Задает символ, который интерпретируется как префикс перед результирующим набором данных формата [CustomSeparated](../../interfaces/formats.md#format-customseparated).
Значение по умолчанию: `''`.
## format_custom_result_after_delimiter {#format-custom-result-after-delimiter}
Задает символ, который интерпретируется как суффикс после результирующего набора данных формата [CustomSeparated](../../interfaces/formats.md#format-customseparated).
Значение по умолчанию: `''`.


@ -10,6 +10,7 @@ toc_title: clickhouse-format
Ключи:
- `--help` или `-h` — выводит описание ключей.
- `--query` — форматирует запрос любой длины и сложности.
- `--hilite` — добавляет подсветку синтаксиса с экранированием символов.
- `--oneline` — форматирование в одну строку.
- `--quiet` или `-q` — проверяет синтаксис без вывода результата.
@ -20,7 +21,22 @@ toc_title: clickhouse-format
## Примеры {#examples}
1. Подсветка синтаксиса и форматирование в одну строку:
1. Форматирование запроса:
```bash
$ clickhouse-format --query "select number from numbers(10) where number%2 order by number desc;"
```
Результат:
```text
SELECT number
FROM numbers(10)
WHERE number % 2
ORDER BY number DESC
```
2. Подсветка синтаксиса и форматирование в одну строку:
```bash
$ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);"
@ -32,7 +48,7 @@ $ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);"
SELECT sum(number) FROM numbers(5)
```
2. Несколько запросов в одной строке:
3. Несколько запросов в одной строке:
```bash
$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
@ -53,7 +69,7 @@ FROM
;
```
3. Обфускация:
4. Обфускация:
```bash
$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
@ -77,7 +93,7 @@ $ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWE
SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END;
```
4. Добавление обратного слеша:
5. Добавление обратного слеша:
```bash
$ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"


@ -0,0 +1,148 @@
---
toc_priority: 108
---
## exponentialMovingAverage {#exponential-moving-average}
Вычисляет экспоненциальное скользящее среднее за определенный промежуток времени.
**Синтаксис:**
```sql
exponentialMovingAverage(x)(value, timestamp)
```
Каждой точке `timestamp` на временном отрезке соответствует определенное значение `value`. Период полураспада — временной интервал `х`, через который вес значений уменьшается в 2 раза. Функция возвращает взвешенное среднее: чем старше временная точка, тем c меньшим весом считается соответствующее ей значение.
**Аргументы**
- `value` — входные значения. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) или [Decimal](../../../sql-reference/data-types/decimal.md).
- `timestamp` — параметр для упорядочивания значений. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) или [Decimal](../../../sql-reference/data-types/decimal.md).
**Параметры**
- `x` — период полураспада. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) или [Decimal](../../../sql-reference/data-types/decimal.md).
**Возвращаемые значения**
- Возвращает [экспоненциальное скользящее среднее](https://ru.wikipedia.org/wiki/Скользящая_средняя#Экспоненциально_взвешенное_скользящее_среднее) за прошедшее время `x` в последний момент времени.
Тип: [Float64](../../../sql-reference/data-types/float.md#float32-float64).
**Пример**
Исходная таблица:
``` text
┌──temperature─┬─timestamp──┐
│ 95 │ 1 │
│ 95 │ 2 │
│ 95 │ 3 │
│ 96 │ 4 │
│ 96 │ 5 │
│ 96 │ 6 │
│ 96 │ 7 │
│ 97 │ 8 │
│ 97 │ 9 │
│ 97 │ 10 │
│ 97 │ 11 │
│ 98 │ 12 │
│ 98 │ 13 │
│ 98 │ 14 │
│ 98 │ 15 │
│ 99 │ 16 │
│ 99 │ 17 │
│ 99 │ 18 │
│ 100 │ 19 │
│ 100 │ 20 │
└──────────────┴────────────┘
```
Запрос:
```sql
SELECT exponentialMovingAverage(5)(temperature, timestamp);
```
Результат:
``` text
┌──exponentialMovingAverage(5)(temperature, timestamp)──┐
│ 92.25779635374204 │
└───────────────────────────────────────────────────────┘
```
Запрос:
```sql
SELECT
value,
time,
round(exp_smooth, 3),
bar(exp_smooth, 0, 1, 50) AS bar
FROM
(
SELECT
(number = 0) OR (number >= 25) AS value,
number AS time,
exponentialMovingAverage(10)(value, time) OVER (Rows BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth
FROM numbers(50)
)
```
Result:
``` text
┌─value─┬─time─┬─round(exp_smooth, 3)─┬─bar────────────────────────────────────────┐
│ 1 │ 0 │ 0.067 │ ███▎ │
│ 0 │ 1 │ 0.062 │ ███ │
│ 0 │ 2 │ 0.058 │ ██▊ │
│ 0 │ 3 │ 0.054 │ ██▋ │
│ 0 │ 4 │ 0.051 │ ██▌ │
│ 0 │ 5 │ 0.047 │ ██▎ │
│ 0 │ 6 │ 0.044 │ ██▏ │
│ 0 │ 7 │ 0.041 │ ██ │
│ 0 │ 8 │ 0.038 │ █▊ │
│ 0 │ 9 │ 0.036 │ █▋ │
│ 0 │ 10 │ 0.033 │ █▋ │
│ 0 │ 11 │ 0.031 │ █▌ │
│ 0 │ 12 │ 0.029 │ █▍ │
│ 0 │ 13 │ 0.027 │ █▎ │
│ 0 │ 14 │ 0.025 │ █▎ │
│ 0 │ 15 │ 0.024 │ █▏ │
│ 0 │ 16 │ 0.022 │ █ │
│ 0 │ 17 │ 0.021 │ █ │
│ 0 │ 18 │ 0.019 │ ▊ │
│ 0 │ 19 │ 0.018 │ ▊ │
│ 0 │ 20 │ 0.017 │ ▋ │
│ 0 │ 21 │ 0.016 │ ▋ │
│ 0 │ 22 │ 0.015 │ ▋ │
│ 0 │ 23 │ 0.014 │ ▋ │
│ 0 │ 24 │ 0.013 │ ▋ │
│ 1 │ 25 │ 0.079 │ ███▊ │
│ 1 │ 26 │ 0.14 │ ███████ │
│ 1 │ 27 │ 0.198 │ █████████▊ │
│ 1 │ 28 │ 0.252 │ ████████████▌ │
│ 1 │ 29 │ 0.302 │ ███████████████ │
│ 1 │ 30 │ 0.349 │ █████████████████▍ │
│ 1 │ 31 │ 0.392 │ ███████████████████▌ │
│ 1 │ 32 │ 0.433 │ █████████████████████▋ │
│ 1 │ 33 │ 0.471 │ ███████████████████████▌ │
│ 1 │ 34 │ 0.506 │ █████████████████████████▎ │
│ 1 │ 35 │ 0.539 │ ██████████████████████████▊ │
│ 1 │ 36 │ 0.57 │ ████████████████████████████▌ │
│ 1 │ 37 │ 0.599 │ █████████████████████████████▊ │
│ 1 │ 38 │ 0.626 │ ███████████████████████████████▎ │
│ 1 │ 39 │ 0.651 │ ████████████████████████████████▌ │
│ 1 │ 40 │ 0.674 │ █████████████████████████████████▋ │
│ 1 │ 41 │ 0.696 │ ██████████████████████████████████▋ │
│ 1 │ 42 │ 0.716 │ ███████████████████████████████████▋ │
│ 1 │ 43 │ 0.735 │ ████████████████████████████████████▋ │
│ 1 │ 44 │ 0.753 │ █████████████████████████████████████▋ │
│ 1 │ 45 │ 0.77 │ ██████████████████████████████████████▍ │
│ 1 │ 46 │ 0.785 │ ███████████████████████████████████████▎ │
│ 1 │ 47 │ 0.8 │ ███████████████████████████████████████▊ │
│ 1 │ 48 │ 0.813 │ ████████████████████████████████████████▋ │
│ 1 │ 49 │ 0.825 │ █████████████████████████████████████████▎│
└───────┴──────┴──────────────────────┴────────────────────────────────────────────┘
```
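The sketch above also reproduces this step response: with `x = 10`, the first row is `(1 - 2^(-1/10)) * 1 ≈ 0.067`, and the second is `0.933 * 0.067 ≈ 0.062`, matching the table.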
View File
@ -748,7 +748,7 @@ SOURCE(REDIS(
!!! info "Note"
    The `column_family` and `where` fields cannot be used together with the `query` field. Also, exactly one of the data sources must be specified: `column_family` or `query`.
### PosgreSQL {#dicts-external_dicts_dict_sources-postgresql}
### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql}
Configuration example:
View File
@ -89,9 +89,39 @@ SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00
## sipHash128 {#hash_functions-siphash128}
Computes SipHash from a string.
Accepts a String-type argument. Returns FixedString(16).
Differs from sipHash64 in that the final xor-folding of the state is only done up to 128 bits.
Produces a 128-bit [SipHash](https://131002.net/siphash/) hash value. Differs from [sipHash64](#hash_functions-siphash64) in that the final xor-folding of the state is done up to 128 bits.
**Syntax**
``` sql
sipHash128(par1,...)
```
**Arguments**
The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/functions/hash-functions.md).
**Returned value**
A 128-bit `SipHash` hash value.
Type: [FixedString(16)](../../sql-reference/data-types/fixedstring.md).
**Example**
Query:
``` sql
SELECT hex(sipHash128('foo', '\x01', 3));
```
Result:
``` text
┌─hex(sipHash128('foo', '', 3))────┐
│ 9DE516A64A414D4B1B609415E4523F24 │
└──────────────────────────────────┘
```
## cityHash64 {#cityhash64}
@ -459,30 +489,38 @@ SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:
## murmurHash3_128 {#murmurhash3-128}
Produces a [MurmurHash3](https://github.com/aappleby/smhasher) hash value.
Produces a 128-bit [MurmurHash3](https://github.com/aappleby/smhasher) hash value.
**Syntax**
``` sql
murmurHash3_128( expr )
murmurHash3_128(expr)
```
**Arguments**
- `expr` — an [expression](../syntax.md#syntax-expressions) returning a [String](../../sql-reference/functions/hash-functions.md) value.
- `expr` — a list of [expressions](../../sql-reference/syntax.md#syntax-expressions). [String](../../sql-reference/data-types/string.md).
**Returned value**
A hash value of type [FixedString(16)](../../sql-reference/functions/hash-functions.md).
A 128-bit `MurmurHash3` hash value.
Type: [FixedString(16)](../../sql-reference/data-types/fixedstring.md).
**Example**
Query:
``` sql
SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type;
SELECT hex(murmurHash3_128('foo', 'foo', 'foo'));
```
Result:
``` text
┌─MurmurHash3──────────────────────┬─type───┐
368A1A311CB7342253354B548E7E7E71 │ String
└──────────────────────────────────────────┘
┌─hex(murmurHash3_128('foo', 'foo', 'foo'))─┐
F8F7AD9B6CD4CF117A71E277E2EC2931
└──────────────────────────────────────────┘
```
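For a rough cross-check outside ClickHouse, the third-party `mmh3` Python package (an assumed dependency, installed separately) exposes the same 128-bit MurmurHash3 x64 primitive. This only illustrates obtaining a 128-bit digest; byte-for-byte equality with ClickHouse's multi-argument combination is not guaranteed:

```python
import mmh3  # third-party: pip install mmh3 (assumed available)

# 128-bit MurmurHash3, x64 variant, of a single byte string.
digest = mmh3.hash_bytes(b"foo")  # returns 16 raw bytes
print(digest.hex().upper())
```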
## xxHash32, xxHash64 {#hash-functions-xxhash32-xxhash64}
View File
@ -492,8 +492,9 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
/// Override the default paths.
/// Data paths.
const std::string data_file = config_d / "data-paths.xml";
if (!fs::exists(data_file))
{
std::string data_file = config_d / "data-paths.xml";
WriteBufferFromFile out(data_file);
out << "<clickhouse>\n"
" <path>" << data_path.string() << "</path>\n"
@ -503,12 +504,14 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
"</clickhouse>\n";
out.sync();
out.finalize();
fs::permissions(data_file, fs::perms::owner_read, fs::perm_options::replace);
fmt::print("Data path configuration override is saved to file {}.\n", data_file);
}
/// Logger.
const std::string logger_file = config_d / "logger.xml";
if (!fs::exists(logger_file))
{
std::string logger_file = config_d / "logger.xml";
WriteBufferFromFile out(logger_file);
out << "<clickhouse>\n"
" <logger>\n"
@ -518,12 +521,14 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
"</clickhouse>\n";
out.sync();
out.finalize();
fs::permissions(logger_file, fs::perms::owner_read, fs::perm_options::replace);
fmt::print("Log path configuration override is saved to file {}.\n", logger_file);
}
/// User directories.
const std::string user_directories_file = config_d / "user-directories.xml";
if (!fs::exists(user_directories_file))
{
std::string user_directories_file = config_d / "user-directories.xml";
WriteBufferFromFile out(user_directories_file);
out << "<clickhouse>\n"
" <user_directories>\n"
@ -534,12 +539,14 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
"</clickhouse>\n";
out.sync();
out.finalize();
fs::permissions(user_directories_file, fs::perms::owner_read, fs::perm_options::replace);
fmt::print("User directory path configuration override is saved to file {}.\n", user_directories_file);
}
/// OpenSSL.
const std::string openssl_file = config_d / "openssl.xml";
if (!fs::exists(openssl_file))
{
std::string openssl_file = config_d / "openssl.xml";
WriteBufferFromFile out(openssl_file);
out << "<clickhouse>\n"
" <openSSL>\n"
@ -552,6 +559,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
"</clickhouse>\n";
out.sync();
out.finalize();
fs::permissions(openssl_file, fs::perms::owner_read, fs::perm_options::replace);
fmt::print("OpenSSL path configuration override is saved to file {}.\n", openssl_file);
}
}
@ -761,12 +769,13 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
#if defined(__linux__)
fmt::print("Setting capabilities for clickhouse binary. This is optional.\n");
std::string command = fmt::format("command -v setcap >/dev/null"
" && echo > {0} && chmod a+x {0} && {0} && setcap 'cap_net_admin,cap_ipc_lock,cap_sys_nice+ep' {0} && {0} && rm {0}"
" && setcap 'cap_net_admin,cap_ipc_lock,cap_sys_nice+ep' {1}"
" && command -v capsh >/dev/null"
" && capsh --has-p=cap_net_admin,cap_ipc_lock,cap_sys_nice+ep >/dev/null 2>&1"
" && setcap 'cap_net_admin,cap_ipc_lock,cap_sys_nice+ep' {0}"
" || echo \"Cannot set 'net_admin' or 'ipc_lock' or 'sys_nice' capability for clickhouse binary."
" This is optional. Taskstats accounting will be disabled."
" To enable taskstats accounting you may add the required capability later manually.\"",
"/tmp/test_setcap.sh", fs::canonical(main_bin_path).string());
fs::canonical(main_bin_path).string());
executeScript(command);
#endif
View File
@ -49,7 +49,7 @@
<!-- Internal port and hostname -->
<hostname>localhost</hostname>
<port>44444</port>
<port>9234</port>
</server>
<!-- Add more servers here -->
View File
@ -14,7 +14,7 @@
<server>
<id>1</id>
<hostname>localhost</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
View File
@ -328,7 +328,11 @@ struct Checker
{
checkRequiredInstructions();
}
} checker __attribute__((init_priority(101))); /// Run before other static initializers.
} checker
#ifndef __APPLE__
__attribute__((init_priority(101))) /// Run before other static initializers.
#endif
;
}
View File
@ -203,6 +203,21 @@ public:
data(place).count += !assert_cast<const ColumnNullable &>(*columns[0]).isNullAt(row_num);
}
void addBatchSinglePlace(
size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena *, ssize_t if_argument_pos) const override
{
auto & nc = assert_cast<const ColumnNullable &>(*columns[0]);
if (if_argument_pos >= 0)
{
const auto & flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();
data(place).count += countBytesInFilterWithNull(flags, nc.getNullMapData().data());
}
else
{
data(place).count += batch_size - countBytesInFilter(nc.getNullMapData().data(), batch_size);
}
}
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
{
data(place).count += data(rhs).count;
View File
@ -549,10 +549,5 @@ if (ENABLE_TESTS AND USE_GTEST)
clickhouse_common_zookeeper
string_utils)
# For __udivmodti4 referenced in Core/tests/gtest_DecimalFunctions.cpp
if (OS_DARWIN AND COMPILER_GCC)
target_link_libraries(unit_tests_dbms PRIVATE gcc)
endif ()
add_check(unit_tests_dbms)
endif ()
View File
@ -119,6 +119,24 @@ std::optional<Elf::Section> Elf::findSectionByName(const char * name) const
String Elf::getBuildID() const
{
/// Section headers are the first choice for a debuginfo file
if (String build_id; iterateSections([&build_id](const Section & section, size_t)
{
if (section.header.sh_type == SHT_NOTE)
{
build_id = Elf::getBuildID(section.begin(), section.size());
if (!build_id.empty())
{
return true;
}
}
return false;
}))
{
return build_id;
}
/// fallback to PHDR
for (size_t idx = 0; idx < header->e_phnum; ++idx)
{
const ElfPhdr & phdr = program_headers[idx];
@ -126,6 +144,7 @@ String Elf::getBuildID() const
if (phdr.p_type == PT_NOTE)
return getBuildID(mapped + phdr.p_offset, phdr.p_filesz);
}
return {};
}
View File
@ -54,7 +54,8 @@ public:
const char * end() const { return mapped + elf_size; }
size_t size() const { return elf_size; }
/// Obtain build id from PT_NOTES section of program headers. Return empty string if does not exist.
/// Obtain build id from SHT_NOTE of section headers (fallback to PT_NOTES section of program headers).
/// Return empty string if does not exist.
/// The string is returned in binary. Note that "readelf -n ./clickhouse-server" prints it in hex.
String getBuildID() const;
static String getBuildID(const char * nhdr_pos, size_t size);
View File
@ -165,7 +165,7 @@ protected:
std::function<void()> fatal_error_callback;
/// It is used to avoid enabling the query profiler when you have multiple ThreadStatus in the same thread
bool query_profiled_enabled = true;
bool query_profiler_enabled = true;
/// Requires access to query_id.
friend class MemoryTrackerThreadSwitcher;
@ -207,7 +207,8 @@ public:
void disableProfiling()
{
query_profiled_enabled = false;
assert(!query_profiler_real && !query_profiler_cpu);
query_profiler_enabled = false;
}
/// Starts new query and create new thread group for it, current thread becomes master thread of the query
View File
@ -279,37 +279,33 @@ private:
// That may lead later to reading unallocated data from underlying PaddedPODArray
// due to assumption that it is safe to read up to 15 bytes past end.
const auto pad_to_next_block = block_size == 1 ? 0 : 1;
for (size_t r = 0; r < input_rows_count; ++r)
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
{
resulting_size += (input_column->getDataAt(r).size / block_size + pad_to_next_block) * block_size + 1;
resulting_size += (input_column->getDataAt(row_idx).size / block_size + pad_to_next_block) * block_size + 1;
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
resulting_size += tag_size;
}
#if defined(MEMORY_SANITIZER)
encrypted_result_column_data.resize_fill(resulting_size, 0xFF);
#else
encrypted_result_column_data.resize(resulting_size);
#endif
}
auto * encrypted = encrypted_result_column_data.data();
KeyHolder<mode> key_holder;
for (size_t r = 0; r < input_rows_count; ++r)
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
{
const auto key_value = key_holder.setKey(key_size, key_column->getDataAt(r));
const auto key_value = key_holder.setKey(key_size, key_column->getDataAt(row_idx));
auto iv_value = StringRef{};
if (iv_column)
{
iv_value = iv_column->getDataAt(r);
iv_value = iv_column->getDataAt(row_idx);
/// If the length is zero (empty string is passed) it should be treated as no IV.
if (iv_value.size == 0)
iv_value.data = nullptr;
}
const StringRef input_value = input_column->getDataAt(r);
const StringRef input_value = input_column->getDataAt(row_idx);
if constexpr (mode != CipherMode::MySQLCompatibility)
{
@ -348,7 +344,7 @@ private:
// 1.a.2 Set AAD
if (aad_column)
{
const auto aad_data = aad_column->getDataAt(r);
const auto aad_data = aad_column->getDataAt(row_idx);
int tmp_len = 0;
if (aad_data.size != 0 && EVP_EncryptUpdate(evp_ctx, nullptr, &tmp_len,
reinterpret_cast<const unsigned char *>(aad_data.data), aad_data.size) != 1)
@ -408,7 +404,7 @@ private:
};
/// AES_decrypt(string, key, block_mode[, init_vector])
/// decrypt(string, key, block_mode[, init_vector])
template <typename Impl>
class FunctionDecrypt : public IFunction
{
@ -471,7 +467,9 @@ private:
ColumnPtr result_column;
if (arguments.size() <= 3)
{
result_column = doDecrypt(evp_cipher, input_rows_count, input_column, key_column, nullptr, nullptr);
}
else
{
const auto iv_column = arguments[3].column;
@ -548,12 +546,14 @@ private:
{
size_t resulting_size = 0;
for (size_t r = 0; r < input_rows_count; ++r)
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
{
size_t string_size = input_column->getDataAt(r).size;
size_t string_size = input_column->getDataAt(row_idx).size;
resulting_size += string_size + 1; /// With terminating zero.
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
{
if (string_size > 0)
{
if (string_size < tag_size)
throw Exception("Encrypted data is smaller than the size of additional data for AEAD mode, cannot decrypt.",
@ -562,46 +562,43 @@ private:
resulting_size -= tag_size;
}
}
}
#if defined(MEMORY_SANITIZER)
// Pre-fill result column with values to prevent MSAN from dropping dead on
// aes-X-ecb mode with "WARNING: MemorySanitizer: use-of-uninitialized-value".
// This is most likely to be caused by the underlying assembler implementation:
// see crypto/aes/aesni-x86_64.s, function aesni_ecb_encrypt
// which msan seems to fail instrument correctly.
decrypted_result_column_data.resize_fill(resulting_size, 0xFF);
#else
decrypted_result_column_data.resize(resulting_size);
#endif
}
auto * decrypted = decrypted_result_column_data.data();
KeyHolder<mode> key_holder;
for (size_t r = 0; r < input_rows_count; ++r)
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
{
// 0: prepare key if required
auto key_value = key_holder.setKey(key_size, key_column->getDataAt(r));
auto key_value = key_holder.setKey(key_size, key_column->getDataAt(row_idx));
auto iv_value = StringRef{};
if (iv_column)
{
iv_value = iv_column->getDataAt(r);
iv_value = iv_column->getDataAt(row_idx);
/// If the length is zero (empty string is passed) it should be treated as no IV.
if (iv_value.size == 0)
iv_value.data = nullptr;
}
auto input_value = input_column->getDataAt(r);
auto input_value = input_column->getDataAt(row_idx);
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
{
if (input_value.size > 0)
{
// empty plaintext results in empty ciphertext + tag, means there should be at least tag_size bytes.
if (input_value.size < tag_size)
throw Exception(fmt::format("Encrypted data is too short: only {} bytes, "
"should contain at least {} bytes of a tag.",
input_value.size, block_size, tag_size), ErrorCodes::BAD_ARGUMENTS);
input_value.size -= tag_size;
}
}
if constexpr (mode != CipherMode::MySQLCompatibility)
{
@ -619,8 +616,9 @@ private:
}
}
// Avoid extra work on empty ciphertext/plaintext for some ciphers
if (!(input_value.size == 0 && block_size == 1 && mode != CipherMode::RFC5116_AEAD_AES_GCM))
/// Avoid extra work on empty ciphertext/plaintext. Always decrypt empty to empty.
/// This makes sense for default implementation for NULLs.
if (input_value.size > 0)
{
// 1: Init CTX
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
@ -641,7 +639,7 @@ private:
// 1.a.2: Set AAD if present
if (aad_column)
{
StringRef aad_data = aad_column->getDataAt(r);
StringRef aad_data = aad_column->getDataAt(row_idx);
int tmp_len = 0;
if (aad_data.size != 0 && EVP_DecryptUpdate(evp_ctx, nullptr, &tmp_len,
reinterpret_cast<const unsigned char *>(aad_data.data), aad_data.size) != 1)
View File
@ -53,8 +53,6 @@ namespace ErrorCodes
constexpr const char * TASK_PROCESSED_OUT_REASON = "Task has been already processed";
namespace
{
/** Caveats: usage of locks in ZooKeeper is incorrect in 99% of cases,
* and highlights your poor understanding of distributed systems.
@ -104,14 +102,29 @@ public:
void unlock()
{
if (!locked)
return;
locked = false;
if (zookeeper->expired())
{
LOG_WARNING(log, "Lock is lost, because session was expired. Path: {}, message: {}", lock_path, lock_message);
return;
}
Coordination::Stat stat;
std::string dummy;
/// NOTE It will throw if session expired after we checked it above
bool result = zookeeper->tryGet(lock_path, dummy, &stat);
if (result && stat.ephemeralOwner == zookeeper->getClientID())
zookeeper->remove(lock_path, -1);
else if (result)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Lock is lost, it has another owner. Path: {}, message: {}, owner: {}, our id: {}",
lock_path, lock_message, stat.ephemeralOwner, zookeeper->getClientID());
else
LOG_WARNING(log, "Lock is lost. It is normal if session was expired. Path: {}/{}", lock_path, lock_message);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Lock is lost, node does not exist. Path: {}, message: {}", lock_path, lock_message);
}
bool tryLock()
@ -119,18 +132,16 @@ public:
std::string dummy;
Coordination::Error code = zookeeper->tryCreate(lock_path, lock_message, zkutil::CreateMode::Ephemeral, dummy);
if (code == Coordination::Error::ZNODEEXISTS)
if (code == Coordination::Error::ZOK)
{
return false;
locked = true;
}
else if (code == Coordination::Error::ZOK)
{
return true;
}
else
else if (code != Coordination::Error::ZNODEEXISTS)
{
throw Coordination::Exception(code);
}
return locked;
}
private:
@ -139,6 +150,7 @@ private:
std::string lock_path;
std::string lock_message;
Poco::Logger * log;
bool locked = false;
};
@ -148,8 +160,6 @@ std::unique_ptr<ZooKeeperLock> createSimpleZooKeeperLock(
return std::make_unique<ZooKeeperLock>(zookeeper, lock_prefix, lock_name, lock_message);
}
}
DDLWorker::DDLWorker(
int pool_size_,
@ -644,6 +654,10 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper)
zookeeper->create(active_node_path, {}, zkutil::CreateMode::Ephemeral);
}
/// We must hold the lock until task execution status is committed to ZooKeeper,
/// otherwise another replica may try to execute query again.
std::unique_ptr<ZooKeeperLock> execute_on_leader_lock;
/// Step 2: Execute query from the task.
if (!task.was_executed)
{
@ -674,7 +688,7 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper)
if (task.execute_on_leader)
{
tryExecuteQueryOnLeaderReplica(task, storage, rewritten_query, task.entry_path, zookeeper);
tryExecuteQueryOnLeaderReplica(task, storage, rewritten_query, task.entry_path, zookeeper, execute_on_leader_lock);
}
else
{
@ -761,7 +775,8 @@ bool DDLWorker::tryExecuteQueryOnLeaderReplica(
StoragePtr storage,
const String & rewritten_query,
const String & /*node_path*/,
const ZooKeeperPtr & zookeeper)
const ZooKeeperPtr & zookeeper,
std::unique_ptr<ZooKeeperLock> & execute_on_leader_lock)
{
StorageReplicatedMergeTree * replicated_storage = dynamic_cast<StorageReplicatedMergeTree *>(storage.get());
@ -799,7 +814,7 @@ bool DDLWorker::tryExecuteQueryOnLeaderReplica(
pcg64 rng(randomSeed());
auto lock = createSimpleZooKeeperLock(zookeeper, shard_path, "lock", task.host_id_str);
execute_on_leader_lock = createSimpleZooKeeperLock(zookeeper, shard_path, "lock", task.host_id_str);
Stopwatch stopwatch;
@ -829,7 +844,7 @@ bool DDLWorker::tryExecuteQueryOnLeaderReplica(
throw Exception(ErrorCodes::NOT_A_LEADER, "Cannot execute initial query on non-leader replica");
/// Any replica which is leader tries to take lock
if (status.is_leader && lock->tryLock())
if (status.is_leader && execute_on_leader_lock->tryLock())
{
/// In replicated merge tree we can have multiple leaders. So we can
/// be "leader" and took lock, but another "leader" replica may have
@ -858,8 +873,6 @@ bool DDLWorker::tryExecuteQueryOnLeaderReplica(
executed_by_us = true;
break;
}
lock->unlock();
}
/// Waiting for someone who will execute query and change is_executed_path node
View File
@ -38,7 +38,7 @@ struct DDLTaskBase;
using DDLTaskPtr = std::unique_ptr<DDLTaskBase>;
using ZooKeeperPtr = std::shared_ptr<zkutil::ZooKeeper>;
class AccessRightsElements;
class ZooKeeperLock;
class DDLWorker
{
@ -94,7 +94,8 @@ protected:
StoragePtr storage,
const String & rewritten_query,
const String & node_path,
const ZooKeeperPtr & zookeeper);
const ZooKeeperPtr & zookeeper,
std::unique_ptr<ZooKeeperLock> & execute_on_leader_lock);
bool tryExecuteQuery(const String & query, DDLTaskBase & task, const ZooKeeperPtr & zookeeper);
View File
@ -28,6 +28,8 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & v
}
/// cast of numeric constant in condition to UInt8
/// Note: this solution is ad-hoc and only implemented for yandex.metrica use case.
/// We should allow any constant condition (or maybe remove this optimization completely) later.
if (const auto * function = condition->as<ASTFunction>())
{
if (isFunctionCast(function))
@ -49,6 +51,16 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & v
}
}
}
else if (function->name == "toUInt8" || function->name == "toInt8")
{
if (const auto * expr_list = function->arguments->as<ASTExpressionList>())
{
if (expr_list->children.size() != 1)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} must have exactly one argument", function->name);
return tryExtractConstValueFromCondition(expr_list->children.at(0), value);
}
}
}
return false;
View File
@ -96,6 +96,22 @@ std::shared_ptr<TSystemLog> createSystemLog(
}
ASTPtr ISystemLog::getCreateTableQueryClean(const StorageID & table_id, ContextPtr context)
{
DatabasePtr database = DatabaseCatalog::instance().getDatabase(table_id.database_name);
ASTPtr old_ast = database->getCreateTableQuery(table_id.table_name, context);
auto & old_create_query_ast = old_ast->as<ASTCreateQuery &>();
/// Reset UUID
old_create_query_ast.uuid = UUIDHelpers::Nil;
/// Existing table has default settings (i.e. `index_granularity = 8192`), reset them.
if (ASTStorage * storage = old_create_query_ast.storage)
{
storage->reset(storage->settings);
}
return old_ast;
}
SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConfiguration & config)
{
query_log = createSystemLog<QueryLog>(global_context, "system", "query_log", config, "query_log");
View File
@ -61,6 +61,7 @@ namespace DB
namespace ErrorCodes
{
extern const int TIMEOUT_EXCEEDED;
extern const int LOGICAL_ERROR;
}
#define DBMS_SYSTEM_LOG_QUEUE_SIZE 1048576
@ -83,13 +84,18 @@ class ISystemLog
{
public:
virtual String getName() = 0;
virtual ASTPtr getCreateTableQuery() = 0;
/// force -- force table creation (used for SYSTEM FLUSH LOGS)
virtual void flush(bool force = false) = 0;
virtual void prepareTable() = 0;
virtual void startup() = 0;
virtual void shutdown() = 0;
virtual ~ISystemLog() = default;
/// returns CREATE TABLE query, but with removed:
/// - UUID
/// - SETTINGS (for MergeTree)
/// That way it can be used to compare with the SystemLog::getCreateTableQuery()
static ASTPtr getCreateTableQueryClean(const StorageID & table_id, ContextPtr context);
};
@ -171,7 +177,7 @@ public:
return LogElement::name();
}
ASTPtr getCreateTableQuery() override;
ASTPtr getCreateTableQuery();
protected:
Poco::Logger * log;
@ -181,6 +187,8 @@ private:
const StorageID table_id;
const String storage_def;
StoragePtr table;
String create_query;
String old_create_query;
bool is_prepared = false;
const size_t flush_interval_milliseconds;
ThreadFromGlobalPool saving_thread;
@ -228,6 +236,7 @@ SystemLog<LogElement>::SystemLog(
: WithContext(context_)
, table_id(database_name_, table_name_)
, storage_def(storage_def_)
, create_query(serializeAST(*getCreateTableQuery()))
, flush_interval_milliseconds(flush_interval_milliseconds_)
{
assert(database_name_ == DatabaseCatalog::SYSTEM_DATABASE);
@ -520,14 +529,14 @@ void SystemLog<LogElement>::prepareTable()
if (table)
{
auto metadata_columns = table->getInMemoryMetadataPtr()->getColumns();
auto old_query = InterpreterCreateQuery::formatColumns(metadata_columns);
if (old_create_query.empty())
{
old_create_query = serializeAST(*getCreateTableQueryClean(table_id, getContext()));
if (old_create_query.empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty CREATE QUERY for {}", backQuoteIfNeed(table_id.table_name));
}
auto ordinary_columns = LogElement::getNamesAndTypes();
auto alias_columns = LogElement::getNamesAndAliases();
auto current_query = InterpreterCreateQuery::formatColumns(ordinary_columns, alias_columns);
if (serializeAST(*old_query) != serializeAST(*current_query))
if (old_create_query != create_query)
{
/// Rename the existing table.
int suffix = 0;
@ -553,9 +562,11 @@ void SystemLog<LogElement>::prepareTable()
LOG_DEBUG(
log,
"Existing table {} for system log has obsolete or different structure. Renaming it to {}",
"Existing table {} for system log has obsolete or different structure. Renaming it to {}.\nOld: {}\nNew: {}\n.",
description,
backQuoteIfNeed(to.table));
backQuoteIfNeed(to.table),
old_create_query,
create_query);
auto query_context = Context::createCopy(context);
query_context->makeQueryContext();
@ -573,17 +584,17 @@ void SystemLog<LogElement>::prepareTable()
/// Create the table.
LOG_DEBUG(log, "Creating new table {} for {}", description, LogElement::name());
auto create = getCreateTableQuery();
auto query_context = Context::createCopy(context);
query_context->makeQueryContext();
InterpreterCreateQuery interpreter(create, query_context);
auto create_query_ast = getCreateTableQuery();
InterpreterCreateQuery interpreter(create_query_ast, query_context);
interpreter.setInternal(true);
interpreter.execute();
table = DatabaseCatalog::instance().getTable(table_id, getContext());
old_create_query.clear();
}
is_prepared = true;
View File
@ -310,7 +310,7 @@ void ThreadStatus::resetPerformanceCountersLastUsage()
void ThreadStatus::initQueryProfiler()
{
if (!query_profiled_enabled)
if (!query_profiler_enabled)
return;
/// query profilers are useless without trace collector
@ -326,11 +326,11 @@ void ThreadStatus::initQueryProfiler()
{
if (settings.query_profiler_real_time_period_ns > 0)
query_profiler_real = std::make_unique<QueryProfilerReal>(thread_id,
/* period */ static_cast<UInt32>(settings.query_profiler_real_time_period_ns));
/* period= */ static_cast<UInt32>(settings.query_profiler_real_time_period_ns));
if (settings.query_profiler_cpu_time_period_ns > 0)
query_profiler_cpu = std::make_unique<QueryProfilerCPU>(thread_id,
/* period */ static_cast<UInt32>(settings.query_profiler_cpu_time_period_ns));
/* period= */ static_cast<UInt32>(settings.query_profiler_cpu_time_period_ns));
}
catch (...)
{
View File
@ -14,7 +14,6 @@ namespace ErrorCodes
extern const int TOO_DEEP_AST;
extern const int BAD_ARGUMENTS;
extern const int UNKNOWN_ELEMENT_IN_AST;
extern const int LOGICAL_ERROR;
}
@ -48,23 +47,6 @@ size_t IAST::checkSize(size_t max_size) const
return res;
}
void IAST::reset(IAST *& field)
{
if (field == nullptr)
return;
const auto child = std::find_if(children.begin(), children.end(), [field](const auto & p)
{
return p.get() == field;
});
if (child == children.end())
throw Exception("AST subtree not found in children", ErrorCodes::LOGICAL_ERROR);
children.erase(child);
field = nullptr;
}
IAST::Hash IAST::getTreeHash() const
{
View File
@ -157,7 +157,23 @@ public:
set(field, child);
}
void reset(IAST *& field);
template <typename T>
void reset(T * & field)
{
if (field == nullptr)
return;
const auto child = std::find_if(children.begin(), children.end(), [field](const auto & p)
{
return p.get() == field;
});
if (child == children.end())
throw Exception("AST subtree not found in children", ErrorCodes::LOGICAL_ERROR);
children.erase(child);
field = nullptr;
}
/// Convert to a string.
View File
@ -208,8 +208,14 @@ KeeperTCPHandler::KeeperTCPHandler(IServer & server_, const Poco::Net::StreamSoc
, log(&Poco::Logger::get("KeeperTCPHandler"))
, global_context(Context::createCopy(server.context()))
, keeper_dispatcher(global_context->getKeeperDispatcher())
, operation_timeout(0, global_context->getConfigRef().getUInt("keeper_server.operation_timeout_ms", Coordination::DEFAULT_OPERATION_TIMEOUT_MS) * 1000)
, session_timeout(0, global_context->getConfigRef().getUInt("keeper_server.session_timeout_ms", Coordination::DEFAULT_SESSION_TIMEOUT_MS) * 1000)
, operation_timeout(
0,
global_context->getConfigRef().getUInt(
"keeper_server.coordination_settings.operation_timeout_ms", Coordination::DEFAULT_OPERATION_TIMEOUT_MS) * 1000)
, session_timeout(
0,
global_context->getConfigRef().getUInt(
"keeper_server.coordination_settings.session_timeout_ms", Coordination::DEFAULT_SESSION_TIMEOUT_MS) * 1000)
, poll_wrapper(std::make_unique<SocketInterruptablePollWrapper>(socket_))
, responses(std::make_unique<ThreadSafeResponseQueue>(std::numeric_limits<size_t>::max()))
, last_op(std::make_unique<LastOp>(EMPTY_LAST_OP))
View File
@ -11,8 +11,7 @@ from github import Github
from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from ci_config import build_config_to_string
from build_download_helper import get_build_config_for_check, get_build_urls
from build_download_helper import get_build_name_for_check, get_build_urls
from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
@ -54,11 +53,9 @@ if __name__ == "__main__":
docker_image = get_image_with_version(temp_path, IMAGE_NAME)
build_config = get_build_config_for_check(check_name)
print(build_config)
build_config_str = build_config_to_string(build_config)
print(build_config_str)
urls = get_build_urls(build_config_str, reports_path)
build_name = get_build_name_for_check(check_name)
print(build_name)
urls = get_build_urls(build_name, reports_path)
if not urls:
raise Exception("No build URLs found")
View File
@ -12,19 +12,17 @@ from pr_info import PRInfo
from get_robot_token import get_best_robot_token
from version_helper import get_version_from_repo, update_version_local
from ccache_utils import get_ccache_if_not_exists, upload_ccache
from ci_config import build_config_to_string, CI_CONFIG
from ci_config import CI_CONFIG
from docker_pull_helper import get_image_with_version
def get_build_config(build_check_name, build_number):
def get_build_config(build_check_name, build_name):
if build_check_name == 'ClickHouse build check (actions)':
build_config_name = 'build_config'
elif build_check_name == 'ClickHouse special build check (actions)':
build_config_name = 'special_build_config'
else:
raise Exception(f"Unknown build check name {build_check_name}")
return CI_CONFIG[build_config_name][build_number]
return CI_CONFIG[build_config_name][build_name]
def _can_export_binaries(build_config):
@ -94,9 +92,9 @@ if __name__ == "__main__":
caches_path = os.getenv("CACHES_PATH", temp_path)
build_check_name = sys.argv[1]
build_number = int(sys.argv[2])
build_name = sys.argv[2]
build_config = get_build_config(build_check_name, build_number)
build_config = get_build_config(build_check_name, build_name)
if not os.path.exists(temp_path):
os.makedirs(temp_path)
@ -125,7 +123,6 @@ if __name__ == "__main__":
logging.info("Updated local files with version")
build_name = build_config_to_string(build_config)
logging.info("Build short name %s", build_name)
subprocess.check_call(f"echo 'BUILD_NAME=build_urls_{build_name}' >> $GITHUB_ENV", shell=True)
@ -161,7 +158,12 @@ if __name__ == "__main__":
logging.info("Will upload cache")
upload_ccache(ccache_path, s3_helper, pr_info.number, temp_path)
s3_path_prefix = str(pr_info.number) + "/" + pr_info.sha + "/" + build_check_name.lower().replace(' ', '_') + "/" + build_name
# for release pull requests we use branch names prefixes, not pr numbers
if 'release' in pr_info.labels or 'release-lts' in pr_info.labels:
s3_path_prefix = pr_info.head_ref + "/" + pr_info.sha + "/" + build_name
else:
s3_path_prefix = str(pr_info.number) + "/" + pr_info.sha + "/" + build_name
if os.path.exists(log_path):
log_url = s3_helper.upload_build_file_to_s3(log_path, s3_path_prefix + "/" + os.path.basename(log_path))
logging.info("Log url %s", log_url)
View File
@ -8,17 +8,17 @@ import time
import requests
from ci_config import CI_CONFIG, build_config_to_string
from ci_config import CI_CONFIG
DOWNLOAD_RETRIES_COUNT = 5
def get_build_config_for_check(check_name):
return CI_CONFIG["tests_config"][check_name]['required_build_properties']
def get_build_name_for_check(check_name):
return CI_CONFIG['tests_config'][check_name]['required_build']
def get_build_urls(build_config_str, reports_path):
def get_build_urls(build_name, reports_path):
for root, _, files in os.walk(reports_path):
for f in files:
if build_config_str in f :
if build_name in f :
logging.info("Found build report json %s", f)
with open(os.path.join(root, f), 'r', encoding='utf-8') as file_handler:
build_report = json.load(file_handler)
@ -72,11 +72,8 @@ def download_builds(result_path, build_urls, filter_fn):
dowload_build_with_progress(url, os.path.join(result_path, fname))
def download_builds_filter(check_name, reports_path, result_path, filter_fn=lambda _: True):
build_config = get_build_config_for_check(check_name)
print(build_config)
build_config_str = build_config_to_string(build_config)
print(build_config_str)
urls = get_build_urls(build_config_str, reports_path)
build_name = get_build_name_for_check(check_name)
urls = get_build_urls(build_name, reports_path)
print(urls)
if not urls:
View File
@ -1,8 +1,8 @@
#!/usr/bin/env python3
CI_CONFIG = {
"build_config": [
{
"build_config": {
"package_release": {
"compiler": "clang-13",
"build_type": "",
"sanitizer": "",
@ -13,7 +13,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"performance": {
"compiler": "clang-13",
"build_type": "",
"sanitizer": "",
@ -23,7 +23,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"binary_gcc": {
"compiler": "gcc-11",
"build_type": "",
"sanitizer": "",
@ -33,7 +33,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"package_asan": {
"compiler": "clang-13",
"build_type": "",
"sanitizer": "address",
@ -43,7 +43,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"package_ubsan": {
"compiler": "clang-13",
"build_type": "",
"sanitizer": "undefined",
@ -53,7 +53,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"package_tsan": {
"compiler": "clang-13",
"build_type": "",
"sanitizer": "thread",
@ -63,7 +63,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"package_msan": {
"compiler": "clang-13",
"build_type": "",
"sanitizer": "memory",
@ -73,7 +73,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"package_debug": {
"compiler": "clang-13",
"build_type": "debug",
"sanitizer": "",
@ -83,7 +83,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"binary_release": {
"compiler": "clang-13",
"build_type": "",
"sanitizer": "",
@ -92,10 +92,8 @@ CI_CONFIG = {
"splitted": "unsplitted",
"tidy": "disable",
"with_coverage": False
}
],
"special_build_config": [
{
},
"package_tidy": {
"compiler": "clang-13",
"build_type": "debug",
"sanitizer": "",
@ -105,7 +103,7 @@ CI_CONFIG = {
"tidy": "enable",
"with_coverage": False
},
{
"binary_splitted": {
"compiler": "clang-13",
"build_type": "",
"sanitizer": "",
@ -115,7 +113,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"binary_darwin": {
"compiler": "clang-13-darwin",
"build_type": "",
"sanitizer": "",
@ -125,7 +123,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"binary_aarch64": {
"compiler": "clang-13-aarch64",
"build_type": "",
"sanitizer": "",
@ -135,7 +133,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"binary_freebsd": {
"compiler": "clang-13-freebsd",
"build_type": "",
"sanitizer": "",
@ -145,7 +143,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"binary_darwin_aarch64": {
"compiler": "clang-13-darwin-aarch64",
"build_type": "",
"sanitizer": "",
@ -155,7 +153,7 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
},
{
"binary_ppc64le": {
"compiler": "clang-13-ppc64le",
"build_type": "",
"sanitizer": "",
@ -165,550 +163,139 @@ CI_CONFIG = {
"tidy": "disable",
"with_coverage": False
}
],
},
"tests_config": {
"Stateful tests (address, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "address",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_asan",
},
"Stateful tests (thread, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "thread",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_tsan",
},
"Stateful tests (memory, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "memory",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_msan",
},
"Stateful tests (ubsan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "undefined",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_ubsan",
},
"Stateful tests (debug, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "debug",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_debug",
},
"Stateful tests (release, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Stateful tests (release, DatabaseOrdinary, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Stateful tests (release, DatabaseReplicated, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Stateless tests (address, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "address",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_asan",
},
"Stateless tests (thread, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "thread",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_tsan",
},
"Stateless tests (memory, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "memory",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_msan",
},
"Stateless tests (ubsan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "undefined",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_ubsan",
},
"Stateless tests (debug, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "debug",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_debug",
},
"Stateless tests (release, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Stateless tests (release, wide parts enabled, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Stateless tests (release, DatabaseOrdinary, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Stateless tests (release, DatabaseReplicated, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Stress test (address, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "address",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_asan",
},
"Stress test (thread, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "thread",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_tsan",
},
"Stress test (undefined, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "undefined",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_ubsan",
},
"Stress test (memory, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "memory",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_msan",
},
"Stress test (debug, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "debug",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_debug",
},
"Integration tests (asan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "address",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_asan",
},
"Integration tests (thread, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "thread",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_tsan",
},
"Integration tests (release, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Integration tests (memory, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "memory",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_msan",
},
"Integration tests flaky check (asan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "address",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_asan",
},
"Compatibility check (actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Split build smoke test (actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "binary",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "splitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "binary_splitted",
},
"Testflows check (actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Unit tests (release-gcc, actions)": {
"required_build_properties": {
"compiler": "gcc-11",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "binary_gcc",
},
"Unit tests (release-clang, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "binary",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "binary_release",
},
"Unit tests (asan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "address",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_asan",
},
"Unit tests (msan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "memory",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_msan",
},
"Unit tests (tsan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "thread",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_tsan",
},
"Unit tests (ubsan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "undefined",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_ubsan",
},
"AST fuzzer (debug, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "debug",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_debug",
},
"AST fuzzer (ASan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "address",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_asan",
},
"AST fuzzer (MSan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "memory",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_msan",
},
"AST fuzzer (TSan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "thread",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_tsan",
},
"AST fuzzer (UBSan, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "undefined",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_ubsan",
},
"Release (actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_release",
},
"Stateless tests flaky check (address, actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "deb",
"build_type": "relwithdebuginfo",
"sanitizer": "address",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "package_asan",
},
"ClickHouse Keeper Jepsen (actions)": {
"required_build_properties": {
"compiler": "clang-13",
"package_type": "binary",
"build_type": "relwithdebuginfo",
"sanitizer": "none",
"bundled": "bundled",
"splitted": "unsplitted",
"clang_tidy": "disable",
"with_coverage": False
}
"required_build": "binary_release",
}
}
}
def build_config_to_string(build_config):
if build_config["package_type"] == "performance":
return "performance"
return "_".join([
build_config['compiler'],
build_config['build_type'] if build_config['build_type'] else "relwithdebuginfo",
build_config['sanitizer'] if build_config['sanitizer'] else "none",
build_config['bundled'],
build_config['splitted'],
'tidy' if 'tidy' in build_config and build_config['tidy'] == 'enable' else 'notidy',
'with_coverage' if 'with_coverage' in build_config and build_config['with_coverage'] else 'without_coverage',
build_config['package_type'],
])
View File
@ -163,7 +163,7 @@ def main(github_secret_key, github_app_id, push_to_cloudwatch, delete_offline_ru
if delete_offline_runners:
print("Going to delete offline runners")
for runner in runners:
if runner.offline:
if runner.offline and not runner.busy:
print("Deleting runner", runner)
delete_runner(access_token, runner)
View File
@ -6,7 +6,7 @@ import requests
from unidiff import PatchSet
DIFF_IN_DOCUMENTATION_EXT = [".html", ".md", ".yml", ".txt", ".css", ".js", ".xml", ".ico", ".conf", ".svg", ".png", ".jpg", ".py", ".sh"]
DIFF_IN_DOCUMENTATION_EXT = [".html", ".md", ".yml", ".txt", ".css", ".js", ".xml", ".ico", ".conf", ".svg", ".png", ".jpg", ".py", ".sh", ".json"]
def get_pr_for_commit(sha, ref):
try_get_pr_url = f"https://api.github.com/repos/{os.getenv('GITHUB_REPOSITORY', 'ClickHouse/ClickHouse')}/commits/{sha}/pulls"
View File
@ -34,14 +34,14 @@
<server>
<id>1</id>
<hostname>localhost</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>localhost</hostname>
<port>44445</port>
<port>9235</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -49,7 +49,7 @@
<server>
<id>3</id>
<hostname>localhost</hostname>
<port>44446</port>
<port>9236</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>
View File
@ -22,7 +22,7 @@
<server>
<id>1</id>
<hostname>localhost</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
View File
@ -16,7 +16,7 @@
<server>
<id>1</id>
<hostname>localhost</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
View File
@ -11,5 +11,5 @@ keeper_server:
server:
id: 1
hostname: localhost
port: 44444
port: 9234
View File
@ -16,7 +16,7 @@
<server>
<id>1</id>
<hostname>localhost</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
View File
@ -11,5 +11,5 @@ keeper_server:
server:
id: 1
hostname: localhost
port: 44444
port: 9234
View File
@ -16,7 +16,7 @@
<server>
<id>1</id>
<hostname>localhost</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
View File

@ -21,7 +21,7 @@
<server>
<id>1</id>
<hostname>localhost</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
View File
@ -20,7 +20,7 @@
<server>
<id>1</id>
<hostname>localhost</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
View File
@ -0,0 +1,22 @@
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>
View File
@ -0,0 +1,8 @@
<clickhouse>
<zookeeper>
<node index="1">
<host>node1</host>
<port>9181</port>
</node>
</zookeeper>
</clickhouse>
View File
@ -17,14 +17,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>false</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -32,7 +32,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>false</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>
View File
@ -17,14 +17,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>false</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -32,7 +32,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>false</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>
View File
@ -17,14 +17,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>false</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -32,7 +32,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>false</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>

View File

@ -7,17 +7,17 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>

View File

@ -7,17 +7,17 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>

View File

@ -6,17 +6,17 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>

View File

@ -17,14 +17,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -32,7 +32,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>

View File

@ -17,14 +17,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -32,7 +32,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>

View File

@ -17,14 +17,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -32,7 +32,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>

View File

@ -16,14 +16,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -31,7 +31,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>

View File

@ -16,14 +16,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -31,7 +31,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>

View File

@ -16,14 +16,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -31,7 +31,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>

View File

@ -16,14 +16,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -31,7 +31,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>

View File

@ -16,14 +16,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -31,7 +31,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>

View File

@ -16,14 +16,14 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>2</priority>
@ -31,7 +31,7 @@
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
<can_become_leader>true</can_become_leader>
<start_as_follower>true</start_as_follower>
<priority>1</priority>

View File

@ -15,7 +15,7 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>

View File

@ -15,17 +15,17 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>

View File

@ -15,17 +15,17 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>

View File

@ -15,17 +15,17 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>

View File

@ -15,12 +15,12 @@
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<port>9234</port>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>

Some files were not shown because too many files have changed in this diff.