From 7e6cd00cee84394a6d2ead085fcb7b30acfca2da Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Mon, 15 Feb 2021 09:31:35 +0300 Subject: [PATCH 01/43] Try to switch to llvm-12/clang-12 --- docker/builder/build.sh | 2 +- docker/packager/binary/Dockerfile | 7 +- docker/packager/deb/Dockerfile | 7 +- docker/packager/packager | 1 + docker/test/codebrowser/Dockerfile | 2 +- docker/test/fuzzer/run-fuzzer.sh | 2 +- docs/en/development/build.md | 10 +-- tests/ci/ci_config.json | 110 ++++++++++++++--------------- 8 files changed, 76 insertions(+), 65 deletions(-) diff --git a/docker/builder/build.sh b/docker/builder/build.sh index d4cf662e91b..7c7a8893751 100755 --- a/docker/builder/build.sh +++ b/docker/builder/build.sh @@ -4,7 +4,7 @@ set -e #ccache -s # uncomment to display CCache statistics mkdir -p /server/build_docker cd /server/build_docker -cmake -G Ninja /server "-DCMAKE_C_COMPILER=$(command -v clang-11)" "-DCMAKE_CXX_COMPILER=$(command -v clang++-11)" +cmake -G Ninja /server "-DCMAKE_C_COMPILER=$(command -v clang-12)" "-DCMAKE_CXX_COMPILER=$(command -v clang++-12)" # Set the number of build jobs to the half of number of virtual CPU cores (rounded up). # By default, ninja use all virtual CPU cores, that leads to very high memory consumption without much improvement in build time. diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 91036d88d8c..e8071c79a50 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -1,7 +1,7 @@ # docker build -t yandex/clickhouse-binary-builder . FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12 RUN apt-get update \ && apt-get install \ @@ -57,6 +57,11 @@ RUN cat /etc/resolv.conf \ lld-11 \ llvm-11 \ llvm-11-dev \ + clang-12 \ + clang-tidy-12 \ + lld-12 \ + llvm-12 \ + llvm-12-dev \ libicu-dev \ libreadline-dev \ ninja-build \ diff --git a/docker/packager/deb/Dockerfile b/docker/packager/deb/Dockerfile index 8fd89d60f85..42a55ab72bd 100644 --- a/docker/packager/deb/Dockerfile +++ b/docker/packager/deb/Dockerfile @@ -1,7 +1,7 @@ # docker build -t yandex/clickhouse-deb-builder . 
FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12 RUN apt-get update \ && apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \ @@ -36,6 +36,11 @@ RUN apt-get update \ && apt-get install \ gcc-9 \ g++-9 \ + clang-12 \ + clang-tidy-12 \ + lld-12 \ + llvm-12 \ + llvm-12-dev \ clang-11 \ clang-tidy-11 \ lld-11 \ diff --git a/docker/packager/packager b/docker/packager/packager index 65c03cc10e3..a681086f955 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -184,6 +184,7 @@ if __name__ == "__main__": parser.add_argument("--build-type", choices=("debug", ""), default="") parser.add_argument("--compiler", choices=("clang-10", "clang-10-darwin", "clang-10-aarch64", "clang-10-freebsd", "clang-11", "clang-11-darwin", "clang-11-aarch64", "clang-11-freebsd", + "clang-12", "clang-12-darwin", "clang-12-aarch64", "clang-12-freebsd", "gcc-9", "gcc-10"), default="gcc-9") parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="") parser.add_argument("--unbundled", action="store_true") diff --git a/docker/test/codebrowser/Dockerfile b/docker/test/codebrowser/Dockerfile index e03f94a85e0..8f6c760c4a0 100644 --- a/docker/test/codebrowser/Dockerfile +++ b/docker/test/codebrowser/Dockerfile @@ -22,7 +22,7 @@ ENV SHA=nosha ENV DATA="data" CMD mkdir -p $BUILD_DIRECTORY && cd $BUILD_DIRECTORY && \ - cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-11 -DCMAKE_C_COMPILER=/usr/bin/clang-11 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON && \ + cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-12 -DCMAKE_C_COMPILER=/usr/bin/clang-12 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON && \ mkdir -p $HTML_RESULT_DIRECTORY && \ $CODEGEN -b $BUILD_DIRECTORY -a -o $HTML_RESULT_DIRECTORY -p ClickHouse:$SOURCE_DIRECTORY:$SHA -d $DATA && \ cp -r $STATIC_DATA $HTML_RESULT_DIRECTORY/ &&\ diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index e21f9efae66..b62c573f586 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -12,7 +12,7 @@ stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" echo "$script_dir" repo_dir=ch -BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-11_debug_none_bundled_unsplitted_disable_False_binary"} +BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-12_debug_none_bundled_unsplitted_disable_False_binary"} function clone { diff --git a/docs/en/development/build.md b/docs/en/development/build.md index 3181f26800d..cc184e0302a 100644 --- a/docs/en/development/build.md +++ b/docs/en/development/build.md @@ -23,7 +23,7 @@ $ sudo apt-get install git cmake python ninja-build Or cmake3 instead of cmake on older systems. -### Install clang-11 (recommended) {#install-clang-11} +### Install clang-12 (recommended) {#install-clang-12} On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) @@ -33,16 +33,16 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" For other Linux distribution - check the availability of the [prebuild packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html). 
-#### Use clang-11 for Builds {#use-gcc-10-for-builds} +#### Use clang-12 for Builds {#use-clang-12-for-builds} ``` bash -$ export CC=clang-11 -$ export CXX=clang++-11 +$ export CC=clang-12 +$ export CXX=clang++-12 ``` ### Install GCC 10 {#install-gcc-10} -We recommend building ClickHouse with clang-11, GCC-10 also supported, but it is not used for production builds. +We recommend building ClickHouse with clang-12, GCC-10 also supported, but it is not used for production builds. If you want to use GCC-10 there are several ways to install it. diff --git a/tests/ci/ci_config.json b/tests/ci/ci_config.json index 0e467319285..703cdc10fed 100644 --- a/tests/ci/ci_config.json +++ b/tests/ci/ci_config.json @@ -1,7 +1,7 @@ { "build_config": [ { - "compiler": "clang-11", + "compiler": "clang-12", "build-type": "", "sanitizer": "", "package-type": "deb", @@ -12,7 +12,7 @@ "with_coverage": false }, { - "compiler": "clang-11", + "compiler": "clang-12", "build-type": "", "sanitizer": "", "package-type": "performance", @@ -32,7 +32,7 @@ "with_coverage": false }, { - "compiler": "clang-11", + "compiler": "clang-12", "build-type": "", "sanitizer": "address", "package-type": "deb", @@ -42,7 +42,7 @@ "with_coverage": false }, { - "compiler": "clang-11", + "compiler": "clang-12", "build-type": "", "sanitizer": "undefined", "package-type": "deb", @@ -52,7 +52,7 @@ "with_coverage": false }, { - "compiler": "clang-11", + "compiler": "clang-12", "build-type": "", "sanitizer": "thread", "package-type": "deb", @@ -62,7 +62,7 @@ "with_coverage": false }, { - "compiler": "clang-11", + "compiler": "clang-12", "build-type": "", "sanitizer": "memory", "package-type": "deb", @@ -82,7 +82,7 @@ "with_coverage": false }, { - "compiler": "clang-11", + "compiler": "clang-12", "build-type": "debug", "sanitizer": "", "package-type": "deb", @@ -102,7 +102,7 @@ "with_coverage": false }, { - "compiler": "clang-11", + "compiler": "clang-12", "build-type": "", "sanitizer": "", "package-type": "binary", @@ -114,7 +114,7 @@ ], "special_build_config": [ { - "compiler": "clang-11", + "compiler": "clang-12", "build-type": "debug", "sanitizer": "", "package-type": "deb", @@ -124,7 +124,7 @@ "with_coverage": true }, { - "compiler": "clang-11", + "compiler": "clang-12", "build-type": "", "sanitizer": "", "package-type": "binary", @@ -134,7 +134,7 @@ "with_coverage": false }, { - "compiler": "clang-11-darwin", + "compiler": "clang-12-darwin", "build-type": "", "sanitizer": "", "package-type": "binary", @@ -144,7 +144,7 @@ "with_coverage": false }, { - "compiler": "clang-11-aarch64", + "compiler": "clang-12-aarch64", "build-type": "", "sanitizer": "", "package-type": "binary", @@ -154,7 +154,7 @@ "with_coverage": false }, { - "compiler": "clang-11-freebsd", + "compiler": "clang-12-freebsd", "build-type": "", "sanitizer": "", "package-type": "binary", @@ -167,7 +167,7 @@ "tests_config": { "Functional stateful tests (address)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "address", @@ -179,7 +179,7 @@ }, "Functional stateful tests (thread)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "thread", @@ -191,7 +191,7 @@ }, "Functional stateful tests (memory)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "memory", @@ -203,7 
+203,7 @@ }, "Functional stateful tests (ubsan)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "undefined", @@ -215,7 +215,7 @@ }, "Functional stateful tests (debug)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "debug", "sanitizer": "none", @@ -227,7 +227,7 @@ }, "Functional stateless tests (ANTLR debug)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "debug", "sanitizer": "none", @@ -239,7 +239,7 @@ }, "Functional stateful tests (release)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -251,7 +251,7 @@ }, "Functional stateful tests (release, DatabaseOrdinary)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -275,7 +275,7 @@ }, "Functional stateless tests (address)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "address", @@ -287,7 +287,7 @@ }, "Functional stateless tests (thread)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "thread", @@ -299,7 +299,7 @@ }, "Functional stateless tests (memory)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "memory", @@ -311,7 +311,7 @@ }, "Functional stateless tests (ubsan)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "undefined", @@ -323,7 +323,7 @@ }, "Functional stateless tests (debug)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "debug", "sanitizer": "none", @@ -335,7 +335,7 @@ }, "Functional stateless tests (release)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -347,7 +347,7 @@ }, "Functional stateless tests (pytest)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -371,7 +371,7 @@ }, "Functional stateless tests (release, wide parts enabled)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -383,7 +383,7 @@ }, "Functional stateless tests (release, DatabaseOrdinary)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -407,7 +407,7 @@ }, "Stress test (address)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "address", @@ -419,7 +419,7 @@ }, "Stress test (thread)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", 
"build_type": "relwithdebuginfo", "sanitizer": "thread", @@ -431,7 +431,7 @@ }, "Stress test (undefined)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "undefined", @@ -443,7 +443,7 @@ }, "Stress test (memory)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "memory", @@ -455,7 +455,7 @@ }, "Stress test (debug)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "debug", "sanitizer": "none", @@ -467,7 +467,7 @@ }, "Integration tests (asan)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "address", @@ -479,7 +479,7 @@ }, "Integration tests (thread)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "thread", @@ -491,7 +491,7 @@ }, "Integration tests (release)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -503,7 +503,7 @@ }, "Integration tests (memory)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "memory", @@ -515,7 +515,7 @@ }, "Integration tests flaky check (asan)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "address", @@ -527,7 +527,7 @@ }, "Compatibility check": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -539,7 +539,7 @@ }, "Split build smoke test": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -551,7 +551,7 @@ }, "Testflows check": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -575,7 +575,7 @@ }, "Unit tests release clang": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -587,7 +587,7 @@ }, "Unit tests ASAN": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "relwithdebuginfo", "sanitizer": "address", @@ -599,7 +599,7 @@ }, "Unit tests MSAN": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "relwithdebuginfo", "sanitizer": "memory", @@ -611,7 +611,7 @@ }, "Unit tests TSAN": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "relwithdebuginfo", "sanitizer": "thread", @@ -623,7 +623,7 @@ }, "Unit tests UBSAN": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "relwithdebuginfo", "sanitizer": "thread", @@ -635,7 +635,7 @@ }, "AST fuzzer (debug)": { "required_build_properties": { - 
"compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "debug", "sanitizer": "none", @@ -647,7 +647,7 @@ }, "AST fuzzer (ASan)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "relwithdebuginfo", "sanitizer": "address", @@ -659,7 +659,7 @@ }, "AST fuzzer (MSan)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "relwithdebuginfo", "sanitizer": "memory", @@ -671,7 +671,7 @@ }, "AST fuzzer (TSan)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "relwithdebuginfo", "sanitizer": "thread", @@ -683,7 +683,7 @@ }, "AST fuzzer (UBSan)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "binary", "build_type": "relwithdebuginfo", "sanitizer": "undefined", @@ -695,7 +695,7 @@ }, "Release": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -707,7 +707,7 @@ }, "Functional stateless tests flaky check (address)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "address", From aabf5307c417a4757470bfff1da1ddb428ef3773 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Fri, 30 Apr 2021 13:26:13 +0300 Subject: [PATCH 02/43] more --- docker/test/keeper-jepsen/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/keeper-jepsen/run.sh b/docker/test/keeper-jepsen/run.sh index 352585e16e3..8d31b5b7f1c 100644 --- a/docker/test/keeper-jepsen/run.sh +++ b/docker/test/keeper-jepsen/run.sh @@ -2,7 +2,7 @@ set -euo pipefail -CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"} +CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-12_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"} CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""} From cf277a67846b909dff43d09060e27a75c585ad6f Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Fri, 30 Apr 2021 17:55:38 +0300 Subject: [PATCH 03/43] find llvm --- cmake/find/llvm.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/find/llvm.cmake b/cmake/find/llvm.cmake index e0ba1d9b039..0025cc0f9d3 100644 --- a/cmake/find/llvm.cmake +++ b/cmake/find/llvm.cmake @@ -26,7 +26,7 @@ endif () if (NOT USE_INTERNAL_LLVM_LIBRARY) set (LLVM_PATHS "/usr/local/lib/llvm") - foreach(llvm_v 10 9 8) + foreach(llvm_v 12 11) if (NOT LLVM_FOUND) find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS}) endif () From a4e6a96c8243d8a50907f6d831c2ff91d0477516 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Fri, 30 Apr 2021 19:02:23 +0300 Subject: [PATCH 04/43] fasttest change --- docker/test/fasttest/Dockerfile | 2 +- docker/test/fasttest/run.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 2864f7fc4da..0c7e2af6ec6 100644 --- 
a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -1,7 +1,7 @@ # docker build -t yandex/clickhouse-fasttest . FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12 RUN apt-get update \ && apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \ diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index a7cc398e5c9..d7bf73f4755 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -9,7 +9,7 @@ trap 'kill $(jobs -pr) ||:' EXIT stage=${stage:-} # Compiler version, normally set by Dockerfile -export LLVM_VERSION=${LLVM_VERSION:-11} +export LLVM_VERSION=${LLVM_VERSION:-12} # A variable to pass additional flags to CMake. # Here we explicitly default it to nothing so that bash doesn't complain about From 366a7fe45bbfdbfa6f6ad07bafe293054500c0b5 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Wed, 12 May 2021 18:24:27 +0300 Subject: [PATCH 05/43] linker path required to be specific one --- docker/packager/binary/build.sh | 22 ++++++++++++++++------ docker/packager/packager | 5 ++++- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index cf74105fbbb..d746aed76ed 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -2,14 +2,23 @@ set -x -e -mkdir -p build/cmake/toolchain/darwin-x86_64 -tar xJf MacOSX10.15.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1 +if [ "1" == "${IS_CROSS_DARWIN:0}" ] +then + mkdir -p build/cmake/toolchain/darwin-x86_64 + tar xJf MacOSX10.15.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1 +fi -mkdir -p build/cmake/toolchain/linux-aarch64 -tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1 +if [ "1" == "${IS_CROSS_ARM:0}" ] +then + mkdir -p build/cmake/toolchain/linux-aarch64 + tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1 +fi -mkdir -p build/cmake/toolchain/freebsd-x86_64 -tar xJf freebsd-11.3-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1 +if [ "1" == "${IS_CROSS_ARM:0}" ] +then + mkdir -p build/cmake/toolchain/freebsd-x86_64 + tar xJf freebsd-11.3-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1 +fi # Uncomment to debug ccache. Don't put ccache log in /output right away, or it # will be confusingly packed into the "performance" package. @@ -21,6 +30,7 @@ cd build/build_docker rm -f CMakeCache.txt # Read cmake arguments into array (possibly empty) read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}" +env cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" .. ccache --show-config ||: diff --git a/docker/packager/packager b/docker/packager/packager index 9b7692b57ae..6c9cfcc7a1a 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -73,9 +73,10 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ # Explicitly use LLD with Clang by default. # Don't force linker for cross-compilation. 
if is_clang and not is_cross_compile: - cmake_flags.append("-DLINKER_NAME=lld") + cmake_flags.append("-DLINKER_NAME=ld.lld") if is_cross_darwin: + result.append("IS_CROSS_DARWIN=1") cc = compiler[:-len(DARWIN_SUFFIX)] cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/x86_64-apple-darwin-ar") cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/x86_64-apple-darwin-install_name_tool") @@ -83,9 +84,11 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ cmake_flags.append("-DLINKER_NAME=/cctools/bin/x86_64-apple-darwin-ld") cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-x86_64.cmake") elif is_cross_arm: + result.append("IS_CROSS_ARM=1") cc = compiler[:-len(ARM_SUFFIX)] cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake") elif is_cross_freebsd: + result.append("IS_CROSS_FREEBSD=1") cc = compiler[:-len(FREEBSD_SUFFIX)] cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake") else: From 783e9b3c1c440787a4a10a6f1d14f19223743aac Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Thu, 13 May 2021 14:13:37 +0300 Subject: [PATCH 06/43] more --- docker/builder/Dockerfile | 2 +- docker/test/base/Dockerfile | 2 +- tests/ci/ci_config.json | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/builder/Dockerfile b/docker/builder/Dockerfile index 199b5217d79..e9ba6b2ccc1 100644 --- a/docker/builder/Dockerfile +++ b/docker/builder/Dockerfile @@ -1,6 +1,6 @@ FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12 RUN apt-get update \ && apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \ diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index 44b9d42d6a1..5e41ee11ea0 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -1,7 +1,7 @@ # docker build -t yandex/clickhouse-test-base . 
FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12 RUN apt-get update \ && apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \ diff --git a/tests/ci/ci_config.json b/tests/ci/ci_config.json index ee0e1a4c09d..f2c70fa1b8e 100644 --- a/tests/ci/ci_config.json +++ b/tests/ci/ci_config.json @@ -253,7 +253,7 @@ }, "Functional stateful tests (release, DatabaseReplicated)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", @@ -385,7 +385,7 @@ }, "Functional stateless tests (release, DatabaseReplicated)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "clang-12", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", From d3149ae61cd8cfdbf6d7f876db7c73e2c36df960 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Fri, 21 May 2021 17:42:04 +0300 Subject: [PATCH 07/43] more --- cmake/tools.cmake | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 8ff94ab867b..f94f4b289a3 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -79,8 +79,9 @@ endif () if (LINKER_NAME) if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0)) - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LINKER_NAME}") - set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LINKER_NAME}") + find_program (LLD_PATH NAMES ${LINKER_NAME}) + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LLD_PATH}") + set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LLD_PATH}") else () set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}") set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}") From 50e233680a6d7f259dfc5670eccbb4cae1bda656 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 21 May 2021 01:04:26 +0300 Subject: [PATCH 08/43] LLVM remove non internal build --- cmake/find/llvm.cmake | 125 ++++++++++++++++--------------- cmake/find/termcap.cmake | 28 +++---- contrib/CMakeLists.txt | 2 +- utils/ci/build-normal.sh | 5 -- utils/ci/default-config | 1 - utils/ci/install-libraries.sh | 4 - utils/ci/jobs/quick-build/run.sh | 1 - 7 files changed, 78 insertions(+), 88 deletions(-) diff --git a/cmake/find/llvm.cmake b/cmake/find/llvm.cmake index c2259fc7757..88ce8927497 100644 --- a/cmake/find/llvm.cmake +++ b/cmake/find/llvm.cmake @@ -1,81 +1,82 @@ -if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64) +if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined") set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "") endif() option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ON) + # Broken in macos. TODO: update clang, re-test, enable on Apple -if (ENABLE_EMBEDDED_COMPILER AND NOT SPLIT_SHARED_LIBRARIES AND ARCH_AMD64 AND NOT (SANITIZE STREQUAL "undefined")) - option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library." ${NOT_UNBUNDLED}) -endif() +# if (ENABLE_EMBEDDED_COMPILER AND NOT SPLIT_SHARED_LIBRARIES AND ARCH_AMD64 AND NOT (SANITIZE STREQUAL "undefined")) +# option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library." 
${NOT_UNBUNDLED}) +# endif() if (NOT ENABLE_EMBEDDED_COMPILER) - if(USE_INTERNAL_LLVM_LIBRARY) - message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use internal LLVM library with ENABLE_EMBEDDED_COMPILER=OFF") - endif() +# if(USE_INTERNAL_LLVM_LIBRARY) +# message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use internal LLVM library with ENABLE_EMBEDDED_COMPILER=OFF") +# endif() + set (USE_EMBEDDED_COMPILER 0) return() endif() if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/CMakeLists.txt") - if (USE_INTERNAL_LLVM_LIBRARY) - message (WARNING "submodule contrib/llvm is missing. to fix try run: \n git submodule update --init --recursive") - message (${RECONFIGURE_MESSAGE_LEVEL} "Can't fidd internal LLVM library") - endif() - set (MISSING_INTERNAL_LLVM_LIBRARY 1) + # if (USE_INTERNAL_LLVM_LIBRARY) + message (${RECONFIGURE_MESSAGE_LEVEL} "submodule /contrib/llvm is missing. to fix try run: \n git submodule update --init --recursive") + # message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal LLVM library") + # endif() + # set (MISSING_INTERNAL_LLVM_LIBRARY 1) endif () -if (NOT USE_INTERNAL_LLVM_LIBRARY) - set (LLVM_PATHS "/usr/local/lib/llvm" "/usr/lib/llvm") +# if (NOT USE_INTERNAL_LLVM_LIBRARY) +# set (LLVM_PATHS "/usr/local/lib/llvm" "/usr/lib/llvm") +# foreach(llvm_v 12 11.1 11) +# if (NOT LLVM_FOUND) +# find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS}) +# endif () +# endforeach () - foreach(llvm_v 12 11.1 11) - if (NOT LLVM_FOUND) - find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS}) - endif () - endforeach () +# if (LLVM_FOUND) +# # Remove dynamically-linked zlib and libedit from LLVM's dependencies: +# set_target_properties(LLVMSupport PROPERTIES INTERFACE_LINK_LIBRARIES "-lpthread;LLVMDemangle;${ZLIB_LIBRARIES}") +# set_target_properties(LLVMLineEditor PROPERTIES INTERFACE_LINK_LIBRARIES "LLVMSupport") - if (LLVM_FOUND) - # Remove dynamically-linked zlib and libedit from LLVM's dependencies: - set_target_properties(LLVMSupport PROPERTIES INTERFACE_LINK_LIBRARIES "-lpthread;LLVMDemangle;${ZLIB_LIBRARIES}") - set_target_properties(LLVMLineEditor PROPERTIES INTERFACE_LINK_LIBRARIES "LLVMSupport") +# option(LLVM_HAS_RTTI "Enable if LLVM was build with RTTI enabled" ON) +set (USE_EMBEDDED_COMPILER 1) +# else() +# message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find system LLVM") +# set (USE_EMBEDDED_COMPILER 0) +# endif() - option(LLVM_HAS_RTTI "Enable if LLVM was build with RTTI enabled" ON) - set (USE_EMBEDDED_COMPILER 1) - else() - message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find system LLVM") - set (USE_EMBEDDED_COMPILER 0) - endif() +# if (LLVM_FOUND AND OS_LINUX AND USE_LIBCXX AND NOT FORCE_LLVM_WITH_LIBCXX) +# message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is not set but the LLVM library from OS packages " +# "in Linux is incompatible with libc++ ABI. LLVM Will be disabled. Force: -DFORCE_LLVM_WITH_LIBCXX=ON") +# message (${RECONFIGURE_MESSAGE_LEVEL} "Unsupported LLVM configuration, cannot enable LLVM") +# set (LLVM_FOUND 0) +# set (USE_EMBEDDED_COMPILER 0) +# endif () +# endif() - if (LLVM_FOUND AND OS_LINUX AND USE_LIBCXX AND NOT FORCE_LLVM_WITH_LIBCXX) - message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is not set but the LLVM library from OS packages " - "in Linux is incompatible with libc++ ABI. LLVM Will be disabled. 
Force: -DFORCE_LLVM_WITH_LIBCXX=ON") - message (${RECONFIGURE_MESSAGE_LEVEL} "Unsupported LLVM configuration, cannot enable LLVM") - set (LLVM_FOUND 0) - set (USE_EMBEDDED_COMPILER 0) - endif () -endif() - -if(NOT LLVM_FOUND AND NOT MISSING_INTERNAL_LLVM_LIBRARY) - if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR) - message(WARNING "Option ENABLE_EMBEDDED_COMPILER is set but internal LLVM library cannot build if build directory is the same as source directory.") - set (LLVM_FOUND 0) - set (USE_EMBEDDED_COMPILER 0) - elseif (SPLIT_SHARED_LIBRARIES) +# if(NOT LLVM_FOUND AND NOT MISSING_INTERNAL_LLVM_LIBRARY) +# if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR) +# message(WARNING "Option ENABLE_EMBEDDED_COMPILER is set but internal LLVM library cannot build if build directory is the same as source directory.") +# set (LLVM_FOUND 0) +# set (USE_EMBEDDED_COMPILER 0) +# elseif (SPLIT_SHARED_LIBRARIES) # llvm-tablegen cannot find shared libraries that we build. Probably can be easily fixed. - message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is not compatible with SPLIT_SHARED_LIBRARIES. Build of LLVM will be disabled.") - set (LLVM_FOUND 0) - set (USE_EMBEDDED_COMPILER 0) - elseif (NOT ARCH_AMD64) + # message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is not compatible with SPLIT_SHARED_LIBRARIES. Build of LLVM will be disabled.") + # set (LLVM_FOUND 0) + # set (USE_EMBEDDED_COMPILER 0) + # elseif (NOT ARCH_AMD64) # It's not supported yet, but you can help. - message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is only available for x86_64. Build of LLVM will be disabled.") - set (LLVM_FOUND 0) - set (USE_EMBEDDED_COMPILER 0) - elseif (SANITIZE STREQUAL "undefined") - # llvm-tblgen, that is used during LLVM build, doesn't work with UBSan. - message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY does not work with UBSan, because 'llvm-tblgen' tool from LLVM has undefined behaviour. Build of LLVM will be disabled.") - set (LLVM_FOUND 0) - set (USE_EMBEDDED_COMPILER 0) - else () - set (USE_INTERNAL_LLVM_LIBRARY ON) + # message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is only available for x86_64. Build of LLVM will be disabled.") + # set (LLVM_FOUND 0) + # set (USE_EMBEDDED_COMPILER 0) + # elseif (SANITIZE STREQUAL "undefined") + # # llvm-tblgen, that is used during LLVM build, doesn't work with UBSan. + # message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY does not work with UBSan, because 'llvm-tblgen' tool from LLVM has undefined behaviour. Build of LLVM will be disabled.") + # set (LLVM_FOUND 0) + # set (USE_EMBEDDED_COMPILER 0) + # else () + # set (USE_INTERNAL_LLVM_LIBRARY ON) set (LLVM_FOUND 1) set (USE_EMBEDDED_COMPILER 1) set (LLVM_VERSION "9.0.0bundled") @@ -87,13 +88,13 @@ if(NOT LLVM_FOUND AND NOT MISSING_INTERNAL_LLVM_LIBRARY) endif() endif() -if (LLVM_FOUND) +# if (LLVM_FOUND) message(STATUS "LLVM include Directory: ${LLVM_INCLUDE_DIRS}") message(STATUS "LLVM library Directory: ${LLVM_LIBRARY_DIRS}") message(STATUS "LLVM C++ compiler flags: ${LLVM_CXXFLAGS}") -else() - message (${RECONFIGURE_MESSAGE_LEVEL} "Can't enable LLVM") -endif() +# else() +# message (${RECONFIGURE_MESSAGE_LEVEL} "Can't enable LLVM") +# endif() # This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles. 
set (REQUIRED_LLVM_LIBRARIES diff --git a/cmake/find/termcap.cmake b/cmake/find/termcap.cmake index 58454165785..448ef34f3c3 100644 --- a/cmake/find/termcap.cmake +++ b/cmake/find/termcap.cmake @@ -1,17 +1,17 @@ -if (ENABLE_EMBEDDED_COMPILER AND NOT USE_INTERNAL_LLVM_LIBRARY AND USE_STATIC_LIBRARIES) - find_library (TERMCAP_LIBRARY tinfo) - if (NOT TERMCAP_LIBRARY) - find_library (TERMCAP_LIBRARY ncurses) - endif() - if (NOT TERMCAP_LIBRARY) - find_library (TERMCAP_LIBRARY termcap) - endif() +# if (ENABLE_EMBEDDED_COMPILER AND NOT USE_INTERNAL_LLVM_LIBRARY AND USE_STATIC_LIBRARIES) +# find_library (TERMCAP_LIBRARY tinfo) +# if (NOT TERMCAP_LIBRARY) +# find_library (TERMCAP_LIBRARY ncurses) +# endif() +# if (NOT TERMCAP_LIBRARY) +# find_library (TERMCAP_LIBRARY termcap) +# endif() - if (NOT TERMCAP_LIBRARY) - message (FATAL_ERROR "Statically Linking external LLVM requires termcap") - endif() +# if (NOT TERMCAP_LIBRARY) +# message (FATAL_ERROR "Statically Linking external LLVM requires termcap") +# endif() - target_link_libraries(LLVMSupport INTERFACE ${TERMCAP_LIBRARY}) +# target_link_libraries(LLVMSupport INTERFACE ${TERMCAP_LIBRARY}) - message (STATUS "Using termcap: ${TERMCAP_LIBRARY}") -endif() +# message (STATUS "Using termcap: ${TERMCAP_LIBRARY}") +# endif() diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 9eafec23f51..21d26695e33 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -205,7 +205,7 @@ elseif(GTEST_SRC_DIR) target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0) endif() -if (USE_EMBEDDED_COMPILER AND USE_INTERNAL_LLVM_LIBRARY) +if (USE_EMBEDDED_COMPILER) # ld: unknown option: --color-diagnostics if (APPLE) set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "") diff --git a/utils/ci/build-normal.sh b/utils/ci/build-normal.sh index b937269c8a3..328bd2c9f51 100755 --- a/utils/ci/build-normal.sh +++ b/utils/ci/build-normal.sh @@ -8,11 +8,6 @@ source default-config mkdir -p "${WORKSPACE}/build" pushd "${WORKSPACE}/build" -if [[ "${ENABLE_EMBEDDED_COMPILER}" == 1 ]]; then - [[ "$USE_LLVM_LIBRARIES_FROM_SYSTEM" == 0 ]] && CMAKE_FLAGS="$CMAKE_FLAGS -DUSE_INTERNAL_LLVM_LIBRARY=1" - [[ "$USE_LLVM_LIBRARIES_FROM_SYSTEM" != 0 ]] && CMAKE_FLAGS="$CMAKE_FLAGS -DUSE_INTERNAL_LLVM_LIBRARY=0" -fi - cmake -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DENABLE_EMBEDDED_COMPILER=${ENABLE_EMBEDDED_COMPILER} $CMAKE_FLAGS ../sources [[ "$BUILD_TARGETS" != 'all' ]] && BUILD_TARGETS_STRING="--target $BUILD_TARGETS" diff --git a/utils/ci/default-config b/utils/ci/default-config index cd6f25ecf9b..b66121cc757 100644 --- a/utils/ci/default-config +++ b/utils/ci/default-config @@ -27,7 +27,6 @@ CLANG_SOURCES_BRANCH=trunk # or tags/RELEASE_600/final GCC_SOURCES_VERSION=latest # or gcc-7.1.0 # install-libraries -USE_LLVM_LIBRARIES_FROM_SYSTEM=0 # 0 or 1 ENABLE_EMBEDDED_COMPILER=1 # build diff --git a/utils/ci/install-libraries.sh b/utils/ci/install-libraries.sh index d7fb856dbed..7615375fbc1 100755 --- a/utils/ci/install-libraries.sh +++ b/utils/ci/install-libraries.sh @@ -5,7 +5,3 @@ source default-config ./install-os-packages.sh libicu-dev ./install-os-packages.sh libreadline-dev - -if [[ "$ENABLE_EMBEDDED_COMPILER" == 1 && "$USE_LLVM_LIBRARIES_FROM_SYSTEM" == 1 ]]; then - ./install-os-packages.sh llvm-libs-5.0 -fi diff --git a/utils/ci/jobs/quick-build/run.sh b/utils/ci/jobs/quick-build/run.sh index 3d755625c8d..af977d14465 100755 --- a/utils/ci/jobs/quick-build/run.sh +++ b/utils/ci/jobs/quick-build/run.sh @@ -15,7 +15,6 @@ SOURCES_METHOD=local COMPILER=clang 
COMPILER_INSTALL_METHOD=packages COMPILER_PACKAGE_VERSION=6.0 -USE_LLVM_LIBRARIES_FROM_SYSTEM=0 BUILD_METHOD=normal BUILD_TARGETS=clickhouse BUILD_TYPE=Debug From 07556fac2ce0b04907f2ca2c52aacdbb3bf4e73b Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 25 May 2021 12:52:53 +0300 Subject: [PATCH 09/43] try fix aarch64 --- cmake/tools.cmake | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmake/tools.cmake b/cmake/tools.cmake index f94f4b289a3..0e213c285d8 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -77,6 +77,11 @@ if (OS_LINUX AND NOT LINKER_NAME) endif () endif () +if (LINKER_NAME AND NOT LLD_PATH) + find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld.lld" "lld") + find_program (GOLD_PATH NAMES "ld.gold" "gold") +endif () + if (LINKER_NAME) if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0)) find_program (LLD_PATH NAMES ${LINKER_NAME}) From 4f711ee038f82a4d506cc3ab8176d5e17bfd3adb Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 25 May 2021 16:05:30 +0300 Subject: [PATCH 10/43] fix darwin --- cmake/tools.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 0e213c285d8..7c15332a51a 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -77,7 +77,7 @@ if (OS_LINUX AND NOT LINKER_NAME) endif () endif () -if (LINKER_NAME AND NOT LLD_PATH) +if (NOT OS_DARWIN AND LINKER_NAME AND NOT LLD_PATH) find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld.lld" "lld") find_program (GOLD_PATH NAMES "ld.gold" "gold") endif () From 227eb9fda5a386ec4aa64fa23a203b876be322dc Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 25 May 2021 18:29:04 +0300 Subject: [PATCH 11/43] try --- cmake/freebsd/toolchain-x86_64.cmake | 2 +- cmake/linux/toolchain-aarch64.cmake | 2 +- cmake/tools.cmake | 5 ----- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/cmake/freebsd/toolchain-x86_64.cmake b/cmake/freebsd/toolchain-x86_64.cmake index d9839ec74ee..f9e45686db7 100644 --- a/cmake/freebsd/toolchain-x86_64.cmake +++ b/cmake/freebsd/toolchain-x86_64.cmake @@ -10,7 +10,7 @@ set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it set (CMAKE_AR "/usr/bin/ar" CACHE FILEPATH "" FORCE) set (CMAKE_RANLIB "/usr/bin/ranlib" CACHE FILEPATH "" FORCE) -set (LINKER_NAME "lld" CACHE STRING "" FORCE) +set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE) set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld") set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld") diff --git a/cmake/linux/toolchain-aarch64.cmake b/cmake/linux/toolchain-aarch64.cmake index e3924fdc537..b4dc6e45cbb 100644 --- a/cmake/linux/toolchain-aarch64.cmake +++ b/cmake/linux/toolchain-aarch64.cmake @@ -13,7 +13,7 @@ set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_D set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64") set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64") -set (LINKER_NAME "lld" CACHE STRING "" FORCE) +set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE) set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld") set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld") diff --git a/cmake/tools.cmake 
b/cmake/tools.cmake index 7c15332a51a..f94f4b289a3 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -77,11 +77,6 @@ if (OS_LINUX AND NOT LINKER_NAME) endif () endif () -if (NOT OS_DARWIN AND LINKER_NAME AND NOT LLD_PATH) - find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld.lld" "lld") - find_program (GOLD_PATH NAMES "ld.gold" "gold") -endif () - if (LINKER_NAME) if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0)) find_program (LLD_PATH NAMES ${LINKER_NAME}) From c902afddde705547cfc6e89f5c385d7eb30a6108 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Fri, 5 Mar 2021 16:57:16 +0200 Subject: [PATCH 12/43] Added system.session_log table Which logs all the info about LogIn, LogOut and LogIn Failure events. Additional info that is logged: - User name - event type (LogIn, LogOut, LoginFailure) - Event date\time\time with microseconds - authentication type (same as for IDENTIFIED BY of CREATE USER statement) - array of active settings profiles upon login - array of active roles upon login - array of changed settings with corresponding values - client address and port - interface (TCP\HTTP\MySQL\PostgreSQL, etc.) - client info (name, version info) - optional LoginFailure reason text message. Added some tests to verify that events are properly saved with all necessary info via following interfaces: - TCP - HTTP - MySQL Known limitations - Not tested against named HTTP sessions, PostgreSQL and gRPC, hence those are not guaranteed to work 100% properly. --- docker/test/fasttest/run.sh | 3 + programs/local/LocalServer.cpp | 2 + programs/server/Server.cpp | 7 +- programs/server/config.xml | 8 + programs/server/users.d/session_log_test.xml | 1 + src/Access/AccessControlManager.h | 11 +- src/Access/SettingsProfilesCache.cpp | 1 - src/Access/SettingsProfilesInfo.h | 10 + src/Core/MySQL/Authentication.cpp | 3 + src/Core/MySQL/MySQLSession.h | 19 + src/Core/PostgreSQLProtocol.h | 5 +- src/Interpreters/Context.cpp | 28 +- src/Interpreters/Context.h | 15 + src/Interpreters/InterpreterSetQuery.cpp | 5 + src/Interpreters/InterpreterSystemQuery.cpp | 4 +- src/Interpreters/Session.cpp | 66 +++- src/Interpreters/Session.h | 10 +- src/Interpreters/SessionLog.cpp | 261 ++++++++++++ src/Interpreters/SessionLog.h | 74 ++++ src/Interpreters/SystemLog.cpp | 14 + src/Interpreters/SystemLog.h | 3 + src/Interpreters/ya.make | 2 + .../Formats/Impl/MySQLOutputFormat.h | 2 + src/Server/HTTPHandler.h | 3 +- src/Server/MySQLHandler.cpp | 2 +- src/TableFunctions/TableFunctionMySQL.cpp | 3 +- tests/config/install.sh | 1 + tests/config/users.d/session_log_test.xml | 30 ++ .../0_stateless/01033_quota_dcl.reference | 2 +- .../01702_system_query_log.reference | 2 +- .../01747_system_session_log_long.reference | 218 +++++++++++ .../01747_system_session_log_long.sh | 370 ++++++++++++++++++ tests/queries/skip_list.json | 4 +- 33 files changed, 1164 insertions(+), 25 deletions(-) create mode 120000 programs/server/users.d/session_log_test.xml create mode 100644 src/Core/MySQL/MySQLSession.h create mode 100644 src/Interpreters/SessionLog.cpp create mode 100644 src/Interpreters/SessionLog.h create mode 100644 tests/config/users.d/session_log_test.xml create mode 100644 tests/queries/0_stateless/01747_system_session_log_long.reference create mode 100755 tests/queries/0_stateless/01747_system_session_log_long.sh diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 
00af261f6c8..108544779b4 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -399,6 +399,9 @@ function run_tests # depends on Go 02013_zlib_read_after_eof + + # Accesses CH via mysql table function (which is unavailable) + 01747_system_session_log_long ) time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \ diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 2b1b6185321..258743c7e16 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -393,6 +394,7 @@ void LocalServer::processQueries() auto context = session.makeQueryContext(); context->makeSessionContext(); /// initial_create_query requires a session context to be set. context->setCurrentQueryId(""); + applyCmdSettings(context); /// Use the same query_id (and thread group) for all queries diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index ddbc4c4e433..09b6add62d0 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -45,16 +45,21 @@ #include #include #include +#include +#include #include +#include #include +#include #include +#include #include #include #include #include #include -#include #include +#include #include #include #include diff --git a/programs/server/config.xml b/programs/server/config.xml index 510a5e230f8..98c4416da46 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -964,6 +964,14 @@ 1000 + + + system + session_log
+ + toYYYYMM(event_date) + 7500 +
+ + + + + none + + + + + + + + + + + + ::1 + 127.0.0.1 + + session_log_test_xml_profile + default + + + diff --git a/tests/queries/0_stateless/01033_quota_dcl.reference b/tests/queries/0_stateless/01033_quota_dcl.reference index 7478adac441..e732ea2fcd6 100644 --- a/tests/queries/0_stateless/01033_quota_dcl.reference +++ b/tests/queries/0_stateless/01033_quota_dcl.reference @@ -1 +1 @@ -CREATE QUOTA default KEYED BY user_name FOR INTERVAL 1 hour TRACKING ONLY TO default, readonly +CREATE QUOTA default KEYED BY user_name FOR INTERVAL 1 hour TRACKING ONLY TO default, readonly, session_log_test_xml_user diff --git a/tests/queries/0_stateless/01702_system_query_log.reference b/tests/queries/0_stateless/01702_system_query_log.reference index 1f329feac22..3458c2e5ed4 100644 --- a/tests/queries/0_stateless/01702_system_query_log.reference +++ b/tests/queries/0_stateless/01702_system_query_log.reference @@ -8,6 +8,7 @@ GRANT queries REVOKE queries Misc queries ACTUAL LOG CONTENT: + -- fire all kinds of queries and then check if those are present in the system.query_log\nSET log_comment=\'system.query_log logging test\'; Select SELECT \'DROP queries and also a cleanup before the test\'; Drop DROP DATABASE IF EXISTS sqllt SYNC; DROP USER IF EXISTS sqllt_user; @@ -82,5 +83,4 @@ Rename RENAME TABLE sqllt.table TO sqllt.table_new; Rename RENAME TABLE sqllt.table_new TO sqllt.table; Drop TRUNCATE TABLE sqllt.table; Drop DROP TABLE sqllt.table SYNC; - SET log_comment=\'\'; DROP queries and also a cleanup after the test diff --git a/tests/queries/0_stateless/01747_system_session_log_long.reference b/tests/queries/0_stateless/01747_system_session_log_long.reference new file mode 100644 index 00000000000..9ecf7e05421 --- /dev/null +++ b/tests/queries/0_stateless/01747_system_session_log_long.reference @@ -0,0 +1,218 @@ + +# no_password - User with profile from XML +TCP endpoint +TCP 'wrong password' case is skipped for no_password. +HTTP endpoint +HTTP 'wrong password' case is skipped for no_password. +MySQL endpoint +MySQL 'wrong password' case is skipped for no_password. + +# no_password - No profiles no roles +TCP endpoint +TCP 'wrong password' case is skipped for no_password. +HTTP endpoint +HTTP 'wrong password' case is skipped for no_password. +MySQL endpoint +MySQL 'wrong password' case is skipped for no_password. + +# no_password - Two profiles, no roles +TCP endpoint +TCP 'wrong password' case is skipped for no_password. +HTTP endpoint +HTTP 'wrong password' case is skipped for no_password. +MySQL endpoint +MySQL 'wrong password' case is skipped for no_password. + +# no_password - Two profiles and two simple roles +TCP endpoint +TCP 'wrong password' case is skipped for no_password. +HTTP endpoint +HTTP 'wrong password' case is skipped for no_password. +MySQL endpoint +MySQL 'wrong password' case is skipped for no_password. + +# plaintext_password - No profiles no roles +TCP endpoint +HTTP endpoint +MySQL endpoint + +# plaintext_password - Two profiles, no roles +TCP endpoint +HTTP endpoint +MySQL endpoint + +# plaintext_password - Two profiles and two simple roles +TCP endpoint +HTTP endpoint +MySQL endpoint + +# sha256_password - No profiles no roles +TCP endpoint +HTTP endpoint +MySQL endpoint +MySQL 'successful login' case is skipped for sha256_password. + +# sha256_password - Two profiles, no roles +TCP endpoint +HTTP endpoint +MySQL endpoint +MySQL 'successful login' case is skipped for sha256_password. 
+ +# sha256_password - Two profiles and two simple roles +TCP endpoint +HTTP endpoint +MySQL endpoint +MySQL 'successful login' case is skipped for sha256_password. + +# double_sha1_password - No profiles no roles +TCP endpoint +HTTP endpoint +MySQL endpoint + +# double_sha1_password - Two profiles, no roles +TCP endpoint +HTTP endpoint +MySQL endpoint + +# double_sha1_password - Two profiles and two simple roles +TCP endpoint +HTTP endpoint +MySQL endpoint +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL LoginFailure many +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL Logout 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles TCP LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles TCP 
Logout 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP Logout 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles MySQL Logout 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL LoginFailure many +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL Logout 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP LoginFailure 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP 
LoginSuccess 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP Logout 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginFailure 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP Logout 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles MySQL LoginFailure many +invalid_session_log_test_xml_user TCP LoginFailure 1 
+invalid_session_log_test_xml_user HTTP LoginFailure 1 +invalid_session_log_test_xml_user MySQL LoginFailure many +session_log_test_xml_user TCP LoginSuccess 1 +session_log_test_xml_user TCP Logout 1 +session_log_test_xml_user HTTP LoginSuccess 1 +session_log_test_xml_user HTTP Logout 1 +session_log_test_xml_user MySQL LoginSuccess 1 +session_log_test_xml_user MySQL Logout 1 diff --git a/tests/queries/0_stateless/01747_system_session_log_long.sh b/tests/queries/0_stateless/01747_system_session_log_long.sh new file mode 100755 index 00000000000..16b32a08442 --- /dev/null +++ b/tests/queries/0_stateless/01747_system_session_log_long.sh @@ -0,0 +1,370 @@ +#!/usr/bin/env bash + +################################################################################################## +# Verify that login, logout, and login failure events are properly stored in system.session_log +# when different `IDENTIFIED BY` clauses are used on a user. +# +# Make sure that system.session_log entries are non-empty and provide enough info on each event. +# +# Using multiple protocols +# * native TCP protocol with CH client +# * HTTP with CURL +# * MySQL - CH server accesses itself via mysql table function, query typically fails (unrelated) +# but auth should be performed properly. +# * PostgreSQL - CH server accesses itself via postgresql table function (currently out of order). +# * gRPC - not done yet +# +# There is a way to control how many times a query (e.g. via mysql table function) is retried, +# and hence the number of records in session_log varies. To mitigate this and simplify the final query, +# each auth_type is tested with a separate user. That way SELECT DISTINCT doesn't exclude log entries +# from different cases. +# +# All created users are added to ALL_USERNAMES and later cleaned up. +################################################################################################## + +# To minimize the amount of error context sent on failed queries when talking to CH via the MySQL protocol. +export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +set -eu + +# Since there is no way to clean up the system.session_log table, +# make sure that we can identify log entries from this test by a random user name. +readonly BASE_USERNAME="session_log_test_user_$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 32)" +readonly TMP_QUERY_FILE=$(mktemp /tmp/tmp_query.log.XXXXXX) +declare -a ALL_USERNAMES +ALL_USERNAMES+=("${BASE_USERNAME}") + +function reportError() +{ + if [ -s "${TMP_QUERY_FILE}" ] ; + then + echo "!!!!!! ERROR ${CLICKHOUSE_CLIENT} ${*} --queries-file ${TMP_QUERY_FILE}" >&2 + echo "query:" >&2 + cat "${TMP_QUERY_FILE}" >&2 + rm -f "${TMP_QUERY_FILE}" + fi +} + +function executeQuery() +{ + ## Execute a query (provided via heredoc or herestring) and print the query in case of error. + trap 'rm -f ${TMP_QUERY_FILE}; trap - ERR RETURN' RETURN + # Since we want to report with current values supplied to this function call + # shellcheck disable=SC2064 + trap "reportError $*" ERR + + cat - > "${TMP_QUERY_FILE}" + ${CLICKHOUSE_CLIENT} "${@}" --queries-file "${TMP_QUERY_FILE}" +} + +function cleanup() +{ + local usernames_to_cleanup + usernames_to_cleanup="$(IFS=, ; echo "${ALL_USERNAMES[*]}")" + executeQuery < "${TMP_QUERY_FILE}" + !
${CLICKHOUSE_CLIENT} "${@}" --multiquery --queries-file "${TMP_QUERY_FILE}" 2>&1 | tee -a ${TMP_QUERY_FILE} +} + +function createUser() +{ + local auth_type="${1}" + local username="${2}" + local password="${3}" + + if [[ "${auth_type}" == "no_password" ]] + then + password="" + + elif [[ "${auth_type}" == "plaintext_password" ]] + then + password="${password}" + + elif [[ "${auth_type}" == "sha256_password" ]] + then + password="$(executeQuery <<< "SELECT hex(SHA256('${password}'))")" + + elif [[ "${auth_type}" == "double_sha1_password" ]] + then + password="$(executeQuery <<< "SELECT hex(SHA1(SHA1('${password}')))")" + + else + echo "Invalid auth_type: ${auth_type}" >&2 + exit 1 + fi + + export RESULTING_PASS="${password}" + if [ -n "${password}" ] + then + password="BY '${password}'" + fi + + executeQuery < 1, 'many', toString(count(*))) -- do not rely on count value since MySQL does arbitrary number of retries +FROM + system.session_log +WHERE + (user LIKE '%session_log_test_xml_user%' OR user LIKE '%${BASE_USERNAME}%') + AND + event_time_microseconds >= test_start_time +GROUP BY + user_name, interface, type +ORDER BY + user_name, interface, type; +EOF \ No newline at end of file diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 0143cc78dbe..91fca7eb5d5 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -466,7 +466,7 @@ "polygon_dicts", // they use an explicitly specified database "01658_read_file_to_stringcolumn", "01721_engine_file_truncate_on_insert", // It's ok to execute in parallel but not several instances of the same test. - "01702_system_query_log", // It's ok to execute in parallel with oter tests but not several instances of the same test. + "01702_system_query_log", // It's ok to execute in parallel but not several instances of the same test. 
"01748_dictionary_table_dot", // creates database "00950_dict_get", "01615_random_one_shard_insertion", @@ -514,5 +514,7 @@ "02001_add_default_database_to_system_users", ///create user "02002_row_level_filter_bug", ///create user "02015_system_views" + "02002_row_level_filter_bug", ///create user + "01747_system_session_log_long" // Reads from system.session_log and can't be run in parallel with any other test (since almost any other test writes to session_log) ] } From 3ca0b0c8605f1933d237c870b0038d40401331d4 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Mon, 30 Aug 2021 19:08:02 +0300 Subject: [PATCH 13/43] Fixed GCC-9 build --- src/Interpreters/SessionLog.cpp | 8 ++++---- src/Interpreters/SessionLog.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Interpreters/SessionLog.cpp b/src/Interpreters/SessionLog.cpp index 2d2f1358656..4967cb867c8 100644 --- a/src/Interpreters/SessionLog.cpp +++ b/src/Interpreters/SessionLog.cpp @@ -202,11 +202,11 @@ void SessionLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insertData(auth_failure_reason.data(), auth_failure_reason.length()); } -void SessionLog::addLoginSuccess(const UUID & session_id, std::optional session_name, const Context & context) +void SessionLog::addLoginSuccess(const UUID & session_id, std::optional session_name, const Context & login_context) { - const auto access = context.getAccess(); - const auto & settings = context.getSettingsRef(); - const auto & client_info = context.getClientInfo(); + const auto access = login_context.getAccess(); + const auto & settings = login_context.getSettingsRef(); + const auto & client_info = login_context.getClientInfo(); DB::SessionLogElement log_entry(session_id, SESSION_LOGIN_SUCCESS); log_entry.client_info = client_info; diff --git a/src/Interpreters/SessionLog.h b/src/Interpreters/SessionLog.h index 2530809f9f9..fddabf45e4e 100644 --- a/src/Interpreters/SessionLog.h +++ b/src/Interpreters/SessionLog.h @@ -66,7 +66,7 @@ class SessionLog : public SystemLog using SystemLog::SystemLog; public: - void addLoginSuccess(const UUID & session_id, std::optional session_name, const Context & context); + void addLoginSuccess(const UUID & session_id, std::optional session_name, const Context & login_context); void addLoginFailure(const UUID & session_id, const ClientInfo & info, const String & user, const Exception & reason); void addLogOut(const UUID & session_id, const String & user, const ClientInfo & client_info); }; From 109d2f63d03ee974948151f4253886c2d66f8d0b Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Mon, 30 Aug 2021 20:37:07 +0300 Subject: [PATCH 14/43] Fixed tests and minor style issues --- programs/local/LocalServer.cpp | 3 +-- programs/server/Server.cpp | 4 ---- src/Core/MySQL/Authentication.cpp | 1 - src/Core/PostgreSQLProtocol.h | 1 - src/Interpreters/Session.cpp | 2 +- src/Interpreters/SystemLog.cpp | 7 +------ tests/queries/skip_list.json | 3 +-- 7 files changed, 4 insertions(+), 17 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 258743c7e16..278101e2c1d 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -394,7 +393,7 @@ void LocalServer::processQueries() auto context = session.makeQueryContext(); context->makeSessionContext(); /// initial_create_query requires a session context to be set. 
context->setCurrentQueryId(""); - + applyCmdSettings(context); /// Use the same query_id (and thread group) for all queries diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 09b6add62d0..bf4e2f947dc 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -54,10 +54,6 @@ #include #include #include -#include -#include -#include -#include #include #include #include diff --git a/src/Core/MySQL/Authentication.cpp b/src/Core/MySQL/Authentication.cpp index 76fb6bad833..0eb080892c1 100644 --- a/src/Core/MySQL/Authentication.cpp +++ b/src/Core/MySQL/Authentication.cpp @@ -2,7 +2,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Core/PostgreSQLProtocol.h b/src/Core/PostgreSQLProtocol.h index 6fc69d2d5b2..f0de4bbb843 100644 --- a/src/Core/PostgreSQLProtocol.h +++ b/src/Core/PostgreSQLProtocol.h @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Interpreters/Session.cpp b/src/Interpreters/Session.cpp index b1f7f4349f4..d8480f3858e 100644 --- a/src/Interpreters/Session.cpp +++ b/src/Interpreters/Session.cpp @@ -309,7 +309,7 @@ void Session::authenticate(const Credentials & credentials_, const Poco::Net::So { user_id = global_context->getAccessControlManager().login(credentials_, address.host()); } - catch(const Exception & e) + catch (const Exception & e) { if (auto session_log = getSessionLog()) session_log->addLoginFailure(session_id, *prepared_client_info, credentials_.getUserName(), e); diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index dfc16dae49c..2ccb84e1ffa 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -6,14 +6,9 @@ #include #include #include -#include +#include #include #include -#include -#include -#include -#include -#include #include #include diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 91fca7eb5d5..6442ac5ed12 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -513,8 +513,7 @@ "01530_drop_database_atomic_sync", /// creates database "02001_add_default_database_to_system_users", ///create user "02002_row_level_filter_bug", ///create user - "02015_system_views" - "02002_row_level_filter_bug", ///create user + "02015_system_views", "01747_system_session_log_long" // Reads from system.session_log and can't be run in parallel with any other test (since almost any other test writes to session_log) ] } From ed70ed6f71e60adfb9f1ee5d48cb158e4642c7cb Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 2 Sep 2021 00:52:38 +0300 Subject: [PATCH 15/43] Introduce connection_no_wait setting for MySQL engine. This allows avoiding a superfluous sleep during query execution, since this is not only undesired behaviour but may also hang the server: if you execute enough queries that use a MySQL database but do not allow enough connections (or your MySQL server is too slow), you may run out of threads in the global thread pool. Also note that right now it is possible to get a deadlock when the mysql pool is full; consider the following scenario: - you have m1 and m2 mysql tables - you have q1 and q2 queries, both queries join m1 and m2 - q1 allocated connection for m1 but cannot allocate connection for m2 - q2 allocated connection for m2 but cannot allocate connection for m1 - but to resolve the deadlock one of them would have to give up its connection, which is not possible right now...
And then you got no free threads and this: # grep -h ^202 /proc/$(pgrep clickhouse-serv)/task/*/syscall | cut -d' ' -f2 | sort | uniq -c | sort -nr | head 1554 0x7ffb60b92fe8 # mutex in mysqlxx::PoolWithFailover::get 1375 0x7ffb9f1c4748 # mutex in ::PoolEntryHelper::~PoolEntryHelper from DB::MultiplexedConnections::invalidateReplica 1160 0x7ffb612918b8 # mutex in mysqlxx::PoolWithFailover::get 42 0x7ffb9f057984 # mutex in ThreadPoolImpl::worker *NOTE: 202 is a `futex` with WAIT* (Went with `syscall` because debugging 10k+ threads is not easy, and eventually it may TRAP) --- base/mysqlxx/Pool.cpp | 29 +++++++++++++++++-- base/mysqlxx/Pool.h | 2 +- base/mysqlxx/PoolWithFailover.cpp | 12 +++++--- base/mysqlxx/PoolWithFailover.h | 6 +++- .../table-engines/integrations/mysql.md | 1 + .../MySQL/MaterializedMySQLSyncThread.cpp | 4 +-- src/Storages/MySQL/MySQLSettings.h | 1 + src/Storages/StorageMySQL.cpp | 10 +++++-- 8 files changed, 51 insertions(+), 14 deletions(-) diff --git a/base/mysqlxx/Pool.cpp b/base/mysqlxx/Pool.cpp index 2f47aa67356..cee386311d4 100644 --- a/base/mysqlxx/Pool.cpp +++ b/base/mysqlxx/Pool.cpp @@ -7,10 +7,22 @@ #endif #include - #include - #include +#include + + +namespace +{ + +inline uint64_t clock_gettime_ns(clockid_t clock_type = CLOCK_MONOTONIC) +{ + struct timespec ts; + clock_gettime(clock_type, &ts); + return uint64_t(ts.tv_sec * 1000000000LL + ts.tv_nsec); +} + +} namespace mysqlxx @@ -124,10 +136,15 @@ Pool::~Pool() } -Pool::Entry Pool::get() +Pool::Entry Pool::get(uint64_t wait_timeout) { std::unique_lock lock(mutex); + uint64_t deadline = 0; + /// UINT64_MAX -- wait indefinitely + if (wait_timeout && wait_timeout != UINT64_MAX) + deadline = clock_gettime_ns() + wait_timeout * 1'000'000'000; + initialize(); for (;;) { @@ -153,6 +170,12 @@ Pool::Entry Pool::get() logger.trace("(%s): Unable to create a new connection: Max number of connections has been reached.", getDescription()); } + if (!wait_timeout) + throw Poco::Exception("mysqlxx::Pool is full (wait is disabled, see connection_wait_timeout setting)"); + + if (deadline && clock_gettime_ns() >= deadline) + throw Poco::Exception("mysqlxx::Pool is full (connection_wait_timeout is exceeded)"); + lock.unlock(); logger.trace("(%s): Sleeping for %d seconds.", getDescription(), MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL); sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL); diff --git a/base/mysqlxx/Pool.h b/base/mysqlxx/Pool.h index 530e2c78cf2..08d8b85b4ac 100644 --- a/base/mysqlxx/Pool.h +++ b/base/mysqlxx/Pool.h @@ -189,7 +189,7 @@ public: ~Pool(); /// Allocates connection. - Entry get(); + Entry get(uint64_t wait_timeout); /// Allocates connection. /// If database is not accessible, returns empty Entry object. 
diff --git a/base/mysqlxx/PoolWithFailover.cpp b/base/mysqlxx/PoolWithFailover.cpp index e317ab7f228..14c0db9ecd5 100644 --- a/base/mysqlxx/PoolWithFailover.cpp +++ b/base/mysqlxx/PoolWithFailover.cpp @@ -21,8 +21,9 @@ PoolWithFailover::PoolWithFailover( const unsigned max_connections_, const size_t max_tries_) : max_tries(max_tries_) + , shareable(config_.getBool(config_name_ + ".share_connection", false)) + , wait_timeout(UINT64_MAX) { - shareable = config_.getBool(config_name_ + ".share_connection", false); if (config_.has(config_name_ + ".replica")) { Poco::Util::AbstractConfiguration::Keys replica_keys; @@ -80,9 +81,11 @@ PoolWithFailover::PoolWithFailover( const std::string & password, unsigned default_connections_, unsigned max_connections_, - size_t max_tries_) + size_t max_tries_, + uint64_t wait_timeout_) : max_tries(max_tries_) , shareable(false) + , wait_timeout(wait_timeout_) { /// Replicas have the same priority, but traversed replicas are moved to the end of the queue. for (const auto & [host, port] : addresses) @@ -101,6 +104,7 @@ PoolWithFailover::PoolWithFailover( PoolWithFailover::PoolWithFailover(const PoolWithFailover & other) : max_tries{other.max_tries} , shareable{other.shareable} + , wait_timeout(other.wait_timeout) { if (shareable) { @@ -140,7 +144,7 @@ PoolWithFailover::Entry PoolWithFailover::get() try { - Entry entry = shareable ? pool->get() : pool->tryGet(); + Entry entry = shareable ? pool->get(wait_timeout) : pool->tryGet(); if (!entry.isNull()) { @@ -172,7 +176,7 @@ PoolWithFailover::Entry PoolWithFailover::get() if (full_pool) { app.logger().error("All connections failed, trying to wait on a full pool " + (*full_pool)->getDescription()); - return (*full_pool)->get(); + return (*full_pool)->get(wait_timeout); } std::stringstream message; diff --git a/base/mysqlxx/PoolWithFailover.h b/base/mysqlxx/PoolWithFailover.h index 1c7a63e76c0..2bd5ec9f30a 100644 --- a/base/mysqlxx/PoolWithFailover.h +++ b/base/mysqlxx/PoolWithFailover.h @@ -80,6 +80,8 @@ namespace mysqlxx std::mutex mutex; /// Can the Pool be shared bool shareable; + /// Timeout for waiting free connection. + uint64_t wait_timeout = 0; public: using Entry = Pool::Entry; @@ -96,6 +98,7 @@ namespace mysqlxx * default_connections Number of connection in pool to each replica at start. * max_connections Maximum number of connections in pool to each replica. * max_tries_ Max number of connection tries. + * wait_timeout_ Timeout for waiting free connection. 
*/ PoolWithFailover( const std::string & config_name_, @@ -117,7 +120,8 @@ namespace mysqlxx const std::string & password, unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, - size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); + size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, + uint64_t wait_timeout_ = UINT64_MAX); PoolWithFailover(const PoolWithFailover & other); diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index a6402e00bc9..7eac159a645 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -19,6 +19,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] SETTINGS [connection_pool_size=16, ] [connection_max_tries=3, ] + [connection_wait_timeout=5, ] /* 0 -- do not wait */ [connection_auto_close=true ] ; ``` diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp index 53495aa3cb1..560d2d716c9 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp +++ b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp @@ -247,7 +247,7 @@ void MaterializedMySQLSyncThread::assertMySQLAvailable() { try { - checkMySQLVariables(pool.get(), getContext()->getSettingsRef()); + checkMySQLVariables(pool.get(/* wait_timeout= */ UINT64_MAX), getContext()->getSettingsRef()); } catch (const mysqlxx::ConnectionFailed & e) { @@ -729,7 +729,7 @@ void MaterializedMySQLSyncThread::onEvent(Buffers & buffers, const BinlogEventPt { /// Some behaviors(such as changing the value of "binlog_checksum") rotate the binlog file. /// To ensure that the synchronization continues, we need to handle these events - metadata.fetchMasterVariablesValue(pool.get()); + metadata.fetchMasterVariablesValue(pool.get(/* wait_timeout= */ UINT64_MAX)); client.setBinlogChecksum(metadata.binlog_checksum); } else if (receive_event->header.type != HEARTBEAT_EVENT) diff --git a/src/Storages/MySQL/MySQLSettings.h b/src/Storages/MySQL/MySQLSettings.h index da8723c2ea6..872b0607e20 100644 --- a/src/Storages/MySQL/MySQLSettings.h +++ b/src/Storages/MySQL/MySQLSettings.h @@ -17,6 +17,7 @@ class ASTStorage; #define LIST_OF_MYSQL_SETTINGS(M) \ M(UInt64, connection_pool_size, 16, "Size of connection pool (if all connections are in use, the query will wait until some connection will be freed).", 0) \ M(UInt64, connection_max_tries, 3, "Number of retries for pool with failover", 0) \ + M(UInt64, connection_wait_timeout, 5, "Timeout (in seconds) for waiting for free connection (in case of there is already connection_pool_size active connections), 0 - do not wait.", 0) \ M(Bool, connection_auto_close, true, "Auto-close connection after query execution, i.e. 
disable connection reuse.", 0) \ DECLARE_SETTINGS_TRAITS(MySQLSettingsTraits, LIST_OF_MYSQL_SETTINGS) diff --git a/src/Storages/StorageMySQL.cpp b/src/Storages/StorageMySQL.cpp index 79bb1f59cc7..7f458ef82af 100644 --- a/src/Storages/StorageMySQL.cpp +++ b/src/Storages/StorageMySQL.cpp @@ -267,11 +267,15 @@ void registerStorageMySQL(StorageFactory & factory) throw Exception("connection_pool_size cannot be zero.", ErrorCodes::BAD_ARGUMENTS); auto addresses = parseRemoteDescriptionForExternalDatabase(host_port, max_addresses, 3306); - mysqlxx::PoolWithFailover pool(remote_database, addresses, - username, password, + mysqlxx::PoolWithFailover pool( + remote_database, + addresses, + username, + password, MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, mysql_settings.connection_pool_size, - mysql_settings.connection_max_tries); + mysql_settings.connection_max_tries, + mysql_settings.connection_wait_timeout); bool replace_query = false; std::string on_duplicate_clause; From 6d5f01a56bb1715c47de8444bfc85b39228f3081 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 2 Sep 2021 22:32:32 +0300 Subject: [PATCH 16/43] Cover MySQL setting connection_wait_timeout --- tests/integration/test_storage_mysql/test.py | 48 ++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py index a044528cacf..c7ede8dede4 100644 --- a/tests/integration/test_storage_mysql/test.py +++ b/tests/integration/test_storage_mysql/test.py @@ -3,7 +3,10 @@ from contextlib import contextmanager ## sudo -H pip install PyMySQL import pymysql.cursors import pytest +import time +import threading from helpers.cluster import ClickHouseCluster +from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) @@ -319,6 +322,51 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL conn.close() +# Check that limited connection_wait_timeout (via connection_pool_size=1) will throw. 
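The integration test that follows exercises this behaviour end to end. For context, here is a hedged SQL-level sketch of how the new setting is meant to be used; host, database, credentials and table names are placeholders, and the values simply mirror the documentation and test added in this patch.

``` sql
-- Sketch only: placeholder host, database, credentials and table names.
CREATE TABLE mysql_proxy_table (id UInt32, name String)
ENGINE = MySQL('mysql-host:3306', 'remote_db', 'remote_table', 'user', 'password')
SETTINGS connection_pool_size = 1,    -- deliberately tiny pool so waiting is observable
         connection_wait_timeout = 2; -- seconds to wait for a free connection; 0 means do not wait

-- With the single pooled connection held by another query, a concurrent SELECT now fails
-- after roughly 2 seconds with "mysqlxx::Pool is full (connection_wait_timeout is exceeded)"
-- instead of sleeping and retrying indefinitely.
```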
+def test_settings_connection_wait_timeout(started_cluster): + table_name = 'test_settings_connection_wait_timeout' + node1.query(f'DROP TABLE IF EXISTS {table_name}') + wait_timeout = 2 + + conn = get_mysql_conn(started_cluster, cluster.mysql_ip) + drop_mysql_table(conn, table_name) + create_mysql_table(conn, table_name) + + node1.query(''' + CREATE TABLE {} + ( + id UInt32, + name String, + age UInt32, + money UInt32 + ) + ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse') + SETTINGS connection_wait_timeout={}, connection_pool_size=1 + '''.format(table_name, table_name, wait_timeout) + ) + + node1.query("INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format(table_name)) + + def worker(): + node1.query("SELECT sleepEachRow(1) FROM {}".format(table_name)) + + worker_thread = threading.Thread(target=worker) + worker_thread.start() + + # ensure that first query started in worker_thread + time.sleep(1) + + started = time.time() + with pytest.raises(QueryRuntimeException, match=r"Exception: mysqlxx::Pool is full \(connection_wait_timeout is exceeded\)"): + node1.query("SELECT sleepEachRow(1) FROM {}".format(table_name)) + ended = time.time() + assert (ended - started) >= wait_timeout + + worker_thread.join() + + drop_mysql_table(conn, table_name) + conn.close() + if __name__ == '__main__': with contextmanager(started_cluster)() as cluster: for name, instance in list(cluster.instances.items()): From 69e7e1dff7dd2a14bea037fb8facfad2ffbe8084 Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 3 Sep 2021 14:00:57 +0300 Subject: [PATCH 17/43] Update Dockerfile --- docker/packager/deb/Dockerfile | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docker/packager/deb/Dockerfile b/docker/packager/deb/Dockerfile index b6f23c55aa6..22bba94f250 100644 --- a/docker/packager/deb/Dockerfile +++ b/docker/packager/deb/Dockerfile @@ -37,9 +37,7 @@ RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \ RUN apt-get update \ && apt-get install \ alien \ - clang-11 \ clang-12 \ - clang-tidy-11 \ clang-tidy-12 \ cmake \ debhelper \ @@ -47,10 +45,7 @@ RUN apt-get update \ gdb \ git \ gperf \ - lld-11 \ lld-12 \ - llvm-11 \ - llvm-11-dev \ llvm-12 \ llvm-12-dev \ moreutils \ From 4fe3909d74425b0ddf168369c176889a2931caad Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 3 Sep 2021 14:01:47 +0300 Subject: [PATCH 18/43] Update Dockerfile --- docker/packager/binary/Dockerfile | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 05834fec493..f5d496ce97f 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -39,8 +39,6 @@ RUN apt-get update \ bash \ build-essential \ ccache \ - clang-11 \ - clang-tidy-11 \ cmake \ curl \ g++-10 \ @@ -50,9 +48,6 @@ RUN apt-get update \ gperf \ libicu-dev \ libreadline-dev \ - lld-11 \ - llvm-11 \ - llvm-11-dev \ clang-12 \ clang-tidy-12 \ lld-12 \ From ac2d9a73a8d62d72114b1ab27a656c922b7f05be Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 4 Sep 2021 13:07:59 +0300 Subject: [PATCH 19/43] User managed slots --- .../materialized-postgresql.md | 14 ++++- docs/en/operations/settings/settings.md | 10 +++- .../DatabaseMaterializedPostgreSQL.cpp | 6 +- .../MaterializedPostgreSQLSettings.h | 2 + .../PostgreSQLReplicationHandler.cpp | 57 ++++++++++++++----- .../PostgreSQL/PostgreSQLReplicationHandler.h | 10 ++-- .../StorageMaterializedPostgreSQL.cpp | 6 +- .../test.py | 41 ++++++++++++- 8 files 
changed, 119 insertions(+), 27 deletions(-) diff --git a/docs/en/engines/database-engines/materialized-postgresql.md b/docs/en/engines/database-engines/materialized-postgresql.md index 89c7c803bb3..77a5f2af0e0 100644 --- a/docs/en/engines/database-engines/materialized-postgresql.md +++ b/docs/en/engines/database-engines/materialized-postgresql.md @@ -31,6 +31,10 @@ ENGINE = MaterializedPostgreSQL('host:port', ['database' | database], 'user', 'p - [materialized_postgresql_allow_automatic_update](../../operations/settings/settings.md#materialized-postgresql-allow-automatic-update) +- [materialized_postgresql_replication_slot](../../operations/settings/settings.md#materialized-postgresql-replication-slot) + +- [materialized_postgresql_snapshot](../../operations/settings/settings.md#materialized-postgresql-snapshot) + ``` sql CREATE DATABASE database1 ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') @@ -73,7 +77,7 @@ WHERE oid = 'postgres_table'::regclass; !!! warning "Warning" Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used. - + ## Example of Use {#example-of-use} ``` sql @@ -82,3 +86,11 @@ ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres SELECT * FROM postgresql_db.postgres_table; ``` + +## Notes {#notes} + +- Failover of the logical replication slot. + +Logical Replication Slots which exist on the primary are not available on standby replicas. +So if there is a failover, the new primary (the old physical standby) won’t be aware of any slots which existed on the old primary. This will lead to broken replication from PostgreSQL. +A solution to this is to manage replication slots yourself and define a permanent replication slot (some information can be found [here](https://patroni.readthedocs.io/en/latest/SETTINGS.html)). You'll need to pass the slot name via the `materialized_postgresql_replication_slot` setting, and the slot has to be created with the `EXPORT SNAPSHOT` option. The snapshot identifier needs to be passed via the `materialized_postgresql_snapshot` setting. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index a1c7d1aab32..5635321b598 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -3436,6 +3436,14 @@ Possible values: Default value: `0`. +## materialized_postgresql_replication_slot {#materialized-postgresql-replication-slot} + +Allows using user-managed replication slots. Must be used together with `materialized_postgresql_snapshot`. + +## materialized_postgresql_snapshot {#materialized-postgresql-snapshot} + +A text string identifying a snapshot, from which the initial dump of tables will be performed. Must be used together with `materialized_postgresql_replication_slot`. + ## allow_experimental_projection_optimization {#allow-experimental-projection-optimization} Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md#projections) optimization when processing `SELECT` queries. @@ -3449,7 +3457,7 @@ Default value: `0`.
## force_optimize_projection {#force-optimize-projection} -Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting). +Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting). Possible values: diff --git a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp index c9ea8d12ef2..218dda94d31 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp @@ -61,10 +61,8 @@ void DatabaseMaterializedPostgreSQL::startSynchronization() connection_info, getContext(), is_attach, - settings->materialized_postgresql_max_block_size.value, - settings->materialized_postgresql_allow_automatic_update, - /* is_materialized_postgresql_database = */ true, - settings->materialized_postgresql_tables_list.value); + *settings, + /* is_materialized_postgresql_database = */ true); postgres::Connection connection(connection_info); NameSet tables_to_replicate; diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.h b/src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.h index 1d986b223e9..cc147a01d32 100644 --- a/src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.h +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.h @@ -17,6 +17,8 @@ namespace DB M(UInt64, materialized_postgresql_max_block_size, 65536, "Number of row collected before flushing data into table.", 0) \ M(String, materialized_postgresql_tables_list, "", "List of tables for MaterializedPostgreSQL database engine", 0) \ M(Bool, materialized_postgresql_allow_automatic_update, false, "Allow to reload table in the background, when schema changes are detected", 0) \ + M(String, materialized_postgresql_replication_slot, "", "A user-created replication slot", 0) \ + M(String, materialized_postgresql_snapshot, "", "User provided snapshot in case he manages replication slots himself", 0) \ DECLARE_SETTINGS_TRAITS(MaterializedPostgreSQLSettingsTraits, LIST_OF_MATERIALIZED_POSTGRESQL_SETTINGS) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index c8c74d2ddaa..1bda6d13e11 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -32,24 +32,28 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const postgres::ConnectionInfo & connection_info_, ContextPtr context_, bool is_attach_, - const size_t max_block_size_, - bool allow_automatic_update_, - bool is_materialized_postgresql_database_, - const String tables_list_) + const MaterializedPostgreSQLSettings & replication_settings, + bool is_materialized_postgresql_database_) : log(&Poco::Logger::get("PostgreSQLReplicationHandler")) , context(context_) , is_attach(is_attach_) , remote_database_name(remote_database_name_) , current_database_name(current_database_name_) , connection_info(connection_info_) - , max_block_size(max_block_size_) - , allow_automatic_update(allow_automatic_update_) + , 
max_block_size(replication_settings.materialized_postgresql_max_block_size) + , allow_automatic_update(replication_settings.materialized_postgresql_allow_automatic_update) , is_materialized_postgresql_database(is_materialized_postgresql_database_) - , tables_list(tables_list_) + , tables_list(replication_settings.materialized_postgresql_tables_list) + , user_provided_snapshot(replication_settings.materialized_postgresql_snapshot) , connection(std::make_shared(connection_info_)) , milliseconds_to_wait(RESCHEDULE_MS) { - replication_slot = fmt::format("{}_ch_replication_slot", replication_identifier); + replication_slot = replication_settings.materialized_postgresql_replication_slot; + if (replication_slot.empty()) + { + user_managed_slot = false; + replication_slot = fmt::format("{}_ch_replication_slot", replication_identifier); + } publication_name = fmt::format("{}_ch_publication", replication_identifier); startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); @@ -121,7 +125,20 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) auto initial_sync = [&]() { - createReplicationSlot(tx, start_lsn, snapshot_name); + LOG_TRACE(log, "Starting tables sync load"); + + if (user_managed_slot) + { + if (user_provided_snapshot.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Using a user-defined replication slot must be provided with a snapshot from EXPORT SNAPSHOT when the slot is created." + "Pass it to `materialized_postgresql_snapshot` setting"); + snapshot_name = user_provided_snapshot; + } + else + { + createReplicationSlot(tx, start_lsn, snapshot_name); + } for (const auto & [table_name, storage] : materialized_storages) { @@ -147,12 +164,17 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) /// Recreation of a replication slot imposes reloading of all tables. if (!isReplicationSlotExist(tx, start_lsn, /* temporary */false)) { + if (user_managed_slot) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Having replication slot `{}` from settings, but it does not exist", replication_slot); + initial_sync(); } /// Always drop replication slot if it is CREATE query and not ATTACH. 
else if (!is_attach || new_publication) { - dropReplicationSlot(tx); + if (!user_managed_slot) + dropReplicationSlot(tx); + initial_sync(); } /// Synchronization and initial load already took place - do not create any new tables, just fetch StoragePtr's @@ -376,6 +398,8 @@ bool PostgreSQLReplicationHandler::isReplicationSlotExist(pqxx::nontransaction & void PostgreSQLReplicationHandler::createReplicationSlot( pqxx::nontransaction & tx, String & start_lsn, String & snapshot_name, bool temporary) { + assert(temporary || !user_managed_slot); + String query_str, slot_name; if (temporary) slot_name = replication_slot + "_tmp"; @@ -401,6 +425,8 @@ void PostgreSQLReplicationHandler::createReplicationSlot( void PostgreSQLReplicationHandler::dropReplicationSlot(pqxx::nontransaction & tx, bool temporary) { + assert(temporary || !user_managed_slot); + std::string slot_name; if (temporary) slot_name = replication_slot + "_tmp"; @@ -433,14 +459,17 @@ void PostgreSQLReplicationHandler::shutdownFinal() connection->execWithRetry([&](pqxx::nontransaction & tx) { - if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false)) - dropReplicationSlot(tx, /* temporary */false); + if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */true)) + dropReplicationSlot(tx, /* temporary */true); }); + if (user_managed_slot) + return; + connection->execWithRetry([&](pqxx::nontransaction & tx) { - if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */true)) - dropReplicationSlot(tx, /* temporary */true); + if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false)) + dropReplicationSlot(tx, /* temporary */false); }); } catch (Exception & e) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 3a0bedc0852..eacf6b69b3b 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -1,6 +1,7 @@ #pragma once #include "MaterializedPostgreSQLConsumer.h" +#include "MaterializedPostgreSQLSettings.h" #include #include @@ -25,10 +26,8 @@ public: const postgres::ConnectionInfo & connection_info_, ContextPtr context_, bool is_attach_, - const size_t max_block_size_, - bool allow_automatic_update_, - bool is_materialized_postgresql_database_, - const String tables_list = ""); + const MaterializedPostgreSQLSettings & replication_settings, + bool is_materialized_postgresql_database_); /// Activate task to be run from a separate thread: wait until connection is available and call startReplication(). void startup(); @@ -108,6 +107,9 @@ private: /// A coma-separated list of tables, which are going to be replicated for database engine. By default, a whole database is replicated. String tables_list; + bool user_managed_slot = true; + String user_provided_snapshot; + String replication_slot, publication_name; /// Shared between replication_consumer and replication_handler, but never accessed concurrently. 
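Putting the new settings together, here is a hedged end-to-end sketch; the slot name, connection parameters and the snapshot identifier are illustrative placeholders, and the PostgreSQL command matches the one used in the integration test further below.

``` sql
-- On PostgreSQL, over a replication connection, create the permanent slot and export a snapshot:
--   CREATE_REPLICATION_SLOT user_slot LOGICAL pgoutput EXPORT_SNAPSHOT
-- The command returns the slot name, the starting LSN and a snapshot identifier.

-- On ClickHouse, pass both values explicitly (placeholder values shown):
CREATE DATABASE postgres_mirror
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password')
SETTINGS materialized_postgresql_replication_slot = 'user_slot',
         materialized_postgresql_snapshot = '00000003-0000001B-1'; -- snapshot id returned above (illustrative value)
```

Because the slot is user-managed, ClickHouse neither creates nor drops it; cleanup stays with the operator, which is why the integration test drops it explicitly via `pg_drop_replication_slot`.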
diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index aa27a54cdac..73a685af9b4 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -64,6 +64,8 @@ StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL( setInMemoryMetadata(storage_metadata); String replication_identifier = remote_database_name + "_" + remote_table_name_; + replication_settings->materialized_postgresql_tables_list = remote_table_name_; + replication_handler = std::make_unique( replication_identifier, remote_database_name, @@ -71,8 +73,8 @@ StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL( connection_info, getContext(), is_attach, - replication_settings->materialized_postgresql_max_block_size.value, - /* allow_automatic_update */ false, /* is_materialized_postgresql_database */false); + *replication_settings, + /* is_materialized_postgresql_database */false); if (!is_attach) { diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 68b42d91fb6..1e7188458a9 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -31,18 +31,30 @@ postgres_table_template_3 = """ key1 Integer NOT NULL, value1 Integer, key2 Integer NOT NULL, value2 Integer NOT NULL) """ -def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database'): +def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database', replication=False): if database == True: conn_string = "host={} port={} dbname='{}' user='postgres' password='mysecretpassword'".format(ip, port, database_name) else: conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(ip, port) + if replication: + conn_string += " replication='database'" + conn = psycopg2.connect(conn_string) if auto_commit: conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) conn.autocommit = True return conn +def create_replication_slot(conn, slot_name='user_slot'): + cursor = conn.cursor() + cursor.execute('CREATE_REPLICATION_SLOT {} LOGICAL pgoutput EXPORT_SNAPSHOT'.format(slot_name)) + result = cursor.fetchall() + print(result[0][0]) # slot name + print(result[0][1]) # start lsn + print(result[0][2]) # snapshot + return result[0][2] + def create_postgres_db(cursor, name='postgres_database'): cursor.execute("CREATE DATABASE {}".format(name)) @@ -941,6 +953,33 @@ def test_quoting(started_cluster): drop_materialized_db() +def test_user_managed_slots(started_cluster): + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) + cursor = conn.cursor() + table_name = 'test_table' + create_postgres_table(cursor, table_name); + instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name)) + + slot_name = 'user_slot' + replication_connection = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, + database=True, replication=True, auto_commit=True) + snapshot = create_replication_slot(replication_connection, slot_name=slot_name) + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=["materialized_postgresql_replication_slot = '{}'".format(slot_name), + 
"materialized_postgresql_snapshot = '{}'".format(snapshot)]) + check_tables_are_synchronized(table_name); + instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000, 10000)".format(table_name)) + check_tables_are_synchronized(table_name); + instance.restart_clickhouse() + instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(20000, 10000)".format(table_name)) + check_tables_are_synchronized(table_name); + drop_postgres_table(cursor, table_name) + drop_materialized_db() + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From 5f9952d7420d9f173c210abb27d829d0f9708c1e Mon Sep 17 00:00:00 2001 From: alesapin Date: Sat, 4 Sep 2021 15:56:19 +0300 Subject: [PATCH 20/43] Add clang-tidy-12 --- cmake/analysis.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/analysis.cmake b/cmake/analysis.cmake index 267bb34248b..24d8168e2c0 100644 --- a/cmake/analysis.cmake +++ b/cmake/analysis.cmake @@ -6,7 +6,7 @@ if (ENABLE_CLANG_TIDY) message(FATAL_ERROR "clang-tidy requires CMake version at least 3.6.") endif() - find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8") + find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-12" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8") if (CLANG_TIDY_PATH) message(STATUS From a879c907a228041a94e9dee387a56e5083c273fe Mon Sep 17 00:00:00 2001 From: alesapin Date: Sat, 4 Sep 2021 16:00:42 +0300 Subject: [PATCH 21/43] Update PVS hashsum --- docker/test/pvs/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/pvs/Dockerfile b/docker/test/pvs/Dockerfile index 7bd45ba4018..35e07748845 100644 --- a/docker/test/pvs/Dockerfile +++ b/docker/test/pvs/Dockerfile @@ -28,7 +28,7 @@ RUN apt-get update --yes \ ENV PKG_VERSION="pvs-studio-latest" RUN set -x \ - && export PUBKEY_HASHSUM="686e5eb8b3c543a5c54442c39ec876b6c2d912fe8a729099e600017ae53c877dda3368fe38ed7a66024fe26df6b5892a" \ + && export PUBKEY_HASHSUM="ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516" \ && wget -nv https://files.viva64.com/etc/pubkey.txt -O /tmp/pubkey.txt \ && echo "${PUBKEY_HASHSUM} /tmp/pubkey.txt" | sha384sum -c \ && apt-key add /tmp/pubkey.txt \ From 4df6fa1ae7739f9229a149a239c1a3c681d78b69 Mon Sep 17 00:00:00 2001 From: alesapin Date: Sat, 4 Sep 2021 16:09:12 +0300 Subject: [PATCH 22/43] Remove strange changes --- docker/packager/binary/build.sh | 23 +++++++---------------- docker/packager/packager | 3 --- 2 files changed, 7 insertions(+), 19 deletions(-) diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 26f83649762..71402a2fd66 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -2,24 +2,15 @@ set -x -e -if [ "1" == "${IS_CROSS_DARWIN:0}" ] -then - mkdir -p build/cmake/toolchain/darwin-x86_64 - tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1 - ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64 -fi +mkdir -p build/cmake/toolchain/darwin-x86_64 +tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1 +ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64 -if [ "1" == "${IS_CROSS_ARM:0}" ] -then - mkdir -p build/cmake/toolchain/linux-aarch64 - tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 
--strip-components=1 -fi +mkdir -p build/cmake/toolchain/linux-aarch64 +tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1 -if [ "1" == "${IS_CROSS_ARM:0}" ] -then - mkdir -p build/cmake/toolchain/freebsd-x86_64 - tar xJf freebsd-11.3-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1 -fi +mkdir -p build/cmake/toolchain/freebsd-x86_64 +tar xJf freebsd-11.3-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1 # Uncomment to debug ccache. Don't put ccache log in /output right away, or it # will be confusingly packed into the "performance" package. diff --git a/docker/packager/packager b/docker/packager/packager index 1f472ed98e1..f37d64e9949 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -78,7 +78,6 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ cmake_flags.append("-DLINKER_NAME=ld.lld") if is_cross_darwin: - result.append("IS_CROSS_DARWIN=1") cc = compiler[:-len(DARWIN_SUFFIX)] cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/x86_64-apple-darwin-ar") cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/x86_64-apple-darwin-install_name_tool") @@ -93,11 +92,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ cmake_flags.append("-DLINKER_NAME=/cctools/bin/aarch64-apple-darwin-ld") cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-aarch64.cmake") elif is_cross_arm: - result.append("IS_CROSS_ARM=1") cc = compiler[:-len(ARM_SUFFIX)] cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake") elif is_cross_freebsd: - result.append("IS_CROSS_FREEBSD=1") cc = compiler[:-len(FREEBSD_SUFFIX)] cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake") else: From 28517e57fc04427b007bbeddc17741afd39ea90a Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 4 Sep 2021 23:55:59 +0300 Subject: [PATCH 23/43] Fix test --- .../test_postgresql_replica_database_engine/test.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 1e7188458a9..1dd096087ff 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -55,6 +55,9 @@ def create_replication_slot(conn, slot_name='user_slot'): print(result[0][2]) # snapshot return result[0][2] +def drop_replication_slot(conn, slot_name='user_slot'): + cursor = conn.cursor() + cursor.execute("select pg_drop_replication_slot('{}')".format(slot_name)) def create_postgres_db(cursor, name='postgres_database'): cursor.execute("CREATE DATABASE {}".format(name)) @@ -978,6 +981,7 @@ def test_user_managed_slots(started_cluster): check_tables_are_synchronized(table_name); drop_postgres_table(cursor, table_name) drop_materialized_db() + drop_replication_slot(replication_connection, slot_name) if __name__ == '__main__': From 40d4d64a65e10739b2975e8055490a2a03d0e688 Mon Sep 17 00:00:00 2001 From: alesapin Date: Sun, 5 Sep 2021 15:50:25 +0300 Subject: [PATCH 24/43] Fix PVS Image --- docker/test/pvs/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/pvs/Dockerfile b/docker/test/pvs/Dockerfile index 35e07748845..438f0bd07ec 100644 --- a/docker/test/pvs/Dockerfile +++ b/docker/test/pvs/Dockerfile @@ -38,7 +38,7 @@ RUN set 
-x \ && dpkg -i "${PKG_VERSION}.deb" CMD echo "Running PVS version $PKG_VERSION" && cd /repo_folder && pvs-studio-analyzer credentials $LICENCE_NAME $LICENCE_KEY -o ./licence.lic \ - && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF \ + && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF -DCMAKE_C_COMPILER=clang-12 -DCMAKE_CXX_COMPILER=clang\+\+-12 \ && ninja re2_st clickhouse_grpc_protos \ && pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j 4 -l ./licence.lic; \ cp /repo_folder/pvs-studio.log /test_output; \ From 18a7adf0fa17667ef03829d122d88ac23cf93d71 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Sun, 29 Aug 2021 16:49:30 +0800 Subject: [PATCH 25/43] Fix NOT-IN index optimization when not all keys are used. --- src/Interpreters/Set.cpp | 8 ++++---- src/Interpreters/Set.h | 4 +++- .../01891_not_in_partition_prune.reference | 2 ++ .../0_stateless/01891_not_in_partition_prune.sql | 15 +++++++++++++++ 4 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp index 5ab59ba3f07..5304859aeea 100644 --- a/src/Interpreters/Set.cpp +++ b/src/Interpreters/Set.cpp @@ -402,8 +402,8 @@ void Set::checkTypesEqual(size_t set_type_idx, const DataTypePtr & other_type) c + data_types[set_type_idx]->getName() + " on the right", ErrorCodes::TYPE_MISMATCH); } -MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector && index_mapping_) - : indexes_mapping(std::move(index_mapping_)) +MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector && indexes_mapping_) + : has_all_keys(set_elements.size() == indexes_mapping_.size()), indexes_mapping(std::move(indexes_mapping_)) { std::sort(indexes_mapping.begin(), indexes_mapping.end(), [](const KeyTuplePositionMapping & l, const KeyTuplePositionMapping & r) @@ -548,11 +548,11 @@ BoolMask MergeTreeSetIndex::checkInRange(const std::vector & key_ranges, break; } } - if (one_element_range) + if (one_element_range && has_all_keys) { /// Here we know that there is one element in range. /// The main difference with the normal case is that we can definitely say that - /// condition in this range always TRUE (can_be_false = 0) xor always FALSE (can_be_true = 0). + /// condition in this range is always TRUE (can_be_false = 0) or always FALSE (can_be_true = 0). /// Check if it's an empty range if (!left_included || !right_included) diff --git a/src/Interpreters/Set.h b/src/Interpreters/Set.h index 727a2c144a1..578913dd0d2 100644 --- a/src/Interpreters/Set.h +++ b/src/Interpreters/Set.h @@ -208,7 +208,7 @@ public: std::vector functions; }; - MergeTreeSetIndex(const Columns & set_elements, std::vector && index_mapping_); + MergeTreeSetIndex(const Columns & set_elements, std::vector && indexes_mapping_); size_t size() const { return ordered_set.at(0)->size(); } @@ -217,6 +217,8 @@ public: BoolMask checkInRange(const std::vector & key_ranges, const DataTypes & data_types) const; private: + // If all arguments in tuple are key columns, we can optimize NOT IN when there is only one element. 
+ bool has_all_keys; Columns ordered_set; std::vector indexes_mapping; diff --git a/tests/queries/0_stateless/01891_not_in_partition_prune.reference b/tests/queries/0_stateless/01891_not_in_partition_prune.reference index 628053cd4f8..9d2517ad760 100644 --- a/tests/queries/0_stateless/01891_not_in_partition_prune.reference +++ b/tests/queries/0_stateless/01891_not_in_partition_prune.reference @@ -4,3 +4,5 @@ 7 107 8 108 9 109 +1970-01-01 1 one +1970-01-01 3 three diff --git a/tests/queries/0_stateless/01891_not_in_partition_prune.sql b/tests/queries/0_stateless/01891_not_in_partition_prune.sql index edbfad93e5d..5bf90fdd65c 100644 --- a/tests/queries/0_stateless/01891_not_in_partition_prune.sql +++ b/tests/queries/0_stateless/01891_not_in_partition_prune.sql @@ -8,3 +8,18 @@ set max_rows_to_read = 5; select * from test1 where i not in (1,2,3,4,5) order by i; drop table test1; + +drop table if exists t1; +drop table if exists t2; + +create table t1 (date Date, a Float64, b String) Engine=MergeTree ORDER BY date; +create table t2 (date Date, a Float64, b String) Engine=MergeTree ORDER BY date; + +insert into t1(a, b) values (1, 'one'), (2, 'two'); +insert into t2(a, b) values (2, 'two'), (3, 'three'); + +select date, a, b from t1 where (date, a, b) NOT IN (select date,a,b from t2); +select date, a, b from t2 where (date, a, b) NOT IN (select date,a,b from t1); + +drop table t1; +drop table t2; From 7b8101f289b33bce483a45bc6f85737225a644b2 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 6 Sep 2021 09:09:35 +0300 Subject: [PATCH 26/43] Minor change --- src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp index b43e7656084..1fc279bff23 100644 --- a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp @@ -625,9 +625,8 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot() tryLogCurrentException(__PRETTY_FUNCTION__); return false; } - catch (const pqxx::broken_connection & e) + catch (const pqxx::broken_connection &) { - LOG_ERROR(log, "Connection error: {}", e.what()); connection->tryUpdateConnection(); return false; } @@ -641,6 +640,7 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot() if (error_message.find("out of relcache_callback_list slots") == std::string::npos) tryLogCurrentException(__PRETTY_FUNCTION__); + connection->tryUpdateConnection(); return false; } catch (const pqxx::conversion_error & e) From 73ef1233efbd301060e33b170c60c01f80e8bac8 Mon Sep 17 00:00:00 2001 From: alesapin Date: Sun, 5 Sep 2021 16:44:14 +0300 Subject: [PATCH 27/43] Fix tidy Fix tidy one more time --- .clang-tidy | 2 ++ src/Compression/CompressionCodecEncrypted.cpp | 2 +- src/IO/WriteBufferFromFile.cpp | 6 ++--- src/IO/WriteBufferFromFile.h | 1 - src/IO/WriteBufferFromFileDescriptor.cpp | 26 +++++++++---------- src/IO/WriteBufferFromFileDescriptor.h | 25 ++++++++++++------ 6 files changed, 35 insertions(+), 27 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index b0971418e0e..ecb8ac6dcbf 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -203,3 +203,5 @@ CheckOptions: value: CamelCase - key: readability-identifier-naming.UsingCase value: CamelCase + - key: modernize-loop-convert.UseCxx20ReverseRanges + value: false diff --git a/src/Compression/CompressionCodecEncrypted.cpp b/src/Compression/CompressionCodecEncrypted.cpp index 
6b921fb9c0a..ba3f12c32a0 100644 --- a/src/Compression/CompressionCodecEncrypted.cpp +++ b/src/Compression/CompressionCodecEncrypted.cpp @@ -113,7 +113,7 @@ namespace DB std::string CompressionCodecEncrypted::deriveKey(const std::string_view & master_key) { - std::string_view salt(""); // No salt: derive keys in a deterministic manner. + std::string_view salt; // No salt: derive keys in a deterministic manner. std::string_view info("Codec Encrypted('AES-128-GCM-SIV') key generation key"); std::array result; diff --git a/src/IO/WriteBufferFromFile.cpp b/src/IO/WriteBufferFromFile.cpp index 67cd7ba27d6..270882d0774 100644 --- a/src/IO/WriteBufferFromFile.cpp +++ b/src/IO/WriteBufferFromFile.cpp @@ -32,7 +32,7 @@ WriteBufferFromFile::WriteBufferFromFile( mode_t mode, char * existing_memory, size_t alignment) - : WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, alignment), file_name(file_name_) + : WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, alignment, file_name_) { ProfileEvents::increment(ProfileEvents::FileOpen); @@ -65,9 +65,7 @@ WriteBufferFromFile::WriteBufferFromFile( size_t buf_size, char * existing_memory, size_t alignment) - : - WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment), - file_name(original_file_name.empty() ? "(fd = " + toString(fd_) + ")" : original_file_name) + : WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment, original_file_name) { fd_ = -1; } diff --git a/src/IO/WriteBufferFromFile.h b/src/IO/WriteBufferFromFile.h index b7d58638113..584a0221f1a 100644 --- a/src/IO/WriteBufferFromFile.h +++ b/src/IO/WriteBufferFromFile.h @@ -25,7 +25,6 @@ namespace DB class WriteBufferFromFile : public WriteBufferFromFileDescriptor { protected: - std::string file_name; CurrentMetrics::Increment metric_increment{CurrentMetrics::OpenFileForWrite}; public: diff --git a/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp index cd265653bb9..f1afca171d2 100644 --- a/src/IO/WriteBufferFromFileDescriptor.cpp +++ b/src/IO/WriteBufferFromFileDescriptor.cpp @@ -61,7 +61,9 @@ void WriteBufferFromFileDescriptor::nextImpl() if ((-1 == res || 0 == res) && errno != EINTR) { ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteFailed); - throwFromErrnoWithPath("Cannot write to file " + getFileName(), getFileName(), + + /// Don't use getFileName() here because this method can be called from destructor + throwFromErrnoWithPath("Cannot write to file " + file_name, file_name, ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR); } @@ -74,19 +76,17 @@ void WriteBufferFromFileDescriptor::nextImpl() } -/// Name or some description of file. -std::string WriteBufferFromFileDescriptor::getFileName() const -{ - return "(fd = " + toString(fd) + ")"; -} - - WriteBufferFromFileDescriptor::WriteBufferFromFileDescriptor( int fd_, size_t buf_size, char * existing_memory, - size_t alignment) - : WriteBufferFromFileBase(buf_size, existing_memory, alignment), fd(fd_) {} + size_t alignment, + const std::string & file_name_) + : WriteBufferFromFileBase(buf_size, existing_memory, alignment) + , fd(fd_) + , file_name(file_name_.empty() ? 
"(fd = " + toString(fd) + ")" : file_name_) +{ +} WriteBufferFromFileDescriptor::~WriteBufferFromFileDescriptor() @@ -115,7 +115,7 @@ void WriteBufferFromFileDescriptor::sync() } -off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) +off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) // NOLINT { off_t res = lseek(fd, offset, whence); if (-1 == res) @@ -125,7 +125,7 @@ off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) } -void WriteBufferFromFileDescriptor::truncate(off_t length) +void WriteBufferFromFileDescriptor::truncate(off_t length) // NOLINT { int res = ftruncate(fd, length); if (-1 == res) @@ -133,7 +133,7 @@ void WriteBufferFromFileDescriptor::truncate(off_t length) } -off_t WriteBufferFromFileDescriptor::size() +off_t WriteBufferFromFileDescriptor::size() const { struct stat buf; int res = fstat(fd, &buf); diff --git a/src/IO/WriteBufferFromFileDescriptor.h b/src/IO/WriteBufferFromFileDescriptor.h index 18c0ac64f63..aef332b38b0 100644 --- a/src/IO/WriteBufferFromFileDescriptor.h +++ b/src/IO/WriteBufferFromFileDescriptor.h @@ -13,17 +13,17 @@ class WriteBufferFromFileDescriptor : public WriteBufferFromFileBase protected: int fd; + /// If file has name contains filename, otherwise contains string "(fd=...)" + std::string file_name; + void nextImpl() override; - - /// Name or some description of file. - std::string getFileName() const override; - public: WriteBufferFromFileDescriptor( int fd_ = -1, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + const std::string & file_name_ = ""); /** Could be used before initialization if needed 'fd' was not passed to constructor. * It's not possible to change 'fd' during work. @@ -42,10 +42,19 @@ public: void sync() override; - off_t seek(off_t offset, int whence); - void truncate(off_t length); + /// clang-tidy wants these methods to be const, but + /// they are not const semantically + off_t seek(off_t offset, int whence); // NOLINT + void truncate(off_t length); // NOLINT - off_t size(); + /// Name or some description of file. + std::string getFileName() const override + { + return file_name; + } + + + off_t size() const; }; } From 2e5e017d6d9adbab69de388eb5cad88bfcf4310b Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Sep 2021 10:52:20 +0300 Subject: [PATCH 28/43] Revert "Fix tidy" This reverts commit 73ef1233efbd301060e33b170c60c01f80e8bac8. --- .clang-tidy | 2 -- src/Compression/CompressionCodecEncrypted.cpp | 2 +- src/IO/WriteBufferFromFile.cpp | 6 +++-- src/IO/WriteBufferFromFile.h | 1 + src/IO/WriteBufferFromFileDescriptor.cpp | 26 +++++++++---------- src/IO/WriteBufferFromFileDescriptor.h | 25 ++++++------------ 6 files changed, 27 insertions(+), 35 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index ecb8ac6dcbf..b0971418e0e 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -203,5 +203,3 @@ CheckOptions: value: CamelCase - key: readability-identifier-naming.UsingCase value: CamelCase - - key: modernize-loop-convert.UseCxx20ReverseRanges - value: false diff --git a/src/Compression/CompressionCodecEncrypted.cpp b/src/Compression/CompressionCodecEncrypted.cpp index ba3f12c32a0..6b921fb9c0a 100644 --- a/src/Compression/CompressionCodecEncrypted.cpp +++ b/src/Compression/CompressionCodecEncrypted.cpp @@ -113,7 +113,7 @@ namespace DB std::string CompressionCodecEncrypted::deriveKey(const std::string_view & master_key) { - std::string_view salt; // No salt: derive keys in a deterministic manner. 
+ std::string_view salt(""); // No salt: derive keys in a deterministic manner. std::string_view info("Codec Encrypted('AES-128-GCM-SIV') key generation key"); std::array result; diff --git a/src/IO/WriteBufferFromFile.cpp b/src/IO/WriteBufferFromFile.cpp index 270882d0774..67cd7ba27d6 100644 --- a/src/IO/WriteBufferFromFile.cpp +++ b/src/IO/WriteBufferFromFile.cpp @@ -32,7 +32,7 @@ WriteBufferFromFile::WriteBufferFromFile( mode_t mode, char * existing_memory, size_t alignment) - : WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, alignment, file_name_) + : WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, alignment), file_name(file_name_) { ProfileEvents::increment(ProfileEvents::FileOpen); @@ -65,7 +65,9 @@ WriteBufferFromFile::WriteBufferFromFile( size_t buf_size, char * existing_memory, size_t alignment) - : WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment, original_file_name) + : + WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment), + file_name(original_file_name.empty() ? "(fd = " + toString(fd_) + ")" : original_file_name) { fd_ = -1; } diff --git a/src/IO/WriteBufferFromFile.h b/src/IO/WriteBufferFromFile.h index 584a0221f1a..b7d58638113 100644 --- a/src/IO/WriteBufferFromFile.h +++ b/src/IO/WriteBufferFromFile.h @@ -25,6 +25,7 @@ namespace DB class WriteBufferFromFile : public WriteBufferFromFileDescriptor { protected: + std::string file_name; CurrentMetrics::Increment metric_increment{CurrentMetrics::OpenFileForWrite}; public: diff --git a/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp index f1afca171d2..cd265653bb9 100644 --- a/src/IO/WriteBufferFromFileDescriptor.cpp +++ b/src/IO/WriteBufferFromFileDescriptor.cpp @@ -61,9 +61,7 @@ void WriteBufferFromFileDescriptor::nextImpl() if ((-1 == res || 0 == res) && errno != EINTR) { ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteFailed); - - /// Don't use getFileName() here because this method can be called from destructor - throwFromErrnoWithPath("Cannot write to file " + file_name, file_name, + throwFromErrnoWithPath("Cannot write to file " + getFileName(), getFileName(), ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR); } @@ -76,17 +74,19 @@ void WriteBufferFromFileDescriptor::nextImpl() } +/// Name or some description of file. +std::string WriteBufferFromFileDescriptor::getFileName() const +{ + return "(fd = " + toString(fd) + ")"; +} + + WriteBufferFromFileDescriptor::WriteBufferFromFileDescriptor( int fd_, size_t buf_size, char * existing_memory, - size_t alignment, - const std::string & file_name_) - : WriteBufferFromFileBase(buf_size, existing_memory, alignment) - , fd(fd_) - , file_name(file_name_.empty() ? 
"(fd = " + toString(fd) + ")" : file_name_) -{ -} + size_t alignment) + : WriteBufferFromFileBase(buf_size, existing_memory, alignment), fd(fd_) {} WriteBufferFromFileDescriptor::~WriteBufferFromFileDescriptor() @@ -115,7 +115,7 @@ void WriteBufferFromFileDescriptor::sync() } -off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) // NOLINT +off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) { off_t res = lseek(fd, offset, whence); if (-1 == res) @@ -125,7 +125,7 @@ off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) // NOLINT } -void WriteBufferFromFileDescriptor::truncate(off_t length) // NOLINT +void WriteBufferFromFileDescriptor::truncate(off_t length) { int res = ftruncate(fd, length); if (-1 == res) @@ -133,7 +133,7 @@ void WriteBufferFromFileDescriptor::truncate(off_t length) // NOLINT } -off_t WriteBufferFromFileDescriptor::size() const +off_t WriteBufferFromFileDescriptor::size() { struct stat buf; int res = fstat(fd, &buf); diff --git a/src/IO/WriteBufferFromFileDescriptor.h b/src/IO/WriteBufferFromFileDescriptor.h index aef332b38b0..18c0ac64f63 100644 --- a/src/IO/WriteBufferFromFileDescriptor.h +++ b/src/IO/WriteBufferFromFileDescriptor.h @@ -13,17 +13,17 @@ class WriteBufferFromFileDescriptor : public WriteBufferFromFileBase protected: int fd; - /// If file has name contains filename, otherwise contains string "(fd=...)" - std::string file_name; - void nextImpl() override; + + /// Name or some description of file. + std::string getFileName() const override; + public: WriteBufferFromFileDescriptor( int fd_ = -1, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, - size_t alignment = 0, - const std::string & file_name_ = ""); + size_t alignment = 0); /** Could be used before initialization if needed 'fd' was not passed to constructor. * It's not possible to change 'fd' during work. @@ -42,19 +42,10 @@ public: void sync() override; - /// clang-tidy wants these methods to be const, but - /// they are not const semantically - off_t seek(off_t offset, int whence); // NOLINT - void truncate(off_t length); // NOLINT + off_t seek(off_t offset, int whence); + void truncate(off_t length); - /// Name or some description of file. - std::string getFileName() const override - { - return file_name; - } - - - off_t size() const; + off_t size(); }; } From d1e91a786056901104eef98b530b54a37b62416d Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Sep 2021 12:16:52 +0300 Subject: [PATCH 29/43] Revert "Revert "Fix tidy"" This reverts commit 2e5e017d6d9adbab69de388eb5cad88bfcf4310b. 
--- .clang-tidy | 2 ++ src/Compression/CompressionCodecEncrypted.cpp | 2 +- src/IO/WriteBufferFromFile.cpp | 6 ++--- src/IO/WriteBufferFromFile.h | 1 - src/IO/WriteBufferFromFileDescriptor.cpp | 26 +++++++++---------- src/IO/WriteBufferFromFileDescriptor.h | 25 ++++++++++++------ 6 files changed, 35 insertions(+), 27 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index b0971418e0e..ecb8ac6dcbf 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -203,3 +203,5 @@ CheckOptions: value: CamelCase - key: readability-identifier-naming.UsingCase value: CamelCase + - key: modernize-loop-convert.UseCxx20ReverseRanges + value: false diff --git a/src/Compression/CompressionCodecEncrypted.cpp b/src/Compression/CompressionCodecEncrypted.cpp index 6b921fb9c0a..ba3f12c32a0 100644 --- a/src/Compression/CompressionCodecEncrypted.cpp +++ b/src/Compression/CompressionCodecEncrypted.cpp @@ -113,7 +113,7 @@ namespace DB std::string CompressionCodecEncrypted::deriveKey(const std::string_view & master_key) { - std::string_view salt(""); // No salt: derive keys in a deterministic manner. + std::string_view salt; // No salt: derive keys in a deterministic manner. std::string_view info("Codec Encrypted('AES-128-GCM-SIV') key generation key"); std::array result; diff --git a/src/IO/WriteBufferFromFile.cpp b/src/IO/WriteBufferFromFile.cpp index 67cd7ba27d6..270882d0774 100644 --- a/src/IO/WriteBufferFromFile.cpp +++ b/src/IO/WriteBufferFromFile.cpp @@ -32,7 +32,7 @@ WriteBufferFromFile::WriteBufferFromFile( mode_t mode, char * existing_memory, size_t alignment) - : WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, alignment), file_name(file_name_) + : WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, alignment, file_name_) { ProfileEvents::increment(ProfileEvents::FileOpen); @@ -65,9 +65,7 @@ WriteBufferFromFile::WriteBufferFromFile( size_t buf_size, char * existing_memory, size_t alignment) - : - WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment), - file_name(original_file_name.empty() ? "(fd = " + toString(fd_) + ")" : original_file_name) + : WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment, original_file_name) { fd_ = -1; } diff --git a/src/IO/WriteBufferFromFile.h b/src/IO/WriteBufferFromFile.h index b7d58638113..584a0221f1a 100644 --- a/src/IO/WriteBufferFromFile.h +++ b/src/IO/WriteBufferFromFile.h @@ -25,7 +25,6 @@ namespace DB class WriteBufferFromFile : public WriteBufferFromFileDescriptor { protected: - std::string file_name; CurrentMetrics::Increment metric_increment{CurrentMetrics::OpenFileForWrite}; public: diff --git a/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp index cd265653bb9..f1afca171d2 100644 --- a/src/IO/WriteBufferFromFileDescriptor.cpp +++ b/src/IO/WriteBufferFromFileDescriptor.cpp @@ -61,7 +61,9 @@ void WriteBufferFromFileDescriptor::nextImpl() if ((-1 == res || 0 == res) && errno != EINTR) { ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteFailed); - throwFromErrnoWithPath("Cannot write to file " + getFileName(), getFileName(), + + /// Don't use getFileName() here because this method can be called from destructor + throwFromErrnoWithPath("Cannot write to file " + file_name, file_name, ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR); } @@ -74,19 +76,17 @@ void WriteBufferFromFileDescriptor::nextImpl() } -/// Name or some description of file. 
-std::string WriteBufferFromFileDescriptor::getFileName() const -{ - return "(fd = " + toString(fd) + ")"; -} - - WriteBufferFromFileDescriptor::WriteBufferFromFileDescriptor( int fd_, size_t buf_size, char * existing_memory, - size_t alignment) - : WriteBufferFromFileBase(buf_size, existing_memory, alignment), fd(fd_) {} + size_t alignment, + const std::string & file_name_) + : WriteBufferFromFileBase(buf_size, existing_memory, alignment) + , fd(fd_) + , file_name(file_name_.empty() ? "(fd = " + toString(fd) + ")" : file_name_) +{ +} WriteBufferFromFileDescriptor::~WriteBufferFromFileDescriptor() @@ -115,7 +115,7 @@ void WriteBufferFromFileDescriptor::sync() } -off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) +off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) // NOLINT { off_t res = lseek(fd, offset, whence); if (-1 == res) @@ -125,7 +125,7 @@ off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) } -void WriteBufferFromFileDescriptor::truncate(off_t length) +void WriteBufferFromFileDescriptor::truncate(off_t length) // NOLINT { int res = ftruncate(fd, length); if (-1 == res) @@ -133,7 +133,7 @@ void WriteBufferFromFileDescriptor::truncate(off_t length) } -off_t WriteBufferFromFileDescriptor::size() +off_t WriteBufferFromFileDescriptor::size() const { struct stat buf; int res = fstat(fd, &buf); diff --git a/src/IO/WriteBufferFromFileDescriptor.h b/src/IO/WriteBufferFromFileDescriptor.h index 18c0ac64f63..aef332b38b0 100644 --- a/src/IO/WriteBufferFromFileDescriptor.h +++ b/src/IO/WriteBufferFromFileDescriptor.h @@ -13,17 +13,17 @@ class WriteBufferFromFileDescriptor : public WriteBufferFromFileBase protected: int fd; + /// If file has name contains filename, otherwise contains string "(fd=...)" + std::string file_name; + void nextImpl() override; - - /// Name or some description of file. - std::string getFileName() const override; - public: WriteBufferFromFileDescriptor( int fd_ = -1, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + const std::string & file_name_ = ""); /** Could be used before initialization if needed 'fd' was not passed to constructor. * It's not possible to change 'fd' during work. @@ -42,10 +42,19 @@ public: void sync() override; - off_t seek(off_t offset, int whence); - void truncate(off_t length); + /// clang-tidy wants these methods to be const, but + /// they are not const semantically + off_t seek(off_t offset, int whence); // NOLINT + void truncate(off_t length); // NOLINT - off_t size(); + /// Name or some description of file. + std::string getFileName() const override + { + return file_name; + } + + + off_t size() const; }; } From 5c75b93fe8b4842fccbe0570514b00680066ac4a Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Sep 2021 12:17:31 +0300 Subject: [PATCH 30/43] Revert one warning --- src/Compression/CompressionCodecEncrypted.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Compression/CompressionCodecEncrypted.cpp b/src/Compression/CompressionCodecEncrypted.cpp index ba3f12c32a0..47f93eb6202 100644 --- a/src/Compression/CompressionCodecEncrypted.cpp +++ b/src/Compression/CompressionCodecEncrypted.cpp @@ -113,7 +113,8 @@ namespace DB std::string CompressionCodecEncrypted::deriveKey(const std::string_view & master_key) { - std::string_view salt; // No salt: derive keys in a deterministic manner. + /// No salt: derive keys in a deterministic manner. 
+ std::string_view salt(""); // NOLINT std::string_view info("Codec Encrypted('AES-128-GCM-SIV') key generation key"); std::array result; From 209b748fcae2f429be40d4582ceff21b7e6a85e4 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Sep 2021 13:11:19 +0300 Subject: [PATCH 31/43] Add missed level --- src/Interpreters/TextLog.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/TextLog.cpp b/src/Interpreters/TextLog.cpp index baf98b6771d..51ffbdd66ee 100644 --- a/src/Interpreters/TextLog.cpp +++ b/src/Interpreters/TextLog.cpp @@ -26,7 +26,8 @@ NamesAndTypesList TextLogElement::getNamesAndTypes() {"Notice", static_cast(Message::PRIO_NOTICE)}, {"Information", static_cast(Message::PRIO_INFORMATION)}, {"Debug", static_cast(Message::PRIO_DEBUG)}, - {"Trace", static_cast(Message::PRIO_TRACE)} + {"Trace", static_cast(Message::PRIO_TRACE)}, + {"Test", static_cast(Message::PRIO_TEST)}, }); return From fcfe77cb5741de5edccd6c713582893f7d2e24f8 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Sep 2021 13:35:37 +0300 Subject: [PATCH 32/43] Fix allocation in buffer --- src/IO/WriteBufferFromFileDescriptor.cpp | 22 ++++++++++++++++++---- src/IO/WriteBufferFromFileDescriptor.h | 8 ++------ 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp index f1afca171d2..96579626dfc 100644 --- a/src/IO/WriteBufferFromFileDescriptor.cpp +++ b/src/IO/WriteBufferFromFileDescriptor.cpp @@ -63,7 +63,10 @@ void WriteBufferFromFileDescriptor::nextImpl() ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteFailed); /// Don't use getFileName() here because this method can be called from destructor - throwFromErrnoWithPath("Cannot write to file " + file_name, file_name, + String error_file_name = file_name; + if (error_file_name.empty()) + error_file_name = "(fd = " + toString(fd) + ")"; + throwFromErrnoWithPath("Cannot write to file " + error_file_name, error_file_name, ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR); } @@ -75,16 +78,18 @@ void WriteBufferFromFileDescriptor::nextImpl() ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteBytes, bytes_written); } - +/// NOTE: This class can be used as a very low-level building block, for example +/// in trace collector. In such places allocations of memory can be dangerous, +/// so don't allocate anything in this consturctor. WriteBufferFromFileDescriptor::WriteBufferFromFileDescriptor( int fd_, size_t buf_size, char * existing_memory, size_t alignment, - const std::string & file_name_) + std::string file_name_) : WriteBufferFromFileBase(buf_size, existing_memory, alignment) , fd(fd_) - , file_name(file_name_.empty() ? 
"(fd = " + toString(fd) + ")" : file_name_) + , file_name(std::move(file_name_)) { } @@ -142,4 +147,13 @@ off_t WriteBufferFromFileDescriptor::size() const return buf.st_size; } +std::string WriteBufferFromFileDescriptor::getFileName() const +{ + if (file_name.empty()) + return "(fd = " + toString(fd) + ")"; + + return file_name; +} + + } diff --git a/src/IO/WriteBufferFromFileDescriptor.h b/src/IO/WriteBufferFromFileDescriptor.h index aef332b38b0..cad45067548 100644 --- a/src/IO/WriteBufferFromFileDescriptor.h +++ b/src/IO/WriteBufferFromFileDescriptor.h @@ -23,7 +23,7 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, size_t alignment = 0, - const std::string & file_name_ = ""); + std::string file_name_ = ""); /** Could be used before initialization if needed 'fd' was not passed to constructor. * It's not possible to change 'fd' during work. @@ -48,11 +48,7 @@ public: void truncate(off_t length); // NOLINT /// Name or some description of file. - std::string getFileName() const override - { - return file_name; - } - + std::string getFileName() const override; off_t size() const; }; From 9db10a7164e0e22a3749d360244da995a38bec44 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Sep 2021 14:34:17 +0300 Subject: [PATCH 33/43] Fix typo: --- src/IO/WriteBufferFromFileDescriptor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp index 96579626dfc..38aaa945362 100644 --- a/src/IO/WriteBufferFromFileDescriptor.cpp +++ b/src/IO/WriteBufferFromFileDescriptor.cpp @@ -80,7 +80,7 @@ void WriteBufferFromFileDescriptor::nextImpl() /// NOTE: This class can be used as a very low-level building block, for example /// in trace collector. In such places allocations of memory can be dangerous, -/// so don't allocate anything in this consturctor. +/// so don't allocate anything in this constructor. WriteBufferFromFileDescriptor::WriteBufferFromFileDescriptor( int fd_, size_t buf_size, From 7bea8200d80b24022c285b252258805a5427e3d2 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 6 Sep 2021 19:18:09 +0000 Subject: [PATCH 34/43] Revert "Merge pull request #28082 from zhongyuankai/add-system-of-table_views" This reverts commit e5bcfba89e379c458ba15475421fdbcf8614cd0e, reversing changes made to d77f2436517712e0d3678533cd464c60fe5a0aed. 
--- src/Interpreters/DatabaseCatalog.cpp | 6 -- src/Interpreters/DatabaseCatalog.h | 1 - src/Storages/System/StorageSystemViews.cpp | 68 ------------------- src/Storages/System/StorageSystemViews.h | 24 ------- src/Storages/System/attachSystemTables.cpp | 2 - .../0_stateless/02015_system_views.reference | 1 - .../0_stateless/02015_system_views.sql | 14 ---- tests/queries/skip_list.json | 3 +- 8 files changed, 1 insertion(+), 118 deletions(-) delete mode 100644 src/Storages/System/StorageSystemViews.cpp delete mode 100644 src/Storages/System/StorageSystemViews.h delete mode 100644 tests/queries/0_stateless/02015_system_views.reference delete mode 100644 tests/queries/0_stateless/02015_system_views.sql diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index af60eeeaba3..20ebc0a9ee5 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -617,12 +617,6 @@ Dependencies DatabaseCatalog::getDependencies(const StorageID & from) const return Dependencies(iter->second.begin(), iter->second.end()); } -ViewDependencies DatabaseCatalog::getViewDependencies() const -{ - std::lock_guard lock{databases_mutex}; - return ViewDependencies(view_dependencies.begin(), view_dependencies.end()); -} - void DatabaseCatalog::updateDependency(const StorageID & old_from, const StorageID & old_where, const StorageID & new_from, const StorageID & new_where) diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index 72dd28d335b..071b80690df 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -175,7 +175,6 @@ public: void addDependency(const StorageID & from, const StorageID & where); void removeDependency(const StorageID & from, const StorageID & where); Dependencies getDependencies(const StorageID & from) const; - ViewDependencies getViewDependencies() const; /// For Materialized and Live View void updateDependency(const StorageID & old_from, const StorageID & old_where,const StorageID & new_from, const StorageID & new_where); diff --git a/src/Storages/System/StorageSystemViews.cpp b/src/Storages/System/StorageSystemViews.cpp deleted file mode 100644 index 0bb2724b358..00000000000 --- a/src/Storages/System/StorageSystemViews.cpp +++ /dev/null @@ -1,68 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -class Context; - -NamesAndTypesList StorageSystemViews::getNamesAndTypes() -{ - auto view_type_datatype = std::make_shared(DataTypeEnum8::Values{ - {"Default", static_cast(QueryViewsLogElement::ViewType::DEFAULT)}, - {"Materialized", static_cast(QueryViewsLogElement::ViewType::MATERIALIZED)}, - {"Live", static_cast(QueryViewsLogElement::ViewType::LIVE)}}); - - return { - {"database", std::make_shared()}, - {"name", std::make_shared()}, - {"main_dependency_database", std::make_shared()}, - {"main_dependency_table", std::make_shared()}, - {"view_type", std::move(view_type_datatype)}, - }; -} - -void StorageSystemViews::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const -{ - const auto access = context->getAccess(); - const bool check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); - - for (const auto & [table_id, view_ids] : DatabaseCatalog::instance().getViewDependencies()) - { - const bool check_access_for_tables = check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, table_id.database_name); - - if (check_access_for_tables && 
!access->isGranted(AccessType::SHOW_TABLES, table_id.database_name, table_id.table_name)) - continue; - - size_t col_num; - for (const auto & view_id : view_ids) - { - auto view_ptr = DatabaseCatalog::instance().getTable(view_id, context); - QueryViewsLogElement::ViewType type = QueryViewsLogElement::ViewType::DEFAULT; - - if (typeid_cast(view_ptr.get())) - { - type = QueryViewsLogElement::ViewType::MATERIALIZED; - } - else if (typeid_cast(view_ptr.get())) - { - type = QueryViewsLogElement::ViewType::LIVE; - } - - col_num = 0; - res_columns[col_num++]->insert(view_id.database_name); - res_columns[col_num++]->insert(view_id.table_name); - res_columns[col_num++]->insert(table_id.database_name); - res_columns[col_num++]->insert(table_id.table_name); - res_columns[col_num++]->insert(type); - } - } -} - -} diff --git a/src/Storages/System/StorageSystemViews.h b/src/Storages/System/StorageSystemViews.h deleted file mode 100644 index 67fcb79067e..00000000000 --- a/src/Storages/System/StorageSystemViews.h +++ /dev/null @@ -1,24 +0,0 @@ -#pragma once - -#include -#include - -namespace DB -{ - -class StorageSystemViews final : public shared_ptr_helper, public IStorageSystemOneBlock -{ - friend struct shared_ptr_helper; -protected: - using IStorageSystemOneBlock::IStorageSystemOneBlock; - - void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const override; - -public: - std::string getName() const override { return "SystemViews"; } - - static NamesAndTypesList getNamesAndTypes(); - -}; - -} diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index 3656a239adb..95e86487073 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -44,7 +44,6 @@ #include #include #include -#include #include #include #include @@ -96,7 +95,6 @@ void attachSystemTablesLocal(IDatabase & system_database) attach(system_database, "zeros_mt", true); attach(system_database, "databases"); attach(system_database, "tables"); - attach(system_database, "views"); attach(system_database, "columns"); attach(system_database, "functions"); attach(system_database, "events"); diff --git a/tests/queries/0_stateless/02015_system_views.reference b/tests/queries/0_stateless/02015_system_views.reference deleted file mode 100644 index a1b1b2a9fd3..00000000000 --- a/tests/queries/0_stateless/02015_system_views.reference +++ /dev/null @@ -1 +0,0 @@ -02015_db materialized_view 02015_db view_source_tb Materialized diff --git a/tests/queries/0_stateless/02015_system_views.sql b/tests/queries/0_stateless/02015_system_views.sql deleted file mode 100644 index a6375dcb591..00000000000 --- a/tests/queries/0_stateless/02015_system_views.sql +++ /dev/null @@ -1,14 +0,0 @@ -DROP DATABASE IF EXISTS 02015_db; -CREATE DATABASE IF NOT EXISTS 02015_db; - -DROP TABLE IF EXISTS 02015_db.view_source_tb; -CREATE TABLE IF NOT EXISTS 02015_db.view_source_tb (a UInt8, s String) ENGINE = MergeTree() ORDER BY a; - -DROP TABLE IF EXISTS 02015_db.materialized_view; -CREATE MATERIALIZED VIEW IF NOT EXISTS 02015_db.materialized_view ENGINE = ReplacingMergeTree() ORDER BY a AS SELECT * FROM 02015_db.view_source_tb; - -SELECT * FROM system.views WHERE database='02015_db' and name = 'materialized_view'; - -DROP TABLE IF EXISTS 02015_db.materialized_view; -DROP TABLE IF EXISTS 02015_db.view_source_tb; -DROP DATABASE IF EXISTS 02015_db; diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 0143cc78dbe..335ed370b9b 100644 
--- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -512,7 +512,6 @@ "01532_execute_merges_on_single_replica", /// static zk path "01530_drop_database_atomic_sync", /// creates database "02001_add_default_database_to_system_users", ///create user - "02002_row_level_filter_bug", ///create user - "02015_system_views" + "02002_row_level_filter_bug" ///create user ] } From cbc10cd3c0a5b266ec261edbf916307f6e61042f Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 6 Sep 2021 21:11:21 +0000 Subject: [PATCH 35/43] Revert "Merge pull request #28397 from zhongyuankai/DOCSUP-13927-document-system_views" This reverts commit d6f89fd9cfbdb39da4be8248007d68df0f09e6d1, reversing changes made to 68f6ecec62b60ec9826da459663e4da0e13d7bfb. --- docs/en/operations/system-tables/views.md | 44 ----------------- 1 file changed, 44 deletions(-) delete mode 100644 docs/en/operations/system-tables/views.md diff --git a/docs/en/operations/system-tables/views.md b/docs/en/operations/system-tables/views.md deleted file mode 100644 index 8edebf00a91..00000000000 --- a/docs/en/operations/system-tables/views.md +++ /dev/null @@ -1,44 +0,0 @@ -# system.views {#system-views} - -Contains the dependencies of all views and the type to which the view belongs. The metadata of the view comes from the [system.tables](tables.md). - -Columns: - -- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the view is in. - -- `name` ([String](../../sql-reference/data-types/string.md)) — Name of the view. - -- `main_dependency_database` ([String](../../sql-reference/data-types/string.md)) — The name of the database on which the view depends. - -- `main_dependency_table` ([String](../../sql-reference/data-types/string.md)) - The name of the table on which the view depends. - -- `view_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of the view. Values: - - `'Default' = 1` — [Default views](../../sql-reference/statements/create/view.md#normal). Should not appear in this log. - - `'Materialized' = 2` — [Materialized views](../../sql-reference/statements/create/view.md#materialized). - - `'Live' = 3` — [Live views](../../sql-reference/statements/create/view.md#live-view). - -**Example** - -```sql -SELECT * FROM system.views LIMIT 2 FORMAT Vertical; -``` - -```text -Row 1: -────── -database: default -name: live_view -main_dependency_database: default -main_dependency_table: view_source_tb -view_type: Live - -Row 2: -────── -database: default -name: materialized_view -main_dependency_database: default -main_dependency_table: view_source_tb -view_type: Materialized -``` - -[Original article](https://clickhouse.tech/docs/en/operations/system-tables/views) From 503b7a59f07f5da9d8cad7a6196be1939582e2f2 Mon Sep 17 00:00:00 2001 From: bharatnc Date: Mon, 6 Sep 2021 09:54:59 -0700 Subject: [PATCH 36/43] fix getNumberOfArguments() for s2Rect functions This fixes the value returned by getNumberOfArguments() for the s2RectAdd and s2RectContains functions. Only 3 arguments are used by these functions and not 4: - low s2Point of rectangle - high s2Point of rectangle - given s2Point The given s2Point is used to grow the size of the bounding rectangle to include the given S2Point in case of the s2RectAdd function. In case of s2RectContains, the function determines if the bounding rectangle contains the given point. PS: I wonder if it's more apt to call rectAdd rectGrow since it's used to grow the size of a given rectangle.
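To make the three-argument contract described above concrete, here is a small illustrative SQL sketch (not part of the patch). The numeric literals are placeholder S2 cell IDs, in practice they would come from something like geoToS2(lon, lat), and the exact result columns depend on the ClickHouse version:

```sql
-- Each function takes exactly three arguments:
-- the rectangle's low S2 point, its high S2 point, and the S2 point to add or test.
SELECT
    s2RectAdd(5178914411069187297, 5177056748191934217, 5179062030687166815) AS grown_rect,
    s2RectContains(5178914411069187297, 5177056748191934217, 5179062030687166815) AS point_inside;
```

With getNumberOfArguments() returning 3, a call of this shape matches the declared arity; the previous value of 4 did not correspond to the number of arguments the implementations actually read.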
--- src/Functions/s2RectAdd.cpp | 2 +- src/Functions/s2RectContains.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/s2RectAdd.cpp b/src/Functions/s2RectAdd.cpp index 90578567da2..d9b12ce22a3 100644 --- a/src/Functions/s2RectAdd.cpp +++ b/src/Functions/s2RectAdd.cpp @@ -41,7 +41,7 @@ public: return name; } - size_t getNumberOfArguments() const override { return 4; } + size_t getNumberOfArguments() const override { return 3; } bool useDefaultImplementationForConstants() const override { return true; } diff --git a/src/Functions/s2RectContains.cpp b/src/Functions/s2RectContains.cpp index 5f556c3ec14..27fed9e2031 100644 --- a/src/Functions/s2RectContains.cpp +++ b/src/Functions/s2RectContains.cpp @@ -41,7 +41,7 @@ public: return name; } - size_t getNumberOfArguments() const override { return 4; } + size_t getNumberOfArguments() const override { return 3; } bool useDefaultImplementationForConstants() const override { return true; } From bcc31f1f3e9616940eb8e3ddd3a51b89d368c734 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 6 Sep 2021 22:42:32 +0300 Subject: [PATCH 37/43] Remove unnecessary changes. --- programs/server/Server.cpp | 3 --- src/Access/AccessControlManager.h | 2 -- src/Access/SettingsProfilesCache.cpp | 1 + src/Core/MySQL/Authentication.cpp | 2 -- src/Core/MySQL/MySQLSession.h | 19 ------------------- src/Core/PostgreSQLProtocol.h | 3 +-- src/Interpreters/Context.cpp | 18 ++---------------- src/Interpreters/Context.h | 12 ------------ src/Interpreters/InterpreterSetQuery.cpp | 5 ----- src/Interpreters/ya.make | 1 - .../Formats/Impl/MySQLOutputFormat.h | 2 -- src/Server/HTTPHandler.h | 3 +-- src/TableFunctions/TableFunctionMySQL.cpp | 3 +-- .../01702_system_query_log.reference | 2 +- 14 files changed, 7 insertions(+), 69 deletions(-) delete mode 100644 src/Core/MySQL/MySQLSession.h diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index bf4e2f947dc..6a19fc9e036 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -48,11 +48,8 @@ #include #include #include -#include #include -#include #include -#include #include #include #include diff --git a/src/Access/AccessControlManager.h b/src/Access/AccessControlManager.h index c20216a291a..79f7073ef69 100644 --- a/src/Access/AccessControlManager.h +++ b/src/Access/AccessControlManager.h @@ -153,8 +153,6 @@ public: const ExternalAuthenticators & getExternalAuthenticators() const; - String getProfileName(const UUID & profile_id) const; - private: class ContextAccessCache; class CustomSettingsPrefixes; diff --git a/src/Access/SettingsProfilesCache.cpp b/src/Access/SettingsProfilesCache.cpp index 20880b94aba..3cd73720c3e 100644 --- a/src/Access/SettingsProfilesCache.cpp +++ b/src/Access/SettingsProfilesCache.cpp @@ -116,6 +116,7 @@ void SettingsProfilesCache::mergeSettingsAndConstraints() } } + void SettingsProfilesCache::mergeSettingsAndConstraintsFor(EnabledSettings & enabled) const { SettingsProfileElements merged_settings; diff --git a/src/Core/MySQL/Authentication.cpp b/src/Core/MySQL/Authentication.cpp index 0eb080892c1..aeb9a411082 100644 --- a/src/Core/MySQL/Authentication.cpp +++ b/src/Core/MySQL/Authentication.cpp @@ -2,8 +2,6 @@ #include #include #include -#include -#include #include #include diff --git a/src/Core/MySQL/MySQLSession.h b/src/Core/MySQL/MySQLSession.h deleted file mode 100644 index 1ba17a40483..00000000000 --- a/src/Core/MySQL/MySQLSession.h +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include -#include - -namespace DB -{ - 
-class MySQLSession : public DB::Session -{ -public: - using DB::Session::Session; - - uint8_t sequence_id = 0; - uint32_t client_capabilities = 0; - size_t max_packet_size = 0; -}; - -} diff --git a/src/Core/PostgreSQLProtocol.h b/src/Core/PostgreSQLProtocol.h index f0de4bbb843..2b92258394e 100644 --- a/src/Core/PostgreSQLProtocol.h +++ b/src/Core/PostgreSQLProtocol.h @@ -1,12 +1,11 @@ #pragma once #include -#include -#include #include #include #include #include +#include #include #include #include diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index da7228ebd0f..c4fb89067e2 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -626,20 +626,14 @@ ConfigurationPtr Context::getUsersConfig() return shared->users_config; } -std::shared_ptr Context::getContextAccessForUser(const UUID & user_id) const -{ - return getAccessControlManager().getContextAccess( - user_id, /* current_roles = */ {}, /* use_default_roles = */ true, - settings, current_database, client_info); -} - void Context::setUser(const UUID & user_id_) { auto lock = getLock(); user_id = user_id_; - access = getContextAccessForUser(user_id_); + access = getAccessControlManager().getContextAccess( + user_id_, /* current_roles = */ {}, /* use_default_roles = */ true, settings, current_database, client_info); auto user = access->getUser(); current_roles = std::make_shared>(user->granted_roles.findGranted(user->default_roles)); @@ -1271,14 +1265,6 @@ ContextMutablePtr Context::getBufferContext() const return buffer_context; } -Session * Context::getSessionOrNull() const -{ - if (hasSessionContext()) - return getSession(); - else - return nullptr; -} - const EmbeddedDictionaries & Context::getEmbeddedDictionaries() const { diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 3e373c493f5..fbf64908f84 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -276,8 +276,6 @@ private: /// XXX: move this stuff to shared part instead. ContextMutablePtr buffer_context; /// Buffer context. Could be equal to this. - /// Non-owning, only here for MySQLOutputFormat to be able to modify sequence_id, see setSession() and getSession() - Session * session = nullptr; /// A flag, used to distinguish between user query and internal query to a database engine (MaterializePostgreSQL). bool is_internal_query = false; @@ -373,8 +371,6 @@ public: /// Normally you shouldn't call this function. Use the Session class to do authentication instead. 
void setUser(const UUID & user_id_); - std::shared_ptr getContextAccessForUser(const UUID & user_id) const; - UserPtr getUser() const; String getUserName() const; std::optional getUserID() const; @@ -604,14 +600,6 @@ public: ContextMutablePtr getGlobalContext() const; - // Exists only due to MySQLOutputFormat - Session * getSession() const { return getSessionContext()->session; } - void setSession(Session * new_session) - { - session = getSessionContext()->session = new_session; - } - Session * getSessionOrNull() const; - bool hasGlobalContext() const { return !global_context.expired(); } bool isGlobalContext() const { diff --git a/src/Interpreters/InterpreterSetQuery.cpp b/src/Interpreters/InterpreterSetQuery.cpp index 73af2bbe3c0..1c6a4236bf6 100644 --- a/src/Interpreters/InterpreterSetQuery.cpp +++ b/src/Interpreters/InterpreterSetQuery.cpp @@ -9,13 +9,8 @@ namespace DB BlockIO InterpreterSetQuery::execute() { const auto & ast = query_ptr->as(); - getContext()->checkSettingsConstraints(ast.changes); - // Here settings are pushed to the session context and are not visible in the query context getContext()->getSessionContext()->applySettingsChanges(ast.changes); - // Make setting changes also available to the query context. - getContext()->applySettingsChanges(ast.changes); - return {}; } diff --git a/src/Interpreters/ya.make b/src/Interpreters/ya.make index 9263435e003..0bc7cb11cf0 100644 --- a/src/Interpreters/ya.make +++ b/src/Interpreters/ya.make @@ -148,7 +148,6 @@ SRCS( RowRefs.cpp SelectIntersectExceptQueryVisitor.cpp Session.cpp - Session.cpp SessionLog.cpp Set.cpp SetVariants.cpp diff --git a/src/Processors/Formats/Impl/MySQLOutputFormat.h b/src/Processors/Formats/Impl/MySQLOutputFormat.h index a285a6d75f3..a8e1ada3d6a 100644 --- a/src/Processors/Formats/Impl/MySQLOutputFormat.h +++ b/src/Processors/Formats/Impl/MySQLOutputFormat.h @@ -14,7 +14,6 @@ class IColumn; class IDataType; class WriteBuffer; struct FormatSettings; -class MySQLSession; /** A stream for outputting data in a binary line-by-line format. */ @@ -35,7 +34,6 @@ public: private: void initialize(); -private: bool initialized = false; uint32_t client_capabilities = 0; uint8_t * sequence_id = nullptr; diff --git a/src/Server/HTTPHandler.h b/src/Server/HTTPHandler.h index f52958a191d..98f573f8cef 100644 --- a/src/Server/HTTPHandler.h +++ b/src/Server/HTTPHandler.h @@ -83,13 +83,12 @@ private: // The request_credential instance may outlive a single request/response loop. // This happens only when the authentication mechanism requires more than a single request/response exchange (e.g., SPNEGO). - std::shared_ptr request_session; std::unique_ptr request_credentials; // Returns true when the user successfully authenticated, // the session instance will be configured accordingly, and the request_credentials instance will be dropped. // Returns false when the user is not authenticated yet, and the 'Negotiate' response is sent, - // the request_session and request_credentials instances are preserved. + // the session and request_credentials instances are preserved. // Throws an exception if authentication failed. 
bool authenticateUser( HTTPServerRequest & request, diff --git a/src/TableFunctions/TableFunctionMySQL.cpp b/src/TableFunctions/TableFunctionMySQL.cpp index 92387b13d55..09f9cf8b1f5 100644 --- a/src/TableFunctions/TableFunctionMySQL.cpp +++ b/src/TableFunctions/TableFunctionMySQL.cpp @@ -61,9 +61,8 @@ void TableFunctionMySQL::parseArguments(const ASTPtr & ast_function, ContextPtr user_name = args[3]->as().value.safeGet(); password = args[4]->as().value.safeGet(); - const auto & settings = context->getSettingsRef(); /// Split into replicas if needed. 3306 is the default MySQL port number - const size_t max_addresses = settings.glob_expansion_max_elements; + size_t max_addresses = context->getSettingsRef().glob_expansion_max_elements; auto addresses = parseRemoteDescriptionForExternalDatabase(host_port, max_addresses, 3306); pool.emplace(remote_database_name, addresses, user_name, password); diff --git a/tests/queries/0_stateless/01702_system_query_log.reference b/tests/queries/0_stateless/01702_system_query_log.reference index 3458c2e5ed4..1f329feac22 100644 --- a/tests/queries/0_stateless/01702_system_query_log.reference +++ b/tests/queries/0_stateless/01702_system_query_log.reference @@ -8,7 +8,6 @@ GRANT queries REVOKE queries Misc queries ACTUAL LOG CONTENT: - -- fire all kinds of queries and then check if those are present in the system.query_log\nSET log_comment=\'system.query_log logging test\'; Select SELECT \'DROP queries and also a cleanup before the test\'; Drop DROP DATABASE IF EXISTS sqllt SYNC; DROP USER IF EXISTS sqllt_user; @@ -83,4 +82,5 @@ Rename RENAME TABLE sqllt.table TO sqllt.table_new; Rename RENAME TABLE sqllt.table_new TO sqllt.table; Drop TRUNCATE TABLE sqllt.table; Drop DROP TABLE sqllt.table SYNC; + SET log_comment=\'\'; DROP queries and also a cleanup after the test From 81d3e330870b4a8491cee44a54a45d58cf5da4a4 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 7 Sep 2021 10:36:39 +0300 Subject: [PATCH 38/43] Add crashing test --- ...02015_shard_crash_clang_12_build.reference | 1 + .../02015_shard_crash_clang_12_build.sh | 45 +++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 tests/queries/0_stateless/02015_shard_crash_clang_12_build.reference create mode 100755 tests/queries/0_stateless/02015_shard_crash_clang_12_build.sh diff --git a/tests/queries/0_stateless/02015_shard_crash_clang_12_build.reference b/tests/queries/0_stateless/02015_shard_crash_clang_12_build.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/02015_shard_crash_clang_12_build.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02015_shard_crash_clang_12_build.sh b/tests/queries/0_stateless/02015_shard_crash_clang_12_build.sh new file mode 100755 index 00000000000..287a9b45f4c --- /dev/null +++ b/tests/queries/0_stateless/02015_shard_crash_clang_12_build.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS local" +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS distributed" + +$CLICKHOUSE_CLIENT --query "CREATE TABLE local (x UInt8) ENGINE = Memory;" +$CLICKHOUSE_CLIENT --query "CREATE TABLE distributed AS local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), local, x);" + +$CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "INSERT INTO distributed SELECT number FROM numbers(256);" +$CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "SYSTEM FLUSH DISTRIBUTED distributed;" + +function select_thread() +{ + while true; do + $CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "SELECT count() FROM local" >/dev/null + $CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "SELECT count() FROM distributed" >/dev/null + done +} + +export -f select_thread; + +TIMEOUT=30 + +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & + +wait + +$CLICKHOUSE_CLIENT --query "SELECT 1" + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS local" +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS distributed" From 5ff99314b1590d1a1fbf7d6a8bdbc27618dcb77b Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Tue, 7 Sep 2021 12:51:00 +0300 Subject: [PATCH 39/43] Reduce default settings for S3 multipart upload part size --- src/Core/Settings.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 887964bb233..09dfd347423 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -70,8 +70,8 @@ class IColumn; M(UInt64, idle_connection_timeout, 3600, "Close idle TCP connections after specified number of seconds.", 0) \ M(UInt64, distributed_connections_pool_size, DBMS_DEFAULT_DISTRIBUTED_CONNECTIONS_POOL_SIZE, "Maximum number of connections with one remote server in the pool.", 0) \ M(UInt64, connections_with_failover_max_tries, DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, "The maximum number of attempts to connect to replicas.", 0) \ - M(UInt64, s3_min_upload_part_size, 512*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \ - M(UInt64, s3_max_single_part_upload_size, 64*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \ + M(UInt64, s3_min_upload_part_size, 32*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \ + M(UInt64, s3_max_single_part_upload_size, 32*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \ M(UInt64, s3_max_single_read_retries, 4, "The maximum number of retries during single S3 read.", 0) \ M(UInt64, s3_max_redirects, 10, "Max number of S3 redirects hops allowed.", 0) \ M(UInt64, s3_max_connections, 1024, "The maximum number of connections per server.", 0) \ From 978dd19fa242469bbfac1e3701c23d59dd291b05 Mon Sep 17 00:00:00 2001 From: ZhiYong Wang Date: Tue, 7 
Sep 2021 19:05:26 +0800 Subject: [PATCH 40/43] Fix coredump in creating distributed table --- src/Storages/StorageDistributed.cpp | 7 ++++++- ...02017_create_distributed_table_coredump.reference | 0 .../02017_create_distributed_table_coredump.sql | 12 ++++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/02017_create_distributed_table_coredump.reference create mode 100644 tests/queries/0_stateless/02017_create_distributed_table_coredump.sql diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index df7d568deb9..1ad80f8aea6 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -1332,7 +1332,12 @@ void registerStorageDistributed(StorageFactory & factory) String remote_table = engine_args[2]->as().value.safeGet(); const auto & sharding_key = engine_args.size() >= 4 ? engine_args[3] : nullptr; - const auto & storage_policy = engine_args.size() >= 5 ? engine_args[4]->as().value.safeGet() : "default"; + String storage_policy = "default"; + if (engine_args.size() >= 5) + { + engine_args[4] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[4], local_context); + storage_policy = engine_args[4]->as().value.safeGet(); + } /// Check that sharding_key exists in the table and has numeric type. if (sharding_key) diff --git a/tests/queries/0_stateless/02017_create_distributed_table_coredump.reference b/tests/queries/0_stateless/02017_create_distributed_table_coredump.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02017_create_distributed_table_coredump.sql b/tests/queries/0_stateless/02017_create_distributed_table_coredump.sql new file mode 100644 index 00000000000..27c98c3e237 --- /dev/null +++ b/tests/queries/0_stateless/02017_create_distributed_table_coredump.sql @@ -0,0 +1,12 @@ +drop table if exists t; +drop table if exists td1; +drop table if exists td2; +drop table if exists td3; +create table t (val UInt32) engine = MergeTree order by val; +create table td1 engine = Distributed(test_shard_localhost, currentDatabase(), 't') as t; +create table td2 engine = Distributed(test_shard_localhost, currentDatabase(), 't', xxHash32(val), default) as t; +create table td3 engine = Distributed(test_shard_localhost, currentDatabase(), 't', xxHash32(val), 'default') as t; +drop table if exists t; +drop table if exists td1; +drop table if exists td2; +drop table if exists td3; From 69604eab3f72fc8c6e14b387a73dd4166319d680 Mon Sep 17 00:00:00 2001 From: Vitaly Date: Tue, 7 Sep 2021 14:05:55 +0300 Subject: [PATCH 41/43] Add Settings.Names, Settings.Values aliases for system.processes table --- src/Storages/System/StorageSystemProcesses.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemProcesses.cpp b/src/Storages/System/StorageSystemProcesses.cpp index e2685af7718..5e6ba37226c 100644 --- a/src/Storages/System/StorageSystemProcesses.cpp +++ b/src/Storages/System/StorageSystemProcesses.cpp @@ -73,7 +73,9 @@ NamesAndAliases StorageSystemProcesses::getNamesAndAliases() return { {"ProfileEvents.Names", {std::make_shared(std::make_shared())}, "mapKeys(ProfileEvents)"}, - {"ProfileEvents.Values", {std::make_shared(std::make_shared())}, "mapValues(ProfileEvents)"} + {"ProfileEvents.Values", {std::make_shared(std::make_shared())}, "mapValues(ProfileEvents)"}, + {"Settings.Names", {std::make_shared(std::make_shared())}, "mapKeys(Settings)" }, + {"Settings.Values", 
{std::make_shared(std::make_shared())}, "mapValues(Settings)"} }; } From d9ca1e29c3eac0ebb4c8da8a449d0a514a61a2b6 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 7 Sep 2021 15:56:32 +0300 Subject: [PATCH 42/43] Increase stack size for coroutines --- src/Common/FiberStack.h | 9 ++++++++- .../0_stateless/02015_shard_crash_clang_12_build.sh | 2 ++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/Common/FiberStack.h b/src/Common/FiberStack.h index c509540cc9e..aec4befb515 100644 --- a/src/Common/FiberStack.h +++ b/src/Common/FiberStack.h @@ -27,7 +27,12 @@ private: size_t stack_size; size_t page_size = 0; public: - static constexpr size_t default_stack_size = 128 * 1024; /// 64KB was not enough for tests + /// NOTE: If you see random segfaults in CI and stack starts from boost::context::...fiber... + /// probably it worth to try to increase stack size for coroutines. + /// + /// Current value is just enough for all tests in our CI. It's not selected in some special + /// way. We will have 36 pages with 4KB page size. + static constexpr size_t default_stack_size = 144 * 1024; /// 64KB was not enough for tests explicit FiberStack(size_t stack_size_ = default_stack_size) : stack_size(stack_size_) { @@ -43,6 +48,8 @@ public: if (MAP_FAILED == vp) DB::throwFromErrno(fmt::format("FiberStack: Cannot mmap {}.", ReadableSize(num_bytes)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); + /// TODO: make reports on illegal guard page access more clear. + /// Currently we will see segfault and almost random stacktrace. if (-1 == ::mprotect(vp, page_size, PROT_NONE)) { ::munmap(vp, num_bytes); diff --git a/tests/queries/0_stateless/02015_shard_crash_clang_12_build.sh b/tests/queries/0_stateless/02015_shard_crash_clang_12_build.sh index 287a9b45f4c..f6ede6592ff 100755 --- a/tests/queries/0_stateless/02015_shard_crash_clang_12_build.sh +++ b/tests/queries/0_stateless/02015_shard_crash_clang_12_build.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# This test reproduces crash in case of insufficient coroutines stack size + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh From 5e133a3cc638b5e16fee5529c258553e6b43d90d Mon Sep 17 00:00:00 2001 From: Dmitrii Kovalkov Date: Wed, 8 Sep 2021 07:58:22 +0300 Subject: [PATCH 43/43] Run generate-ya-make.sh --- src/Storages/ya.make | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/ya.make b/src/Storages/ya.make index 5b246cf5aca..11a1ad212c1 100644 --- a/src/Storages/ya.make +++ b/src/Storages/ya.make @@ -214,7 +214,6 @@ SRCS( System/StorageSystemTables.cpp System/StorageSystemUserDirectories.cpp System/StorageSystemUsers.cpp - System/StorageSystemViews.cpp System/StorageSystemWarnings.cpp System/StorageSystemZeros.cpp System/StorageSystemZooKeeper.cpp
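As a closing usage note for [PATCH 41/43] above (the Settings.Names / Settings.Values aliases on system.processes): the aliases simply expand to mapKeys(Settings) and mapValues(Settings), so a minimal illustrative query might look like the sketch below; the actual rows returned depend on whatever queries are running at the time.

```sql
-- Inspect per-query settings of currently running queries via the new aliases.
SELECT query_id, Settings.Names, Settings.Values
FROM system.processes
LIMIT 5;
```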