diff --git a/.clang-tidy b/.clang-tidy index b0971418e0e..ecb8ac6dcbf 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -203,3 +203,5 @@ CheckOptions: value: CamelCase - key: readability-identifier-naming.UsingCase value: CamelCase + - key: modernize-loop-convert.UseCxx20ReverseRanges + value: false diff --git a/base/common/logger_useful.h b/base/common/logger_useful.h index d3b4d38d546..e2290a727b4 100644 --- a/base/common/logger_useful.h +++ b/base/common/logger_useful.h @@ -42,6 +42,7 @@ namespace } while (false) +#define LOG_TEST(logger, ...) LOG_IMPL(logger, DB::LogsLevel::test, Poco::Message::PRIO_TEST, __VA_ARGS__) #define LOG_TRACE(logger, ...) LOG_IMPL(logger, DB::LogsLevel::trace, Poco::Message::PRIO_TRACE, __VA_ARGS__) #define LOG_DEBUG(logger, ...) LOG_IMPL(logger, DB::LogsLevel::debug, Poco::Message::PRIO_DEBUG, __VA_ARGS__) #define LOG_INFO(logger, ...) LOG_IMPL(logger, DB::LogsLevel::information, Poco::Message::PRIO_INFORMATION, __VA_ARGS__) diff --git a/base/mysqlxx/Pool.cpp b/base/mysqlxx/Pool.cpp index 2f47aa67356..cee386311d4 100644 --- a/base/mysqlxx/Pool.cpp +++ b/base/mysqlxx/Pool.cpp @@ -7,10 +7,22 @@ #endif #include - #include - #include +#include + + +namespace +{ + +inline uint64_t clock_gettime_ns(clockid_t clock_type = CLOCK_MONOTONIC) +{ + struct timespec ts; + clock_gettime(clock_type, &ts); + return uint64_t(ts.tv_sec * 1000000000LL + ts.tv_nsec); +} + +} namespace mysqlxx @@ -124,10 +136,15 @@ Pool::~Pool() } -Pool::Entry Pool::get() +Pool::Entry Pool::get(uint64_t wait_timeout) { std::unique_lock lock(mutex); + uint64_t deadline = 0; + /// UINT64_MAX -- wait indefinitely + if (wait_timeout && wait_timeout != UINT64_MAX) + deadline = clock_gettime_ns() + wait_timeout * 1'000'000'000; + initialize(); for (;;) { @@ -153,6 +170,12 @@ Pool::Entry Pool::get() logger.trace("(%s): Unable to create a new connection: Max number of connections has been reached.", getDescription()); } + if (!wait_timeout) + throw Poco::Exception("mysqlxx::Pool is full (wait is disabled, see connection_wait_timeout setting)"); + + if (deadline && clock_gettime_ns() >= deadline) + throw Poco::Exception("mysqlxx::Pool is full (connection_wait_timeout is exceeded)"); + lock.unlock(); logger.trace("(%s): Sleeping for %d seconds.", getDescription(), MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL); sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL); diff --git a/base/mysqlxx/Pool.h b/base/mysqlxx/Pool.h index 530e2c78cf2..08d8b85b4ac 100644 --- a/base/mysqlxx/Pool.h +++ b/base/mysqlxx/Pool.h @@ -189,7 +189,7 @@ public: ~Pool(); /// Allocates connection. - Entry get(); + Entry get(uint64_t wait_timeout); /// Allocates connection. /// If database is not accessible, returns empty Entry object. 
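Editorial note: the `wait_timeout` contract that the new `Pool::get(uint64_t wait_timeout)` signature introduces above can be condensed into a standalone sketch. This is an illustration only — `check_deadline` is a hypothetical helper, not part of the patch — but the semantics (0 disables waiting, `UINT64_MAX` waits indefinitely, anything else is a deadline in seconds) mirror the diff:

```cpp
#include <cstdint>
#include <stdexcept>
#include <time.h>

/// Monotonic "now" in nanoseconds, same shape as the helper added to Pool.cpp.
static uint64_t clock_gettime_ns(clockid_t clock_type = CLOCK_MONOTONIC)
{
    struct timespec ts;
    clock_gettime(clock_type, &ts);
    return uint64_t(ts.tv_sec * 1000000000LL + ts.tv_nsec);
}

/// Called on each iteration of the connection retry loop when the pool is full:
///   wait_timeout == 0          -> fail immediately (wait is disabled);
///   wait_timeout == UINT64_MAX -> deadline stays 0 and is never checked (wait forever);
///   otherwise                  -> fail once the precomputed deadline has passed.
static void check_deadline(uint64_t wait_timeout, uint64_t deadline)
{
    if (!wait_timeout)
        throw std::runtime_error("pool is full (wait is disabled)");
    if (deadline && clock_gettime_ns() >= deadline)
        throw std::runtime_error("pool is full (connection_wait_timeout is exceeded)");
}

int main()
{
    const uint64_t wait_timeout = 5; /// seconds, as in the connection_wait_timeout setting
    uint64_t deadline = 0;
    if (wait_timeout && wait_timeout != UINT64_MAX)
        deadline = clock_gettime_ns() + wait_timeout * 1'000'000'000;
    check_deadline(wait_timeout, deadline); /// throws once the deadline passes
}
```

The deadline is computed once before the retry loop, exactly as the new `Pool::get()` does, so repeated sleep/retry iterations cannot extend the total wait.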
diff --git a/base/mysqlxx/PoolWithFailover.cpp b/base/mysqlxx/PoolWithFailover.cpp index e317ab7f228..14c0db9ecd5 100644 --- a/base/mysqlxx/PoolWithFailover.cpp +++ b/base/mysqlxx/PoolWithFailover.cpp @@ -21,8 +21,9 @@ PoolWithFailover::PoolWithFailover( const unsigned max_connections_, const size_t max_tries_) : max_tries(max_tries_) + , shareable(config_.getBool(config_name_ + ".share_connection", false)) + , wait_timeout(UINT64_MAX) { - shareable = config_.getBool(config_name_ + ".share_connection", false); if (config_.has(config_name_ + ".replica")) { Poco::Util::AbstractConfiguration::Keys replica_keys; @@ -80,9 +81,11 @@ PoolWithFailover::PoolWithFailover( const std::string & password, unsigned default_connections_, unsigned max_connections_, - size_t max_tries_) + size_t max_tries_, + uint64_t wait_timeout_) : max_tries(max_tries_) , shareable(false) + , wait_timeout(wait_timeout_) { /// Replicas have the same priority, but traversed replicas are moved to the end of the queue. for (const auto & [host, port] : addresses) @@ -101,6 +104,7 @@ PoolWithFailover::PoolWithFailover(const PoolWithFailover & other) : max_tries{other.max_tries} , shareable{other.shareable} + , wait_timeout(other.wait_timeout) { if (shareable) { @@ -140,7 +144,7 @@ PoolWithFailover::Entry PoolWithFailover::get() try { - Entry entry = shareable ? pool->get() : pool->tryGet(); + Entry entry = shareable ? pool->get(wait_timeout) : pool->tryGet(); if (!entry.isNull()) { @@ -172,7 +176,7 @@ PoolWithFailover::Entry PoolWithFailover::get() if (full_pool) { app.logger().error("All connections failed, trying to wait on a full pool " + (*full_pool)->getDescription()); - return (*full_pool)->get(); + return (*full_pool)->get(wait_timeout); } std::stringstream message; diff --git a/base/mysqlxx/PoolWithFailover.h b/base/mysqlxx/PoolWithFailover.h index 1c7a63e76c0..2bd5ec9f30a 100644 --- a/base/mysqlxx/PoolWithFailover.h +++ b/base/mysqlxx/PoolWithFailover.h @@ -80,6 +80,8 @@ namespace mysqlxx std::mutex mutex; /// Can the Pool be shared bool shareable; + /// Timeout for waiting for a free connection. + uint64_t wait_timeout = 0; public: using Entry = Pool::Entry; @@ -96,6 +98,7 @@ * default_connections Number of connections in pool to each replica at start. * max_connections Maximum number of connections in pool to each replica. * max_tries_ Max number of connection tries. + * wait_timeout_ Timeout for waiting for a free connection.
*/ PoolWithFailover( const std::string & config_name_, @@ -117,7 +120,8 @@ namespace mysqlxx const std::string & password, unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, - size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); + size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, + uint64_t wait_timeout_ = UINT64_MAX); PoolWithFailover(const PoolWithFailover & other); diff --git a/cmake/analysis.cmake b/cmake/analysis.cmake index 267bb34248b..24d8168e2c0 100644 --- a/cmake/analysis.cmake +++ b/cmake/analysis.cmake @@ -6,7 +6,7 @@ if (ENABLE_CLANG_TIDY) message(FATAL_ERROR "clang-tidy requires CMake version at least 3.6.") endif() - find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8") + find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-12" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8") if (CLANG_TIDY_PATH) message(STATUS diff --git a/cmake/find/amqpcpp.cmake b/cmake/find/amqpcpp.cmake index a4a58349508..05e5d2da751 100644 --- a/cmake/find/amqpcpp.cmake +++ b/cmake/find/amqpcpp.cmake @@ -17,7 +17,7 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/CMakeLists.txt") endif () set (USE_AMQPCPP 1) -set (AMQPCPP_LIBRARY amqp-cpp) +set (AMQPCPP_LIBRARY amqp-cpp ${OPENSSL_LIBRARIES}) set (AMQPCPP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/include") list (APPEND AMQPCPP_INCLUDE_DIR diff --git a/cmake/freebsd/toolchain-x86_64.cmake b/cmake/freebsd/toolchain-x86_64.cmake index d9839ec74ee..f9e45686db7 100644 --- a/cmake/freebsd/toolchain-x86_64.cmake +++ b/cmake/freebsd/toolchain-x86_64.cmake @@ -10,7 +10,7 @@ set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it set (CMAKE_AR "/usr/bin/ar" CACHE FILEPATH "" FORCE) set (CMAKE_RANLIB "/usr/bin/ranlib" CACHE FILEPATH "" FORCE) -set (LINKER_NAME "lld" CACHE STRING "" FORCE) +set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE) set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld") set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld") diff --git a/cmake/linux/toolchain-aarch64.cmake b/cmake/linux/toolchain-aarch64.cmake index e3924fdc537..b4dc6e45cbb 100644 --- a/cmake/linux/toolchain-aarch64.cmake +++ b/cmake/linux/toolchain-aarch64.cmake @@ -13,7 +13,7 @@ set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_D set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64") set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64") -set (LINKER_NAME "lld" CACHE STRING "" FORCE) +set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE) set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld") set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld") diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 8ff94ab867b..f94f4b289a3 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -79,8 +79,9 @@ endif () if (LINKER_NAME) if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0)) - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LINKER_NAME}") - set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LINKER_NAME}") + find_program (LLD_PATH NAMES ${LINKER_NAME}) + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LLD_PATH}") + set (CMAKE_SHARED_LINKER_FLAGS 
"${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LLD_PATH}") else () set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}") set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}") diff --git a/contrib/amqpcpp-cmake/CMakeLists.txt b/contrib/amqpcpp-cmake/CMakeLists.txt index 5637db4cf41..faef7bd4a1c 100644 --- a/contrib/amqpcpp-cmake/CMakeLists.txt +++ b/contrib/amqpcpp-cmake/CMakeLists.txt @@ -41,6 +41,4 @@ target_compile_options (amqp-cpp ) target_include_directories (amqp-cpp SYSTEM PUBLIC "${LIBRARY_DIR}/include") - -target_link_libraries (amqp-cpp PUBLIC ssl) - +target_link_libraries(amqp-cpp PUBLIC ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) diff --git a/contrib/boringssl-cmake/CMakeLists.txt b/contrib/boringssl-cmake/CMakeLists.txt index 9d8c6ca6083..4502d6e9d42 100644 --- a/contrib/boringssl-cmake/CMakeLists.txt +++ b/contrib/boringssl-cmake/CMakeLists.txt @@ -15,12 +15,12 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") endif() if(CMAKE_COMPILER_IS_GNUCXX OR CLANG) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -fvisibility=hidden -fno-common -fno-exceptions -fno-rtti") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -fno-common -fno-exceptions -fno-rtti") if(APPLE AND CLANG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") endif() - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden -fno-common") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-common") if((CMAKE_C_COMPILER_VERSION VERSION_GREATER "4.8.99") OR CLANG) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11") else() diff --git a/contrib/poco b/contrib/poco index 7351c4691b5..46c80daf1b0 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 7351c4691b5d401f59e3959adfc5b4fa263b32da +Subproject commit 46c80daf1b015aa10474ce82e3d24b578c6ae422 diff --git a/docker/builder/Dockerfile b/docker/builder/Dockerfile index abe102e9c80..9a1041ee743 100644 --- a/docker/builder/Dockerfile +++ b/docker/builder/Dockerfile @@ -1,6 +1,6 @@ FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list diff --git a/docker/builder/build.sh b/docker/builder/build.sh index d4cf662e91b..7c7a8893751 100755 --- a/docker/builder/build.sh +++ b/docker/builder/build.sh @@ -4,7 +4,7 @@ set -e #ccache -s # uncomment to display CCache statistics mkdir -p /server/build_docker cd /server/build_docker -cmake -G Ninja /server "-DCMAKE_C_COMPILER=$(command -v clang-11)" "-DCMAKE_CXX_COMPILER=$(command -v clang++-11)" +cmake -G Ninja /server "-DCMAKE_C_COMPILER=$(command -v clang-12)" "-DCMAKE_CXX_COMPILER=$(command -v clang++-12)" # Set the number of build jobs to the half of number of virtual CPU cores (rounded up). # By default, ninja use all virtual CPU cores, that leads to very high memory consumption without much improvement in build time. diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 0393669df48..f5d496ce97f 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -1,7 +1,7 @@ # docker build -t yandex/clickhouse-binary-builder . 
FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list @@ -39,8 +39,6 @@ RUN apt-get update \ bash \ build-essential \ ccache \ - clang-11 \ - clang-tidy-11 \ cmake \ curl \ g++-10 \ @@ -50,9 +48,13 @@ RUN apt-get update \ gperf \ libicu-dev \ libreadline-dev \ - lld-11 \ - llvm-11 \ - llvm-11-dev \ + clang-12 \ + clang-tidy-12 \ + lld-12 \ + llvm-12 \ + llvm-12-dev \ + libicu-dev \ + libreadline-dev \ moreutils \ ninja-build \ pigz \ diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index b9900e34bf1..71402a2fd66 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -4,7 +4,6 @@ set -x -e mkdir -p build/cmake/toolchain/darwin-x86_64 tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1 - ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64 mkdir -p build/cmake/toolchain/linux-aarch64 @@ -23,6 +22,7 @@ cd build/build_docker rm -f CMakeCache.txt # Read cmake arguments into array (possibly empty) read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}" +env cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" .. ccache --show-config ||: diff --git a/docker/packager/deb/Dockerfile b/docker/packager/deb/Dockerfile index 294c8645455..22bba94f250 100644 --- a/docker/packager/deb/Dockerfile +++ b/docker/packager/deb/Dockerfile @@ -1,7 +1,7 @@ # docker build -t yandex/clickhouse-deb-builder . FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list @@ -37,17 +37,17 @@ RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \ RUN apt-get update \ && apt-get install \ alien \ - clang-11 \ - clang-tidy-11 \ + clang-12 \ + clang-tidy-12 \ cmake \ debhelper \ devscripts \ gdb \ git \ gperf \ - lld-11 \ - llvm-11 \ - llvm-11-dev \ + lld-12 \ + llvm-12 \ + llvm-12-dev \ moreutils \ ninja-build \ perl \ diff --git a/docker/packager/packager b/docker/packager/packager index 673878bce43..f37d64e9949 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -75,7 +75,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ # Explicitly use LLD with Clang by default. # Don't force linker for cross-compilation. 
if is_clang and not is_cross_compile: - cmake_flags.append("-DLINKER_NAME=lld") + cmake_flags.append("-DLINKER_NAME=ld.lld") if is_cross_darwin: cc = compiler[:-len(DARWIN_SUFFIX)] @@ -204,7 +204,8 @@ if __name__ == "__main__": parser.add_argument("--output-dir", required=True) parser.add_argument("--build-type", choices=("debug", ""), default="") parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64", - "clang-11-freebsd", "gcc-10"), default="clang-11") + "clang-12", "clang-12-darwin", "clang-12-darwin-aarch64", "clang-12-aarch64", + "clang-11-freebsd", "clang-12-freebsd", "gcc-10"), default="clang-12") parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="") parser.add_argument("--unbundled", action="store_true") parser.add_argument("--split-binary", action="store_true") diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index 611ef6b7702..88c9e1ae06e 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -1,7 +1,7 @@ # docker build -t yandex/clickhouse-test-base . FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list diff --git a/docker/test/codebrowser/Dockerfile b/docker/test/codebrowser/Dockerfile index 33173ab90f9..ae2aafa76c8 100644 --- a/docker/test/codebrowser/Dockerfile +++ b/docker/test/codebrowser/Dockerfile @@ -11,7 +11,7 @@ RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-9 libl # https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b RUN git clone https://github.com/ClickHouse-Extras/woboq_codebrowser -RUN cd woboq_codebrowser && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-9 -DCMAKE_C_COMPILER=clang-9 && make -j +RUN cd woboq_codebrowser && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-12 -DCMAKE_C_COMPILER=clang-12 && make -j ENV CODEGEN=/woboq_codebrowser/generator/codebrowser_generator ENV CODEINDEX=/woboq_codebrowser/indexgenerator/codebrowser_indexgenerator @@ -24,7 +24,7 @@ ENV SHA=nosha ENV DATA="data" CMD mkdir -p $BUILD_DIRECTORY && cd $BUILD_DIRECTORY && \ - cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-11 -DCMAKE_C_COMPILER=/usr/bin/clang-11 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \ + cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-12 -DCMAKE_C_COMPILER=/usr/bin/clang-12 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \ mkdir -p $HTML_RESULT_DIRECTORY && \ $CODEGEN -b $BUILD_DIRECTORY -a -o $HTML_RESULT_DIRECTORY -p ClickHouse:$SOURCE_DIRECTORY:$SHA -d $DATA | ts '%Y-%m-%d %H:%M:%S' && \ cp -r $STATIC_DATA $HTML_RESULT_DIRECTORY/ &&\ diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 2e0bbcd350f..9443cbf496e 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -1,7 +1,7 @@ # docker build -t yandex/clickhouse-fasttest . 
FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index b7c8240abba..8a6b7f70cba 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -9,7 +9,7 @@ trap 'kill $(jobs -pr) ||:' EXIT stage=${stage:-} # Compiler version, normally set by Dockerfile -export LLVM_VERSION=${LLVM_VERSION:-11} +export LLVM_VERSION=${LLVM_VERSION:-12} # A variable to pass additional flags to CMake. # Here we explicitly default it to nothing so that bash doesn't complain about @@ -401,6 +401,9 @@ function run_tests # depends on Go 02013_zlib_read_after_eof + + # Accesses CH via mysql table function (which is unavailable) + 01747_system_session_log_long ) time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \ diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 603c35ede54..2247c6a22be 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -12,7 +12,7 @@ stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" echo "$script_dir" repo_dir=ch -BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-11_debug_none_bundled_unsplitted_disable_False_binary"} +BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-12_debug_none_bundled_unsplitted_disable_False_binary"} function clone { diff --git a/docker/test/keeper-jepsen/run.sh b/docker/test/keeper-jepsen/run.sh index 352585e16e3..8d31b5b7f1c 100644 --- a/docker/test/keeper-jepsen/run.sh +++ b/docker/test/keeper-jepsen/run.sh @@ -2,7 +2,7 @@ set -euo pipefail -CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"} +CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-12_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"} CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""} diff --git a/docker/test/pvs/Dockerfile b/docker/test/pvs/Dockerfile index 7bd45ba4018..438f0bd07ec 100644 --- a/docker/test/pvs/Dockerfile +++ b/docker/test/pvs/Dockerfile @@ -28,7 +28,7 @@ RUN apt-get update --yes \ ENV PKG_VERSION="pvs-studio-latest" RUN set -x \ - && export PUBKEY_HASHSUM="686e5eb8b3c543a5c54442c39ec876b6c2d912fe8a729099e600017ae53c877dda3368fe38ed7a66024fe26df6b5892a" \ + && export PUBKEY_HASHSUM="ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516" \ && wget -nv https://files.viva64.com/etc/pubkey.txt -O /tmp/pubkey.txt \ && echo "${PUBKEY_HASHSUM} /tmp/pubkey.txt" | sha384sum -c \ && apt-key add /tmp/pubkey.txt \ @@ -38,7 +38,7 @@ RUN set -x \ && dpkg -i "${PKG_VERSION}.deb" CMD echo "Running PVS version $PKG_VERSION" && cd /repo_folder && pvs-studio-analyzer credentials $LICENCE_NAME $LICENCE_KEY -o ./licence.lic \ - && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF \ + && cmake . 
-D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF -DCMAKE_C_COMPILER=clang-12 -DCMAKE_CXX_COMPILER=clang\+\+-12 \ && ninja re2_st clickhouse_grpc_protos \ && pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j 4 -l ./licence.lic; \ cp /repo_folder/pvs-studio.log /test_output; \ diff --git a/docs/en/development/build.md b/docs/en/development/build.md index be45c1ed5f7..8d1aae13957 100644 --- a/docs/en/development/build.md +++ b/docs/en/development/build.md @@ -23,7 +23,7 @@ $ sudo apt-get install git cmake python ninja-build Or cmake3 instead of cmake on older systems. -### Install clang-11 (recommended) {#install-clang-11} +### Install clang-12 (recommended) {#install-clang-12} On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) @@ -33,11 +33,11 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" For other Linux distribution - check the availability of the [prebuild packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html). -#### Use clang-11 for Builds +#### Use clang-12 for Builds ``` bash -$ export CC=clang-11 -$ export CXX=clang++-11 +$ export CC=clang-12 +$ export CXX=clang++-12 ``` Gcc can also be used though it is discouraged. diff --git a/docs/en/engines/database-engines/materialized-postgresql.md b/docs/en/engines/database-engines/materialized-postgresql.md index 89c7c803bb3..77a5f2af0e0 100644 --- a/docs/en/engines/database-engines/materialized-postgresql.md +++ b/docs/en/engines/database-engines/materialized-postgresql.md @@ -31,6 +31,10 @@ ENGINE = MaterializedPostgreSQL('host:port', ['database' | database], 'user', 'p - [materialized_postgresql_allow_automatic_update](../../operations/settings/settings.md#materialized-postgresql-allow-automatic-update) +- [materialized_postgresql_replication_slot](../../operations/settings/settings.md#materialized-postgresql-replication-slot) + +- [materialized_postgresql_snapshot](../../operations/settings/settings.md#materialized-postgresql-snapshot) + ``` sql CREATE DATABASE database1 ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') @@ -73,7 +77,7 @@ WHERE oid = 'postgres_table'::regclass; !!! warning "Warning" Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used. - + ## Example of Use {#example-of-use} ``` sql @@ -82,3 +86,11 @@ ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres SELECT * FROM postgresql_db.postgres_table; ``` + +## Notes {#notes} + +- Failover of the logical replication slot. + +Logical Replication Slots which exist on the primary are not available on standby replicas. +So if there is a failover, new primary (the old physical standby) won’t be aware of any slots which were existing with old primary. This will lead to a broken replication from PostgreSQL. +A solution to this is to manage replication slots yourself and define a permanent replication slot (some information can be found [here](https://patroni.readthedocs.io/en/latest/SETTINGS.html)). You'll need to pass slot name via `materialized_postgresql_replication_slot` setting, and it has to be exported with `EXPORT SNAPSHOT` option. The snapshot identifier needs to be passed via `materialized_postgresql_snapshot` setting. 
diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index a6402e00bc9..7eac159a645 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -19,6 +19,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] SETTINGS [connection_pool_size=16, ] [connection_max_tries=3, ] + [connection_wait_timeout=5, ] /* 0 -- do not wait */ [connection_auto_close=true ] ; ``` diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md index 5fb9ce5b151..a3ee1115c00 100644 --- a/docs/en/engines/table-engines/integrations/rabbitmq.md +++ b/docs/en/engines/table-engines/integrations/rabbitmq.md @@ -21,11 +21,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], ... ) ENGINE = RabbitMQ SETTINGS - rabbitmq_host_port = 'host:port', + rabbitmq_host_port = 'host:port' [or rabbitmq_address = 'amqp(s)://guest:guest@localhost/vhost'], rabbitmq_exchange_name = 'exchange_name', rabbitmq_format = 'data_format'[,] [rabbitmq_exchange_type = 'exchange_type',] [rabbitmq_routing_key_list = 'key1,key2,...',] + [rabbitmq_secure = 0,] [rabbitmq_row_delimiter = 'delimiter_symbol',] [rabbitmq_schema = '',] [rabbitmq_num_consumers = N,] @@ -59,6 +60,11 @@ Optional parameters: - `rabbitmq_max_block_size` - `rabbitmq_flush_interval_ms` +SSL connection: + +Use either `rabbitmq_secure = 1` or `amqps` in the connection address: `rabbitmq_address = 'amqps://guest:guest@localhost/vhost'`. +By default, the library does not check whether the created TLS connection is sufficiently secure: expired, self-signed, missing, or invalid certificates are all simply permitted. Stricter certificate checking may be implemented in the future. + Also format settings can be added along with rabbitmq-related settings. Example: diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index bac67ecf140..68a52dd702e 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -1270,6 +1270,8 @@ You can insert Parquet data from a file into ClickHouse table by the following c $ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet" ``` +To insert data into [Nested](../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs, you must switch on the [input_format_parquet_import_nested](../operations/settings/settings.md#input_format_parquet_import_nested) setting. + You can select data from a ClickHouse table and save them into some file in the Parquet format by the following command: ``` bash @@ -1328,6 +1330,8 @@ You can insert Arrow data from a file into ClickHouse table by the following com $ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT Arrow" ``` +To insert data into [Nested](../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs, you must switch on the [input_format_arrow_import_nested](../operations/settings/settings.md#input_format_arrow_import_nested) setting.
+ ### Selecting Data {#selecting-data-arrow} You can select data from a ClickHouse table and save them into some file in the Arrow format by the following command: @@ -1384,6 +1388,8 @@ You can insert ORC data from a file into ClickHouse table by the following comma $ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC" ``` +To insert data into [Nested](../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs, you must switch on the [input_format_orc_import_nested](../operations/settings/settings.md#input_format_orc_import_nested) setting. + ### Selecting Data {#selecting-data-2} You can select data from a ClickHouse table and save them into some file in the ORC format by the following command: diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index d408a3d6849..83cbfde939d 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -25,7 +25,7 @@ toc_title: Adopters | Badoo | Dating | Timeseries | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) | | Benocs | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) | | BIGO | Video | Computing Platform | — | — | [Blog Article, August 2020](https://www.programmersought.com/article/44544895251/) | -| Bloomberg | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | +| Bloomberg | Finance, Media | Monitoring | — | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | | Bloxy | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | | Bytedance | Social platforms | — | — | — | [The ClickHouse Meetup East, October 2020](https://www.youtube.com/watch?v=ckChUkC3Pns) | | CardsMobile | Finance | Analytics | — | — | [VC.ru](https://vc.ru/s/cardsmobile/143449-rukovoditel-gruppy-analiza-dannyh) | diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 68937c870a9..be19f476978 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1253,7 +1253,7 @@ If this section is specified, the path from [users_config](../../operations/serv The `user_directories` section can contain any number of items, the order of the items means their precedence (the higher the item the higher the precedence). -**Example** +**Examples** ``` xml @@ -1263,13 +1263,23 @@ The `user_directories` section can contain any number of items, the order of the items means their precedence (the higher the item the higher the precedence). /var/lib/clickhouse/access/ + +``` + +Users, roles, row policies, quotas, and profiles can be also stored in ZooKeeper: + +``` xml + + + /etc/clickhouse-server/users.xml + /clickhouse/access/ ``` -You can also specify settings `memory` — means storing information only in memory, without writing to disk, and `ldap` — means storing information on an LDAP server. +You can also define sections `memory` — means storing information only in memory, without writing to disk, and `ldap` — means storing information on an LDAP server.
To add an LDAP server as a remote user directory of users that are not defined locally, define a single `ldap` section with the following parameters: - `server` — one of LDAP server names defined in `ldap_servers` config section. This parameter is mandatory and cannot be empty. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index a1c7d1aab32..7bd08549cb8 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -260,6 +260,39 @@ If an error occurred while reading rows but the error counter is still less than If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception. +## input_format_parquet_import_nested {#input_format_parquet_import_nested} + +Enables or disables the ability to insert the data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in [Parquet](../../interfaces/formats.md#data-format-parquet) input format. + +Possible values: + +- 0 — Data can not be inserted into `Nested` columns as an array of structs. +- 1 — Data can be inserted into `Nested` columns as an array of structs. + +Default value: `0`. + +## input_format_arrow_import_nested {#input_format_arrow_import_nested} + +Enables or disables the ability to insert the data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in [Arrow](../../interfaces/formats.md#data_types-matching-arrow) input format. + +Possible values: + +- 0 — Data can not be inserted into `Nested` columns as an array of structs. + +- 1 — Data can be inserted into `Nested` columns as an array of structs. + +Default value: `0`. + +## input_format_orc_import_nested {#input_format_orc_import_nested} + +Enables or disables the ability to insert the data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in [ORC](../../interfaces/formats.md#data-format-orc) input format. + +Possible values: + +- 0 — Data can not be inserted into `Nested` columns as an array of structs. + +- 1 — Data can be inserted into `Nested` columns as an array of structs. + +Default value: `0`. + ## input_format_values_interpret_expressions {#settings-input_format_values_interpret_expressions} Enables or disables the full SQL parser if the fast stream parser can’t parse the data. This setting is used only for the [Values](../../interfaces/formats.md#data-format-values) format at the data insertion. For more information about syntax parsing, see the [Syntax](../../sql-reference/syntax.md) section. @@ -3436,6 +3469,14 @@ Possible values: Default value: `0`. +## materialized_postgresql_replication_slot {#materialized-postgresql-replication-slot} + +Allows having user-managed replication slots. Must be used together with `materialized_postgresql_snapshot`. + +## materialized_postgresql_snapshot {#materialized-postgresql-snapshot} + +A text string identifying a snapshot, from which the initial dump of tables will be performed. Must be used together with `materialized_postgresql_replication_slot`. + ## allow_experimental_projection_optimization {#allow-experimental-projection-optimization} Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md#projections) optimization when processing `SELECT` queries. @@ -3449,7 +3490,7 @@ Default value: `0`.
## force_optimize_projection {#force-optimize-projection} -Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting). +Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting). Possible values: @@ -3457,3 +3498,13 @@ Possible values: - 1 — Projection optimization is obligatory. Default value: `0`. + +## regexp_max_matches_per_row {#regexp-max-matches-per-row} + +Sets the maximum number of matches for a single regular expression per row. Use it to protect against memory overload when using a greedy regular expression in the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md#extractallgroups-horizontal) function. + +Possible values: + +- Positive integer. + +Default value: `1000`. diff --git a/docs/en/operations/system-tables/views.md b/docs/en/operations/system-tables/views.md deleted file mode 100644 index 8edebf00a91..00000000000 --- a/docs/en/operations/system-tables/views.md +++ /dev/null @@ -1,44 +0,0 @@ -# system.views {#system-views} - -Contains the dependencies of all views and the type to which the view belongs. The metadata of the view comes from the [system.tables](tables.md). - -Columns: - -- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the view is in. - -- `name` ([String](../../sql-reference/data-types/string.md)) — Name of the view. - -- `main_dependency_database` ([String](../../sql-reference/data-types/string.md)) — The name of the database on which the view depends. - -- `main_dependency_table` ([String](../../sql-reference/data-types/string.md)) - The name of the table on which the view depends. - -- `view_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of the view. Values: - - `'Default' = 1` — [Default views](../../sql-reference/statements/create/view.md#normal). Should not appear in this log. - - `'Materialized' = 2` — [Materialized views](../../sql-reference/statements/create/view.md#materialized). - - `'Live' = 3` — [Live views](../../sql-reference/statements/create/view.md#live-view). - -**Example** - -```sql -SELECT * FROM system.views LIMIT 2 FORMAT Vertical; -``` - -```text -Row 1: -────── -database: default -name: live_view -main_dependency_database: default -main_dependency_table: view_source_tb -view_type: Live - -Row 2: -────── -database: default -name: materialized_view -main_dependency_database: default -main_dependency_table: view_source_tb -view_type: Materialized -``` - -[Original article](https://clickhouse.tech/docs/en/operations/system-tables/views) diff --git a/docs/en/sql-reference/data-types/nested-data-structures/nested.md b/docs/en/sql-reference/data-types/nested-data-structures/nested.md index ec6c613a956..65849f9cd0f 100644 --- a/docs/en/sql-reference/data-types/nested-data-structures/nested.md +++ b/docs/en/sql-reference/data-types/nested-data-structures/nested.md @@ -3,7 +3,9 @@ toc_priority: 57 toc_title: Nested(Name1 Type1, Name2 Type2, ...)
--- -# Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} +# Nested {#nested} + +## Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} A nested data structure is like a table inside a cell. The parameters of a nested data structure – the column names and types – are specified the same way as in a [CREATE TABLE](../../../sql-reference/statements/create/table.md) query. Each table row can correspond to any number of rows in a nested data structure. diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 05f39e2d4e6..9b41042c659 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -1438,9 +1438,9 @@ Result: └───────────────────────────────────────────┘ ``` -## snowflakeToDateTime {#snowflakeToDateTime} +## snowflakeToDateTime {#snowflaketodatetime} -Extract time from snowflake id as DateTime format. +Extracts time from [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as [DateTime](../data-types/datetime.md) format. **Syntax** ``` sql snowflakeToDateTime(value [, time_zone]) ``` **Parameters** -- `value` — `snowflake id`, Int64 value. +- `value` — Snowflake ID. [Int64](../data-types/int-uint.md). - `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md). **Returned value** -- value converted to the `DateTime` data type. +- Input value converted to the [DateTime](../data-types/datetime.md) data type. **Example** Query: ``` sql SELECT snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC'); ``` Result: ``` text ┌─snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC')─┐ │ 2021-08-15 10:57:56 │ └──────────────────────────────────────────────────────────────────┘ ``` -## snowflakeToDateTime64 {#snowflakeToDateTime64} +## snowflakeToDateTime64 {#snowflaketodatetime64} -Extract time from snowflake id as DateTime64 format. +Extracts time from [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as [DateTime64](../data-types/datetime64.md) format. **Syntax** ``` sql snowflakeToDateTime64(value [, time_zone]) ``` **Parameters** -- `value` — `snowflake id`, Int64 value. +- `value` — Snowflake ID. [Int64](../data-types/int-uint.md). - `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md). **Returned value** -- value converted to the `DateTime64` data type. +- Input value converted to the [DateTime64](../data-types/datetime64.md) data type. **Example** Query: ``` sql SELECT snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC'); ``` Result: ``` text ┌─snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC')─┐ │ 2021-08-15 10:58:19.841 │ └────────────────────────────────────────────────────────────────────┘ ``` -## dateTimeToSnowflake {#dateTimeToSnowflake} +## dateTimeToSnowflake {#datetimetosnowflake} -Convert DateTime to the first snowflake id at the giving time. +Converts [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time. **Syntax** ``` sql dateTimeToSnowflake(value) ``` **Parameters** - `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md). - **Returned value** -- `value` converted to the `Int64` data type as the first snowflake id at that time. +- Input value converted to the [Int64](../data-types/int-uint.md) data type as the first Snowflake ID at that time.
**Example** Query: ``` sql -WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt -SELECT dateTimeToSnowflake(dt); +WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt SELECT dateTimeToSnowflake(dt); ``` Result: ``` text - ┌─dateTimeToSnowflake(dt)─┐ │ 1426860702823350272 │ └─────────────────────────┘ ``` +## dateTime64ToSnowflake {#datetime64tosnowflake} -## dateTime64ToSnowflake {#dateTime64ToSnowflake} - -Convert DateTime64 to the first snowflake id at the giving time. +Converts [DateTime64](../data-types/datetime64.md) to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time. **Syntax** ``` sql dateTime64ToSnowflake(value) ``` **Parameters** - `value` — Date and time. [DateTime64](../../sql-reference/data-types/datetime64.md). - **Returned value** -- `value` converted to the `Int64` data type as the first snowflake id at that time. +- Input value converted to the [Int64](../data-types/int-uint.md) data type as the first Snowflake ID at that time. **Example** Query: ``` sql -WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64 -SELECT dateTime64ToSnowflake(dt64); +WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64 SELECT dateTime64ToSnowflake(dt64); ``` Result: @@ -1582,4 +1576,4 @@ Result: ┌─dateTime64ToSnowflake(dt64)─┐ │ 1426860704886947840 │ └─────────────────────────────┘ -``` \ No newline at end of file +``` diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 1dd10e1f76d..970c6c36e9f 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -1180,7 +1180,7 @@ ClickHouse поддерживает настраиваемую точность Типы данных столбцов в ClickHouse могут отличаться от типов данных соответствующих полей файла в формате Parquet. При вставке данных ClickHouse интерпретирует типы данных в соответствии с таблицей выше, а затем [приводит](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) данные к тому типу, который установлен для столбца таблицы. -### Вставка и выборка данных {#vstavka-i-vyborka-dannykh} +### Вставка и выборка данных {#inserting-and-selecting-data} Чтобы вставить в ClickHouse данные из файла в формате Parquet, выполните команду следующего вида: ``` bash $ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet" ``` +Чтобы вставить данные в колонки типа [Nested](../sql-reference/data-types/nested-data-structures/nested.md) в виде массива структур, нужно включить настройку [input_format_parquet_import_nested](../operations/settings/settings.md#input_format_parquet_import_nested). + Чтобы получить данные из таблицы ClickHouse и сохранить их в файл формата Parquet, используйте команду следующего вида: ``` bash @@ -1246,6 +1248,8 @@ ClickHouse поддерживает настраиваемую точность $ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT Arrow" ``` +Чтобы вставить данные в колонки типа [Nested](../sql-reference/data-types/nested-data-structures/nested.md) в виде массива структур, нужно включить настройку [input_format_arrow_import_nested](../operations/settings/settings.md#input_format_arrow_import_nested).
+ + ### Вывод данных {#selecting-data-arrow} Чтобы получить данные из таблицы ClickHouse и сохранить их в файл формата Arrow, используйте команду следующего вида: @@ -1294,7 +1298,7 @@ ClickHouse поддерживает настраиваемую точность Типы данных столбцов в таблицах ClickHouse могут отличаться от типов данных для соответствующих полей ORC. При вставке данных ClickHouse интерпретирует типы данных ORC согласно таблице соответствия, а затем [приводит](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) данные к типу, установленному для столбца таблицы ClickHouse. -### Вставка данных {#vstavka-dannykh-1} +### Вставка данных {#inserting-data-2} Чтобы вставить в ClickHouse данные из файла в формате ORC, используйте команду следующего вида: ``` bash $ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC" ``` -### Вывод данных {#vyvod-dannykh-1} +Чтобы вставить данные в колонки типа [Nested](../sql-reference/data-types/nested-data-structures/nested.md) в виде массива структур, нужно включить настройку [input_format_orc_import_nested](../operations/settings/settings.md#input_format_orc_import_nested). + +### Вывод данных {#selecting-data-2} Чтобы получить данные из таблицы ClickHouse и сохранить их в файл формата ORC, используйте команду следующего вида: diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index 8bd364e880c..fbecdbdc15e 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -1200,12 +1200,13 @@ ClickHouse использует ZooKeeper для хранения метадан Секция конфигурационного файла, которая содержит настройки: - Путь к конфигурационному файлу с предустановленными пользователями. - Путь к файлу, в котором содержатся пользователи, созданные при помощи SQL команд. +- Путь к узлу ZooKeeper, где хранятся и реплицируются пользователи, созданные с помощью команд SQL (экспериментальная функциональность). Если эта секция определена, путь из [users_config](../../operations/server-configuration-parameters/settings.md#users-config) и [access_control_path](../../operations/server-configuration-parameters/settings.md#access_control_path) не используется. Секция `user_directories` может содержать любое количество элементов, порядок расположения элементов обозначает их приоритет (чем выше элемент, тем выше приоритет). -**Пример** +**Примеры** ``` xml @@ -1218,7 +1219,20 @@ ClickHouse использует ZooKeeper для хранения метадан ``` -Также вы можете указать настройку `memory` — означает хранение информации только в памяти, без записи на диск, и `ldap` — означает хранения информации на [LDAP-сервере](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol). +Пользователи, роли, политики доступа к строкам, квоты и профили могут храниться в ZooKeeper: + +``` xml + + + /etc/clickhouse-server/users.xml + + + /clickhouse/access/ + + +``` + +Также вы можете добавить секции `memory` — означает хранение информации только в памяти, без записи на диск, и `ldap` — означает хранение информации на [LDAP-сервере](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol).
Чтобы добавить LDAP-сервер в качестве удаленного каталога пользователей, которые не определены локально, определите один раздел `ldap` со следующими параметрами: - `server` — имя одного из LDAP-серверов, определенных в секции `ldap_servers` конфигурационного файла. Этот параметр является обязательным и не может быть пустым. diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index aac9c30658c..9ad300b8c9c 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -237,6 +237,39 @@ ClickHouse применяет настройку в тех случаях, ко В случае превышения `input_format_allow_errors_ratio` ClickHouse генерирует исключение. +## input_format_parquet_import_nested {#input_format_parquet_import_nested} + +Включает или отключает возможность вставки данных в колонки типа [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) в виде массива структур в формате ввода [Parquet](../../interfaces/formats.md#data-format-parquet). + +Возможные значения: + +- 0 — данные не могут быть вставлены в колонки типа `Nested` в виде массива структур. +- 1 — данные могут быть вставлены в колонки типа `Nested` в виде массива структур. + +Значение по умолчанию: `0`. + +## input_format_arrow_import_nested {#input_format_arrow_import_nested} + +Включает или отключает возможность вставки данных в колонки типа [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) в виде массива структур в формате ввода [Arrow](../../interfaces/formats.md#data_types-matching-arrow). + +Возможные значения: + +- 0 — данные не могут быть вставлены в колонки типа `Nested` в виде массива структур. +- 1 — данные могут быть вставлены в колонки типа `Nested` в виде массива структур. + +Значение по умолчанию: `0`. + +## input_format_orc_import_nested {#input_format_orc_import_nested} + +Включает или отключает возможность вставки данных в колонки типа [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) в виде массива структур в формате ввода [ORC](../../interfaces/formats.md#data-format-orc). + +Возможные значения: + +- 0 — данные не могут быть вставлены в колонки типа `Nested` в виде массива структур. +- 1 — данные могут быть вставлены в колонки типа `Nested` в виде массива структур. + +Значение по умолчанию: `0`. + ## input_format_values_interpret_expressions {#settings-input_format_values_interpret_expressions} Включает или отключает парсер SQL, если потоковый парсер не может проанализировать данные. Этот параметр используется только для формата [Values](../../interfaces/formats.md#data-format-values) при вставке данных. Дополнительные сведения о парсерах читайте в разделе [Синтаксис](../../sql-reference/syntax.md). @@ -3273,4 +3306,14 @@ SETTINGS index_granularity = 8192 │ - 0 — Проекции используются опционально. - 1 — Проекции обязательно используются. -Значение по умолчанию: `0`. \ No newline at end of file +Значение по умолчанию: `0`. + +## regexp_max_matches_per_row {#regexp-max-matches-per-row} + +Задает максимальное количество совпадений для регулярного выражения. Настройка применяется для защиты памяти от перегрузки при использовании "жадных" квантификаторов в регулярном выражении для функции [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md#extractallgroups-horizontal). + +Возможные значения: + +- Положительное целое число. + +Значение по умолчанию: `1000`.
\ No newline at end of file diff --git a/docs/ru/sql-reference/data-types/nested-data-structures/nested.md b/docs/ru/sql-reference/data-types/nested-data-structures/nested.md index 718fe77ae95..db957e57502 100644 --- a/docs/ru/sql-reference/data-types/nested-data-structures/nested.md +++ b/docs/ru/sql-reference/data-types/nested-data-structures/nested.md @@ -1,4 +1,6 @@ -# Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} +# Nested {#nested} + +## Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} Вложенная структура данных - это как будто вложенная таблица. Параметры вложенной структуры данных - имена и типы столбцов, указываются так же, как у запроса CREATE. Каждой строке таблицы может соответствовать произвольное количество строк вложенной структуры данных. @@ -95,4 +97,3 @@ LIMIT 10 При запросе DESCRIBE, столбцы вложенной структуры данных перечисляются так же по отдельности. Работоспособность запроса ALTER для элементов вложенных структур данных, является сильно ограниченной. - diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 16639386b67..32f24d1e6c5 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -1436,3 +1436,144 @@ FROM numbers(3); │ 2,"good" │ └───────────────────────────────────────────┘ ``` + +## snowflakeToDateTime {#snowflaketodatetime} + +Извлекает время из [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) в формате [DateTime](../data-types/datetime.md). + +**Синтаксис** + +``` sql +snowflakeToDateTime(value [, time_zone]) +``` + +**Аргументы** + +- `value` — Snowflake ID. [Int64](../data-types/int-uint.md). +- `time_zone` — [временная зона сервера](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). Функция распознает `time_string` в соответствии с часовым поясом. Необязательный. [String](../../sql-reference/data-types/string.md). + +**Возвращаемое значение** + +- Значение, преобразованное в формат [DateTime](../data-types/datetime.md). + +**Пример** + +Запрос: + +``` sql +SELECT snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC'); +``` + +Результат: + +``` text + +┌─snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC')─┐ +│ 2021-08-15 10:57:56 │ +└──────────────────────────────────────────────────────────────────┘ +``` + +## snowflakeToDateTime64 {#snowflaketodatetime64} + +Извлекает время из [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) в формате [DateTime64](../data-types/datetime64.md). + +**Синтаксис** + +``` sql +snowflakeToDateTime64(value [, time_zone]) +``` + +**Аргументы** + +- `value` — Snowflake ID. [Int64](../data-types/int-uint.md). +- `time_zone` — [временная зона сервера](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). Функция распознает `time_string` в соответствии с часовым поясом. Необязательный. [String](../../sql-reference/data-types/string.md). + +**Возвращаемое значение** + +- Значение, преобразованное в формат [DateTime64](../data-types/datetime64.md).
+ +**Пример** + +Запрос: + +``` sql +SELECT snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC'); +``` + +Результат: + +``` text + +┌─snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC')─┐ +│ 2021-08-15 10:58:19.841 │ +└────────────────────────────────────────────────────────────────────┘ +``` + +## dateTimeToSnowflake {#datetimetosnowflake} + +Преобразует значение [DateTime](../data-types/datetime.md) в первый идентификатор [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) в заданный момент времени. + +**Синтаксис** + +``` sql +dateTimeToSnowflake(value) +``` + +**Аргументы** + +- `value` — дата и время. [DateTime](../../sql-reference/data-types/datetime.md). + +**Возвращаемое значение** + +- Значение, преобразованное в [Int64](../data-types/int-uint.md), как первый идентификатор Snowflake ID в этот момент времени. + +**Пример** + +Запрос: + +``` sql +WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt SELECT dateTimeToSnowflake(dt); +``` + +Результат: + +``` text +┌─dateTimeToSnowflake(dt)─┐ +│ 1426860702823350272 │ +└─────────────────────────┘ +``` + +## dateTime64ToSnowflake {#datetime64tosnowflake} + +Преобразует значение [DateTime64](../data-types/datetime64.md) в первый идентификатор [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) в заданный момент времени. + +**Синтаксис** + +``` sql +dateTime64ToSnowflake(value) +``` + +**Аргументы** + +- `value` — дата и время. [DateTime64](../data-types/datetime64.md). + +**Возвращаемое значение** + +- Значение, преобразованное в [Int64](../data-types/int-uint.md), как первый идентификатор Snowflake ID в этот момент времени. + + +**Пример** + +Запрос: + +``` sql +WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64 SELECT dateTime64ToSnowflake(dt64); +``` + +Результат: + +``` text +┌─dateTime64ToSnowflake(dt64)─┐ +│ 1426860704886947840 │ +└─────────────────────────────┘ +``` diff --git a/docs/ru/sql-reference/statements/create/table.md b/docs/ru/sql-reference/statements/create/table.md index 073cd4fa7c1..77c192b2b26 100644 --- a/docs/ru/sql-reference/statements/create/table.md +++ b/docs/ru/sql-reference/statements/create/table.md @@ -247,6 +247,7 @@ CREATE TABLE codec_example ) ENGINE = MergeTree() ``` + ## Временные таблицы {#temporary-tables} ClickHouse поддерживает временные таблицы со следующими характеристиками: diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 2b1b6185321..278101e2c1d 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -393,6 +393,7 @@ void LocalServer::processQueries() auto context = session.makeQueryContext(); context->makeSessionContext(); /// initial_create_query requires a session context to be set.
diff --git a/docs/ru/sql-reference/statements/create/table.md b/docs/ru/sql-reference/statements/create/table.md index 073cd4fa7c1..77c192b2b26 100644 --- a/docs/ru/sql-reference/statements/create/table.md +++ b/docs/ru/sql-reference/statements/create/table.md @@ -247,6 +247,7 @@ CREATE TABLE codec_example ) ENGINE = MergeTree() ``` + ## Temporary Tables {#temporary-tables} ClickHouse supports temporary tables with the following characteristics: diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 2b1b6185321..278101e2c1d 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -393,6 +393,7 @@ void LocalServer::processQueries() auto context = session.makeQueryContext(); context->makeSessionContext(); /// initial_create_query requires a session context to be set. context->setCurrentQueryId(""); + applyCmdSettings(context); /// Use the same query_id (and thread group) for all queries diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index bf0d33d9c5c..083f9a22d12 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -45,16 +45,14 @@ #include #include #include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include #include +#include #include #include #include @@ -1131,6 +1129,10 @@ if (ThreadFuzzer::instance().isEffective()) global_context->setSystemZooKeeperLogAfterInitializationIfNeeded(); /// After the system database is created, attach virtual system tables (in addition to query_log and part_log) attachSystemTablesServer(*database_catalog.getSystemDatabase(), has_zookeeper); + /// First remove partially dropped databases, to avoid a race with MaterializedMySQLSyncThread, + /// which may execute DROP in the background before loadMarkedAsDroppedTables() runs; + /// loadMarkedAsDroppedTables() would then find the table, try to add it, and the UUIDs would overlap. + database_catalog.loadMarkedAsDroppedTables(); /// Then, load remaining databases loadMetadata(global_context, default_database); database_catalog.loadDatabases(); diff --git a/programs/server/config.xml b/programs/server/config.xml index 510a5e230f8..32207e2b6b3 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -18,6 +18,7 @@ - information - debug - trace + - test (not for production usage) [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114 --> @@ -964,6 +965,14 @@ 1000 + + <session_log> + <database>system</database> + <table>session_log</table>
+ <partition_by>toYYYYMM(event_date)</partition_by> + <flush_interval_milliseconds>7500</flush_interval_milliseconds> + </session_log>
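Once this block is enabled, the server materializes login, logout, and login-failure events into an ordinary system table. A minimal inspection query, assuming the column names used by the verification query at the end of this diff:

``` sql
SELECT user, interface, type, event_time
FROM system.session_log
WHERE type IN ('LoginSuccess', 'LoginFailure', 'Logout')
ORDER BY event_time DESC
LIMIT 10;
```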
+ + + + + none + + + + + + + + + + + + ::1 + 127.0.0.1 + + session_log_test_xml_profile + default + + + diff --git a/tests/integration/test_executable_table_function/test.py b/tests/integration/test_executable_table_function/test.py index f194945fd16..2517072ea8f 100644 --- a/tests/integration/test_executable_table_function/test.py +++ b/tests/integration/test_executable_table_function/test.py @@ -68,3 +68,15 @@ def test_executable_storage_argument(started_cluster): node.query("CREATE TABLE test_table (value String) ENGINE=Executable('test_argument.sh 1', 'TabSeparated')") assert node.query("SELECT * FROM test_table") == 'Key 1\n' node.query("DROP TABLE test_table") + +def test_executable_pool_storage(started_cluster): + node.query("DROP TABLE IF EXISTS test_table") + node.query("CREATE TABLE test_table (value String) ENGINE=ExecutablePool('test_input_process_pool.sh', 'TabSeparated', (SELECT 1))") + assert node.query("SELECT * FROM test_table") == 'Key 1\n' + node.query("DROP TABLE test_table") + +def test_executable_pool_storage_multiple_pipes(started_cluster): + node.query("DROP TABLE IF EXISTS test_table") + node.query("CREATE TABLE test_table (value String) ENGINE=ExecutablePool('test_input_process_pool_multiple_pipes.sh', 'TabSeparated', (SELECT 1), (SELECT 2), (SELECT 3))") + assert node.query("SELECT * FROM test_table") == 'Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n' + node.query("DROP TABLE test_table") diff --git a/tests/integration/test_executable_table_function/user_scripts/test_input_process_pool.sh b/tests/integration/test_executable_table_function/user_scripts/test_input_process_pool.sh new file mode 100755 index 00000000000..ed40a0d5291 --- /dev/null +++ b/tests/integration/test_executable_table_function/user_scripts/test_input_process_pool.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +while read read_data; do printf "1\n"; printf "Key $read_data\n"; done diff --git a/tests/integration/test_executable_table_function/user_scripts/test_input_process_pool_multiple_pipes.sh b/tests/integration/test_executable_table_function/user_scripts/test_input_process_pool_multiple_pipes.sh new file mode 100755 index 00000000000..4408ccae756 --- /dev/null +++ b/tests/integration/test_executable_table_function/user_scripts/test_input_process_pool_multiple_pipes.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +read -t 250 -u 4 read_data_from_4_fd; +read -t 250 -u 3 read_data_from_3_fd; +read -t 250 read_data_from_0_fd; + +printf "3\n"; +printf "Key from 4 fd $read_data_from_4_fd\n"; +printf "Key from 3 fd $read_data_from_3_fd\n"; +printf "Key from 0 fd $read_data_from_0_fd\n";
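Both pool scripts follow the same convention: for each input chunk they first print the number of result rows, then the rows themselves, so the long-lived process can serve many queries without restarting. A sketch of the corresponding table declaration (the script name is a placeholder; the script must be executable and live in the user scripts directory):

``` sql
CREATE TABLE pool_example (value String)
ENGINE = ExecutablePool('my_pool_script.sh', 'TabSeparated', (SELECT 'Hello'));
```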
diff --git a/tests/integration/test_keeper_two_nodes_cluster/__init__.py b/tests/integration/test_keeper_two_nodes_cluster/__init__.py new file mode 100644 index 00000000000..e5a0d9b4834 --- /dev/null +++ b/tests/integration/test_keeper_two_nodes_cluster/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/env python3 diff --git a/tests/integration/test_keeper_two_nodes_cluster/configs/enable_keeper1.xml b/tests/integration/test_keeper_two_nodes_cluster/configs/enable_keeper1.xml new file mode 100644 index 00000000000..21601ff4cc0 --- /dev/null +++ b/tests/integration/test_keeper_two_nodes_cluster/configs/enable_keeper1.xml @@ -0,0 +1,33 @@ + <yandex> + <keeper_server> + <tcp_port>9181</tcp_port> + <server_id>1</server_id> + <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path> + <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path> + + <coordination_settings> + <operation_timeout_ms>5000</operation_timeout_ms> + <session_timeout_ms>10000</session_timeout_ms> + <snapshot_distance>75</snapshot_distance> + <raft_logs_level>trace</raft_logs_level> + </coordination_settings> + + <raft_configuration> + <server> + <id>1</id> + <hostname>node1</hostname> + <port>44444</port> + <can_become_leader>true</can_become_leader> + <priority>3</priority> + </server> + <server> + <id>2</id> + <hostname>node2</hostname> + <port>44444</port> + <can_become_leader>true</can_become_leader> + <start_as_follower>true</start_as_follower> + <priority>2</priority> + </server> + </raft_configuration> + </keeper_server> + </yandex> diff --git a/tests/integration/test_keeper_two_nodes_cluster/configs/enable_keeper2.xml b/tests/integration/test_keeper_two_nodes_cluster/configs/enable_keeper2.xml new file mode 100644 index 00000000000..baee6b578a0 --- /dev/null +++ b/tests/integration/test_keeper_two_nodes_cluster/configs/enable_keeper2.xml @@ -0,0 +1,33 @@ + <yandex> + <keeper_server> + <tcp_port>9181</tcp_port> + <server_id>2</server_id> + <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path> + <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path> + + <coordination_settings> + <operation_timeout_ms>5000</operation_timeout_ms> + <session_timeout_ms>10000</session_timeout_ms> + <snapshot_distance>75</snapshot_distance> + <raft_logs_level>trace</raft_logs_level> + </coordination_settings> + + <raft_configuration> + <server> + <id>1</id> + <hostname>node1</hostname> + <port>44444</port> + <can_become_leader>true</can_become_leader> + <priority>3</priority> + </server> + <server> + <id>2</id> + <hostname>node2</hostname> + <port>44444</port> + <can_become_leader>true</can_become_leader> + <start_as_follower>true</start_as_follower> + <priority>2</priority> + </server> + </raft_configuration> + </keeper_server> + </yandex> diff --git a/tests/integration/test_keeper_two_nodes_cluster/configs/use_keeper.xml b/tests/integration/test_keeper_two_nodes_cluster/configs/use_keeper.xml new file mode 100644 index 00000000000..740b2afaab9 --- /dev/null +++ b/tests/integration/test_keeper_two_nodes_cluster/configs/use_keeper.xml @@ -0,0 +1,12 @@ + <yandex> + <zookeeper> + <node> + <host>node1</host> + <port>9181</port> + </node> + <node> + <host>node2</host> + <port>9181</port> + </node> + </zookeeper> + </yandex> diff --git a/tests/integration/test_keeper_two_nodes_cluster/test.py b/tests/integration/test_keeper_two_nodes_cluster/test.py new file mode 100644 index 00000000000..e6e3eb37af2 --- /dev/null +++ b/tests/integration/test_keeper_two_nodes_cluster/test.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python3 + +import pytest +from helpers.cluster import ClickHouseCluster +import random +import string +import os +import time +from multiprocessing.dummy import Pool +from helpers.network import PartitionManager +from helpers.test_tools import assert_eq_with_retry + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml', 'configs/use_keeper.xml'], stay_alive=True) +node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml', 'configs/use_keeper.xml'], stay_alive=True) + +from kazoo.client import KazooClient, KazooState + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + + yield cluster + + finally: + cluster.shutdown() + +def smaller_exception(ex): + return '\n'.join(str(ex).split('\n')[0:2]) + +def wait_node(node): + for _ in range(100): + zk = None + try: + node.query("SELECT * FROM system.zookeeper WHERE path = '/'") + zk = get_fake_zk(node.name, timeout=30.0) + zk.create("/test", sequence=True) + print("node", node.name, "ready") + break + except Exception as ex: + time.sleep(0.2) + print("Waiting until", node.name, "is ready, exception:", ex) + finally: + if zk: + zk.stop() + zk.close() + else: + raise Exception("Node", node.name, "did not become ready") + +def wait_nodes(): + for node in [node1, node2]: + wait_node(node) + + +def get_fake_zk(nodename, timeout=30.0): + _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance.start() + return _fake_zk_instance
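wait_node() above probes readiness through ClickHouse itself: the system.zookeeper table is a read-only bridge into Keeper, so an ordinary query doubles as a health check. A standalone sketch of such a probe:

``` sql
SELECT name, value, numChildren
FROM system.zookeeper
WHERE path = '/';
```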
+def test_read_write_two_nodes(started_cluster): + try: + wait_nodes() + node1_zk = get_fake_zk("node1") + node2_zk = get_fake_zk("node2") + + node1_zk.create("/test_read_write_multinode_node1", b"somedata1") + node2_zk.create("/test_read_write_multinode_node2", b"somedata2") + + # stale reads are allowed + while node1_zk.exists("/test_read_write_multinode_node2") is None: + time.sleep(0.1) + + # stale reads are allowed + while node2_zk.exists("/test_read_write_multinode_node1") is None: + time.sleep(0.1) + + assert node2_zk.get("/test_read_write_multinode_node1")[0] == b"somedata1" + assert node1_zk.get("/test_read_write_multinode_node1")[0] == b"somedata1" + + assert node2_zk.get("/test_read_write_multinode_node2")[0] == b"somedata2" + assert node1_zk.get("/test_read_write_multinode_node2")[0] == b"somedata2" + + finally: + try: + for zk_conn in [node1_zk, node2_zk]: + zk_conn.stop() + zk_conn.close() + except: + pass + +def test_read_write_two_nodes_with_blockade(started_cluster): + try: + wait_nodes() + node1_zk = get_fake_zk("node1", timeout=5.0) + node2_zk = get_fake_zk("node2", timeout=5.0) + + print("Blocking nodes") + with PartitionManager() as pm: + pm.partition_instances(node2, node1) + + # We will get a connection-loss response, but the query is processed + # once the blockade is removed + with pytest.raises(Exception): + node1_zk.create("/test_read_write_blocked_node1", b"somedata1") + + # This node is not the leader and will not process anything + with pytest.raises(Exception): + node2_zk.create("/test_read_write_blocked_node2", b"somedata2") + + + print("Nodes unblocked") + for i in range(10): + try: + node1_zk = get_fake_zk("node1") + node2_zk = get_fake_zk("node2") + break + except: + time.sleep(0.5) + + + for i in range(100): + try: + node1_zk.create("/test_after_block1", b"somedata12") + break + except: + time.sleep(0.1) + else: + raise Exception("node1 cannot recover after blockade") + + print("Node1 created its value") + + for i in range(100): + try: + node2_zk.create("/test_after_block2", b"somedata12") + break + except: + time.sleep(0.1) + else: + raise Exception("node2 cannot recover after blockade") + + print("Node2 created its value") + + # stale reads are allowed + while node1_zk.exists("/test_after_block2") is None: + time.sleep(0.1) + + # stale reads are allowed + while node2_zk.exists("/test_after_block1") is None: + time.sleep(0.1) + + assert node1_zk.exists("/test_after_block1") is not None + assert node1_zk.exists("/test_after_block2") is not None + assert node2_zk.exists("/test_after_block1") is not None + assert node2_zk.exists("/test_after_block2") is not None + + finally: + try: + for zk_conn in [node1_zk, node2_zk]: + zk_conn.stop() + zk_conn.close() + except: + pass diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 68b42d91fb6..1dd096087ff 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -31,18 +31,33 @@ postgres_table_template_3 = """ key1 Integer NOT NULL, value1 Integer, key2 Integer NOT NULL, value2 Integer NOT NULL) """ -def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database'): +def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database', replication=False): if database == True: conn_string = "host={} port={} dbname='{}' user='postgres' password='mysecretpassword'".format(ip, port, database_name) else: conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(ip, port) + if replication: + conn_string += " replication='database'" + conn = psycopg2.connect(conn_string) if auto_commit: conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) conn.autocommit = True return conn +def create_replication_slot(conn, slot_name='user_slot'): + cursor = conn.cursor() + cursor.execute('CREATE_REPLICATION_SLOT {} LOGICAL pgoutput EXPORT_SNAPSHOT'.format(slot_name)) + result = cursor.fetchall() + print(result[0][0]) # slot name + print(result[0][1]) # start lsn + print(result[0][2]) # snapshot + return result[0][2] + +def drop_replication_slot(conn, slot_name='user_slot'): + cursor = conn.cursor() + cursor.execute("select pg_drop_replication_slot('{}')".format(slot_name))
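On the ClickHouse side, the pre-created slot and the exported snapshot are handed over when the database is created. A sketch of what create_materialized_db() presumably issues under these settings (address, credentials, and the snapshot identifier are placeholders, and the experimental-engine setting may need to be enabled):

``` sql
CREATE DATABASE test_database
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')
SETTINGS materialized_postgresql_replication_slot = 'user_slot',
         materialized_postgresql_snapshot = '00000003-0000001B-1';
```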
def create_postgres_db(cursor, name='postgres_database'): cursor.execute("CREATE DATABASE {}".format(name)) @@ -941,6 +956,34 @@ def test_quoting(started_cluster): drop_materialized_db() + +def test_user_managed_slots(started_cluster): + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) + cursor = conn.cursor() + table_name = 'test_table' + create_postgres_table(cursor, table_name) + instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name)) + + slot_name = 'user_slot' + replication_connection = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, + database=True, replication=True, auto_commit=True) + snapshot = create_replication_slot(replication_connection, slot_name=slot_name) + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=["materialized_postgresql_replication_slot = '{}'".format(slot_name), + "materialized_postgresql_snapshot = '{}'".format(snapshot)]) + check_tables_are_synchronized(table_name) + instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000, 10000)".format(table_name)) + check_tables_are_synchronized(table_name) + instance.restart_clickhouse() + instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(20000, 10000)".format(table_name)) + check_tables_are_synchronized(table_name) + drop_postgres_table(cursor, table_name) + drop_materialized_db() + drop_replication_slot(replication_connection, slot_name) + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py index a044528cacf..c7ede8dede4 100644 --- a/tests/integration/test_storage_mysql/test.py +++ b/tests/integration/test_storage_mysql/test.py @@ -3,7 +3,10 @@ from contextlib import contextmanager ## sudo -H pip install PyMySQL import pymysql.cursors import pytest +import time +import threading from helpers.cluster import ClickHouseCluster +from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) @@ -319,6 +322,51 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL conn.close() +
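The test below ties the two new engine settings together: connection_pool_size=1 forces the second query to wait for the only connection, and connection_wait_timeout bounds that wait before the pool throws. The declaration shape, with illustrative values:

``` sql
CREATE TABLE mysql_wait_example (id UInt32, name String)
ENGINE = MySQL('mysql57:3306', 'clickhouse', 'mysql_table', 'root', 'clickhouse')
SETTINGS connection_wait_timeout = 2, connection_pool_size = 1;
```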
+# Check that a limited connection_wait_timeout (with connection_pool_size=1) makes the waiting query throw. +def test_settings_connection_wait_timeout(started_cluster): + table_name = 'test_settings_connection_wait_timeout' + node1.query(f'DROP TABLE IF EXISTS {table_name}') + wait_timeout = 2 + + conn = get_mysql_conn(started_cluster, cluster.mysql_ip) + drop_mysql_table(conn, table_name) + create_mysql_table(conn, table_name) + + node1.query(''' + CREATE TABLE {} + ( + id UInt32, + name String, + age UInt32, + money UInt32 + ) + ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse') + SETTINGS connection_wait_timeout={}, connection_pool_size=1 + '''.format(table_name, table_name, wait_timeout) + ) + + node1.query("INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format(table_name)) + + def worker(): + node1.query("SELECT sleepEachRow(1) FROM {}".format(table_name)) + + worker_thread = threading.Thread(target=worker) + worker_thread.start() + + # ensure that the first query has started in worker_thread + time.sleep(1) + + started = time.time() + with pytest.raises(QueryRuntimeException, match=r"Exception: mysqlxx::Pool is full \(connection_wait_timeout is exceeded\)"): + node1.query("SELECT sleepEachRow(1) FROM {}".format(table_name)) + ended = time.time() + assert (ended - started) >= wait_timeout + + worker_thread.join() + + drop_mysql_table(conn, table_name) + conn.close() + if __name__ == '__main__': with contextmanager(started_cluster)() as cluster: for name, instance in list(cluster.instances.items()): diff --git a/tests/queries/0_stateless/01033_quota_dcl.reference b/tests/queries/0_stateless/01033_quota_dcl.reference index 7478adac441..e732ea2fcd6 100644 --- a/tests/queries/0_stateless/01033_quota_dcl.reference +++ b/tests/queries/0_stateless/01033_quota_dcl.reference @@ -1 +1 @@ -CREATE QUOTA default KEYED BY user_name FOR INTERVAL 1 hour TRACKING ONLY TO default, readonly +CREATE QUOTA default KEYED BY user_name FOR INTERVAL 1 hour TRACKING ONLY TO default, readonly, session_log_test_xml_user diff --git a/tests/queries/0_stateless/01480_binary_operator_monotonicity.reference b/tests/queries/0_stateless/01480_binary_operator_monotonicity.reference index 405d3348775..cd7edf71738 100644 --- a/tests/queries/0_stateless/01480_binary_operator_monotonicity.reference +++ b/tests/queries/0_stateless/01480_binary_operator_monotonicity.reference @@ -6,3 +6,4 @@ 0 0 0 +40 4 diff --git a/tests/queries/0_stateless/01480_binary_operator_monotonicity.sql b/tests/queries/0_stateless/01480_binary_operator_monotonicity.sql index 61313de4669..b49e2aa4da5 100644 --- a/tests/queries/0_stateless/01480_binary_operator_monotonicity.sql +++ b/tests/queries/0_stateless/01480_binary_operator_monotonicity.sql @@ -43,3 +43,13 @@ DROP TABLE IF EXISTS binary_op_mono5; DROP TABLE IF EXISTS binary_op_mono6; DROP TABLE IF EXISTS binary_op_mono7; DROP TABLE IF EXISTS binary_op_mono8; + +drop table if exists x; +create table x (i int, j int) engine MergeTree order by i / 10 settings index_granularity = 1; + +insert into x values (10, 1), (20, 2), (30, 3), (40, 4); + +set max_rows_to_read = 3; +select * from x where i > 30; -- converted to i / 10 >= 3, thus needs to read 3 granules. + +drop table x;
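The new case works because `i / 10` is monotonic in `i`, so the primary-key analysis can translate the condition on `i` into a range over the sort key and skip granules. On builds that support `EXPLAIN indexes = 1`, the granule selection can be observed directly; a sketch with a hypothetical copy of the same table:

``` sql
CREATE TABLE x_explain (i int, j int) ENGINE = MergeTree
ORDER BY i / 10 SETTINGS index_granularity = 1;

INSERT INTO x_explain VALUES (10, 1), (20, 2), (30, 3), (40, 4);

EXPLAIN indexes = 1 SELECT * FROM x_explain WHERE i > 30;
```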
diff --git a/tests/queries/0_stateless/01710_minmax_count_projection.reference b/tests/queries/0_stateless/01710_minmax_count_projection.reference index 882d808069e..ad9b87b998d 100644 --- a/tests/queries/0_stateless/01710_minmax_count_projection.reference +++ b/tests/queries/0_stateless/01710_minmax_count_projection.reference @@ -2,3 +2,4 @@ 0 9998 5000 1 9999 5000 0 9998 5000 +1 diff --git a/tests/queries/0_stateless/01710_minmax_count_projection.sql b/tests/queries/0_stateless/01710_minmax_count_projection.sql index 3ee19fe8c2e..58af11f01f7 100644 --- a/tests/queries/0_stateless/01710_minmax_count_projection.sql +++ b/tests/queries/0_stateless/01710_minmax_count_projection.sql @@ -11,4 +11,7 @@ select min(i), max(i), count() from d group by _partition_id order by _partition select min(i), max(i), count() from d where _partition_value.1 = 0 group by _partition_id order by _partition_id; select min(i), max(i), count() from d where _partition_value.1 = 10 group by _partition_id order by _partition_id; +-- fuzz crash +select min(i) from d where 1 = _partition_value.1; + drop table d; diff --git a/tests/queries/0_stateless/01747_system_session_log_long.reference b/tests/queries/0_stateless/01747_system_session_log_long.reference new file mode 100644 index 00000000000..9ecf7e05421 --- /dev/null +++ b/tests/queries/0_stateless/01747_system_session_log_long.reference @@ -0,0 +1,218 @@ + +# no_password - User with profile from XML +TCP endpoint +TCP 'wrong password' case is skipped for no_password. +HTTP endpoint +HTTP 'wrong password' case is skipped for no_password. +MySQL endpoint +MySQL 'wrong password' case is skipped for no_password. + +# no_password - No profiles no roles +TCP endpoint +TCP 'wrong password' case is skipped for no_password. +HTTP endpoint +HTTP 'wrong password' case is skipped for no_password. +MySQL endpoint +MySQL 'wrong password' case is skipped for no_password. + +# no_password - Two profiles, no roles +TCP endpoint +TCP 'wrong password' case is skipped for no_password. +HTTP endpoint +HTTP 'wrong password' case is skipped for no_password. +MySQL endpoint +MySQL 'wrong password' case is skipped for no_password. + +# no_password - Two profiles and two simple roles +TCP endpoint +TCP 'wrong password' case is skipped for no_password. +HTTP endpoint +HTTP 'wrong password' case is skipped for no_password. +MySQL endpoint +MySQL 'wrong password' case is skipped for no_password. + +# plaintext_password - No profiles no roles +TCP endpoint +HTTP endpoint +MySQL endpoint + +# plaintext_password - Two profiles, no roles +TCP endpoint +HTTP endpoint +MySQL endpoint + +# plaintext_password - Two profiles and two simple roles +TCP endpoint +HTTP endpoint +MySQL endpoint + +# sha256_password - No profiles no roles +TCP endpoint +HTTP endpoint +MySQL endpoint +MySQL 'successful login' case is skipped for sha256_password. + +# sha256_password - Two profiles, no roles +TCP endpoint +HTTP endpoint +MySQL endpoint +MySQL 'successful login' case is skipped for sha256_password. + +# sha256_password - Two profiles and two simple roles +TCP endpoint +HTTP endpoint +MySQL endpoint +MySQL 'successful login' case is skipped for sha256_password.
+ +# double_sha1_password - No profiles no roles +TCP endpoint +HTTP endpoint +MySQL endpoint + +# double_sha1_password - Two profiles, no roles +TCP endpoint +HTTP endpoint +MySQL endpoint + +# double_sha1_password - Two profiles and two simple roles +TCP endpoint +HTTP endpoint +MySQL endpoint +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginFailure 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP Logout 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL LoginFailure many +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL Logout 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_no_password_no_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles TCP LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles TCP Logout 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP Logout 1 
+${BASE_USERNAME}_no_password_two_profiles_two_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_no_password_two_profiles_two_roles MySQL Logout 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginFailure 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP Logout 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL LoginFailure many +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL LoginSuccess 1 +${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL Logout 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_sha256_password_no_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP LoginFailure 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP Logout 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginFailure 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP Logout 1 +${BASE_USERNAME}_sha256_password_two_profiles_no_roles MySQL LoginFailure many +${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP LoginFailure 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP Logout 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP 
LoginFailure 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginSuccess 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP Logout 1 +${BASE_USERNAME}_sha256_password_two_profiles_two_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles MySQL LoginFailure many +invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginFailure 1 +invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles MySQL LoginFailure many +invalid_session_log_test_xml_user TCP LoginFailure 1 +invalid_session_log_test_xml_user HTTP LoginFailure 1 +invalid_session_log_test_xml_user MySQL LoginFailure many +session_log_test_xml_user TCP LoginSuccess 1 
+session_log_test_xml_user TCP Logout 1 +session_log_test_xml_user HTTP LoginSuccess 1 +session_log_test_xml_user HTTP Logout 1 +session_log_test_xml_user MySQL LoginSuccess 1 +session_log_test_xml_user MySQL Logout 1 diff --git a/tests/queries/0_stateless/01747_system_session_log_long.sh b/tests/queries/0_stateless/01747_system_session_log_long.sh new file mode 100755 index 00000000000..16b32a08442 --- /dev/null +++ b/tests/queries/0_stateless/01747_system_session_log_long.sh @@ -0,0 +1,370 @@ +#!/usr/bin/env bash + +################################################################################################## +# Verify that login, logout, and login failure events are properly stored in system.session_log +# when different `IDENTIFIED BY` clauses are used for a user. +# +# Make sure that system.session_log entries are non-empty and provide enough info on each event. +# +# Multiple protocols are used: +# * native TCP protocol with the CH client +# * HTTP with CURL +# * MySQL - the CH server accesses itself via the mysql table function; the query typically fails +# (for unrelated reasons), but auth should be performed properly. +# * PostgreSQL - the CH server accesses itself via the postgresql table function (currently out of order). +# * gRPC - not done yet +# +# There is no way to control how many times a query (e.g. via the mysql table function) is retried, +# and hence the number of records in session_log varies. To mitigate this and simplify the final query, +# each auth_type is tested with a separate user. That way SELECT DISTINCT doesn't exclude log entries +# from different cases. +# +# All created users are added to ALL_USERNAMES and cleaned up at the end. +################################################################################################## + +# To minimize the amount of error context sent on failed queries when talking to CH via the MySQL protocol. +export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +set -eu + +# Since there is no way to clean up the system.session_log table, +# make sure that we can identify log entries from this test by a random user name. +readonly BASE_USERNAME="session_log_test_user_$(tr -cd 'a-f0-9' < /dev/urandom | head -c 32)" +readonly TMP_QUERY_FILE=$(mktemp /tmp/tmp_query.log.XXXXXX) +declare -a ALL_USERNAMES +ALL_USERNAMES+=("${BASE_USERNAME}") + +function reportError() +{ + if [ -s "${TMP_QUERY_FILE}" ] ; + then + echo "!!!!!! ERROR ${CLICKHOUSE_CLIENT} ${*} --queries-file ${TMP_QUERY_FILE}" >&2 + echo "query:" >&2 + cat "${TMP_QUERY_FILE}" >&2 + rm -f "${TMP_QUERY_FILE}" + fi +} + +function executeQuery() +{ + ## Execute a query (provided via heredoc or herestring) and print it in case of error. + trap 'rm -f ${TMP_QUERY_FILE}; trap - ERR RETURN' RETURN + # Since we want to report with current values supplied to this function call + # shellcheck disable=SC2064 + trap "reportError $*" ERR + + cat - > "${TMP_QUERY_FILE}" + ${CLICKHOUSE_CLIENT} "${@}" --queries-file "${TMP_QUERY_FILE}" +} + +function cleanup() +{ + local usernames_to_cleanup + usernames_to_cleanup="$(IFS=, ; echo "${ALL_USERNAMES[*]}")" + executeQuery <<EOF +DROP USER IF EXISTS ${usernames_to_cleanup}; +EOF +} + +function executeQueryExpectError() +{ + cat - > "${TMP_QUERY_FILE}" + !
 ${CLICKHOUSE_CLIENT} "${@}" --multiquery --queries-file "${TMP_QUERY_FILE}" 2>&1 | tee -a ${TMP_QUERY_FILE} +} + +function createUser() +{ + local auth_type="${1}" + local username="${2}" + local password="${3}" + + if [[ "${auth_type}" == "no_password" ]] + then + password="" + + elif [[ "${auth_type}" == "plaintext_password" ]] + then + password="${password}" + + elif [[ "${auth_type}" == "sha256_password" ]] + then + password="$(executeQuery <<< "SELECT hex(SHA256('${password}'))")" + + elif [[ "${auth_type}" == "double_sha1_password" ]] + then + password="$(executeQuery <<< "SELECT hex(SHA1(SHA1('${password}')))")" + + else + echo "Invalid auth_type: ${auth_type}" >&2 + exit 1 + fi + + export RESULTING_PASS="${password}" + if [ -n "${password}" ] + then + password="BY '${password}'" + fi + + executeQuery < 1, 'many', toString(count(*))) -- do not rely on count value since MySQL does arbitrary number of retries +FROM + system.session_log +WHERE + (user LIKE '%session_log_test_xml_user%' OR user LIKE '%${BASE_USERNAME}%') + AND + event_time_microseconds >= test_start_time +GROUP BY + user_name, interface, type +ORDER BY + user_name, interface, type; +EOF \ No newline at end of file diff --git a/tests/queries/0_stateless/01891_not_in_partition_prune.reference b/tests/queries/0_stateless/01891_not_in_partition_prune.reference index 628053cd4f8..9d2517ad760 100644 --- a/tests/queries/0_stateless/01891_not_in_partition_prune.reference +++ b/tests/queries/0_stateless/01891_not_in_partition_prune.reference @@ -4,3 +4,5 @@ 7 107 8 108 9 109 +1970-01-01 1 one +1970-01-01 3 three diff --git a/tests/queries/0_stateless/01891_not_in_partition_prune.sql b/tests/queries/0_stateless/01891_not_in_partition_prune.sql index edbfad93e5d..5bf90fdd65c 100644 --- a/tests/queries/0_stateless/01891_not_in_partition_prune.sql +++ b/tests/queries/0_stateless/01891_not_in_partition_prune.sql @@ -8,3 +8,18 @@ set max_rows_to_read = 5; select * from test1 where i not in (1,2,3,4,5) order by i; drop table test1; + +drop table if exists t1; +drop table if exists t2; + +create table t1 (date Date, a Float64, b String) Engine=MergeTree ORDER BY date; +create table t2 (date Date, a Float64, b String) Engine=MergeTree ORDER BY date; + +insert into t1(a, b) values (1, 'one'), (2, 'two'); +insert into t2(a, b) values (2, 'two'), (3, 'three'); + +select date, a, b from t1 where (date, a, b) NOT IN (select date,a,b from t2); +select date, a, b from t2 where (date, a, b) NOT IN (select date,a,b from t1); + +drop table t1; +drop table t2;
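The new cases pin down NOT IN with a tuple on the left-hand side: the whole tuple is compared, and partition pruning must not drop parts that still contain non-matching rows. The tuple comparison itself is easy to sanity-check standalone:

``` sql
SELECT (toDate('1970-01-01'), 1., 'one') NOT IN ((toDate('1970-01-01'), 2., 'two'), (toDate('1970-01-01'), 3., 'three'));
```

This returns 1, matching the `1970-01-01 1 one` row the reference file now expects from t1.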
"$CURDIR"/../shell_config.sh + + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS local" +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS distributed" + +$CLICKHOUSE_CLIENT --query "CREATE TABLE local (x UInt8) ENGINE = Memory;" +$CLICKHOUSE_CLIENT --query "CREATE TABLE distributed AS local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), local, x);" + +$CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "INSERT INTO distributed SELECT number FROM numbers(256);" +$CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "SYSTEM FLUSH DISTRIBUTED distributed;" + +function select_thread() +{ + while true; do + $CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "SELECT count() FROM local" >/dev/null + $CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "SELECT count() FROM distributed" >/dev/null + done +} + +export -f select_thread; + +TIMEOUT=30 + +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & +timeout $TIMEOUT bash -c select_thread 2> /dev/null & + +wait + +$CLICKHOUSE_CLIENT --query "SELECT 1" + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS local" +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS distributed" diff --git a/tests/queries/0_stateless/02015_system_views.reference b/tests/queries/0_stateless/02015_system_views.reference deleted file mode 100644 index a1b1b2a9fd3..00000000000 --- a/tests/queries/0_stateless/02015_system_views.reference +++ /dev/null @@ -1 +0,0 @@ -02015_db materialized_view 02015_db view_source_tb Materialized diff --git a/tests/queries/0_stateless/02015_system_views.sql b/tests/queries/0_stateless/02015_system_views.sql deleted file mode 100644 index a6375dcb591..00000000000 --- a/tests/queries/0_stateless/02015_system_views.sql +++ /dev/null @@ -1,14 +0,0 @@ -DROP DATABASE IF EXISTS 02015_db; -CREATE DATABASE IF NOT EXISTS 02015_db; - -DROP TABLE IF EXISTS 02015_db.view_source_tb; -CREATE TABLE IF NOT EXISTS 02015_db.view_source_tb (a UInt8, s String) ENGINE = MergeTree() ORDER BY a; - -DROP TABLE IF EXISTS 02015_db.materialized_view; -CREATE MATERIALIZED VIEW IF NOT EXISTS 02015_db.materialized_view ENGINE = ReplacingMergeTree() ORDER BY a AS SELECT * FROM 02015_db.view_source_tb; - -SELECT * FROM system.views WHERE database='02015_db' and name = 'materialized_view'; - -DROP TABLE IF EXISTS 02015_db.materialized_view; -DROP TABLE IF EXISTS 02015_db.view_source_tb; -DROP DATABASE IF EXISTS 02015_db; diff --git a/tests/queries/0_stateless/02017_columns_with_dot.reference b/tests/queries/0_stateless/02017_columns_with_dot.reference new file mode 100644 index 00000000000..5922e56fb56 --- /dev/null +++ b/tests/queries/0_stateless/02017_columns_with_dot.reference @@ -0,0 +1,3 @@ +1 [0,0] 2 [1,1,3] +1 [0,0] 2 [1,1,3] +1 [0,0] 2 [1,1,3] diff --git a/tests/queries/0_stateless/02017_columns_with_dot.sql b/tests/queries/0_stateless/02017_columns_with_dot.sql new file mode 100644 index 00000000000..ae901214d75 --- /dev/null +++ 
@@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t_with_dots; +CREATE TABLE t_with_dots (id UInt32, arr Array(UInt32), `b.id` UInt32, `b.arr` Array(UInt32)) ENGINE = Log; + +INSERT INTO t_with_dots VALUES (1, [0, 0], 2, [1, 1, 3]); +SELECT * FROM t_with_dots; + +DROP TABLE t_with_dots; + +CREATE TABLE t_with_dots (id UInt32, arr Array(UInt32), `b.id` UInt32, `b.arr` Array(UInt32)) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_with_dots VALUES (1, [0, 0], 2, [1, 1, 3]); +SELECT * FROM t_with_dots; + +DROP TABLE t_with_dots; + +CREATE TABLE t_with_dots (id UInt32, arr Array(UInt32), `b.id` UInt32, `b.arr` Array(UInt32)) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_with_dots VALUES (1, [0, 0], 2, [1, 1, 3]); +SELECT * FROM t_with_dots; + +DROP TABLE t_with_dots; diff --git a/tests/queries/0_stateless/02017_create_distributed_table_coredump.reference b/tests/queries/0_stateless/02017_create_distributed_table_coredump.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02017_create_distributed_table_coredump.sql b/tests/queries/0_stateless/02017_create_distributed_table_coredump.sql new file mode 100644 index 00000000000..27c98c3e237 --- /dev/null +++ b/tests/queries/0_stateless/02017_create_distributed_table_coredump.sql @@ -0,0 +1,12 @@ +drop table if exists t; +drop table if exists td1; +drop table if exists td2; +drop table if exists td3; +create table t (val UInt32) engine = MergeTree order by val; +create table td1 engine = Distributed(test_shard_localhost, currentDatabase(), 't') as t; +create table td2 engine = Distributed(test_shard_localhost, currentDatabase(), 't', xxHash32(val), default) as t; +create table td3 engine = Distributed(test_shard_localhost, currentDatabase(), 't', xxHash32(val), 'default') as t; +drop table if exists t; +drop table if exists td1; +drop table if exists td2; +drop table if exists td3; diff --git a/tests/queries/0_stateless/02023_transform_or_to_in.reference b/tests/queries/0_stateless/02023_transform_or_to_in.reference new file mode 100644 index 00000000000..aa47d0d46d4 --- /dev/null +++ b/tests/queries/0_stateless/02023_transform_or_to_in.reference @@ -0,0 +1,2 @@ +0 +0 diff --git a/tests/queries/0_stateless/02023_transform_or_to_in.sql b/tests/queries/0_stateless/02023_transform_or_to_in.sql new file mode 100644 index 00000000000..c4ceeb76931 --- /dev/null +++ b/tests/queries/0_stateless/02023_transform_or_to_in.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t_transform_or; + +CREATE TABLE t_transform_or(B AggregateFunction(uniq, String), A String) Engine=MergeTree ORDER BY (A); + +INSERT INTO t_transform_or SELECT uniqState(''), '0'; + +SELECT uniqMergeIf(B, (A = '1') OR (A = '2') OR (A = '3')) +FROM cluster(test_cluster_two_shards, currentDatabase(), t_transform_or) +SETTINGS legacy_column_name_of_tuple_literal = 0; + +SELECT uniqMergeIf(B, (A = '1') OR (A = '2') OR (A = '3')) +FROM cluster(test_cluster_two_shards, currentDatabase(), t_transform_or) +SETTINGS legacy_column_name_of_tuple_literal = 1; + +DROP TABLE t_transform_or;
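The test name refers to the rewrite of chained equality disjunctions, `x = a OR x = b OR x = c`, into `x IN (a, b, c)`; the two settings runs pin down how the generated tuple literal is named across shards. The rewrite itself can be observed on a trivial query:

``` sql
EXPLAIN SYNTAX
SELECT count() FROM numbers(10)
WHERE (number = 1) OR (number = 2) OR (number = 3);
```

With the default settings the output shows the predicate rewritten to `number IN (1, 2, 3)`.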
diff --git a/tests/queries/0_stateless/2014_dict_get_nullable_key.reference b/tests/queries/0_stateless/2014_dict_get_nullable_key.reference new file mode 100644 index 00000000000..08127d35829 --- /dev/null +++ b/tests/queries/0_stateless/2014_dict_get_nullable_key.reference @@ -0,0 +1,13 @@ +Non nullable value only null key +\N +Non nullable value nullable key +Test +\N + +Nullable value only null key +\N +Nullable value nullable key +Test +\N +\N +\N diff --git a/tests/queries/0_stateless/2014_dict_get_nullable_key.sql b/tests/queries/0_stateless/2014_dict_get_nullable_key.sql new file mode 100644 index 00000000000..d6c058b285f --- /dev/null +++ b/tests/queries/0_stateless/2014_dict_get_nullable_key.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS dictionary_non_nullable_source_table; +CREATE TABLE dictionary_non_nullable_source_table (id UInt64, value String) ENGINE=TinyLog; +INSERT INTO dictionary_non_nullable_source_table VALUES (0, 'Test'); + +DROP DICTIONARY IF EXISTS test_dictionary_non_nullable; +CREATE DICTIONARY test_dictionary_non_nullable (id UInt64, value String) PRIMARY KEY id LAYOUT(DIRECT()) SOURCE(CLICKHOUSE(TABLE 'dictionary_non_nullable_source_table')); + +SELECT 'Non nullable value only null key '; +SELECT dictGet('test_dictionary_non_nullable', 'value', NULL); +SELECT 'Non nullable value nullable key'; +SELECT dictGet('test_dictionary_non_nullable', 'value', arrayJoin([toUInt64(0), NULL, 1])); + +DROP DICTIONARY test_dictionary_non_nullable; +DROP TABLE dictionary_non_nullable_source_table; + +DROP TABLE IF EXISTS dictionary_nullable_source_table; +CREATE TABLE dictionary_nullable_source_table (id UInt64, value Nullable(String)) ENGINE=TinyLog; +INSERT INTO dictionary_nullable_source_table VALUES (0, 'Test'), (1, NULL); + +DROP DICTIONARY IF EXISTS test_dictionary_nullable; +CREATE DICTIONARY test_dictionary_nullable (id UInt64, value Nullable(String)) PRIMARY KEY id LAYOUT(DIRECT()) SOURCE(CLICKHOUSE(TABLE 'dictionary_nullable_source_table')); + +SELECT 'Nullable value only null key '; +SELECT dictGet('test_dictionary_nullable', 'value', NULL); +SELECT 'Nullable value nullable key'; +SELECT dictGet('test_dictionary_nullable', 'value', arrayJoin([toUInt64(0), NULL, 1, 2])); + +DROP DICTIONARY test_dictionary_nullable; +DROP TABLE dictionary_nullable_source_table; diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 0143cc78dbe..f280fd4682e 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -466,7 +466,7 @@ "polygon_dicts", // they use an explicitly specified database "01658_read_file_to_stringcolumn", "01721_engine_file_truncate_on_insert", // It's ok to execute in parallel but not several instances of the same test. - "01702_system_query_log", // It's ok to execute in parallel with oter tests but not several instances of the same test. + "01702_system_query_log", // It's ok to execute in parallel but not several instances of the same test.
"01748_dictionary_table_dot", // creates database "00950_dict_get", "01615_random_one_shard_insertion", @@ -513,6 +513,6 @@ "01530_drop_database_atomic_sync", /// creates database "02001_add_default_database_to_system_users", ///create user "02002_row_level_filter_bug", ///create user - "02015_system_views" + "01747_system_session_log_long" // Reads from system.session_log and can't be run in parallel with any other test (since almost any other test writes to session_log) ] } diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 2341552a977..c8885521437 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -9,6 +9,7 @@ v21.7.5.29-stable 2021-07-28 v21.7.4.18-stable 2021-07-17 v21.7.3.14-stable 2021-07-13 v21.7.2.7-stable 2021-07-09 +v21.6.9.7-stable 2021-09-02 v21.6.8.62-stable 2021-07-13 v21.6.7.57-stable 2021-07-09 v21.6.6.51-stable 2021-07-02 @@ -25,6 +26,7 @@ v21.4.6.55-stable 2021-04-30 v21.4.5.46-stable 2021-04-24 v21.4.4.30-stable 2021-04-16 v21.4.3.21-stable 2021-04-12 +v21.3.16.5-lts 2021-09-03 v21.3.15.4-stable 2021-07-10 v21.3.14.1-lts 2021-07-01 v21.3.13.9-lts 2021-06-22