diff --git a/CMakeLists.txt b/CMakeLists.txt index 7c213ace742..1153f418056 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -287,7 +287,7 @@ endif () include(cmake/dbms_glob_sources.cmake) -if (OS_LINUX) +if (OS_LINUX OR OS_ANDROID) include(cmake/linux/default_libs.cmake) elseif (OS_DARWIN) include(cmake/darwin/default_libs.cmake) diff --git a/README.md b/README.md index 0585667dcc2..1dfdf450d8a 100644 --- a/README.md +++ b/README.md @@ -16,5 +16,4 @@ ClickHouse is an open-source column-oriented database management system that all ## Upcoming Events -* [ClickHouse virtual office hours](https://www.eventbrite.com/e/clickhouse-july-virtual-meetup-tickets-111199787558) on July 15, 2020. * [ClickHouse at ByteDance (in Chinese)](https://mp.weixin.qq.com/s/Em-HjPylO8D7WPui4RREAQ) on July 17, 2020. diff --git a/base/common/getThreadId.cpp b/base/common/getThreadId.cpp index 2575aba3844..700c51f21fc 100644 --- a/base/common/getThreadId.cpp +++ b/base/common/getThreadId.cpp @@ -1,6 +1,9 @@ #include -#if defined(OS_LINUX) +#if defined(OS_ANDROID) + #include + #include +#elif defined(OS_LINUX) #include #include #elif defined(OS_FREEBSD) @@ -16,7 +19,9 @@ uint64_t getThreadId() { if (!current_tid) { -#if defined(OS_LINUX) +#if defined(OS_ANDROID) + current_tid = gettid(); +#elif defined(OS_LINUX) current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid #elif defined(OS_FREEBSD) current_tid = pthread_getthreadid_np(); diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp index c0f454107ab..e9f85da3594 100644 --- a/base/daemon/BaseDaemon.cpp +++ b/base/daemon/BaseDaemon.cpp @@ -9,7 +9,6 @@ #include #include #include -#include #include #include diff --git a/cmake/Modules/FindOpenLDAP.cmake b/cmake/Modules/FindOpenLDAP.cmake index c33eafdcb2e..9c6262fa245 100644 --- a/cmake/Modules/FindOpenLDAP.cmake +++ b/cmake/Modules/FindOpenLDAP.cmake @@ -7,7 +7,7 @@ # # Sets values of: # OPENLDAP_FOUND - TRUE if found -# OPENLDAP_INCLUDE_DIR - path to the include directory +# OPENLDAP_INCLUDE_DIRS - paths to the include directories # OPENLDAP_LIBRARIES - paths to the libldap and liblber libraries # OPENLDAP_LDAP_LIBRARY - paths to the libldap library # OPENLDAP_LBER_LIBRARY - paths to the liblber library @@ -28,11 +28,11 @@ if(OPENLDAP_USE_REENTRANT_LIBS) endif() if(OPENLDAP_ROOT_DIR) - find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH) + find_path(OPENLDAP_INCLUDE_DIRS NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH) find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH) find_library(OPENLDAP_LBER_LIBRARY NAMES "lber" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH) else() - find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h") + find_path(OPENLDAP_INCLUDE_DIRS NAMES "ldap.h" "lber.h") find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}") find_library(OPENLDAP_LBER_LIBRARY NAMES "lber") endif() @@ -44,10 +44,10 @@ set(OPENLDAP_LIBRARIES ${OPENLDAP_LDAP_LIBRARY} ${OPENLDAP_LBER_LIBRARY}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args( OpenLDAP DEFAULT_MSG - OPENLDAP_INCLUDE_DIR OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY + OPENLDAP_INCLUDE_DIRS OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY ) -mark_as_advanced(OPENLDAP_INCLUDE_DIR OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY) +mark_as_advanced(OPENLDAP_INCLUDE_DIRS 
OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY) if(OPENLDAP_USE_STATIC_LIBS) set(CMAKE_FIND_LIBRARY_SUFFIXES ${_orig_CMAKE_FIND_LIBRARY_SUFFIXES}) diff --git a/cmake/find/amqpcpp.cmake b/cmake/find/amqpcpp.cmake index 0868e76c28a..b3e193c72ff 100644 --- a/cmake/find/amqpcpp.cmake +++ b/cmake/find/amqpcpp.cmake @@ -1,4 +1,5 @@ -SET(ENABLE_AMQPCPP ${ENABLE_LIBRARIES}) +option(ENABLE_AMQPCPP "Enable AMQP-CPP" ${ENABLE_LIBRARIES}) + if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/CMakeLists.txt") message (WARNING "submodule contrib/AMQP-CPP is missing. to fix try run: \n git submodule update --init --recursive") set (ENABLE_AMQPCPP 0) diff --git a/cmake/find/gtest.cmake b/cmake/find/gtest.cmake index 82ae0f8e229..b41c4cc0af8 100644 --- a/cmake/find/gtest.cmake +++ b/cmake/find/gtest.cmake @@ -1,3 +1,7 @@ +option (ENABLE_GTEST_LIBRARY "Enable gtest library" ${ENABLE_LIBRARIES}) + +if (ENABLE_GTEST_LIBRARY) + option (USE_INTERNAL_GTEST_LIBRARY "Set to FALSE to use system Google Test instead of bundled" ${NOT_UNBUNDLED}) if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest/CMakeLists.txt") @@ -28,4 +32,6 @@ if((GTEST_INCLUDE_DIRS AND GTEST_BOTH_LIBRARIES) OR GTEST_SRC_DIR) set(USE_GTEST 1) endif() +endif() + message (STATUS "Using gtest=${USE_GTEST}: ${GTEST_INCLUDE_DIRS} : ${GTEST_BOTH_LIBRARIES} : ${GTEST_SRC_DIR}") diff --git a/cmake/find/ldap.cmake b/cmake/find/ldap.cmake index 230727819e4..99c9007d6b5 100644 --- a/cmake/find/ldap.cmake +++ b/cmake/find/ldap.cmake @@ -16,11 +16,16 @@ if (ENABLE_LDAP) set (OPENLDAP_USE_REENTRANT_LIBS 1) if (NOT USE_INTERNAL_LDAP_LIBRARY) - if (APPLE AND NOT OPENLDAP_ROOT_DIR) - set (OPENLDAP_ROOT_DIR "/usr/local/opt/openldap") - endif () + if (OPENLDAP_USE_STATIC_LIBS) + message (WARNING "Unable to use external static OpenLDAP libraries, falling back to the bundled version.") + set (USE_INTERNAL_LDAP_LIBRARY 1) + else () + if (APPLE AND NOT OPENLDAP_ROOT_DIR) + set (OPENLDAP_ROOT_DIR "/usr/local/opt/openldap") + endif () - find_package (OpenLDAP) + find_package (OpenLDAP) + endif () endif () if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY) @@ -54,7 +59,10 @@ if (ENABLE_LDAP) else () set (USE_INTERNAL_LDAP_LIBRARY 1) set (OPENLDAP_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap") - set (OPENLDAP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap/include") + set (OPENLDAP_INCLUDE_DIRS + "${ClickHouse_SOURCE_DIR}/contrib/openldap-cmake/${_system_name}_${_system_processor}/include" + "${ClickHouse_SOURCE_DIR}/contrib/openldap/include" + ) # Below, 'ldap'/'ldap_r' and 'lber' will be resolved to # the targets defined in contrib/openldap-cmake/CMakeLists.txt if (OPENLDAP_USE_REENTRANT_LIBS) @@ -73,4 +81,4 @@ if (ENABLE_LDAP) endif () endif () -message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIR} : ${OPENLDAP_LIBRARIES}") +message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIRS} : ${OPENLDAP_LIBRARIES}") diff --git a/cmake/find/libgsasl.cmake b/cmake/find/libgsasl.cmake index 801b63899da..e9c45a09010 100644 --- a/cmake/find/libgsasl.cmake +++ b/cmake/find/libgsasl.cmake @@ -1,3 +1,7 @@ +option(ENABLE_GSASL_LIBRARY "Enable gsasl library" ${ENABLE_LIBRARIES}) + +if (ENABLE_GSASL_LIBRARY) + option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED}) if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src/gsasl.h") @@ -24,4 +28,6 @@ if(LIBGSASL_LIBRARY AND LIBGSASL_INCLUDE_DIR) set (USE_LIBGSASL 1) endif() +endif() +
message (STATUS "Using libgsasl=${USE_LIBGSASL}: ${LIBGSASL_INCLUDE_DIR} : ${LIBGSASL_LIBRARY}") diff --git a/cmake/find/msgpack.cmake b/cmake/find/msgpack.cmake index 46344fc162f..102ea619f6a 100644 --- a/cmake/find/msgpack.cmake +++ b/cmake/find/msgpack.cmake @@ -1,3 +1,7 @@ +option (ENABLE_MSGPACK "Enable msgpack library" ${ENABLE_LIBRARIES}) + +if (ENABLE_MSGPACK) + option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library instead of bundled" ${NOT_UNBUNDLED}) if (USE_INTERNAL_MSGPACK_LIBRARY) @@ -14,4 +18,10 @@ else() find_path(MSGPACK_INCLUDE_DIR NAMES msgpack.hpp PATHS ${MSGPACK_INCLUDE_PATHS}) endif() -message(STATUS "Using msgpack: ${MSGPACK_INCLUDE_DIR}") +if (MSGPACK_INCLUDE_DIR) + set(USE_MSGPACK 1) +endif() + +endif() + +message(STATUS "Using msgpack=${USE_MSGPACK}: ${MSGPACK_INCLUDE_DIR}") diff --git a/cmake/linux/default_libs.cmake b/cmake/linux/default_libs.cmake index da91ccaa0c2..2a325b4d095 100644 --- a/cmake/linux/default_libs.cmake +++ b/cmake/linux/default_libs.cmake @@ -11,7 +11,12 @@ else () set (BUILTINS_LIBRARY "-lgcc") endif () +if (OS_ANDROID) +# pthread and rt are included in libc +set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -ldl") +else () set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread -ldl") +endif () message(STATUS "Default libraries: ${DEFAULT_LIBS}") @@ -35,7 +40,11 @@ add_library(global-libs INTERFACE) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) -add_subdirectory(base/glibc-compatibility) +if (NOT OS_ANDROID) + # Our compatibility layer doesn't build under Android, many errors in musl. + add_subdirectory(base/glibc-compatibility) +endif () + include (cmake/find/unwind.cmake) include (cmake/find/cxx.cmake) diff --git a/cmake/target.cmake b/cmake/target.cmake index 1325758811f..35040e48956 100644 --- a/cmake/target.cmake +++ b/cmake/target.cmake @@ -1,6 +1,11 @@ if (CMAKE_SYSTEM_NAME MATCHES "Linux") set (OS_LINUX 1) add_definitions(-D OS_LINUX) +elseif (CMAKE_SYSTEM_NAME MATCHES "Android") + # This is a toy configuration and not in CI, so expect it to be broken. + # Use cmake flags such as: -DCMAKE_TOOLCHAIN_FILE=~/ch2/android-ndk-r21d/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=28 + set (OS_ANDROID 1) + add_definitions(-D OS_ANDROID) elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD") set (OS_FREEBSD 1) add_definitions(-D OS_FREEBSD) @@ -17,7 +22,7 @@ if (CMAKE_CROSSCOMPILING) set (ENABLE_PARQUET OFF CACHE INTERNAL "") set (ENABLE_ICU OFF CACHE INTERNAL "") set (ENABLE_FASTOPS OFF CACHE INTERNAL "") - elseif (OS_LINUX) + elseif (OS_LINUX OR OS_ANDROID) if (ARCH_AARCH64) # FIXME: broken dependencies set (ENABLE_PROTOBUF OFF CACHE INTERNAL "") diff --git a/cmake/tools.cmake b/cmake/tools.cmake index d261b62eca3..95e00ad9951 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -22,7 +22,7 @@ elseif (COMPILER_CLANG) if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${APPLE_CLANG_MINIMUM_VERSION}) message (FATAL_ERROR "AppleClang compiler version must be at least ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).") elseif (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0.0) - # char8_t is available staring (upstream vanilla) Clang 7, but prior to Clang 8, + # char8_t is available starting (upstream vanilla) Clang 7, but prior to Clang 8, # it is not enabled by -std=c++20 and can be enabled with an explicit -fchar8_t. 
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fchar8_t") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fchar8_t") diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index df3cb8a774d..862c905e534 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -102,7 +102,7 @@ if (USE_INTERNAL_SSL_LIBRARY) add_library(OpenSSL::SSL ALIAS ${OPENSSL_SSL_LIBRARY}) endif () -if (ENABLE_LDAP AND USE_INTERNAL_LDAP_LIBRARY) +if (USE_INTERNAL_LDAP_LIBRARY) add_subdirectory (openldap-cmake) endif () diff --git a/docker/images.json b/docker/images.json index 7033dc0d561..4ef400225ba 100644 --- a/docker/images.json +++ b/docker/images.json @@ -88,6 +88,10 @@ "name": "yandex/clickhouse-testflows-runner", "dependent": [] }, + "docker/test/fasttest": { + "name": "yandex/clickhouse-fasttest", + "dependent": [] + }, "docker/test/integration/s3_proxy": { "name": "yandex/clickhouse-s3-proxy", "dependent": [] @@ -96,4 +100,5 @@ "name": "yandex/clickhouse-python-bottle", "dependent": [] } + } diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 791fd18d80c..6b6a02bb55b 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -33,6 +33,24 @@ then rm /output/clickhouse-odbc-bridge ||: cp -r ../docker/test/performance-comparison /output/scripts ||: + + # We have to know the revision that corresponds to this binary build. + # It is not the nominal SHA from pull/*/head, but the pull/*/merge, which is + # head merged to master by github, at some point after the PR is updated. + # There are some quirks to consider: + # - apparently the real SHA is not recorded in system.build_options; + # - it can change at any time as github pleases, so we can't just record + # the SHA and use it later, it might become inaccessible; + # - CI has an immutable snapshot of repository that it uses for all checks + # for a given nominal SHA, but it is not accessible outside Yandex. + # This is why we add this repository snapshot from CI to the performance test + # package. + mkdir /output/ch + git -C /output/ch init --bare + git -C /output/ch remote add origin /build + git -C /output/ch fetch --no-tags --depth 50 origin HEAD + git -C /output/ch reset --soft FETCH_HEAD + git -C /output/ch log -5 fi # May be set for split build or for performance test. diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile new file mode 100644 index 00000000000..faa7c875275 --- /dev/null +++ b/docker/test/fasttest/Dockerfile @@ -0,0 +1,65 @@ +# docker build -t yandex/clickhouse-fasttest . 
+FROM ubuntu:19.10 + +ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz" +ENV COMMIT_SHA='' +ENV PULL_REQUEST_NUMBER='' + +RUN apt-get --allow-unauthenticated update -y && apt-get install --yes wget gnupg +RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - +RUN echo "deb [trusted=yes] http://apt.llvm.org/eoan/ llvm-toolchain-eoan-10 main" >> /etc/apt/sources.list + + +RUN apt-get --allow-unauthenticated update -y \ + && env DEBIAN_FRONTEND=noninteractive \ + apt-get --allow-unauthenticated install --yes --no-install-recommends \ + bash \ + fakeroot \ + ccache \ + software-properties-common \ + apt-transport-https \ + ca-certificates \ + wget \ + bash \ + fakeroot \ + cmake \ + ccache \ + llvm-10 \ + clang-10 \ + lld-10 \ + clang-tidy-10 \ + ninja-build \ + gperf \ + git \ + tzdata \ + gperf \ + rename \ + build-essential \ + expect \ + python \ + python-lxml \ + python-termcolor \ + python-requests \ + unixodbc \ + qemu-user-static \ + sudo \ + moreutils \ + curl \ + brotli + +RUN mkdir -p /tmp/clickhouse-odbc-tmp \ + && wget --quiet -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \ + && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib/ \ + && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \ + && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \ + && rm -rf /tmp/clickhouse-odbc-tmp + +# This symlink is required by gcc to find the lld linker +RUN ln -s /usr/bin/lld-10 /usr/bin/ld.lld + +ENV TZ=Europe/Moscow +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + + +COPY run.sh / +CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh new file mode 100755 index 00000000000..0254f18030a --- /dev/null +++ b/docker/test/fasttest/run.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +set -x -e + +ls -la + +git clone https://github.com/ClickHouse/ClickHouse.git | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/clone_log.txt +cd ClickHouse +CLICKHOUSE_DIR=`pwd` + + +if [ "$PULL_REQUEST_NUMBER" != "0" ]; then + if git fetch origin "+refs/pull/$PULL_REQUEST_NUMBER/merge"; then + git checkout FETCH_HEAD + echo 'Cloned merge head' + else + git fetch + git checkout $COMMIT_SHA + echo 'Checked out to commit' + fi +else + if [ "$COMMIT_SHA" != "" ]; then + git checkout $COMMIT_SHA + fi +fi + +SUBMODULES_TO_UPDATE="contrib/boost contrib/zlib-ng contrib/libxml2 contrib/poco contrib/libunwind contrib/ryu contrib/fmtlib contrib/base64 contrib/cctz contrib/libcpuid contrib/double-conversion contrib/libcxx contrib/libcxxabi contrib/libc-headers contrib/lz4 contrib/zstd contrib/fastops contrib/rapidjson contrib/re2 contrib/sparsehash-c11" + +git submodule update --init --recursive $SUBMODULES_TO_UPDATE | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/submodule_log.txt + +export CMAKE_LIBS_CONFIG="-DENABLE_LIBRARIES=0 -DENABLE_TESTS=0 -DENABLE_UTILS=0 -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_THINLTO=0 -DUSE_UNWIND=1" + +export CCACHE_DIR=/ccache +export CCACHE_BASEDIR=/ClickHouse +export CCACHE_NOHASHDIR=true +export CCACHE_COMPILERCHECK=content +export CCACHE_MAXSIZE=15G + +ccache --show-stats ||: +ccache --zero-stats ||: + +mkdir build +cd build +CLICKHOUSE_BUILD_DIR=`pwd` +cmake ..
-DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 $CMAKE_LIBS_CONFIG | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/cmake_log.txt +ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/build_log.txt +ninja install | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/install_log.txt + + +ccache --show-stats ||: + +mkdir -p /etc/clickhouse-server +mkdir -p /etc/clickhouse-client +mkdir -p /etc/clickhouse-server/config.d +mkdir -p /etc/clickhouse-server/users.d +mkdir -p /var/log/clickhouse-server +cp $CLICKHOUSE_DIR/programs/server/config.xml /etc/clickhouse-server/ +cp $CLICKHOUSE_DIR/programs/server/users.xml /etc/clickhouse-server/ + +mkdir -p /etc/clickhouse-server/dict_examples +ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/ +ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/ +ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/ +ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/ +ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/ +ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/ +ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/ +#ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/ +ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml + +clickhouse-server --config /etc/clickhouse-server/config.xml --daemon + +until clickhouse-client --query "SELECT 1" +do + sleep 0.1 +done + +TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 
01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having" + +clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip $TESTS_TO_SKIP 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt + +mv /var/log/clickhouse-server/* /test_output diff --git a/docker/test/integration/runner/compose/docker_compose_minio.yml b/docker/test/integration/runner/compose/docker_compose_minio.yml index 716e32863eb..dbb29f9711a 100644 --- a/docker/test/integration/runner/compose/docker_compose_minio.yml +++ b/docker/test/integration/runner/compose/docker_compose_minio.yml @@ -11,6 +11,7 @@ services: environment: MINIO_ACCESS_KEY: minio MINIO_SECRET_KEY: minio123 + MINIO_PROMETHEUS_AUTH_TYPE: public command: server --address :9001 --certs-dir /certs /data1-1 depends_on: - proxy1 diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 8e6221b5195..378e87f443b 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -498,7 +498,8 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv') left, right, diff, stat_threshold, if(report_threshold > 0, report_threshold, 0.10) as report_threshold, - test, query_index, query_display_name + query_metric_stats.test test, query_metric_stats.query_index query_index, + query_display_name from query_metric_stats left join file('analyze/report-thresholds.tsv', TSV, 'test text, report_threshold float') thresholds @@ -666,7 +667,8 @@ create view query_display_names as select * from create table unstable_query_runs engine File(TSVWithNamesAndTypes, 'unstable-query-runs.$version.rep') as - select test, query_index, query_display_name, query_id + select query_runs.test test, query_runs.query_index query_index, + query_display_name, query_id from query_runs join queries_for_flamegraph on query_runs.test = queries_for_flamegraph.test diff --git a/docker/test/stateful_with_coverage/run.sh b/docker/test/stateful_with_coverage/run.sh index 66877362d10..8928fc28f80 100755 --- a/docker/test/stateful_with_coverage/run.sh +++ b/docker/test/stateful_with_coverage/run.sh @@ -55,18 +55,21 @@ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-serv ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \ ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/; -ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \ - ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \ - ln -s 
/usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \ - ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \ - ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \ - ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; +ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/ +ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/ +ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/ + +# Retain any pre-existing config and allow ClickHouse to load those if required +ln -s --backup=simple --suffix=_original.xml \ + /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/ service zookeeper start diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 08adc94f535..a221e76f2f0 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -17,7 +17,6 @@ ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/ ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/ ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/ -ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/ ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/ ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/ ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/ @@ -33,6 +32,10 @@ ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/ ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/ ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/ +# Retain any pre-existing config and allow ClickHouse to load it if required +ln -s --backup=simple --suffix=_original.xml \ + /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/ + if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/ fi diff --git a/docker/test/stateless_with_coverage/run.sh b/docker/test/stateless_with_coverage/run.sh index 64e171b2869..64317ee62fd 100755 --- a/docker/test/stateless_with_coverage/run.sh +++ b/docker/test/stateless_with_coverage/run.sh @@ -46,27 +46,30 @@ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-serv ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \ 
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/; -ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \ - ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \ - ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \ - ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \ - ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \ - ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \ - ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/; \ - ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \ - ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \ - ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \ - ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml +ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/ +ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/ +ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/ +ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/ +ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/server.crt 
/etc/clickhouse-server/ +ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/ +ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml + +# Retain any pre-existing config and allow ClickHouse to load it if required +ln -s --backup=simple --suffix=_original.xml \ + /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/ service zookeeper start sleep 5 diff --git a/docker/test/stress/Dockerfile b/docker/test/stress/Dockerfile index 12e2d5579b4..6855a632df4 100644 --- a/docker/test/stress/Dockerfile +++ b/docker/test/stress/Dockerfile @@ -23,28 +23,7 @@ RUN apt-get update -y \ brotli COPY ./stress /stress +COPY run.sh / ENV DATASETS="hits visits" - -CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \ - dpkg -i package_folder/clickhouse-common-static-dbg_*.deb; \ - dpkg -i package_folder/clickhouse-server_*.deb; \ - dpkg -i package_folder/clickhouse-client_*.deb; \ - dpkg -i package_folder/clickhouse-test_*.deb; \ - ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \ - ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \ - echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_modules=1 verbosity=1'" >> /etc/environment; \ - echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \ - echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment; \ - service clickhouse-server start && sleep 5 \ - && /s3downloader --dataset-names $DATASETS \ - && chmod 777 -R /var/lib/clickhouse \ - && clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary" \ - && clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" \ - && service clickhouse-server restart && sleep 5 \ - && clickhouse-client --query "SHOW TABLES FROM datasets" \ - && clickhouse-client --query "SHOW TABLES FROM test" \ - && clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits" \ - && clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" \ - && clickhouse-client --query "SHOW TABLES FROM test" \ - && ./stress --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" +CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh new file mode 100755 index 00000000000..6669f8dd179 --- /dev/null +++ b/docker/test/stress/run.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +set -x + +dpkg -i package_folder/clickhouse-common-static_*.deb +dpkg -i package_folder/clickhouse-common-static-dbg_*.deb +dpkg -i package_folder/clickhouse-server_*.deb +dpkg -i package_folder/clickhouse-client_*.deb +dpkg -i package_folder/clickhouse-test_*.deb + +function wait_server() +{ + counter=0 + until clickhouse-client --query "SELECT 1" + do + if [ "$counter" -gt 120 ] + then + break + fi + sleep 0.5 + counter=$(($counter + 1)) + done +} + +ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/ +ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/ + +echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_modules=1 verbosity=1'" >> /etc/environment +echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment +echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment + +service clickhouse-server start + +wait_server + +/s3downloader --dataset-names $DATASETS +chmod 
777 -R /var/lib/clickhouse +clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary" +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" +service clickhouse-server restart + +wait_server + +clickhouse-client --query "SHOW TABLES FROM datasets" +clickhouse-client --query "SHOW TABLES FROM test" +clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits" +clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" +clickhouse-client --query "SHOW TABLES FROM test" + +./stress --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" + +service clickhouse-server restart + +wait_server + +clickhouse-client --query "SELECT 'Server successfully started'" > /test_output/alive_check.txt || echo 'Server failed to start' > /test_output/alive_check.txt diff --git a/docker/test/stress/stress b/docker/test/stress/stress index b5ed4dbd85d..b107dc59829 100755 --- a/docker/test/stress/stress +++ b/docker/test/stress/stress @@ -41,15 +41,6 @@ def run_func_test(cmd, output_prefix, num_processes, skip_tests_option): return pipes -def check_clickhouse_alive(cmd): - try: - logging.info("Checking ClickHouse still alive") - check_call("{} --query \"select 'Still alive'\"".format(cmd), shell=True) - return True - except: - return False - - if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') parser = argparse.ArgumentParser(description="ClickHouse script for running stresstest") @@ -65,29 +56,18 @@ if __name__ == "__main__": args = parser.parse_args() func_pipes = [] perf_process = None - try: - perf_process = run_perf_test(args.perf_test_cmd, args.perf_test_xml_path, args.output_folder) - func_pipes = run_func_test(args.test_cmd, args.output_folder, args.num_parallel, args.skip_func_tests) + perf_process = run_perf_test(args.perf_test_cmd, args.perf_test_xml_path, args.output_folder) + func_pipes = run_func_test(args.test_cmd, args.output_folder, args.num_parallel, args.skip_func_tests) - logging.info("Will wait functests to finish") - while True: - retcodes = [] - for p in func_pipes: - if p.poll() is not None: - retcodes.append(p.returncode) - if len(retcodes) == len(func_pipes): - break - logging.info("Finished %s from %s processes", len(retcodes), len(func_pipes)) - time.sleep(5) + logging.info("Will wait functests to finish") + while True: + retcodes = [] + for p in func_pipes: + if p.poll() is not None: + retcodes.append(p.returncode) + if len(retcodes) == len(func_pipes): + break + logging.info("Finished %s from %s processes", len(retcodes), len(func_pipes)) + time.sleep(5) - if not check_clickhouse_alive(args.client_cmd): - raise Exception("Stress failed, results in logs") - else: - logging.info("Stress is ok") - except Exception as ex: - raise ex - finally: - if os.path.exists(args.server_log_folder): - logging.info("Copying server log files") - for log_file in os.listdir(args.server_log_folder): - shutil.copy(os.path.join(args.server_log_folder, log_file), os.path.join(args.output_folder, log_file)) + logging.info("Stress test finished") diff --git a/docker/test/testflows/runner/Dockerfile b/docker/test/testflows/runner/Dockerfile index b922864fefd..f5498535f32 100644 --- a/docker/test/testflows/runner/Dockerfile +++ b/docker/test/testflows/runner/Dockerfile @@ -35,7 +35,7 @@ RUN apt-get update \ ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone -RUN pip3 install urllib3 testflows==1.6.24 docker-compose docker dicttoxml kazoo
tzlocal +RUN pip3 install urllib3 testflows==1.6.39 docker-compose docker dicttoxml kazoo tzlocal ENV DOCKER_CHANNEL stable ENV DOCKER_VERSION 17.09.1-ce diff --git a/docs/en/engines/table-engines/special/join.md b/docs/en/engines/table-engines/special/join.md index b8c3a6c83f1..0572627473e 100644 --- a/docs/en/engines/table-engines/special/join.md +++ b/docs/en/engines/table-engines/special/join.md @@ -24,7 +24,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/statem **Engine Parameters** -- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness). +- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types). - `join_type` – [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types). - `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is made with. diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index 7487b212955..c4f119ebab7 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -44,6 +44,7 @@ toc_title: Adopters | Lawrence Berkeley National Laboratory | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) | | LifeStreet | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) | | Mail.ru Cloud Solutions | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) | +| Marilyn | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) | | MessageBird | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | | MGID | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) | | OneAPM | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 1944573230c..da6c6519b36 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -471,7 +471,7 @@ Default value: 0. See also: -- [JOIN strictness](../../sql-reference/statements/select/join.md#select-join-strictness) +- [JOIN strictness](../../sql-reference/statements/select/join.md#join-settings) ## temporary\_files\_codec {#temporary_files_codec} diff --git a/docs/en/sql-reference/statements/select/join.md b/docs/en/sql-reference/statements/select/join.md index 0b42ed1a0d2..7d41930aab6 100644 --- a/docs/en/sql-reference/statements/select/join.md +++ b/docs/en/sql-reference/statements/select/join.md @@ -11,7 +11,7 @@ Syntax: ``` sql SELECT FROM -[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN +[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN (ON )|(USING ) ... ``` @@ -33,17 +33,13 @@ Additional join types available in ClickHouse: - `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product. 
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product. +- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, which partially (for the opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disable the cartesian product for standard `JOIN` types. +- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below. -## Strictness {#select-join-strictness} - -Modifies how matching by “join keys” is performed - -- `ALL` — The standard `JOIN` behavior in SQL as described above. The default. -- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types. -- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below. +## Settings {#join-settings} !!! note "Note" - The default strictness value can be overriden using [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting. + The default join type can be overridden using the [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting. Also the behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting. diff --git a/docs/es/engines/table-engines/special/join.md b/docs/es/engines/table-engines/special/join.md index bb8b0e513d9..a553a0d24ef 100644 --- a/docs/es/engines/table-engines/special/join.md +++ b/docs/es/engines/table-engines/special/join.md @@ -23,7 +23,7 @@ Vea la descripción detallada del [CREATE TABLE](../../../sql-reference/statemen **Parámetros del motor** -- `join_strictness` – [ÚNETE a la rigurosidad](../../../sql-reference/statements/select/join.md#select-join-strictness). +- `join_strictness` – [ÚNETE a la rigurosidad](../../../sql-reference/statements/select/join.md#select-join-types). - `join_type` – [Tipo de unión](../../../sql-reference/statements/select/join.md#select-join-types). - `k1[, k2, ...]` – Key columns from the `USING` cláusula que el `JOIN` operación se hace con. diff --git a/docs/es/sql-reference/statements/select/join.md b/docs/es/sql-reference/statements/select/join.md index 986cd3a11a3..158731d679c 100644 --- a/docs/es/sql-reference/statements/select/join.md +++ b/docs/es/sql-reference/statements/select/join.md @@ -12,7 +12,7 @@ Sintaxis: ``` sql SELECT FROM -[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN +[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN (ON )|(USING ) ... ``` @@ -34,14 +34,10 @@ Tipos de unión adicionales disponibles en ClickHouse: - `LEFT SEMI JOIN` y `RIGHT SEMI JOIN`, una lista blanca en “join keys”, sin producir un producto cartesiano. - `LEFT ANTI JOIN` y `RIGHT ANTI JOIN`, una lista negra sobre “join keys”, sin producir un producto cartesiano. +- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types. +- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below. -## Rigor {#select-join-strictness} - -Modifica cómo coincidir por “join keys” se realiza - -- `ALL` — The standard `JOIN` comportamiento en SQL como se describió anteriormente. Predeterminado.
-- `ANY` — Partially (for opposite side of `LEFT` y `RIGHT`) o completamente (para `INNER` y `FULL`) deshabilita el producto cartesiano para `JOIN` tipo. -- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` el uso se describe a continuación. +## Setting {#join-settings} !!! note "Nota" El valor de rigor predeterminado se puede anular usando [Por favor, introduzca su dirección de correo electrónico](../../../operations/settings/settings.md#settings-join_default_strictness) configuración. diff --git a/docs/fa/engines/table-engines/special/join.md b/docs/fa/engines/table-engines/special/join.md index d35246b5fc3..07518b5b897 100644 --- a/docs/fa/engines/table-engines/special/join.md +++ b/docs/fa/engines/table-engines/special/join.md @@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] **پارامترهای موتور** -- `join_strictness` – [پیوستن به سختی](../../../sql-reference/statements/select/join.md#select-join-strictness). +- `join_strictness` – [پیوستن به سختی](../../../sql-reference/statements/select/join.md#select-join-types). - `join_type` – [پیوستن به نوع](../../../sql-reference/statements/select/join.md#select-join-types). - `k1[, k2, ...]` – Key columns from the `USING` بند که `JOIN` عملیات با ساخته شده. diff --git a/docs/fa/sql-reference/statements/select/join.md b/docs/fa/sql-reference/statements/select/join.md index 21e19c124fd..c77049fb280 100644 --- a/docs/fa/sql-reference/statements/select/join.md +++ b/docs/fa/sql-reference/statements/select/join.md @@ -12,7 +12,7 @@ machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd ``` sql SELECT FROM -[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN +[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN (ON )|(USING ) ... ``` @@ -34,15 +34,12 @@ FROM - `LEFT SEMI JOIN` و `RIGHT SEMI JOIN`, یک لیست سفید در “join keys”, بدون تولید محصول دکارتی. - `LEFT ANTI JOIN` و `RIGHT ANTI JOIN`, لیست سیاه در “join keys”, بدون تولید محصول دکارتی. +- `LEFT ANY JOIN`, `RIGHT ANY JOIN` و `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types. +- `ASOF JOIN` و `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below. -## سختی {#select-join-strictness} +## Setting {#join-settings} تغییر چگونگی تطبیق توسط “join keys” انجام شده است - -- `ALL` — The standard `JOIN` رفتار در گذاشتن همانطور که در بالا توضیح. به طور پیش فرض. -- `ANY` — Partially (for opposite side of `LEFT` و `RIGHT`) یا به طور کامل (برای `INNER` و `FULL`) غیر فعال محصول دکارتی برای استاندارد `JOIN` انواع. -- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` استفاده در زیر توضیح داده شده است. - !!! note "یادداشت" مقدار سختگیرانه پیش فرض را می توان با استفاده از لغو [بررسی اجمالی](../../../operations/settings/settings.md#settings-join_default_strictness) تنظیمات. diff --git a/docs/fr/engines/table-engines/special/join.md b/docs/fr/engines/table-engines/special/join.md index a3b33a7aa32..01b14911e26 100644 --- a/docs/fr/engines/table-engines/special/join.md +++ b/docs/fr/engines/table-engines/special/join.md @@ -23,7 +23,7 @@ Voir la description détaillée de la [CREATE TABLE](../../../sql-reference/stat **Les Paramètres Du Moteur** -- `join_strictness` – [ADHÉRER à la rigueur](../../../sql-reference/statements/select/join.md#select-join-strictness). 
+- `join_strictness` – [ADHÉRER à la rigueur](../../../sql-reference/statements/select/join.md#select-join-types). - `join_type` – [Type de jointure](../../../sql-reference/statements/select/join.md#select-join-types). - `k1[, k2, ...]` – Key columns from the `USING` la clause que l' `JOIN` l'opération est faite avec de la. diff --git a/docs/fr/sql-reference/statements/select/join.md b/docs/fr/sql-reference/statements/select/join.md index d802f68e4bf..335086349d3 100644 --- a/docs/fr/sql-reference/statements/select/join.md +++ b/docs/fr/sql-reference/statements/select/join.md @@ -12,7 +12,7 @@ Syntaxe: ``` sql SELECT FROM -[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN +[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN (ON )|(USING ) ... ``` @@ -34,14 +34,10 @@ Autres types de jointure disponibles dans ClickHouse: - `LEFT SEMI JOIN` et `RIGHT SEMI JOIN` une liste blanche sur “join keys”, sans produire un produit cartésien. - `LEFT ANTI JOIN` et `RIGHT ANTI JOIN` une liste noire sur “join keys”, sans produire un produit cartésien. +- `LEFT ANY JOIN`, `RIGHT ANY JOIN` et `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types. +- `ASOF JOIN` et `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below. -## Rigueur {#select-join-strictness} - -Modifie la façon dont la correspondance par “join keys” est effectué - -- `ALL` — The standard `JOIN` comportement en SQL comme décrit ci-dessus. Défaut. -- `ANY` — Partially (for opposite side of `LEFT` et `RIGHT`) ou complètement (pour `INNER` et `FULL`) désactive le produit cartésien de la norme `JOIN` type. -- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` l'utilisation est décrite ci-dessous. +## Setting {#join-settings} !!! note "Note" La valeur de rigueur par défaut peut être remplacée à l'aide [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) paramètre. diff --git a/docs/ja/engines/table-engines/special/join.md b/docs/ja/engines/table-engines/special/join.md index 4a3cc351e51..e88bc12a484 100644 --- a/docs/ja/engines/table-engines/special/join.md +++ b/docs/ja/engines/table-engines/special/join.md @@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] **エンジン変数** -- `join_strictness` – [厳密に結合する](../../../sql-reference/statements/select/join.md#select-join-strictness). +- `join_strictness` – [厳密に結合する](../../../sql-reference/statements/select/join.md#select-join-types). - `join_type` – [結合タイプ](../../../sql-reference/statements/select/join.md#select-join-types). - `k1[, k2, ...]` – Key columns from the `USING` 句は、 `JOIN` 操作はでなされる。 diff --git a/docs/ru/engines/table-engines/special/join.md b/docs/ru/engines/table-engines/special/join.md index a5ed68f0959..2caf1187d50 100644 --- a/docs/ru/engines/table-engines/special/join.md +++ b/docs/ru/engines/table-engines/special/join.md @@ -16,7 +16,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] **Параметры движка** -- `join_strictness` – [строгость JOIN](../../../engines/table-engines/special/join.md#select-join-strictness). +- `join_strictness` – [строгость JOIN](../../../engines/table-engines/special/join.md#select-join-types). - `join_type` – [тип JOIN](../../../engines/table-engines/special/join.md#select-join-types). 
- `k1[, k2, ...]` – ключевые столбцы секции `USING` с которыми выполняется операция `JOIN`. diff --git a/docs/ru/sql-reference/statements/select/join.md b/docs/ru/sql-reference/statements/select/join.md index 26e7ae8257e..de5e3cdb76c 100644 --- a/docs/ru/sql-reference/statements/select/join.md +++ b/docs/ru/sql-reference/statements/select/join.md @@ -7,7 +7,7 @@ Join создаёт новую таблицу путем объединения ``` sql SELECT FROM -[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN +[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN (ON )|(USING ) ... ``` @@ -29,18 +29,14 @@ FROM - `LEFT SEMI JOIN` и `RIGHT SEMI JOIN`, белый список по ключам соединения, не производит декартово произведение. - `LEFT ANTI JOIN` и `RIGHT ANTI JOIN`, черный список по ключам соединения, не производит декартово произведение. +- `LEFT ANY JOIN`, `RIGHT ANY JOIN` и `INNER ANY JOIN`, Частично (для противоположных сторон `LEFT` и `RIGHT`) или полностью (для `INNER` и `FULL`) отключает декартово произведение для стандартых видов `JOIN`. +- `ASOF JOIN` и `LEFT ASOF JOIN`, Для соединения последовательностей по нечеткому совпадению. Использование `ASOF JOIN` описано ниже. -## Строгость {#select-join-strictness} - -Изменяет способ сопоставления по ключам соединения: - -- `ALL` — стандартное поведение `JOIN` в SQL, как описано выше. По умолчанию. -- `ANY` — Частично (для противоположных сторон `LEFT` и `RIGHT`) или полностью (для `INNER` и `FULL`) отключает декартово произведение для стандартых видов `JOIN`. -- `ASOF` — Для соединения последовательностей по нечеткому совпадению. Использование `ASOF JOIN` описано ниже. +## Настройки {#join-settings} !!! note "Примечание" Значение строгости по умолчанию может быть переопределено с помощью настройки [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness). - + ### Использование ASOF JOIN {#asof-join-usage} `ASOF JOIN` применим в том случае, когда необходимо объединять записи, которые не имеют точного совпадения. diff --git a/docs/tools/deploy-to-test.sh b/docs/tools/deploy-to-test.sh index 769d51cf5c8..44e7687c905 100755 --- a/docs/tools/deploy-to-test.sh +++ b/docs/tools/deploy-to-test.sh @@ -4,8 +4,11 @@ # This script deploys ClickHouse website to your personal test subdomain. 
# # Before first use of this script: -# 1) Create https://github.com/GIT_USER/clickhouse.github.io repo (replace GIT_USER with your GitHub login) -# 2) Send email on address from https://clickhouse.tech/#contacts asking to create GIT_USER-test.clickhouse.tech domain +# 1) Set up the documentation build according to https://github.com/ClickHouse/ClickHouse/tree/master/docs/tools#use-buildpy-use-build-py +# 2) Create https://github.com/GIT_USER/clickhouse.github.io repo (replace GIT_USER with your GitHub login) +# 3) Enable GitHub Pages in the settings of this repo +# 4) Add a file named CNAME in the root of this repo containing "GIT_USER-test.clickhouse.tech" (without quotes) +# 5) Send an email to the address from https://clickhouse.tech/#contacts asking to create the GIT_USER-test.clickhouse.tech domain # set -ex diff --git a/docs/tr/engines/table-engines/special/join.md b/docs/tr/engines/table-engines/special/join.md index bc9182d9823..f7605f1b579 100644 --- a/docs/tr/engines/table-engines/special/join.md +++ b/docs/tr/engines/table-engines/special/join.md @@ -23,7 +23,7 @@ Ayrıntılı açıklamasına bakın [CREATE TABLE](../../../sql-reference/statem **Motor Parametreleri** -- `join_strictness` – [Katılık katılın](../../../sql-reference/statements/select/join.md#select-join-strictness). +- `join_strictness` – [Katılık katılın](../../../sql-reference/statements/select/join.md#select-join-types). - `join_type` – [Birleştirme türü](../../../sql-reference/statements/select/join.md#select-join-types). - `k1[, k2, ...]` – Key columns from the `USING` fık thera: `JOIN` işlemi yapılmamaktadır. diff --git a/docs/zh/engines/table-engines/special/join.md b/docs/zh/engines/table-engines/special/join.md index a94803a401b..22e67ba46d5 100644 --- a/docs/zh/engines/table-engines/special/join.md +++ b/docs/zh/engines/table-engines/special/join.md @@ -24,7 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] **引擎参数** -- `join_strictness` – [JOIN 限制](../../../sql-reference/statements/select/join.md#select-join-strictness). +- `join_strictness` – [JOIN 限制](../../../sql-reference/statements/select/join.md#select-join-types). - `join_type` – [JOIN 类型](../../../sql-reference/statements/select/join.md#select-join-types). - `k1[, k2, ...]` – 进行`JOIN` 操作时 `USING`语句用到的key列 diff --git a/docs/zh/sql-reference/statements/select/join.md b/docs/zh/sql-reference/statements/select/join.md index 47fd0137717..355ed0e617c 100644 --- a/docs/zh/sql-reference/statements/select/join.md +++ b/docs/zh/sql-reference/statements/select/join.md @@ -13,7 +13,7 @@ Join通过使用一个或多个表的公共值合并来自一个或多个表的 ``` sql SELECT FROM -[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN +[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN (ON )|(USING ) ... ``` @@ -35,14 +35,10 @@ ClickHouse中提供的其他联接类型: - `LEFT SEMI JOIN` 和 `RIGHT SEMI JOIN`,白名单 “join keys”,而不产生笛卡尔积。 - `LEFT ANTI JOIN` 和 `RIGHT ANTI JOIN`,黑名单 “join keys”,而不产生笛卡尔积。 +- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types. +- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. -## 严格 {#select-join-strictness} - -修改如何匹配 “join keys” 执行 - -- `ALL` — The standard `JOIN` sql中的行为如上所述。 默认值。 -- `ANY` — Partially (for opposite side of `LEFT` 和 `RIGHT`)或完全(为 `INNER` 和 `FULL`)禁用笛卡尔积为标准 `JOIN` 类型。 -- `ASOF` — For joining sequences with a non-exact match.
`ASOF JOIN` 用法描述如下。 +## 严格 {#join-settings} !!! note "注" 可以使用以下方式复盖默认的严格性值 [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) 设置。 diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index c19495a0bb0..a09b7239abc 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -215,6 +215,9 @@ try /// Skip networking + /// Sets external authenticators config (LDAP). + context->setExternalAuthenticatorsConfig(config()); + setupUsers(); /// Limit on total number of concurrently executing queries. diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 919949fd6c1..e1395d87a84 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -295,7 +295,7 @@ int Server::main(const std::vector & /*args*/) #endif /** Context contains all that query execution is dependent: - * settings, available functions, data types, aggregate functions, databases... + * settings, available functions, data types, aggregate functions, databases, ... */ auto shared_context = Context::createShared(); auto global_context = std::make_unique(Context::createGlobal(shared_context.get())); @@ -543,6 +543,7 @@ int Server::main(const std::vector & /*args*/) //buildLoggers(*config, logger()); global_context->setClustersConfig(config); global_context->setMacros(std::make_unique(*config, "macros")); + global_context->setExternalAuthenticatorsConfig(*config); /// Setup protection to avoid accidental DROP for big tables (that are greater than 50 GB by default) if (config->has("max_table_size_to_drop")) diff --git a/programs/server/config.xml b/programs/server/config.xml index 85f6023f2a9..3e01964f0ff 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -215,6 +215,47 @@ /var/lib/clickhouse/access/ + + + + + users.xml diff --git a/programs/server/users.xml b/programs/server/users.xml index 3d95269190b..838b46e6938 100644 --- a/programs/server/users.xml +++ b/programs/server/users.xml @@ -44,6 +44,9 @@ If you want to specify double SHA1, place it in 'password_double_sha1_hex' element. Example: e395796d6546b1b65db9d665cd43f0e858dd4303 + If you want to specify a previously defined LDAP server (see 'ldap_servers' in main config) for authentication, place its name in 'server' element inside 'ldap' element. + Example: my_ldap_server + How to generate decent password: Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-' In first line will be password and in second - corresponding SHA256. 
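The empty `+` lines in the config.xml hunk above stood for a commented-out `ldap_servers` example whose markup did not survive extraction. A sketch of its shape, with key names taken from parseLDAPServer() in src/Access/ExternalAuthenticators.cpp further below and all values illustrative (only `host` is mandatory; `port` defaults to 636 with TLS and 389 without):

```xml
<!-- Sketch of an ldap_servers section for config.xml; key names follow
     parseLDAPServer(), values are illustrative, not from the original hunk. -->
<ldap_servers>
    <my_ldap_server>
        <host>ldap.example.com</host>
        <port>636</port>
        <auth_dn_prefix>uid=</auth_dn_prefix>
        <auth_dn_suffix>,ou=users,dc=example,dc=com</auth_dn_suffix>
        <enable_tls>yes</enable_tls>
        <tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>
        <tls_require_cert>demand</tls_require_cert>
        <tls_ca_cert_file>/path/to/ca.pem</tls_ca_cert_file>
    </my_ldap_server>
</ldap_servers>

<!-- In users.xml, a user then authenticates through that server by replacing
     the password fields with an 'ldap' element, as the comment above notes: -->
<users>
    <my_user>
        <ldap>
            <server>my_ldap_server</server>
        </ldap>
    </my_user>
</users>
```

Note that parseAndAddLDAPServers() below logs and skips a server whose entry fails to parse, so one bad `ldap_servers` entry does not prevent the remaining ones from loading.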
diff --git a/src/Access/AccessControlManager.cpp b/src/Access/AccessControlManager.cpp index 94a45e3e1c1..5966c1aff75 100644 --- a/src/Access/AccessControlManager.cpp +++ b/src/Access/AccessControlManager.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -64,7 +65,8 @@ AccessControlManager::AccessControlManager() role_cache(std::make_unique(*this)), row_policy_cache(std::make_unique(*this)), quota_cache(std::make_unique(*this)), - settings_profiles_cache(std::make_unique(*this)) + settings_profiles_cache(std::make_unique(*this)), + external_authenticators(std::make_unique()) { } @@ -79,6 +81,12 @@ void AccessControlManager::setLocalDirectory(const String & directory_path) } +void AccessControlManager::setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config) +{ + external_authenticators->setConfig(config, getLogger()); +} + + void AccessControlManager::setUsersConfig(const Poco::Util::AbstractConfiguration & users_config) { auto & users_config_access_storage = dynamic_cast(getStorageByIndex(USERS_CONFIG_ACCESS_STORAGE_INDEX)); @@ -163,4 +171,9 @@ std::shared_ptr AccessControlManager::getProfileSettings( return settings_profiles_cache->getProfileSettings(profile_name); } +const ExternalAuthenticators & AccessControlManager::getExternalAuthenticators() const +{ + return *external_authenticators; +} + } diff --git a/src/Access/AccessControlManager.h b/src/Access/AccessControlManager.h index d244ecd07d2..467b7471423 100644 --- a/src/Access/AccessControlManager.h +++ b/src/Access/AccessControlManager.h @@ -37,6 +37,7 @@ class EnabledSettings; class SettingsProfilesCache; class SettingsProfileElements; class ClientInfo; +class ExternalAuthenticators; struct Settings; @@ -48,6 +49,7 @@ public: ~AccessControlManager(); void setLocalDirectory(const String & directory); + void setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config); void setUsersConfig(const Poco::Util::AbstractConfiguration & users_config); void setDefaultProfileName(const String & default_profile_name); @@ -85,6 +87,8 @@ public: std::shared_ptr getProfileSettings(const String & profile_name) const; + const ExternalAuthenticators & getExternalAuthenticators() const; + private: class ContextAccessCache; std::unique_ptr context_access_cache; @@ -92,6 +96,7 @@ private: std::unique_ptr row_policy_cache; std::unique_ptr quota_cache; std::unique_ptr settings_profiles_cache; + std::unique_ptr external_authenticators; }; } diff --git a/src/Access/Authentication.cpp b/src/Access/Authentication.cpp index cb2c9e7a256..d29e2f897e8 100644 --- a/src/Access/Authentication.cpp +++ b/src/Access/Authentication.cpp @@ -1,4 +1,6 @@ #include +#include +#include #include #include @@ -37,6 +39,9 @@ Authentication::Digest Authentication::getPasswordDoubleSHA1() const case DOUBLE_SHA1_PASSWORD: return password_hash; + case LDAP_SERVER: + throw Exception("Cannot get password double SHA1 for user with 'LDAP_SERVER' authentication.", ErrorCodes::BAD_ARGUMENTS); + case MAX_TYPE: break; } @@ -44,7 +49,7 @@ Authentication::Digest Authentication::getPasswordDoubleSHA1() const } -bool Authentication::isCorrectPassword(const String & password_) const +bool Authentication::isCorrectPassword(const String & password_, const String & user_, const ExternalAuthenticators & external_authenticators) const { switch (type) { @@ -75,6 +80,16 @@ bool Authentication::isCorrectPassword(const String & password_) const return encodeSHA1(first_sha1) == password_hash; } + case LDAP_SERVER: + { + 
auto ldap_server_params = external_authenticators.getLDAPServerParams(server_name); + ldap_server_params.user = user_; + ldap_server_params.password = password_; + + LDAPSimpleAuthClient ldap_client(ldap_server_params); + return ldap_client.check(); + } + case MAX_TYPE: break; } diff --git a/src/Access/Authentication.h b/src/Access/Authentication.h index c410a101cdd..35ff0fa1d32 100644 --- a/src/Access/Authentication.h +++ b/src/Access/Authentication.h @@ -18,6 +18,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } +class ExternalAuthenticators; /// Authentication type and encrypted password for checking when an user logins. class Authentication @@ -38,6 +39,9 @@ public: /// This kind of hash is used by the `mysql_native_password` authentication plugin. DOUBLE_SHA1_PASSWORD, + /// Password is checked by a [remote] LDAP server. Connection will be made at each authentication attempt. + LDAP_SERVER, + MAX_TYPE, }; @@ -78,8 +82,14 @@ public: /// Allowed to use for Type::NO_PASSWORD, Type::PLAINTEXT_PASSWORD, Type::DOUBLE_SHA1_PASSWORD. Digest getPasswordDoubleSHA1() const; + /// Sets an external authentication server name. + /// When authentication type is LDAP_SERVER, server name is expected to be the name of a preconfigured LDAP server. + const String & getServerName() const; + void setServerName(const String & server_name_); + /// Checks if the provided password is correct. Returns false if not. - bool isCorrectPassword(const String & password) const; + /// User name and external authenticators' info are used only by some specific authentication type (e.g., LDAP_SERVER). + bool isCorrectPassword(const String & password_, const String & user_, const ExternalAuthenticators & external_authenticators) const; friend bool operator ==(const Authentication & lhs, const Authentication & rhs) { return (lhs.type == rhs.type) && (lhs.password_hash == rhs.password_hash); } friend bool operator !=(const Authentication & lhs, const Authentication & rhs) { return !(lhs == rhs); } @@ -93,6 +103,7 @@ private: Type type = Type::NO_PASSWORD; Digest password_hash; + String server_name; }; @@ -127,6 +138,11 @@ inline const Authentication::TypeInfo & Authentication::TypeInfo::get(Type type_ static const auto info = make_info("DOUBLE_SHA1_PASSWORD"); return info; } + case LDAP_SERVER: + { + static const auto info = make_info("LDAP_SERVER"); + return info; + } case MAX_TYPE: break; } throw Exception("Unknown authentication type: " + std::to_string(static_cast(type_)), ErrorCodes::LOGICAL_ERROR); @@ -176,6 +192,9 @@ inline void Authentication::setPassword(const String & password_) case DOUBLE_SHA1_PASSWORD: return setPasswordHashBinary(encodeDoubleSHA1(password_)); + case LDAP_SERVER: + throw Exception("Cannot specify password for the 'LDAP_SERVER' authentication type", ErrorCodes::LOGICAL_ERROR); + case MAX_TYPE: break; } throw Exception("setPassword(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED); @@ -200,6 +219,8 @@ inline void Authentication::setPasswordHashHex(const String & hash) inline String Authentication::getPasswordHashHex() const { + if (type == LDAP_SERVER) + throw Exception("Cannot get password of a user with the 'LDAP_SERVER' authentication type", ErrorCodes::LOGICAL_ERROR); String hex; hex.resize(password_hash.size() * 2); boost::algorithm::hex(password_hash.begin(), password_hash.end(), hex.data()); @@ -242,9 +263,22 @@ inline void Authentication::setPasswordHashBinary(const Digest & hash) return; } + case LDAP_SERVER: + throw Exception("Cannot 
specify password for the 'LDAP_SERVER' authentication type", ErrorCodes::LOGICAL_ERROR); + case MAX_TYPE: break; } throw Exception("setPasswordHashBinary(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED); } +inline const String & Authentication::getServerName() const +{ + return server_name; +} + +inline void Authentication::setServerName(const String & server_name_) +{ + server_name = server_name_; +} + } diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index b7b364e8369..d5e48baf110 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -293,7 +293,7 @@ bool ContextAccess::isCorrectPassword(const String & password) const std::lock_guard lock{mutex}; if (!user) return false; - return user->authentication.isCorrectPassword(password); + return user->authentication.isCorrectPassword(password, user_name, manager->getExternalAuthenticators()); } bool ContextAccess::isClientHostAllowed() const diff --git a/src/Access/ExternalAuthenticators.cpp b/src/Access/ExternalAuthenticators.cpp new file mode 100644 index 00000000000..a0c5fbf1a79 --- /dev/null +++ b/src/Access/ExternalAuthenticators.cpp @@ -0,0 +1,182 @@ +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +namespace +{ + +auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const String & ldap_server_name) +{ + if (ldap_server_name.empty()) + throw Exception("LDAP server name cannot be empty", ErrorCodes::BAD_ARGUMENTS); + + LDAPServerParams params; + + const String ldap_server_config = "ldap_servers." + ldap_server_name; + + const bool has_host = config.has(ldap_server_config + ".host"); + const bool has_port = config.has(ldap_server_config + ".port"); + const bool has_auth_dn_prefix = config.has(ldap_server_config + ".auth_dn_prefix"); + const bool has_auth_dn_suffix = config.has(ldap_server_config + ".auth_dn_suffix"); + const bool has_enable_tls = config.has(ldap_server_config + ".enable_tls"); + const bool has_tls_minimum_protocol_version = config.has(ldap_server_config + ".tls_minimum_protocol_version"); + const bool has_tls_require_cert = config.has(ldap_server_config + ".tls_require_cert"); + const bool has_tls_cert_file = config.has(ldap_server_config + ".tls_cert_file"); + const bool has_tls_key_file = config.has(ldap_server_config + ".tls_key_file"); + const bool has_tls_ca_cert_file = config.has(ldap_server_config + ".tls_ca_cert_file"); + const bool has_tls_ca_cert_dir = config.has(ldap_server_config + ".tls_ca_cert_dir"); + const bool has_tls_cipher_suite = config.has(ldap_server_config + ".tls_cipher_suite"); + + if (!has_host) + throw Exception("Missing 'host' entry", ErrorCodes::BAD_ARGUMENTS); + + params.host = config.getString(ldap_server_config + ".host"); + + if (params.host.empty()) + throw Exception("Empty 'host' entry", ErrorCodes::BAD_ARGUMENTS); + + if (has_auth_dn_prefix) + params.auth_dn_prefix = config.getString(ldap_server_config + ".auth_dn_prefix"); + + if (has_auth_dn_suffix) + params.auth_dn_suffix = config.getString(ldap_server_config + ".auth_dn_suffix"); + + if (has_enable_tls) + { + String enable_tls_lc_str = config.getString(ldap_server_config + ".enable_tls"); + boost::to_lower(enable_tls_lc_str); + + if (enable_tls_lc_str == "starttls") + params.enable_tls = LDAPServerParams::TLSEnable::YES_STARTTLS; + else if (config.getBool(ldap_server_config + ".enable_tls")) + params.enable_tls = 
LDAPServerParams::TLSEnable::YES; + else + params.enable_tls = LDAPServerParams::TLSEnable::NO; + } + + if (has_tls_minimum_protocol_version) + { + String tls_minimum_protocol_version_lc_str = config.getString(ldap_server_config + ".tls_minimum_protocol_version"); + boost::to_lower(tls_minimum_protocol_version_lc_str); + + if (tls_minimum_protocol_version_lc_str == "ssl2") + params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::SSL2; + else if (tls_minimum_protocol_version_lc_str == "ssl3") + params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::SSL3; + else if (tls_minimum_protocol_version_lc_str == "tls1.0") + params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::TLS1_0; + else if (tls_minimum_protocol_version_lc_str == "tls1.1") + params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::TLS1_1; + else if (tls_minimum_protocol_version_lc_str == "tls1.2") + params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::TLS1_2; + else + throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS); + } + + if (has_tls_require_cert) + { + String tls_require_cert_lc_str = config.getString(ldap_server_config + ".tls_require_cert"); + boost::to_lower(tls_require_cert_lc_str); + + if (tls_require_cert_lc_str == "never") + params.tls_require_cert = LDAPServerParams::TLSRequireCert::NEVER; + else if (tls_require_cert_lc_str == "allow") + params.tls_require_cert = LDAPServerParams::TLSRequireCert::ALLOW; + else if (tls_require_cert_lc_str == "try") + params.tls_require_cert = LDAPServerParams::TLSRequireCert::TRY; + else if (tls_require_cert_lc_str == "demand") + params.tls_require_cert = LDAPServerParams::TLSRequireCert::DEMAND; + else + throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS); + } + + if (has_tls_cert_file) + params.tls_cert_file = config.getString(ldap_server_config + ".tls_cert_file"); + + if (has_tls_key_file) + params.tls_key_file = config.getString(ldap_server_config + ".tls_key_file"); + + if (has_tls_ca_cert_file) + params.tls_ca_cert_file = config.getString(ldap_server_config + ".tls_ca_cert_file"); + + if (has_tls_ca_cert_dir) + params.tls_ca_cert_dir = config.getString(ldap_server_config + ".tls_ca_cert_dir"); + + if (has_tls_cipher_suite) + params.tls_cipher_suite = config.getString(ldap_server_config + ".tls_cipher_suite"); + + if (has_port) + { + const auto port = config.getInt64(ldap_server_config + ".port"); + if (port < 0 || port > 65535) + throw Exception("Bad value for 'port' entry", ErrorCodes::BAD_ARGUMENTS); + + params.port = port; + } + else + params.port = (params.enable_tls == LDAPServerParams::TLSEnable::YES ? 636 : 389); + + return params; +} + +void parseAndAddLDAPServers(ExternalAuthenticators & external_authenticators, const Poco::Util::AbstractConfiguration & config, Poco::Logger * log) +{ + Poco::Util::AbstractConfiguration::Keys ldap_server_names; + config.keys("ldap_servers", ldap_server_names); + + for (const auto & ldap_server_name : ldap_server_names) + { + try + { + external_authenticators.setLDAPServerParams(ldap_server_name, parseLDAPServer(config, ldap_server_name)); + } + catch (...) 
+ { + tryLogCurrentException(log, "Could not parse LDAP server " + backQuote(ldap_server_name)); + } + } +} + +} + +void ExternalAuthenticators::reset() +{ + std::scoped_lock lock(mutex); + ldap_server_params.clear(); +} + +void ExternalAuthenticators::setConfig(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log) +{ + std::scoped_lock lock(mutex); + reset(); + parseAndAddLDAPServers(*this, config, log); +} + +void ExternalAuthenticators::setLDAPServerParams(const String & server, const LDAPServerParams & params) +{ + std::scoped_lock lock(mutex); + ldap_server_params.erase(server); + ldap_server_params[server] = params; +} + +LDAPServerParams ExternalAuthenticators::getLDAPServerParams(const String & server) const +{ + std::scoped_lock lock(mutex); + auto it = ldap_server_params.find(server); + if (it == ldap_server_params.end()) + throw Exception("LDAP server '" + server + "' is not configured", ErrorCodes::BAD_ARGUMENTS); + return it->second; +} + +} diff --git a/src/Access/ExternalAuthenticators.h b/src/Access/ExternalAuthenticators.h new file mode 100644 index 00000000000..54af87604a6 --- /dev/null +++ b/src/Access/ExternalAuthenticators.h @@ -0,0 +1,39 @@ +#pragma once + +#include +#include + +#include +#include +#include + + +namespace Poco +{ + class Logger; + + namespace Util + { + class AbstractConfiguration; + } +} + + +namespace DB +{ + +class ExternalAuthenticators +{ +public: + void reset(); + void setConfig(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log); + + void setLDAPServerParams(const String & server, const LDAPServerParams & params); + LDAPServerParams getLDAPServerParams(const String & server) const; + +private: + mutable std::recursive_mutex mutex; + std::map ldap_server_params; +}; + +} diff --git a/src/Access/LDAPClient.cpp b/src/Access/LDAPClient.cpp new file mode 100644 index 00000000000..a85e96ab86c --- /dev/null +++ b/src/Access/LDAPClient.cpp @@ -0,0 +1,331 @@ +#include +#include +#include + +#include + +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME; + extern const int LDAP_ERROR; +} + +LDAPClient::LDAPClient(const LDAPServerParams & params_) + : params(params_) +{ +} + +LDAPClient::~LDAPClient() +{ + closeConnection(); +} + +void LDAPClient::openConnection() +{ + const bool graceful_bind_failure = false; + diag(openConnection(graceful_bind_failure)); +} + +#if USE_LDAP + +namespace +{ + auto escapeForLDAP(const String & src) + { + String dest; + dest.reserve(src.size() * 2); + + for (auto ch : src) + { + switch (ch) + { + case ',': + case '\\': + case '#': + case '+': + case '<': + case '>': + case ';': + case '"': + case '=': + dest += '\\'; + break; + } + dest += ch; + } + + return dest; + } +} + +void LDAPClient::diag(const int rc) +{ + if (rc != LDAP_SUCCESS) + { + String text; + const char * raw_err_str = ldap_err2string(rc); + + if (raw_err_str) + text = raw_err_str; + + if (handle) + { + String message; + char * raw_message = nullptr; + ldap_get_option(handle, LDAP_OPT_DIAGNOSTIC_MESSAGE, &raw_message); + + if (raw_message) + { + message = raw_message; + ldap_memfree(raw_message); + raw_message = nullptr; + } + + if (!message.empty()) + { + if (!text.empty()) + text += ": "; + text += message; + } + } + + throw Exception(text, ErrorCodes::LDAP_ERROR); + } +} + +int LDAPClient::openConnection(const bool graceful_bind_failure) +{ + closeConnection(); + + { + LDAPURLDesc url; + std::memset(&url, 0, sizeof(url)); + + 
url.lud_scheme = const_cast(params.enable_tls == LDAPServerParams::TLSEnable::YES ? "ldaps" : "ldap"); + url.lud_host = const_cast(params.host.c_str()); + url.lud_port = params.port; + url.lud_scope = LDAP_SCOPE_DEFAULT; + + auto * uri = ldap_url_desc2str(&url); + if (!uri) + throw Exception("ldap_url_desc2str() failed", ErrorCodes::LDAP_ERROR); + + SCOPE_EXIT({ ldap_memfree(uri); }); + + diag(ldap_initialize(&handle, uri)); + if (!handle) + throw Exception("ldap_initialize() failed", ErrorCodes::LDAP_ERROR); + } + + { + int value = 0; + switch (params.protocol_version) + { + case LDAPServerParams::ProtocolVersion::V2: value = LDAP_VERSION2; break; + case LDAPServerParams::ProtocolVersion::V3: value = LDAP_VERSION3; break; + } + diag(ldap_set_option(handle, LDAP_OPT_PROTOCOL_VERSION, &value)); + } + + diag(ldap_set_option(handle, LDAP_OPT_RESTART, LDAP_OPT_ON)); + +#ifdef LDAP_OPT_KEEPCONN + diag(ldap_set_option(handle, LDAP_OPT_KEEPCONN, LDAP_OPT_ON)); +#endif + +#ifdef LDAP_OPT_TIMEOUT + { + ::timeval operation_timeout; + operation_timeout.tv_sec = params.operation_timeout.count(); + operation_timeout.tv_usec = 0; + diag(ldap_set_option(handle, LDAP_OPT_TIMEOUT, &operation_timeout)); + } +#endif + +#ifdef LDAP_OPT_NETWORK_TIMEOUT + { + ::timeval network_timeout; + network_timeout.tv_sec = params.network_timeout.count(); + network_timeout.tv_usec = 0; + diag(ldap_set_option(handle, LDAP_OPT_NETWORK_TIMEOUT, &network_timeout)); + } +#endif + + { + const int search_timeout = params.search_timeout.count(); + diag(ldap_set_option(handle, LDAP_OPT_TIMELIMIT, &search_timeout)); + } + + { + const int size_limit = params.search_limit; + diag(ldap_set_option(handle, LDAP_OPT_SIZELIMIT, &size_limit)); + } + +#ifdef LDAP_OPT_X_TLS_PROTOCOL_MIN + { + int value = 0; + switch (params.tls_minimum_protocol_version) + { + case LDAPServerParams::TLSProtocolVersion::SSL2: value = LDAP_OPT_X_TLS_PROTOCOL_SSL2; break; + case LDAPServerParams::TLSProtocolVersion::SSL3: value = LDAP_OPT_X_TLS_PROTOCOL_SSL3; break; + case LDAPServerParams::TLSProtocolVersion::TLS1_0: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_0; break; + case LDAPServerParams::TLSProtocolVersion::TLS1_1: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_1; break; + case LDAPServerParams::TLSProtocolVersion::TLS1_2: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2; break; + } + diag(ldap_set_option(handle, LDAP_OPT_X_TLS_PROTOCOL_MIN, &value)); + } +#endif + +#ifdef LDAP_OPT_X_TLS_REQUIRE_CERT + { + int value = 0; + switch (params.tls_require_cert) + { + case LDAPServerParams::TLSRequireCert::NEVER: value = LDAP_OPT_X_TLS_NEVER; break; + case LDAPServerParams::TLSRequireCert::ALLOW: value = LDAP_OPT_X_TLS_ALLOW; break; + case LDAPServerParams::TLSRequireCert::TRY: value = LDAP_OPT_X_TLS_TRY; break; + case LDAPServerParams::TLSRequireCert::DEMAND: value = LDAP_OPT_X_TLS_DEMAND; break; + } + diag(ldap_set_option(handle, LDAP_OPT_X_TLS_REQUIRE_CERT, &value)); + } +#endif + +#ifdef LDAP_OPT_X_TLS_CERTFILE + if (!params.tls_cert_file.empty()) + diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CERTFILE, params.tls_cert_file.c_str())); +#endif + +#ifdef LDAP_OPT_X_TLS_KEYFILE + if (!params.tls_key_file.empty()) + diag(ldap_set_option(handle, LDAP_OPT_X_TLS_KEYFILE, params.tls_key_file.c_str())); +#endif + +#ifdef LDAP_OPT_X_TLS_CACERTFILE + if (!params.tls_ca_cert_file.empty()) + diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTFILE, params.tls_ca_cert_file.c_str())); +#endif + +#ifdef LDAP_OPT_X_TLS_CACERTDIR + if (!params.tls_ca_cert_dir.empty()) + diag(ldap_set_option(handle, 
LDAP_OPT_X_TLS_CACERTDIR, params.tls_ca_cert_dir.c_str())); +#endif + +#ifdef LDAP_OPT_X_TLS_CIPHER_SUITE + if (!params.tls_cipher_suite.empty()) + diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CIPHER_SUITE, params.tls_cipher_suite.c_str())); +#endif + +#ifdef LDAP_OPT_X_TLS_NEWCTX + { + const int i_am_a_server = 0; + diag(ldap_set_option(handle, LDAP_OPT_X_TLS_NEWCTX, &i_am_a_server)); + } +#endif + + if (params.enable_tls == LDAPServerParams::TLSEnable::YES_STARTTLS) + diag(ldap_start_tls_s(handle, nullptr, nullptr)); + + int rc = LDAP_OTHER; + + switch (params.sasl_mechanism) + { + case LDAPServerParams::SASLMechanism::SIMPLE: + { + const String dn = params.auth_dn_prefix + escapeForLDAP(params.user) + params.auth_dn_suffix; + + ::berval cred; + cred.bv_val = const_cast(params.password.c_str()); + cred.bv_len = params.password.size(); + + rc = ldap_sasl_bind_s(handle, dn.c_str(), LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr); + + if (!graceful_bind_failure) + diag(rc); + + break; + } + } + + return rc; +} + +void LDAPClient::closeConnection() noexcept +{ + if (!handle) + return; + + ldap_unbind_ext_s(handle, nullptr, nullptr); + handle = nullptr; +} + +bool LDAPSimpleAuthClient::check() +{ + if (params.user.empty()) + throw Exception("LDAP authentication of a user with an empty name is not allowed", ErrorCodes::BAD_ARGUMENTS); + + if (params.password.empty()) + return false; // Silently reject authentication attempt if the password is empty as if it didn't match. + + SCOPE_EXIT({ closeConnection(); }); + + const bool graceful_bind_failure = true; + const auto rc = openConnection(graceful_bind_failure); + + bool result = false; + + switch (rc) + { + case LDAP_SUCCESS: + { + result = true; + break; + } + + case LDAP_INVALID_CREDENTIALS: + { + result = false; + break; + } + + default: + { + result = false; + diag(rc); + break; + } + } + + return result; +} + +#else // USE_LDAP + +void LDAPClient::diag(const int) +{ + throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME); +} + +int LDAPClient::openConnection(const bool) +{ + throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME); +} + +void LDAPClient::closeConnection() noexcept +{ +} + +bool LDAPSimpleAuthClient::check() +{ + throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME); +} + +#endif // USE_LDAP + +} diff --git a/src/Access/LDAPClient.h b/src/Access/LDAPClient.h new file mode 100644 index 00000000000..5aad2ed3061 --- /dev/null +++ b/src/Access/LDAPClient.h @@ -0,0 +1,55 @@ +#pragma once + +#if !defined(ARCADIA_BUILD) +# include "config_core.h" +#endif + +#include +#include + +#if USE_LDAP +# include +# define MAYBE_NORETURN +#else +# define MAYBE_NORETURN [[noreturn]] +#endif + + +namespace DB +{ + +class LDAPClient +{ +public: + explicit LDAPClient(const LDAPServerParams & params_); + ~LDAPClient(); + + LDAPClient(const LDAPClient &) = delete; + LDAPClient(LDAPClient &&) = delete; + LDAPClient & operator= (const LDAPClient &) = delete; + LDAPClient & operator= (LDAPClient &&) = delete; + +protected: + MAYBE_NORETURN void diag(const int rc); + MAYBE_NORETURN void openConnection(); + int openConnection(const bool graceful_bind_failure = false); + void closeConnection() noexcept; + +protected: + const LDAPServerParams params; +#if USE_LDAP + LDAP * handle = nullptr; +#endif +}; + +class LDAPSimpleAuthClient + : private LDAPClient +{ +public: + using 
LDAPClient::LDAPClient; + bool check(); +}; + +} + +#undef MAYBE_NORETURN diff --git a/src/Access/LDAPParams.h b/src/Access/LDAPParams.h new file mode 100644 index 00000000000..0d7c7dd17cd --- /dev/null +++ b/src/Access/LDAPParams.h @@ -0,0 +1,76 @@ +#pragma once + +#include + +#include + + +namespace DB +{ + +struct LDAPServerParams +{ + enum class ProtocolVersion + { + V2, + V3 + }; + + enum class TLSEnable + { + NO, + YES_STARTTLS, + YES + }; + + enum class TLSProtocolVersion + { + SSL2, + SSL3, + TLS1_0, + TLS1_1, + TLS1_2 + }; + + enum class TLSRequireCert + { + NEVER, + ALLOW, + TRY, + DEMAND + }; + + enum class SASLMechanism + { + SIMPLE + }; + + ProtocolVersion protocol_version = ProtocolVersion::V3; + + String host; + std::uint16_t port = 636; + + TLSEnable enable_tls = TLSEnable::YES; + TLSProtocolVersion tls_minimum_protocol_version = TLSProtocolVersion::TLS1_2; + TLSRequireCert tls_require_cert = TLSRequireCert::DEMAND; + String tls_cert_file; + String tls_key_file; + String tls_ca_cert_file; + String tls_ca_cert_dir; + String tls_cipher_suite; + + SASLMechanism sasl_mechanism = SASLMechanism::SIMPLE; + + String auth_dn_prefix; + String auth_dn_suffix; + + String user; + String password; + + std::chrono::seconds operation_timeout{40}; + std::chrono::seconds network_timeout{30}; + std::chrono::seconds search_timeout{20}; + std::uint32_t search_limit = 100; +}; + +} diff --git a/src/Access/UsersConfigAccessStorage.cpp b/src/Access/UsersConfigAccessStorage.cpp index 4d7d1b4cdfe..e3fb9104a66 100644 --- a/src/Access/UsersConfigAccessStorage.cpp +++ b/src/Access/UsersConfigAccessStorage.cpp @@ -56,14 +56,15 @@ namespace bool has_password_plaintext = config.has(user_config + ".password"); bool has_password_sha256_hex = config.has(user_config + ".password_sha256_hex"); bool has_password_double_sha1_hex = config.has(user_config + ".password_double_sha1_hex"); + bool has_ldap = config.has(user_config + ".ldap"); - size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex; + size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex + has_ldap; if (num_password_fields > 1) - throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password' are used to specify password for user " + user_name + ". Must be only one of them.", + throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password', 'ldap' are used to specify password for user " + user_name + ". 
Must be only one of them.", ErrorCodes::BAD_ARGUMENTS); if (num_password_fields < 1) - throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS); + throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' or 'ldap' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS); if (has_password_plaintext) { @@ -80,6 +81,19 @@ namespace user->authentication = Authentication{Authentication::DOUBLE_SHA1_PASSWORD}; user->authentication.setPasswordHashHex(config.getString(user_config + ".password_double_sha1_hex")); } + else if (has_ldap) + { + bool has_ldap_server = config.has(user_config + ".ldap.server"); + if (!has_ldap_server) + throw Exception("Missing mandatory 'server' in 'ldap', with LDAP server name, for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS); + + const auto ldap_server_name = config.getString(user_config + ".ldap.server"); + if (ldap_server_name.empty()) + throw Exception("LDAP server name cannot be empty for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS); + + user->authentication = Authentication{Authentication::LDAP_SERVER}; + user->authentication.setServerName(ldap_server_name); + } const auto profile_name_config = user_config + ".profile"; if (config.has(profile_name_config)) diff --git a/src/Access/ya.make b/src/Access/ya.make index 77c94b87dfa..175bb86d737 100644 --- a/src/Access/ya.make +++ b/src/Access/ya.make @@ -17,9 +17,11 @@ SRCS( EnabledRolesInfo.cpp EnabledRowPolicies.cpp EnabledSettings.cpp + ExternalAuthenticators.cpp GrantedRoles.cpp IAccessEntity.cpp IAccessStorage.cpp + LDAPClient.cpp MemoryAccessStorage.cpp MultipleAccessStorage.cpp Quota.cpp diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index 95b4836c336..f7bc228bcdd 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -20,6 +20,7 @@ template struct AggregateFunctionAvgData { using NumeratorType = T; + using DenominatorType = Denominator; T numerator = 0; Denominator denominator = 0; @@ -73,13 +74,21 @@ public: void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override { writeBinary(this->data(place).numerator, buf); - writeBinary(this->data(place).denominator, buf); + + if constexpr (std::is_unsigned_v) + writeVarUInt(this->data(place).denominator, buf); + else /// Floating point denominator type can be used + writeBinary(this->data(place).denominator, buf); } void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena *) const override { readBinary(this->data(place).numerator, buf); - readBinary(this->data(place).denominator, buf); + + if constexpr (std::is_unsigned_v) + readVarUInt(this->data(place).denominator, buf); + else /// Floating point denominator type can be used + readBinary(this->data(place).denominator, buf); } void insertResultInto(AggregateDataPtr place, IColumn & to, Arena *) const override diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index be5d3ba836d..826b5ad54de 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -332,7 +332,7 @@ if (OPENSSL_CRYPTO_LIBRARY) endif () if (USE_LDAP) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${OPENLDAP_INCLUDE_DIR}) + dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${OPENLDAP_INCLUDE_DIRS}) dbms_target_link_libraries (PRIVATE ${OPENLDAP_LIBRARIES}) endif () 
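The AggregateFunctionAvg.h hunk above makes the denominator's wire format depend on its type: unsigned integer denominators (plain row counts) are now written as varints, while floating-point denominators (possible for weighted averages) keep fixed-width binary encoding. A self-contained sketch of that dispatch, with a minimal LEB128-style writer and a `Buffer` type standing in for ClickHouse's writeVarUInt/writeBinary and WriteBuffer (stand-ins, not the real API):

```cpp
#include <cstdint>
#include <type_traits>
#include <vector>

using Buffer = std::vector<uint8_t>;

// LEB128-style varint: 7 bits per byte, high bit marks continuation.
void writeVarUInt(uint64_t x, Buffer & buf)
{
    while (x >= 0x80)
    {
        buf.push_back(static_cast<uint8_t>(x) | 0x80);
        x >>= 7;
    }
    buf.push_back(static_cast<uint8_t>(x));
}

// Fixed-width copy of the value's bytes, as plain binary serialization does.
template <typename T>
void writeBinary(const T & x, Buffer & buf)
{
    const auto * bytes = reinterpret_cast<const uint8_t *>(&x);
    buf.insert(buf.end(), bytes, bytes + sizeof(T));
}

// The dispatch added to AggregateFunctionAvg::serialize: small counts shrink
// to one-byte varints, while Float64 denominators must stay fixed-width.
template <typename Denominator>
void serializeDenominator(Denominator denominator, Buffer & buf)
{
    if constexpr (std::is_unsigned_v<Denominator>)
        writeVarUInt(denominator, buf);
    else
        writeBinary(denominator, buf);
}
```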
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) @@ -370,7 +370,9 @@ endif() target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR}) -target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR}) +if (USE_MSGPACK) + target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR}) +endif() if (USE_ORC) dbms_target_link_libraries(PUBLIC ${ORC_LIBRARIES}) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 7fcd7572039..b91004fa92f 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -498,6 +498,7 @@ namespace ErrorCodes extern const int NOT_A_LEADER = 529; extern const int CANNOT_CONNECT_RABBITMQ = 530; extern const int CANNOT_FSTAT = 531; + extern const int LDAP_ERROR = 532; extern const int KEEPER_EXCEPTION = 999; extern const int POCO_EXCEPTION = 1000; diff --git a/src/Common/FileChecker.cpp b/src/Common/FileChecker.cpp index 687b4dccca7..6cbec3bda77 100644 --- a/src/Common/FileChecker.cpp +++ b/src/Common/FileChecker.cpp @@ -12,6 +12,12 @@ namespace DB { +namespace ErrorCodes +{ + extern const int UNEXPECTED_END_OF_FILE; +} + + FileChecker::FileChecker(DiskPtr disk_, const String & file_info_path_) : disk(std::move(disk_)) { setPath(file_info_path_); @@ -24,19 +30,15 @@ void FileChecker::setPath(const String & file_info_path_) tmp_files_info_path = parentPath(files_info_path) + "tmp_" + fileName(files_info_path); } -void FileChecker::update(const String & file_path) +void FileChecker::update(const String & full_file_path) { initialize(); - updateImpl(file_path); - save(); + map[fileName(full_file_path)] = disk->getFileSize(full_file_path); } -void FileChecker::update(const Strings::const_iterator & begin, const Strings::const_iterator & end) +void FileChecker::setEmpty(const String & full_file_path) { - initialize(); - for (auto it = begin; it != end; ++it) - updateImpl(*it); - save(); + map[fileName(full_file_path)] = 0; } CheckResults FileChecker::check() const @@ -73,6 +75,28 @@ CheckResults FileChecker::check() const return results; } +void FileChecker::repair() +{ + for (const auto & name_size : map) + { + const String & name = name_size.first; + size_t expected_size = name_size.second; + String path = parentPath(files_info_path) + name; + bool exists = disk->exists(path); + auto real_size = exists ? disk->getFileSize(path) : 0; /// No race condition assuming no one else is working with these files. + + if (real_size < expected_size) + throw Exception(ErrorCodes::UNEXPECTED_END_OF_FILE, "Size of {} is less than expected. 
Size is {} but should be {}.", + path, real_size, expected_size); + + if (real_size > expected_size) + { + LOG_WARNING(&Poco::Logger::get("FileChecker"), "Will truncate file {} that has size {} to size {}", path, real_size, expected_size); + disk->truncateFile(path, expected_size); + } + } +} + void FileChecker::initialize() { if (initialized) @@ -82,11 +106,6 @@ void FileChecker::initialize() initialized = true; } -void FileChecker::updateImpl(const String & file_path) -{ - map[fileName(file_path)] = disk->getFileSize(file_path); -} - void FileChecker::save() const { { diff --git a/src/Common/FileChecker.h b/src/Common/FileChecker.h index 83db397e78c..015d4cadb07 100644 --- a/src/Common/FileChecker.h +++ b/src/Common/FileChecker.h @@ -14,19 +14,25 @@ class FileChecker public: FileChecker(DiskPtr disk_, const String & file_info_path_); void setPath(const String & file_info_path_); - void update(const String & file_path); - void update(const Strings::const_iterator & begin, const Strings::const_iterator & end); + + void update(const String & full_file_path); + void setEmpty(const String & full_file_path); + void save() const; /// Check the files whose parameters are specified in sizes.json CheckResults check() const; + /// Truncate files that have excessive size to the expected size. + /// Throw exception if the file size is less than expected. + /// The purpose of this function is to rollback a group of unfinished writes. + void repair(); + private: /// File name -> size. using Map = std::map; void initialize(); void updateImpl(const String & file_path); - void save() const; void load(Map & local_map, const String & path) const; DiskPtr disk; diff --git a/src/Common/MemoryStatisticsOS.cpp b/src/Common/MemoryStatisticsOS.cpp index 6082d23cbd0..7c65461aeb6 100644 --- a/src/Common/MemoryStatisticsOS.cpp +++ b/src/Common/MemoryStatisticsOS.cpp @@ -1,3 +1,5 @@ +#if defined(OS_LINUX) + #include #include #include @@ -101,3 +103,5 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const } } + +#endif diff --git a/src/Common/MemoryStatisticsOS.h b/src/Common/MemoryStatisticsOS.h index 97caf4e8fbe..1661b62711f 100644 --- a/src/Common/MemoryStatisticsOS.h +++ b/src/Common/MemoryStatisticsOS.h @@ -1,4 +1,5 @@ #pragma once +#if defined(OS_LINUX) #include @@ -38,3 +39,5 @@ private: }; } + +#endif diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 5c88b2ee849..7adf82570ba 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -212,6 +212,21 @@ M(NotCreatedLogEntryForMerge, "Log entry to merge parts in ReplicatedMergeTree is not created due to concurrent log update by another replica.") \ M(CreatedLogEntryForMutation, "Successfully created log entry to mutate parts in ReplicatedMergeTree.") \ M(NotCreatedLogEntryForMutation, "Log entry to mutate parts in ReplicatedMergeTree is not created due to concurrent log update by another replica.") \ + \ + M(S3ReadMicroseconds, "Time of GET and HEAD requests to S3 storage.") \ + M(S3ReadBytes, "Read bytes (incoming) in GET and HEAD requests to S3 storage.") \ + M(S3ReadRequestsCount, "Number of GET and HEAD requests to S3 storage.") \ + M(S3ReadRequestsErrors, "Number of non-throttling errors in GET and HEAD requests to S3 storage.") \ + M(S3ReadRequestsThrottling, "Number of 429 and 503 errors in GET and HEAD requests to S3 storage.") \ + M(S3ReadRequestsRedirects, "Number of redirects in GET and HEAD requests to S3 storage.") \ + \ + M(S3WriteMicroseconds, "Time of POST, DELETE, PUT and PATCH requests to S3 
storage.") \ + M(S3WriteBytes, "Write bytes (outgoing) in POST, DELETE, PUT and PATCH requests to S3 storage.") \ + M(S3WriteRequestsCount, "Number of POST, DELETE, PUT and PATCH requests to S3 storage.") \ + M(S3WriteRequestsErrors, "Number of non-throttling errors in POST, DELETE, PUT and PATCH requests to S3 storage.") \ + M(S3WriteRequestsThrottling, "Number of 429 and 503 errors in POST, DELETE, PUT and PATCH requests to S3 storage.") \ + M(S3WriteRequestsRedirects, "Number of redirects in POST, DELETE, PUT and PATCH requests to S3 storage.") \ + namespace ProfileEvents { diff --git a/src/Common/ZooKeeper/TestKeeper.cpp b/src/Common/ZooKeeper/TestKeeper.cpp index a734d218ff6..1b203d92fb8 100644 --- a/src/Common/ZooKeeper/TestKeeper.cpp +++ b/src/Common/ZooKeeper/TestKeeper.cpp @@ -5,6 +5,7 @@ #include #include +#include namespace Coordination @@ -25,11 +26,14 @@ static String baseName(const String & path) } +using Undo = std::function; + + struct TestKeeperRequest : virtual Request { virtual bool isMutable() const { return false; } virtual ResponsePtr createResponse() const = 0; - virtual ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const = 0; + virtual std::pair process(TestKeeper::Container & container, int64_t zxid) const = 0; virtual void processWatches(TestKeeper::Watches & /*watches*/, TestKeeper::Watches & /*list_watches*/) const {} }; @@ -69,7 +73,7 @@ struct TestKeeperCreateRequest final : CreateRequest, TestKeeperRequest TestKeeperCreateRequest() = default; explicit TestKeeperCreateRequest(const CreateRequest & base) : CreateRequest(base) {} ResponsePtr createResponse() const override; - ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override; + std::pair process(TestKeeper::Container & container, int64_t zxid) const override; void processWatches(TestKeeper::Watches & node_watches, TestKeeper::Watches & list_watches) const override { @@ -83,7 +87,7 @@ struct TestKeeperRemoveRequest final : RemoveRequest, TestKeeperRequest explicit TestKeeperRemoveRequest(const RemoveRequest & base) : RemoveRequest(base) {} bool isMutable() const override { return true; } ResponsePtr createResponse() const override; - ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override; + std::pair process(TestKeeper::Container & container, int64_t zxid) const override; void processWatches(TestKeeper::Watches & node_watches, TestKeeper::Watches & list_watches) const override { @@ -94,14 +98,14 @@ struct TestKeeperRemoveRequest final : RemoveRequest, TestKeeperRequest struct TestKeeperExistsRequest final : ExistsRequest, TestKeeperRequest { ResponsePtr createResponse() const override; - ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override; + std::pair process(TestKeeper::Container & container, int64_t zxid) const override; }; struct TestKeeperGetRequest final : GetRequest, TestKeeperRequest { TestKeeperGetRequest() = default; ResponsePtr createResponse() const override; - ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override; + std::pair process(TestKeeper::Container & container, int64_t zxid) const override; }; struct TestKeeperSetRequest final : SetRequest, TestKeeperRequest @@ -110,7 +114,7 @@ struct TestKeeperSetRequest final : SetRequest, TestKeeperRequest explicit TestKeeperSetRequest(const SetRequest & base) : SetRequest(base) {} bool isMutable() const override { return true; } ResponsePtr createResponse() const override; - ResponsePtr 
process(TestKeeper::Container & container, int64_t zxid) const override; + std::pair process(TestKeeper::Container & container, int64_t zxid) const override; void processWatches(TestKeeper::Watches & node_watches, TestKeeper::Watches & list_watches) const override { @@ -121,7 +125,7 @@ struct TestKeeperSetRequest final : SetRequest, TestKeeperRequest struct TestKeeperListRequest final : ListRequest, TestKeeperRequest { ResponsePtr createResponse() const override; - ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override; + std::pair process(TestKeeper::Container & container, int64_t zxid) const override; }; struct TestKeeperCheckRequest final : CheckRequest, TestKeeperRequest @@ -129,7 +133,7 @@ struct TestKeeperCheckRequest final : CheckRequest, TestKeeperRequest TestKeeperCheckRequest() = default; explicit TestKeeperCheckRequest(const CheckRequest & base) : CheckRequest(base) {} ResponsePtr createResponse() const override; - ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override; + std::pair process(TestKeeper::Container & container, int64_t zxid) const override; }; struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest @@ -169,13 +173,15 @@ struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest } ResponsePtr createResponse() const override; - ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override; + std::pair process(TestKeeper::Container & container, int64_t zxid) const override; }; -ResponsePtr TestKeeperCreateRequest::process(TestKeeper::Container & container, int64_t zxid) const +std::pair TestKeeperCreateRequest::process(TestKeeper::Container & container, int64_t zxid) const { CreateResponse response; + Undo undo; + if (container.count(path)) { response.error = Error::ZNODEEXISTS; @@ -219,7 +225,18 @@ ResponsePtr TestKeeperCreateRequest::process(TestKeeper::Container & container, } response.path_created = path_created; - container.emplace(std::move(path_created), std::move(created_node)); + container.emplace(path_created, std::move(created_node)); + + undo = [&container, path_created, is_sequential = is_sequential, parent_path = it->first] + { + container.erase(path_created); + auto & undo_parent = container.at(parent_path); + --undo_parent.stat.cversion; + --undo_parent.stat.numChildren; + + if (is_sequential) + --undo_parent.seq_num; + }; ++it->second.stat.cversion; ++it->second.stat.numChildren; @@ -228,12 +245,13 @@ ResponsePtr TestKeeperCreateRequest::process(TestKeeper::Container & container, } } - return std::make_shared(response); + return { std::make_shared(response), undo }; } -ResponsePtr TestKeeperRemoveRequest::process(TestKeeper::Container & container, int64_t) const +std::pair TestKeeperRemoveRequest::process(TestKeeper::Container & container, int64_t) const { RemoveResponse response; + Undo undo; auto it = container.find(path); if (it == container.end()) @@ -250,17 +268,26 @@ ResponsePtr TestKeeperRemoveRequest::process(TestKeeper::Container & container, } else { + auto prev_node = it->second; container.erase(it); auto & parent = container.at(parentPath(path)); --parent.stat.numChildren; ++parent.stat.cversion; response.error = Error::ZOK; + + undo = [prev_node, &container, path = path] + { + container.emplace(path, prev_node); + auto & undo_parent = container.at(parentPath(path)); + ++undo_parent.stat.numChildren; + --undo_parent.stat.cversion; + }; } - return std::make_shared(response); + return { std::make_shared(response), undo }; } -ResponsePtr 
TestKeeperExistsRequest::process(TestKeeper::Container & container, int64_t) const +std::pair TestKeeperExistsRequest::process(TestKeeper::Container & container, int64_t) const { ExistsResponse response; @@ -275,10 +302,10 @@ ResponsePtr TestKeeperExistsRequest::process(TestKeeper::Container & container, response.error = Error::ZNONODE; } - return std::make_shared(response); + return { std::make_shared(response), {} }; } -ResponsePtr TestKeeperGetRequest::process(TestKeeper::Container & container, int64_t) const +std::pair TestKeeperGetRequest::process(TestKeeper::Container & container, int64_t) const { GetResponse response; @@ -294,12 +321,13 @@ ResponsePtr TestKeeperGetRequest::process(TestKeeper::Container & container, int response.error = Error::ZOK; } - return std::make_shared(response); + return { std::make_shared(response), {} }; } -ResponsePtr TestKeeperSetRequest::process(TestKeeper::Container & container, int64_t zxid) const +std::pair TestKeeperSetRequest::process(TestKeeper::Container & container, int64_t zxid) const { SetResponse response; + Undo undo; auto it = container.find(path); if (it == container.end()) @@ -308,6 +336,8 @@ ResponsePtr TestKeeperSetRequest::process(TestKeeper::Container & container, int } else if (version == -1 || version == it->second.stat.version) { + auto prev_node = it->second; + it->second.data = data; ++it->second.stat.version; it->second.stat.mzxid = zxid; @@ -316,16 +346,22 @@ ResponsePtr TestKeeperSetRequest::process(TestKeeper::Container & container, int ++container.at(parentPath(path)).stat.cversion; response.stat = it->second.stat; response.error = Error::ZOK; + + undo = [prev_node, &container, path = path] + { + container.at(path) = prev_node; + --container.at(parentPath(path)).stat.cversion; + }; } else { response.error = Error::ZBADVERSION; } - return std::make_shared(response); + return { std::make_shared(response), undo }; } -ResponsePtr TestKeeperListRequest::process(TestKeeper::Container & container, int64_t) const +std::pair TestKeeperListRequest::process(TestKeeper::Container & container, int64_t) const { ListResponse response; @@ -344,18 +380,22 @@ ResponsePtr TestKeeperListRequest::process(TestKeeper::Container & container, in path_prefix += '/'; /// Fairly inefficient. 
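The common thread in the TestKeeper changes above: each mutating request now returns a `(response, undo)` pair instead of relying on a wholesale container copy, and TestKeeperMultiRequest replays the collected undo closures in reverse order on the first failure (the same replay also runs when a sub-response carries a non-ZOK error). A minimal standalone sketch of that rollback pattern, where `Op` is any callable that mutates shared state and returns its own `Undo` (names here are illustrative):

```cpp
#include <functional>
#include <vector>

using Undo = std::function<void()>;

// Apply a batch of operations atomically: on the first failure, revert the
// ones that already succeeded, newest first, then rethrow.
template <typename Op>
void applyAll(const std::vector<Op> & ops)
{
    std::vector<Undo> undo_actions;
    try
    {
        for (const auto & op : ops)
            undo_actions.emplace_back(op());  // each op returns how to revert itself
    }
    catch (...)
    {
        for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
            if (*it)      // read-only ops may return an empty Undo
                (*it)();
        throw;
    }
}
```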
- for (auto child_it = container.upper_bound(path_prefix); child_it != container.end() && startsWith(child_it->first, path_prefix); ++child_it) + for (auto child_it = container.upper_bound(path_prefix); + child_it != container.end() && startsWith(child_it->first, path_prefix); + ++child_it) + { if (parentPath(child_it->first) == path) response.names.emplace_back(baseName(child_it->first)); + } response.stat = it->second.stat; response.error = Error::ZOK; } - return std::make_shared(response); + return { std::make_shared(response), {} }; } -ResponsePtr TestKeeperCheckRequest::process(TestKeeper::Container & container, int64_t) const +std::pair TestKeeperCheckRequest::process(TestKeeper::Container & container, int64_t) const { CheckResponse response; auto it = container.find(path); @@ -372,38 +412,44 @@ ResponsePtr TestKeeperCheckRequest::process(TestKeeper::Container & container, i response.error = Error::ZOK; } - return std::make_shared(response); + return { std::make_shared(response), {} }; } -ResponsePtr TestKeeperMultiRequest::process(TestKeeper::Container & container, int64_t zxid) const +std::pair TestKeeperMultiRequest::process(TestKeeper::Container & container, int64_t zxid) const { MultiResponse response; response.responses.reserve(requests.size()); - - /// Fairly inefficient. - auto container_copy = container; + std::vector undo_actions; try { for (const auto & request : requests) { const TestKeeperRequest & concrete_request = dynamic_cast(*request); - auto cur_response = concrete_request.process(container, zxid); + auto [ cur_response, undo_action ] = concrete_request.process(container, zxid); response.responses.emplace_back(cur_response); if (cur_response->error != Error::ZOK) { response.error = cur_response->error; - container = container_copy; - return std::make_shared(response); + + for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it) + if (*it) + (*it)(); + + return { std::make_shared(response), {} }; } + else + undo_actions.emplace_back(std::move(undo_action)); } response.error = Error::ZOK; - return std::make_shared(response); + return { std::make_shared(response), {} }; } catch (...) { - container = container_copy; + for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it) + if (*it) + (*it)(); throw; } } @@ -476,7 +522,7 @@ void TestKeeper::processingThread() ++zxid; info.request->addRootPath(root_path); - ResponsePtr response = info.request->process(container, zxid); + auto [response, _] = info.request->process(container, zxid); if (response->error == Error::ZOK) info.request->processWatches(watches, list_watches); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 8a40868e954..5a53186715c 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -372,6 +372,7 @@ struct Settings : public SettingsCollection M(SettingBool, optimize_duplicate_order_by_and_distinct, true, "Remove duplicate ORDER BY and DISTINCT if it's possible", 0) \ M(SettingBool, optimize_redundant_functions_in_order_by, true, "Remove functions from ORDER BY if its argument is also in ORDER BY", 0) \ M(SettingBool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. Currently it's not beneficial for numeric types.", 0) \ + M(SettingBool, optimize_monotonous_functions_in_order_by, true, "Replace monotonous function with its argument in ORDER BY", 0) \ M(SettingBool, allow_experimental_alter_materialized_view_structure, false, "Allow atomic alter on Materialized views. 
Work in progress.", 0) \ M(SettingBool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \ \ diff --git a/src/Core/config_core.h.in b/src/Core/config_core.h.in index 5991c12a1f2..bf52bf59975 100644 --- a/src/Core/config_core.h.in +++ b/src/Core/config_core.h.in @@ -10,3 +10,4 @@ #cmakedefine01 USE_INTERNAL_LLVM_LIBRARY #cmakedefine01 USE_SSL #cmakedefine01 USE_OPENCL +#cmakedefine01 USE_LDAP diff --git a/src/Dictionaries/SSDCacheDictionary.cpp b/src/Dictionaries/SSDCacheDictionary.cpp index 7b037f00ea5..1ed9bbf21d0 100644 --- a/src/Dictionaries/SSDCacheDictionary.cpp +++ b/src/Dictionaries/SSDCacheDictionary.cpp @@ -1,4 +1,4 @@ -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include "SSDCacheDictionary.h" diff --git a/src/Dictionaries/SSDComplexKeyCacheDictionary.cpp b/src/Dictionaries/SSDComplexKeyCacheDictionary.cpp index bed68c73900..826a61f7312 100644 --- a/src/Dictionaries/SSDComplexKeyCacheDictionary.cpp +++ b/src/Dictionaries/SSDComplexKeyCacheDictionary.cpp @@ -1,4 +1,4 @@ -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include "SSDComplexKeyCacheDictionary.h" diff --git a/src/Dictionaries/SSDComplexKeyCacheDictionary.h b/src/Dictionaries/SSDComplexKeyCacheDictionary.h index 665e2a17ed1..89e88982eee 100644 --- a/src/Dictionaries/SSDComplexKeyCacheDictionary.h +++ b/src/Dictionaries/SSDComplexKeyCacheDictionary.h @@ -1,6 +1,6 @@ #pragma once -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include "DictionaryStructure.h" #include "IDictionary.h" diff --git a/src/Dictionaries/registerDictionaries.cpp b/src/Dictionaries/registerDictionaries.cpp index 1d3b48b21a9..b5e98db05b4 100644 --- a/src/Dictionaries/registerDictionaries.cpp +++ b/src/Dictionaries/registerDictionaries.cpp @@ -33,7 +33,7 @@ void registerDictionaries() registerDictionaryFlat(factory); registerDictionaryHashed(factory); registerDictionaryCache(factory); -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) registerDictionarySSDCache(factory); registerDictionarySSDComplexKeyCache(factory); #endif diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index 68f5ee99a7a..f9e988211da 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -19,6 +19,7 @@ namespace ErrorCodes extern const int EXCESSIVE_ELEMENT_IN_CONFIG; extern const int PATH_ACCESS_DENIED; extern const int INCORRECT_DISK_INDEX; + extern const int CANNOT_TRUNCATE_FILE; } std::mutex DiskLocal::reservation_mutex; @@ -261,6 +262,13 @@ void DiskLocal::createHardLink(const String & src_path, const String & dst_path) DB::createHardLink(disk_path + src_path, disk_path + dst_path); } +void DiskLocal::truncateFile(const String & path, size_t size) +{ + int res = truncate((disk_path + path).c_str(), size); + if (-1 == res) + throwFromErrnoWithPath("Cannot truncate file " + path, path, ErrorCodes::CANNOT_TRUNCATE_FILE); +} + void DiskLocal::createFile(const String & path) { Poco::File(disk_path + path).createFile(); diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h index 3dab4614d5d..71c4dc0aec9 100644 --- a/src/Disks/DiskLocal.h +++ b/src/Disks/DiskLocal.h @@ -99,6 +99,8 @@ public: void createHardLink(const String & src_path, const String & dst_path) override; + void truncateFile(const String & path, size_t size) override; + const String 
getType() const override { return "local"; } private: diff --git a/src/Disks/DiskMemory.cpp b/src/Disks/DiskMemory.cpp index 3e43d159ba5..96d9e22c414 100644 --- a/src/Disks/DiskMemory.cpp +++ b/src/Disks/DiskMemory.cpp @@ -408,6 +408,17 @@ void DiskMemory::setReadOnly(const String &) throw Exception("Method setReadOnly is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED); } +void DiskMemory::truncateFile(const String & path, size_t size) +{ + std::lock_guard lock(mutex); + + auto file_it = files.find(path); + if (file_it == files.end()) + throw Exception("File '" + path + "' doesn't exist", ErrorCodes::FILE_DOESNT_EXIST); + + file_it->second.data.resize(size); +} + using DiskMemoryPtr = std::shared_ptr; diff --git a/src/Disks/DiskMemory.h b/src/Disks/DiskMemory.h index f7948019fe8..fc265ddef03 100644 --- a/src/Disks/DiskMemory.h +++ b/src/Disks/DiskMemory.h @@ -90,6 +90,8 @@ public: void createHardLink(const String & src_path, const String & dst_path) override; + void truncateFile(const String & path, size_t size) override; + const String getType() const override { return "memory"; } private: diff --git a/src/Disks/IDisk.cpp b/src/Disks/IDisk.cpp index 837ddf1b6b2..9d7424d1286 100644 --- a/src/Disks/IDisk.cpp +++ b/src/Disks/IDisk.cpp @@ -8,6 +8,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + bool IDisk::isDirectoryEmpty(const String & path) { return !iterateDirectory(path)->isValid(); @@ -42,4 +47,9 @@ void IDisk::copy(const String & from_path, const std::shared_ptr & to_dis } } +void IDisk::truncateFile(const String &, size_t) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Truncate operation is not implemented for disk of type {}", getType()); +} + } diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 77a52a7a5d6..0a977feb9a1 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -172,6 +172,9 @@ public: /// Create hardlink from `src_path` to `dst_path`. virtual void createHardLink(const String & src_path, const String & dst_path) = 0; + /// Truncate file to specified size. + virtual void truncateFile(const String & path, size_t size); + /// Return disk type - "local", "s3", etc. 
virtual const String getType() const = 0; }; diff --git a/src/Formats/config_formats.h.in b/src/Formats/config_formats.h.in index 77556b91c0c..f6497b4830b 100644 --- a/src/Formats/config_formats.h.in +++ b/src/Formats/config_formats.h.in @@ -9,3 +9,5 @@ #cmakedefine01 USE_ORC #cmakedefine01 USE_ARROW #cmakedefine01 USE_PROTOBUF +#cmakedefine01 USE_MSGPACK + diff --git a/src/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h index 0ba777193c9..2b69716102a 100644 --- a/src/Functions/FunctionsExternalDictionaries.h +++ b/src/Functions/FunctionsExternalDictionaries.h @@ -29,7 +29,7 @@ #include #include #include -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include #include #endif @@ -183,13 +183,13 @@ private: !executeDispatchSimple(block, arguments, result, dict) && !executeDispatchSimple(block, arguments, result, dict) && !executeDispatchSimple(block, arguments, result, dict) && -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) !executeDispatchSimple(block, arguments, result, dict) && #endif !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchComplex(block, arguments, result, dict) && -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) !executeDispatchComplex(block, arguments, result, dict) && #endif #if !defined(ARCADIA_BUILD) @@ -339,13 +339,13 @@ private: !executeDispatch(block, arguments, result, dict) && !executeDispatch(block, arguments, result, dict) && !executeDispatch(block, arguments, result, dict) && -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) !executeDispatch(block, arguments, result, dict) && #endif !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchComplex(block, arguments, result, dict) && -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) !executeDispatchComplex(block, arguments, result, dict) && #endif #if !defined(ARCADIA_BUILD) @@ -523,13 +523,13 @@ private: !executeDispatch(block, arguments, result, dict) && !executeDispatch(block, arguments, result, dict) && !executeDispatch(block, arguments, result, dict) && -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) !executeDispatch(block, arguments, result, dict) && #endif !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchComplex(block, arguments, result, dict) && -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) !executeDispatchComplex(block, arguments, result, dict) && #endif #if !defined(ARCADIA_BUILD) @@ -863,13 +863,13 @@ private: !executeDispatch(block, arguments, result, dict) && !executeDispatch(block, arguments, result, dict) && !executeDispatch(block, arguments, result, dict) && -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) !executeDispatch(block, arguments, result, dict) && #endif !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchComplex(block, arguments, result, dict) && -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) 
!executeDispatchComplex(block, arguments, result, dict) && #endif #if !defined(ARCADIA_BUILD) @@ -1124,13 +1124,13 @@ private: !executeDispatch(block, arguments, result, dict) && !executeDispatch(block, arguments, result, dict) && !executeDispatch(block, arguments, result, dict) && -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) !executeDispatch(block, arguments, result, dict) && #endif !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchComplex(block, arguments, result, dict) && -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) !executeDispatchComplex(block, arguments, result, dict) && #endif #if !defined(ARCADIA_BUILD) diff --git a/src/IO/AIOContextPool.cpp b/src/IO/AIOContextPool.cpp index a06d76e86a2..eb9ee58b9e4 100644 --- a/src/IO/AIOContextPool.cpp +++ b/src/IO/AIOContextPool.cpp @@ -1,4 +1,4 @@ -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include #include diff --git a/src/IO/AIOContextPool.h b/src/IO/AIOContextPool.h index 15160bc7fb6..9f4047939f4 100644 --- a/src/IO/AIOContextPool.h +++ b/src/IO/AIOContextPool.h @@ -1,6 +1,6 @@ #pragma once -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include #include diff --git a/src/IO/BitHelpers.h b/src/IO/BitHelpers.h index 35fe630029c..05eac24f1b0 100644 --- a/src/IO/BitHelpers.h +++ b/src/IO/BitHelpers.h @@ -7,7 +7,7 @@ #include #include -#if defined(__OpenBSD__) || defined(__FreeBSD__) +#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined (__ANDROID__) # include #elif defined(__APPLE__) # include diff --git a/src/IO/ReadBufferAIO.cpp b/src/IO/ReadBufferAIO.cpp index 8b01b67c0c0..abf55021cfb 100644 --- a/src/IO/ReadBufferAIO.cpp +++ b/src/IO/ReadBufferAIO.cpp @@ -1,4 +1,4 @@ -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include #include diff --git a/src/IO/ReadBufferAIO.h b/src/IO/ReadBufferAIO.h index 5b2cf247a45..d476865747d 100644 --- a/src/IO/ReadBufferAIO.h +++ b/src/IO/ReadBufferAIO.h @@ -1,6 +1,6 @@ #pragma once -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include #include diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp index 472607d0e21..fd07a7f309a 100644 --- a/src/IO/ReadBufferFromS3.cpp +++ b/src/IO/ReadBufferFromS3.cpp @@ -4,6 +4,7 @@ # include # include +# include # include # include @@ -11,6 +12,12 @@ # include +namespace ProfileEvents +{ + extern const Event S3ReadMicroseconds; + extern const Event S3ReadBytes; +} + namespace DB { namespace ErrorCodes @@ -27,6 +34,7 @@ ReadBufferFromS3::ReadBufferFromS3( { } + bool ReadBufferFromS3::nextImpl() { if (!initialized) @@ -35,9 +43,17 @@ bool ReadBufferFromS3::nextImpl() initialized = true; } - if (!impl->next()) + Stopwatch watch; + auto res = impl->next(); + watch.stop(); + ProfileEvents::increment(ProfileEvents::S3ReadMicroseconds, watch.elapsedMicroseconds()); + + if (!res) return false; internal_buffer = impl->buffer(); + + ProfileEvents::increment(ProfileEvents::S3ReadBytes, internal_buffer.size()); + working_buffer = internal_buffer; return true; } diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index 2a15e3f1c14..efa402c9447 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -4,6 +4,7 @@ #include #include #include 
+#include #include #include #include @@ -14,8 +15,24 @@ #include #include +namespace ProfileEvents +{ + extern const Event S3ReadMicroseconds; + extern const Event S3ReadRequestsCount; + extern const Event S3ReadRequestsErrors; + extern const Event S3ReadRequestsThrottling; + extern const Event S3ReadRequestsRedirects; + + extern const Event S3WriteMicroseconds; + extern const Event S3WriteRequestsCount; + extern const Event S3WriteRequestsErrors; + extern const Event S3WriteRequestsThrottling; + extern const Event S3WriteRequestsRedirects; +} + namespace DB::ErrorCodes { + extern const int NOT_IMPLEMENTED; extern const int TOO_MANY_REDIRECTS; } @@ -62,6 +79,46 @@ void PocoHTTPClient::MakeRequestInternal( auto uri = request.GetUri().GetURIString(); LOG_DEBUG(log, "Make request to: {}", uri); + enum class S3MetricType + { + Microseconds, + Count, + Errors, + Throttling, + Redirects, + + EnumSize, + }; + + auto selectMetric = [&request](S3MetricType type) + { + const ProfileEvents::Event events_map[][2] = { + {ProfileEvents::S3ReadMicroseconds, ProfileEvents::S3WriteMicroseconds}, + {ProfileEvents::S3ReadRequestsCount, ProfileEvents::S3WriteRequestsCount}, + {ProfileEvents::S3ReadRequestsErrors, ProfileEvents::S3WriteRequestsErrors}, + {ProfileEvents::S3ReadRequestsThrottling, ProfileEvents::S3WriteRequestsThrottling}, + {ProfileEvents::S3ReadRequestsRedirects, ProfileEvents::S3WriteRequestsRedirects}, + }; + + static_assert((sizeof(events_map) / sizeof(events_map[0])) == static_cast(S3MetricType::EnumSize)); + + switch (request.GetMethod()) + { + case Aws::Http::HttpMethod::HTTP_GET: + case Aws::Http::HttpMethod::HTTP_HEAD: + return events_map[static_cast(type)][0]; // Read + case Aws::Http::HttpMethod::HTTP_POST: + case Aws::Http::HttpMethod::HTTP_DELETE: + case Aws::Http::HttpMethod::HTTP_PUT: + case Aws::Http::HttpMethod::HTTP_PATCH: + return events_map[static_cast(type)][1]; // Write + } + + throw Exception("Unsupported request method", ErrorCodes::NOT_IMPLEMENTED); + }; + + ProfileEvents::increment(selectMetric(S3MetricType::Count)); + const int MAX_REDIRECT_ATTEMPTS = 10; try { @@ -112,11 +169,15 @@ void PocoHTTPClient::MakeRequestInternal( poco_request.set(header_name, header_value); Poco::Net::HTTPResponse poco_response; + + Stopwatch watch; + auto & request_body_stream = session->sendRequest(poco_request); if (request.GetContentBody()) { LOG_TRACE(log, "Writing request body."); + if (attempt > 0) /// rewind content body buffer. 
{ request.GetContentBody()->clear(); @@ -129,6 +190,9 @@ void PocoHTTPClient::MakeRequestInternal( LOG_TRACE(log, "Receiving response..."); auto & response_body_stream = session->receiveResponse(poco_response); + watch.stop(); + ProfileEvents::increment(selectMetric(S3MetricType::Microseconds), watch.elapsedMicroseconds()); + int status_code = static_cast(poco_response.getStatus()); LOG_DEBUG(log, "Response status: {}, {}", status_code, poco_response.getReason()); @@ -138,6 +202,8 @@ void PocoHTTPClient::MakeRequestInternal( uri = location; LOG_DEBUG(log, "Redirecting request to new location: {}", location); + ProfileEvents::increment(selectMetric(S3MetricType::Redirects)); + continue; } @@ -159,6 +225,15 @@ void PocoHTTPClient::MakeRequestInternal( response->SetClientErrorType(Aws::Client::CoreErrors::NETWORK_CONNECTION); response->SetClientErrorMessage(error_message); + + if (status_code == 429 || status_code == 503) + { // API throttling + ProfileEvents::increment(selectMetric(S3MetricType::Throttling)); + } + else + { + ProfileEvents::increment(selectMetric(S3MetricType::Errors)); + } } else response->GetResponseStream().SetUnderlyingStream(std::make_shared(session, response_body_stream)); @@ -173,6 +248,8 @@ void PocoHTTPClient::MakeRequestInternal( tryLogCurrentException(log, fmt::format("Failed to make request to: {}", uri)); response->SetClientErrorType(Aws::Client::CoreErrors::NETWORK_CONNECTION); response->SetClientErrorMessage(getCurrentExceptionMessage(false)); + + ProfileEvents::increment(selectMetric(S3MetricType::Errors)); } } } diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index 20ff38150eb..f5296b65a0d 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -22,6 +22,12 @@ namespace { + +const char * S3_LOGGER_TAG_NAMES[][2] = { + {"AWSClient", "AWSClient"}, + {"AWSAuthV4Signer", "AWSClient (AWSAuthV4Signer)"}, +}; + const std::pair & convertLogLevel(Aws::Utils::Logging::LogLevel log_level) { static const std::unordered_map> mapping = @@ -40,26 +46,46 @@ const std::pair & convertLogLevel(Aws::U class AWSLogger final : public Aws::Utils::Logging::LogSystemInterface { public: + AWSLogger() + { + for (auto [tag, name] : S3_LOGGER_TAG_NAMES) + tag_loggers[tag] = &Poco::Logger::get(name); + + default_logger = tag_loggers[S3_LOGGER_TAG_NAMES[0][0]]; + } + ~AWSLogger() final = default; Aws::Utils::Logging::LogLevel GetLogLevel() const final { return Aws::Utils::Logging::LogLevel::Trace; } void Log(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * format_str, ...) final // NOLINT { - const auto & [level, prio] = convertLogLevel(log_level); - LOG_IMPL(log, level, prio, "{}: {}", tag, format_str); + callLogImpl(log_level, tag, format_str); /// FIXME. Variadic arguments? 
} void LogStream(Aws::Utils::Logging::LogLevel log_level, const char * tag, const Aws::OStringStream & message_stream) final + { + callLogImpl(log_level, tag, message_stream.str().c_str()); + } + + void callLogImpl(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * message) { const auto & [level, prio] = convertLogLevel(log_level); - LOG_IMPL(log, level, prio, "{}: {}", tag, message_stream.str()); + if (tag_loggers.count(tag) > 0) + { + LOG_IMPL(tag_loggers[tag], level, prio, "{}", message); + } + else + { + LOG_IMPL(default_logger, level, prio, "{}: {}", tag, message); + } } void Flush() final {} private: - Poco::Logger * log = &Poco::Logger::get("AWSClient"); + Poco::Logger * default_logger; + std::unordered_map tag_loggers; }; class S3AuthSigner : public Aws::Client::AWSAuthV4Signer @@ -102,8 +128,10 @@ public: private: const DB::HeaderCollection headers; }; + } + namespace DB { namespace ErrorCodes diff --git a/src/IO/WriteBufferAIO.cpp b/src/IO/WriteBufferAIO.cpp index c542bed16c4..8e0224669f2 100644 --- a/src/IO/WriteBufferAIO.cpp +++ b/src/IO/WriteBufferAIO.cpp @@ -1,4 +1,4 @@ -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include #include diff --git a/src/IO/WriteBufferAIO.h b/src/IO/WriteBufferAIO.h index 4fdeac9e9b9..f514acab359 100644 --- a/src/IO/WriteBufferAIO.h +++ b/src/IO/WriteBufferAIO.h @@ -1,6 +1,6 @@ #pragma once -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include #include diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index 9d51c503bd0..bac14acb9cd 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -17,6 +17,11 @@ # include +namespace ProfileEvents +{ + extern const Event S3WriteBytes; +} + namespace DB { // S3 protocol does not allow to have multipart upload with more than 10000 parts. 
@@ -59,6 +64,8 @@ void WriteBufferFromS3::nextImpl() temporary_buffer->write(working_buffer.begin(), offset()); + ProfileEvents::increment(ProfileEvents::S3WriteBytes, offset()); + if (is_multipart) { last_part_size += offset(); diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp index 9fa560620dd..c1d4377fdff 100644 --- a/src/IO/createReadBufferFromFileBase.cpp +++ b/src/IO/createReadBufferFromFileBase.cpp @@ -1,6 +1,6 @@ #include #include -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include #endif #include @@ -24,7 +24,7 @@ std::unique_ptr createReadBufferFromFileBase( size_t estimated_size, size_t aio_threshold, size_t mmap_threshold, size_t buffer_size_, int flags_, char * existing_memory_, size_t alignment) { -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) if (aio_threshold && estimated_size >= aio_threshold) { /// Attempt to open a file with O_DIRECT diff --git a/src/IO/createWriteBufferFromFileBase.cpp b/src/IO/createWriteBufferFromFileBase.cpp index d20af3ede76..6022457f32e 100644 --- a/src/IO/createWriteBufferFromFileBase.cpp +++ b/src/IO/createWriteBufferFromFileBase.cpp @@ -1,6 +1,6 @@ #include #include -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) #include #endif #include @@ -20,7 +20,7 @@ std::unique_ptr createWriteBufferFromFileBase(const std size_t aio_threshold, size_t buffer_size_, int flags_, mode_t mode, char * existing_memory_, size_t alignment) { -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(OS_LINUX) || defined(__FreeBSD__) if (aio_threshold && estimated_size >= aio_threshold) { /// Attempt to open a file with O_DIRECT diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 512319375d5..f7abfe8950c 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -368,7 +368,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & } SetPtr prepared_set; - if (functionIsInOrGlobalInOperator(node.name)) + if (checkFunctionIsInOrGlobalInOperator(node)) { /// Let's find the type of the first argument (then getActionsImpl will be called again and will not affect anything). visit(node.arguments->children.at(0), data); @@ -445,7 +445,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & /// Select the name in the next cycle. 
argument_names.emplace_back(); } - else if (functionIsInOrGlobalInOperator(node.name) && arg == 1 && prepared_set) + else if (checkFunctionIsInOrGlobalInOperator(node) && arg == 1 && prepared_set) { ColumnWithTypeAndName column; column.type = std::make_shared(); diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 2400e374416..bcbde3ef6af 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -621,6 +621,7 @@ void Context::setConfig(const ConfigurationPtr & config) { auto lock = getLock(); shared->config = config; + shared->access_control_manager.setExternalAuthenticatorsConfig(*shared->config); } const Poco::Util::AbstractConfiguration & Context::getConfigRef() const @@ -640,6 +641,11 @@ const AccessControlManager & Context::getAccessControlManager() const return shared->access_control_manager; } +void Context::setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config) +{ + auto lock = getLock(); + shared->access_control_manager.setExternalAuthenticatorsConfig(config); +} void Context::setUsersConfig(const ConfigurationPtr & config) { diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 5a4e959229f..251e7f32311 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -245,6 +245,9 @@ public: AccessControlManager & getAccessControlManager(); const AccessControlManager & getAccessControlManager() const; + /// Sets external authenticators config (LDAP). + void setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config); + /** Take the list of users, quotas and configuration profiles from this config. * The list of users is completely replaced. * The accumulated quota values are not reset if the quota is not deleted. diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index e2884d99516..ee29d301c6b 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -180,7 +180,7 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTFunction & func, ASTPtr & as /// But if an argument is not a subquery, then deeper there may be scalar subqueries and we need to descend into them.
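/// Example (illustrative): in "x IN (SELECT id FROM t)" the subquery is the right arm of IN and is
/// treated as a set rather than executed as a scalar, while in "f((SELECT max(y) FROM t)) IN (1, 2)"
/// the left argument is an ordinary function, so we descend into it and the scalar subquery inside
/// it is still executed.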
std::vector out; - if (functionIsInOrGlobalInOperator(func.name)) + if (checkFunctionIsInOrGlobalInOperator(func)) { for (auto & child : ast->children) { diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index 5549d636a48..27294a57675 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -39,6 +39,7 @@ namespace ErrorCodes extern const int INCOMPATIBLE_TYPE_OF_JOIN; extern const int UNSUPPORTED_JOIN_KEYS; extern const int LOGICAL_ERROR; + extern const int SYNTAX_ERROR; extern const int SET_SIZE_LIMIT_EXCEEDED; extern const int TYPE_MISMATCH; } @@ -174,7 +175,7 @@ HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_s key_columns.pop_back(); if (key_columns.empty()) - throw Exception("ASOF join cannot be done without a joining column", ErrorCodes::LOGICAL_ERROR); + throw Exception("ASOF join cannot be done without a joining column", ErrorCodes::SYNTAX_ERROR); /// this is going to set up the appropriate hash table for the direct lookup part of the join /// However, this does not depend on the size of the asof join key (as that goes into the BST) diff --git a/src/Interpreters/MarkTableIdentifiersVisitor.cpp b/src/Interpreters/MarkTableIdentifiersVisitor.cpp index c7b8701c4a0..7ebe12754dc 100644 --- a/src/Interpreters/MarkTableIdentifiersVisitor.cpp +++ b/src/Interpreters/MarkTableIdentifiersVisitor.cpp @@ -35,7 +35,7 @@ void MarkTableIdentifiersMatcher::visit(ASTTableExpression & table, ASTPtr &, Da void MarkTableIdentifiersMatcher::visit(const ASTFunction & func, ASTPtr &, Data & data) { /// `IN t` can be specified, where t is a table, which is equivalent to `IN (SELECT * FROM t)`. - if (functionIsInOrGlobalInOperator(func.name)) + if (checkFunctionIsInOrGlobalInOperator(func)) { auto & ast = func.arguments->children.at(1); auto opt_name = tryGetIdentifierName(ast); diff --git a/src/Interpreters/MonotonicityCheckVisitor.h b/src/Interpreters/MonotonicityCheckVisitor.h new file mode 100644 index 00000000000..d813f9618e1 --- /dev/null +++ b/src/Interpreters/MonotonicityCheckVisitor.h @@ -0,0 +1,142 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +using Monotonicity = IFunctionBase::Monotonicity; + +/// Checks from bottom to top if function composition is monotonous +class MonotonicityCheckMatcher +{ +public: + struct Data + { + const TablesWithColumns & tables; + const Context & context; + const std::unordered_set & group_by_function_hashes; + Monotonicity monotonicity{true, true, true}; + ASTIdentifier * identifier = nullptr; + DataTypePtr arg_data_type = {}; + + void reject() { monotonicity.is_monotonic = false; } + bool isRejected() const { return !monotonicity.is_monotonic; } + + bool canOptimize(const ASTFunction & ast_function) const + { + /// if GROUP BY contains the same function ORDER BY shouldn't be optimized + auto hash = ast_function.getTreeHash(); + String key = toString(hash.first) + '_' + toString(hash.second); + if (group_by_function_hashes.count(key)) + return false; + + /// if ORDER BY contains aggregate function it shouldn't be optimized + if (AggregateFunctionFactory::instance().isAggregateFunctionName(ast_function.name)) + return false; + + return true; + } + + bool extractIdentifierAndType(const ASTFunction & ast_function) + { + if (identifier) + return true; + + identifier = ast_function.arguments->children[0]->as(); + if (!identifier) + return false; + + auto pos = 
IdentifierSemantic::getMembership(*identifier); + if (!pos) + pos = IdentifierSemantic::chooseTableColumnMatch(*identifier, tables, true); + if (!pos) + return false; + + if (auto data_type_and_name = tables[*pos].columns.tryGetByName(identifier->shortName())) + { + arg_data_type = data_type_and_name->type; + return true; + } + + return false; + } + }; + + static void visit(const ASTPtr & ast, Data & data) + { + if (const auto * ast_function = ast->as()) + visit(*ast_function, data); + } + + static void visit(const ASTFunction & ast_function, Data & data) + { + if (data.isRejected()) + return; + + /// TODO: monotonicity for functions of several arguments + auto arguments = ast_function.arguments; + if (arguments->children.size() != 1) + { + data.reject(); + return; + } + + if (!data.canOptimize(ast_function)) + { + data.reject(); + return; + } + + const auto & function = FunctionFactory::instance().tryGet(ast_function.name, data.context); + if (!function) + { + data.reject(); + return; + } + + /// The first time, extract the most enclosed identifier and its data type + if (!data.arg_data_type && !data.extractIdentifierAndType(ast_function)) + { + data.reject(); + return; + } + + ColumnsWithTypeAndName args; + args.emplace_back(data.arg_data_type, "tmp"); + auto function_base = function->build(args); + + if (function_base && function_base->hasInformationAboutMonotonicity()) + { + bool is_positive = data.monotonicity.is_positive; + data.monotonicity = function_base->getMonotonicityForRange(*data.arg_data_type, Field(), Field()); + + if (!is_positive) + data.monotonicity.is_positive = !data.monotonicity.is_positive; + data.arg_data_type = function_base->getReturnType(); + } + else + data.reject(); + } + + static bool needChildVisit(const ASTPtr &, const ASTPtr &) + { + return true; + } +}; + +using MonotonicityCheckVisitor = ConstInDepthNodeVisitor; + +} diff --git a/src/Interpreters/SyntaxAnalyzer.cpp b/src/Interpreters/SyntaxAnalyzer.cpp index c874ebe1087..5b4c95956e9 100644 --- a/src/Interpreters/SyntaxAnalyzer.cpp +++ b/src/Interpreters/SyntaxAnalyzer.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -530,6 +531,46 @@ void optimizeDuplicateOrderByAndDistinct(ASTPtr & query, const Context & context DuplicateDistinctVisitor(distinct_data).visit(query); } +/// Replace monotonous functions in ORDER BY if they don't participate in the GROUP BY expression, +/// have a single argument, and are not aggregate functions. +void optimizeMonotonousFunctionsInOrderBy(ASTSelectQuery * select_query, const Context & context, + const TablesWithColumns & tables_with_columns) +{ + auto order_by = select_query->orderBy(); + if (!order_by) + return; + + std::unordered_set group_by_hashes; + if (auto group_by = select_query->groupBy()) + { + for (auto & elem : group_by->children) + { + auto hash = elem->getTreeHash(); + String key = toString(hash.first) + '_' + toString(hash.second); + group_by_hashes.insert(key); + } + } + + for (auto & child : order_by->children) + { + auto * order_by_element = child->as(); + auto & ast_func = order_by_element->children[0]; + if (!ast_func->as()) + continue; + + MonotonicityCheckVisitor::Data data{tables_with_columns, context, group_by_hashes}; + MonotonicityCheckVisitor(data).visit(ast_func); + + if (!data.isRejected()) + { + ast_func = data.identifier->clone(); + ast_func->setAlias(""); + if (!data.monotonicity.is_positive) + order_by_element->direction *= -1; + } + } +} +
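To make the rewrite concrete, here is a minimal standalone sketch (simplified types and names, assumed for illustration; not the actual ClickHouse classes) of how a bottom-up chain of monotonicity results folds into one sort direction: each monotonic but decreasing function flips the effective direction, and any non-monotonic function aborts the rewrite, so e.g. ORDER BY negate(exp(x)) can become ORDER BY x DESC.

#include <cstdio>
#include <vector>

struct Monotonicity { bool is_monotonic; bool is_positive; };

/// Fold a chain of per-function monotonicities into a direction multiplier:
/// returns 0 if the composition is not monotonic (rewrite impossible),
/// otherwise +1 or -1 to apply to the original ORDER BY direction.
int foldDirection(const std::vector<Monotonicity> & chain, int direction)
{
    for (const auto & m : chain)
    {
        if (!m.is_monotonic)
            return 0;
        if (!m.is_positive)
            direction *= -1;  /// a decreasing function reverses the order
    }
    return direction;
}

int main()
{
    /// exp(x) is increasing, negate(x) is decreasing => the composition is decreasing.
    std::vector<Monotonicity> chain{{true, true}, {true, false}};
    std::printf("%d\n", foldDirection(chain, /*direction=*/1));  /// prints -1: ORDER BY x DESC
}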
/// If ORDER BY has an argument x followed by f(x), transform it to ORDER BY x. /// Optimize ORDER BY x, y, f(x), g(x, y), f(h(x)), t(f(x), g(x)) into ORDER BY x, y /// in case f(), g(), h(), t() are deterministic (in the scope of the query). @@ -1067,6 +1108,10 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyzeSelect( if (settings.optimize_redundant_functions_in_order_by) optimizeRedundantFunctionsInOrderBy(select_query, context); + /// Replace monotonous functions with their argument + if (settings.optimize_monotonous_functions_in_order_by) + optimizeMonotonousFunctionsInOrderBy(select_query, context, tables_with_columns); + /// Remove duplicated elements from LIMIT BY clause. optimizeLimitBy(select_query); diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index 7d533a3bab7..5d88e200f6c 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -233,46 +233,56 @@ void SystemLog::add(const LogElement & element) /// Otherwise the tests like 01017_uniqCombined_memory_usage.sql will be flaky. auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock(); - std::lock_guard lock(mutex); + /// Should not log messages under mutex. + bool queue_is_half_full = false; - if (is_shutdown) - return; - - if (queue.size() == DBMS_SYSTEM_LOG_QUEUE_SIZE / 2) { - // The queue more than half full, time to flush. - // We only check for strict equality, because messages are added one - // by one, under exclusive lock, so we will see each message count. - // It is enough to only wake the flushing thread once, after the message - // count increases past half available size. - const uint64_t queue_end = queue_front_index + queue.size(); - if (requested_flush_before < queue_end) - requested_flush_before = queue_end; + std::unique_lock lock(mutex); - flush_event.notify_all(); - LOG_INFO(log, "Queue is half full for system log '{}'.", demangle(typeid(*this).name())); - } + if (is_shutdown) + return; - if (queue.size() >= DBMS_SYSTEM_LOG_QUEUE_SIZE) - { - // Ignore all further entries until the queue is flushed. - // Log a message about that. Don't spam it -- this might be especially - // problematic in case of trace log. Remember what the front index of the - // queue was when we last logged the message. If it changed, it means the - // queue was flushed, and we can log again. - if (queue_front_index != logged_queue_full_at_index) + if (queue.size() == DBMS_SYSTEM_LOG_QUEUE_SIZE / 2) { - logged_queue_full_at_index = queue_front_index; + queue_is_half_full = true; - // TextLog sets its logger level to 0, so this log is a noop and - // there is no recursive logging. - LOG_ERROR(log, "Queue is full for system log '{}' at {}", demangle(typeid(*this).name()), queue_front_index); + // The queue is more than half full, time to flush. + // We only check for strict equality, because messages are added one + // by one, under exclusive lock, so we will see each message count. + // It is enough to only wake the flushing thread once, after the message + // count increases past half available size. + const uint64_t queue_end = queue_front_index + queue.size(); + if (requested_flush_before < queue_end) + requested_flush_before = queue_end; + + flush_event.notify_all(); } - return; + if (queue.size() >= DBMS_SYSTEM_LOG_QUEUE_SIZE) + { + // Ignore all further entries until the queue is flushed. + // Log a message about that. Don't spam it -- this might be especially + // problematic in case of trace log. Remember what the front index of the + // queue was when we last logged the message. If it changed, it means the + // queue was flushed, and we can log again. + if (queue_front_index != logged_queue_full_at_index) + { + logged_queue_full_at_index = queue_front_index; + + // TextLog sets its logger level to 0, so this log is a noop and + // there is no recursive logging. + lock.unlock(); + LOG_ERROR(log, "Queue is full for system log '{}' at {}", demangle(typeid(*this).name()), queue_front_index); + } + + return; + } + + queue.push_back(element); } - queue.push_back(element); + if (queue_is_half_full) + LOG_INFO(log, "Queue is half full for system log '{}'.", demangle(typeid(*this).name())); }
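The reworked SystemLog::add above follows a general rule worth spelling out: never emit log messages while holding the queue mutex, because the logging machinery may itself append to a system log and re-enter the lock. A minimal standalone sketch of the pattern (simplified types and illustrative names, not the actual ClickHouse classes):

#include <cstdio>
#include <mutex>
#include <vector>

static std::mutex queue_mutex;
static std::vector<int> log_queue;
static const size_t QUEUE_SIZE = 1048576;

void add(int element)
{
    bool queue_is_half_full = false;
    {
        std::unique_lock<std::mutex> lock(queue_mutex);
        if (log_queue.size() == QUEUE_SIZE / 2)
            queue_is_half_full = true;  /// only record the fact under the lock
        log_queue.push_back(element);
    }
    /// The message is emitted after the lock is released, so a logger that
    /// itself writes into this queue cannot deadlock on queue_mutex.
    if (queue_is_half_full)
        std::printf("Queue is half full\n");
}

int main()
{
    add(1);
}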
diff --git a/src/Interpreters/misc.h b/src/Interpreters/misc.h index 30379567366..094dfbbbb81 100644 --- a/src/Interpreters/misc.h +++ b/src/Interpreters/misc.h @@ -1,10 +1,16 @@ #pragma once #include +#include namespace DB { +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + inline bool functionIsInOperator(const std::string & name) { return name == "in" || name == "notIn" || name == "nullIn" || name == "notNullIn"; @@ -30,4 +36,19 @@ inline bool functionIsDictGet(const std::string & name) return startsWith(name, "dictGet") || (name == "dictHas") || (name == "dictIsIn"); } +inline bool checkFunctionIsInOrGlobalInOperator(const ASTFunction & func) +{ + if (functionIsInOrGlobalInOperator(func.name)) + { + size_t num_arguments = func.arguments->children.size(); + if (num_arguments != 2) + throw Exception("Wrong number of arguments passed to function in. Expected: 2, passed: " + std::to_string(num_arguments), + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + + return true; + } + + return false; +} + } diff --git a/src/Parsers/ASTCreateUserQuery.cpp b/src/Parsers/ASTCreateUserQuery.cpp index 73993df7fd3..0ccc2232734 100644 --- a/src/Parsers/ASTCreateUserQuery.cpp +++ b/src/Parsers/ASTCreateUserQuery.cpp @@ -33,27 +33,32 @@ namespace } String authentication_type_name = Authentication::TypeInfo::get(authentication_type).name; - std::optional password; + std::optional by_value; - if (show_password) + if (show_password || authentication_type == Authentication::LDAP_SERVER) { switch (authentication_type) { case Authentication::PLAINTEXT_PASSWORD: { - password = authentication.getPassword(); + by_value = authentication.getPassword(); break; } case Authentication::SHA256_PASSWORD: { authentication_type_name = "sha256_hash"; - password = authentication.getPasswordHashHex(); + by_value = authentication.getPasswordHashHex(); break; } case Authentication::DOUBLE_SHA1_PASSWORD: { authentication_type_name = "double_sha1_hash"; - password = authentication.getPasswordHashHex(); + by_value = authentication.getPasswordHashHex(); + break; + } + case Authentication::LDAP_SERVER: + { + by_value = authentication.getServerName(); break; } @@ -65,9 +70,9 @@ namespace settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " IDENTIFIED WITH " << authentication_type_name << (settings.hilite ? IAST::hilite_none : ""); - if (password) + if (by_value) settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " BY " << (settings.hilite ?
IAST::hilite_none : "") - << quoteString(*password); + << quoteString(*by_value); } diff --git a/src/Parsers/ASTCreateUserQuery.h b/src/Parsers/ASTCreateUserQuery.h index 5c8f8fcf563..1fbe7eeeb4c 100644 --- a/src/Parsers/ASTCreateUserQuery.h +++ b/src/Parsers/ASTCreateUserQuery.h @@ -13,14 +13,14 @@ class ASTRolesOrUsersSet; class ASTSettingsProfileElements; /** CREATE USER [IF NOT EXISTS | OR REPLACE] name - * [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}] + * [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash|ldap_server}] BY {'password'|'hash'|'server_name'}] * [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...]] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] * * ALTER USER [IF EXISTS] name * [RENAME TO new_name] - * [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}] + * [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash|ldap_server}] BY {'password'|'hash'|'server_name'}] * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] diff --git a/src/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp index 4641c94d592..98faa0b15eb 100644 --- a/src/Parsers/ParserCreateUserQuery.cpp +++ b/src/Parsers/ParserCreateUserQuery.cpp @@ -49,6 +49,7 @@ namespace std::optional type; bool expect_password = false; bool expect_hash = false; + bool expect_server_name = false; if (ParserKeyword{"WITH"}.ignore(pos, expected)) { @@ -57,7 +58,12 @@ namespace if (ParserKeyword{Authentication::TypeInfo::get(check_type).raw_name}.ignore(pos, expected)) { type = check_type; - expect_password = (check_type != Authentication::NO_PASSWORD); + + if (check_type == Authentication::LDAP_SERVER) + expect_server_name = true; + else if (check_type != Authentication::NO_PASSWORD) + expect_password = true; + break; } } @@ -85,21 +91,23 @@ namespace expect_password = true; } - String password; - if (expect_password || expect_hash) + String value; + if (expect_password || expect_hash || expect_server_name) { ASTPtr ast; if (!ParserKeyword{"BY"}.ignore(pos, expected) || !ParserStringLiteral{}.parse(pos, ast, expected)) return false; - password = ast->as().value.safeGet(); + value = ast->as().value.safeGet(); } authentication = Authentication{*type}; if (expect_password) - authentication.setPassword(password); + authentication.setPassword(value); else if (expect_hash) - authentication.setPasswordHashHex(password); + authentication.setPasswordHashHex(value); + else if (expect_server_name) + authentication.setServerName(value); return true; }); diff --git a/src/Parsers/ParserCreateUserQuery.h b/src/Parsers/ParserCreateUserQuery.h index 1628f5ea5b9..69100d1211d 100644 --- a/src/Parsers/ParserCreateUserQuery.h +++ b/src/Parsers/ParserCreateUserQuery.h @@ -7,13 +7,13 @@ namespace DB { /** Parses queries like * CREATE USER [IF NOT EXISTS | OR 
REPLACE] name - * [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}] + * [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash|ldap_server}] BY {'password'|'hash'|'server_name'}] * [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] * * ALTER USER [IF EXISTS] name * [RENAME TO new_name] - * [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}] + * [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash|ldap_server}] BY {'password'|'hash'|'server_name'}] * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ diff --git a/src/Parsers/ParserTablesInSelectQuery.cpp b/src/Parsers/ParserTablesInSelectQuery.cpp index 7e84925b203..a13baf69420 100644 --- a/src/Parsers/ParserTablesInSelectQuery.cpp +++ b/src/Parsers/ParserTablesInSelectQuery.cpp @@ -103,6 +103,20 @@ bool ParserArrayJoin::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } +void ParserTablesInSelectQueryElement::parseJoinStrictness(Pos & pos, ASTTableJoin & table_join) +{ + if (ParserKeyword("ANY").ignore(pos)) + table_join.strictness = ASTTableJoin::Strictness::Any; + else if (ParserKeyword("ALL").ignore(pos)) + table_join.strictness = ASTTableJoin::Strictness::All; + else if (ParserKeyword("ASOF").ignore(pos)) + table_join.strictness = ASTTableJoin::Strictness::Asof; + else if (ParserKeyword("SEMI").ignore(pos)) + table_join.strictness = ASTTableJoin::Strictness::Semi; + else if (ParserKeyword("ANTI").ignore(pos) || ParserKeyword("ONLY").ignore(pos)) + table_join.strictness = ASTTableJoin::Strictness::Anti; +} + bool ParserTablesInSelectQueryElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { auto res = std::make_shared(); @@ -131,19 +145,12 @@ bool ParserTablesInSelectQueryElement::parseImpl(Pos & pos, ASTPtr & node, Expec else if (ParserKeyword("LOCAL").ignore(pos)) table_join->locality = ASTTableJoin::Locality::Local; - if (ParserKeyword("ANY").ignore(pos)) - table_join->strictness = ASTTableJoin::Strictness::Any; - else if (ParserKeyword("ALL").ignore(pos)) - table_join->strictness = ASTTableJoin::Strictness::All; - else if (ParserKeyword("ASOF").ignore(pos)) - table_join->strictness = ASTTableJoin::Strictness::Asof; - else if (ParserKeyword("SEMI").ignore(pos)) - table_join->strictness = ASTTableJoin::Strictness::Semi; - else if (ParserKeyword("ANTI").ignore(pos) || ParserKeyword("ONLY").ignore(pos)) - table_join->strictness = ASTTableJoin::Strictness::Anti; - else - table_join->strictness = ASTTableJoin::Strictness::Unspecified; + table_join->strictness = ASTTableJoin::Strictness::Unspecified; + /// Legacy: allow JOIN type before JOIN kind + parseJoinStrictness(pos, *table_join); + + bool no_kind = false; if (ParserKeyword("INNER").ignore(pos)) table_join->kind = ASTTableJoin::Kind::Inner; else if (ParserKeyword("LEFT").ignore(pos)) @@ -155,6 +162,20 @@ bool 
ParserTablesInSelectQueryElement::parseImpl(Pos & pos, ASTPtr & node, Expec else if (ParserKeyword("CROSS").ignore(pos)) table_join->kind = ASTTableJoin::Kind::Cross; else + no_kind = true; + + /// Standard position: JOIN type after JOIN kind + parseJoinStrictness(pos, *table_join); + + /// Optional OUTER keyword for outer joins. + if (table_join->kind == ASTTableJoin::Kind::Left + || table_join->kind == ASTTableJoin::Kind::Right + || table_join->kind == ASTTableJoin::Kind::Full) + { + ParserKeyword("OUTER").ignore(pos); + } + + if (no_kind) { /// Use INNER by default as in another DBMS. if (table_join->strictness == ASTTableJoin::Strictness::Semi || @@ -172,14 +193,6 @@ bool ParserTablesInSelectQueryElement::parseImpl(Pos & pos, ASTPtr & node, Expec (table_join->kind != ASTTableJoin::Kind::Left && table_join->kind != ASTTableJoin::Kind::Right)) throw Exception("SEMI|ANTI JOIN should be LEFT or RIGHT.", ErrorCodes::SYNTAX_ERROR); - /// Optional OUTER keyword for outer joins. - if (table_join->kind == ASTTableJoin::Kind::Left - || table_join->kind == ASTTableJoin::Kind::Right - || table_join->kind == ASTTableJoin::Kind::Full) - { - ParserKeyword("OUTER").ignore(pos); - } - if (!ParserKeyword("JOIN").ignore(pos, expected)) return false; } diff --git a/src/Parsers/ParserTablesInSelectQuery.h b/src/Parsers/ParserTablesInSelectQuery.h index 82e4c3c171c..9e5b591ccbe 100644 --- a/src/Parsers/ParserTablesInSelectQuery.h +++ b/src/Parsers/ParserTablesInSelectQuery.h @@ -6,6 +6,8 @@ namespace DB { +struct ASTTableJoin; + /** List of single or multiple JOIN-ed tables or subqueries in SELECT query, with ARRAY JOINs and SAMPLE, FINAL modifiers. */ class ParserTablesInSelectQuery : public IParserBase @@ -27,6 +29,8 @@ protected: private: bool is_first; + + static void parseJoinStrictness(Pos & pos, ASTTableJoin & table_join); }; diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp index 4581a82f06e..6eb9d1d037c 100644 --- a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp @@ -1,5 +1,8 @@ -#include #include + +#if USE_MSGPACK + +#include #include #include @@ -209,3 +212,15 @@ void registerInputFormatProcessorMsgPack(FormatFactory & factory) } } + +#else + +namespace DB +{ +class FormatFactory; +void registerInputFormatProcessorMsgPack(FormatFactory &) +{ +} +} + +#endif diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.h b/src/Processors/Formats/Impl/MsgPackRowInputFormat.h index 454d42fae3d..ac44772929a 100644 --- a/src/Processors/Formats/Impl/MsgPackRowInputFormat.h +++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.h @@ -1,5 +1,13 @@ #pragma once +#if !defined(ARCADIA_BUILD) +# include "config_formats.h" +# include "config_core.h" +#endif + + +#if USE_MSGPACK + #include #include #include @@ -63,3 +71,5 @@ private: }; } + +#endif diff --git a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp index cef7b001505..cc0a5f297ea 100644 --- a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp @@ -1,4 +1,7 @@ #include + +#if USE_MSGPACK + #include #include @@ -144,8 +147,10 @@ void MsgPackRowOutputFormat::write(const Columns & columns, size_t row_num) } } + void registerOutputFormatProcessorMsgPack(FormatFactory & factory) { + factory.registerOutputFormatProcessor("MsgPack", []( WriteBuffer & buf, const Block & sample, @@ -157,3 +162,15 
@@ void registerOutputFormatProcessorMsgPack(FormatFactory & factory) } } + +#else + +namespace DB +{ +class FormatFactory; +void registerOutputFormatProcessorMsgPack(FormatFactory &) +{ +} +} + +#endif diff --git a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h index 351920eb7c8..00bdfcc21cf 100644 --- a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h +++ b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h @@ -1,5 +1,12 @@ #pragma once +#if !defined(ARCADIA_BUILD) +# include "config_formats.h" +# include "config_core.h" +#endif + +#if USE_MSGPACK + #include #include #include @@ -26,3 +33,5 @@ private: }; } + +#endif diff --git a/src/Server/MySQLHandler.cpp b/src/Server/MySQLHandler.cpp index ed7f721e0b6..961cf1286cb 100644 --- a/src/Server/MySQLHandler.cpp +++ b/src/Server/MySQLHandler.cpp @@ -230,7 +230,7 @@ void MySQLHandler::authenticate(const String & user_name, const String & auth_pl // For compatibility with JavaScript MySQL client, Native41 authentication plugin is used when possible (if password is specified using double SHA1). Otherwise SHA256 plugin is used. auto user = connection_context.getAccessControlManager().read(user_name); const DB::Authentication::Type user_auth_type = user->authentication.getType(); - if (user_auth_type != DB::Authentication::DOUBLE_SHA1_PASSWORD && user_auth_type != DB::Authentication::PLAINTEXT_PASSWORD && user_auth_type != DB::Authentication::NO_PASSWORD) + if (user_auth_type == DB::Authentication::SHA256_PASSWORD) { authPluginSSL(); } diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index efd0a71b18f..3a6dd86e63e 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -328,8 +328,14 @@ bool StorageLiveView::getNewBlocks() BlocksPtr new_blocks = std::make_shared(); BlocksMetadataPtr new_blocks_metadata = std::make_shared(); - mergeable_blocks = collectMergeableBlocks(*live_view_context); - Pipes from = blocksToPipes(mergeable_blocks->blocks, mergeable_blocks->sample_block); + /// We can't set mergeable_blocks here or anywhere else outside the writeIntoLiveView function: + /// there could be a race condition when a new block has been inserted into the source table + /// by PushingToViewsBlockOutputStream and this method runs before writeIntoLiveView is called, + /// in which case the same block would be added to mergeable_blocks twice and the inserted + /// data would be duplicated + auto new_mergeable_blocks = collectMergeableBlocks(*live_view_context); + Pipes from = blocksToPipes(new_mergeable_blocks->blocks, new_mergeable_blocks->sample_block); BlockInputStreamPtr data = completeQuery(std::move(from)); while (Block block = data->read()) diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 5905272d827..79bbc0e7216 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -1125,6 +1125,83 @@ std::optional KeyCondition::applyMonotonicFunctionsChainToRange( return key_range; }
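Before the function itself, a minimal standalone sketch (the enum and names mirror the code below; everything else is simplified and assumed for illustration) of the rule it implements: walking the primary-key columns in order, the per-column constraints must be a run of POINTs, then at most one RANGE, then only UNKNOWNs. For a primary key (a, b), "a = 1 AND b BETWEEN 5 AND 10" is one continuous range of the key, while "a > 1 AND b = 5" is not, because the b segment restarts inside every value of a.

#include <cstdio>
#include <vector>

enum Constraint { POINT, RANGE, UNKNOWN };  /// ordered from strongest to weakest

bool isExactContinuousRange(const std::vector<Constraint> & constraints)
{
    Constraint min_constraint = constraints[0];
    if (min_constraint > RANGE)
        return false;  /// the first key column is unconstrained
    for (size_t i = 1; i < constraints.size(); ++i)
    {
        if (constraints[i] < min_constraint)
            return false;  /// e.g. POINT after RANGE: not a single contiguous segment
        if (constraints[i] == RANGE && min_constraint == RANGE)
            return false;  /// more than one RANGE column
        min_constraint = constraints[i];
    }
    return true;
}

int main()
{
    std::printf("%d\n", isExactContinuousRange({POINT, RANGE}));  /// 1: a = 1 AND b BETWEEN 5 AND 10
    std::printf("%d\n", isExactContinuousRange({RANGE, POINT}));  /// 0: a > 1 AND b = 5
}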
+// Returns whether the condition is one continuous range of the primary key, +// where every field is matched by a range or a single-element set. +// This allows using a more efficient lookup with no extra reads. +bool KeyCondition::matchesExactContinuousRange() const +{ + // Not implemented yet. + if (hasMonotonicFunctionsChain()) + return false; + + enum Constraint + { + POINT, + RANGE, + UNKNOWN, + }; + + std::vector column_constraints(key_columns.size(), Constraint::UNKNOWN); + + for (const auto & element : rpn) + { + if (element.function == RPNElement::Function::FUNCTION_AND) + { + continue; + } + + if (element.function == RPNElement::Function::FUNCTION_IN_SET && element.set_index && element.set_index->size() == 1) + { + column_constraints[element.key_column] = Constraint::POINT; + continue; + } + + if (element.function == RPNElement::Function::FUNCTION_IN_RANGE) + { + if (element.range.left == element.range.right) + { + column_constraints[element.key_column] = Constraint::POINT; + } + if (column_constraints[element.key_column] != Constraint::POINT) + { + column_constraints[element.key_column] = Constraint::RANGE; + } + continue; + } + + if (element.function == RPNElement::Function::FUNCTION_UNKNOWN) + { + continue; + } + + return false; + } + + auto min_constraint = column_constraints[0]; + + if (min_constraint > Constraint::RANGE) + { + return false; + } + + for (size_t i = 1; i < key_columns.size(); ++i) + { + if (column_constraints[i] < min_constraint) + { + return false; + } + + if (column_constraints[i] == Constraint::RANGE && min_constraint == Constraint::RANGE) + { + return false; + } + + min_constraint = column_constraints[i]; + } + + return true; +} + BoolMask KeyCondition::checkInHyperrectangle( const std::vector & hyperrectangle, const DataTypes & data_types) const diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 16197b9fa69..a37af2d677b 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -309,6 +309,8 @@ public: MonotonicFunctionsChain & functions, DataTypePtr current_type); + bool matchesExactContinuousRange() const; + private: /// The expression is stored as Reverse Polish Notation. struct RPNElement diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 4eee1f1fccb..306bcd9000a 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -1292,7 +1292,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( const MergeTreeData::DataPartPtr & part, const StorageMetadataPtr & metadata_snapshot, const KeyCondition & key_condition, - const Settings & settings) + const Settings & settings) const { MarkRanges res; @@ -1306,14 +1306,73 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( /// If index is not used. if (key_condition.alwaysUnknownOrTrue()) { + LOG_TRACE(log, "Not using index on part {}", part->name); + if (has_final_mark) res.push_back(MarkRange(0, marks_count - 1)); else res.push_back(MarkRange(0, marks_count)); + + return res; + } + + size_t used_key_size = key_condition.getMaxKeyColumn() + 1; + + std::function create_field_ref; + /// If there are no monotonic functions, there is no need to save the block reference. + /// Passing an explicit field to FieldRef allows optimizing ranges and shows better performance.
+ const auto & primary_key = metadata_snapshot->getPrimaryKey(); + if (key_condition.hasMonotonicFunctionsChain()) + { + auto index_block = std::make_shared(); + for (size_t i = 0; i < used_key_size; ++i) + index_block->insert({index[i], primary_key.data_types[i], primary_key.column_names[i]}); + + create_field_ref = [index_block](size_t row, size_t column, FieldRef & field) + { + field = {index_block.get(), row, column}; + }; } else { - size_t used_key_size = key_condition.getMaxKeyColumn() + 1; + create_field_ref = [&index](size_t row, size_t column, FieldRef & field) + { + index[column]->get(row, field); + }; + } + + /// NOTE Creating temporary Field objects to pass to KeyCondition. + std::vector index_left(used_key_size); + std::vector index_right(used_key_size); + + auto may_be_true_in_range = [&](MarkRange & range) + { + if (range.end == marks_count && !has_final_mark) + { + for (size_t i = 0; i < used_key_size; ++i) + create_field_ref(range.begin, i, index_left[i]); + + return key_condition.mayBeTrueAfter( + used_key_size, index_left.data(), primary_key.data_types); + } + + if (has_final_mark && range.end == marks_count) + range.end -= 1; /// Remove final empty mark. It's useful only for primary key condition. + + for (size_t i = 0; i < used_key_size; ++i) + { + create_field_ref(range.begin, i, index_left[i]); + create_field_ref(range.end, i, index_right[i]); + } + + return key_condition.mayBeTrueInRange( + used_key_size, index_left.data(), index_right.data(), primary_key.data_types); + }; + + if (!key_condition.matchesExactContinuousRange()) + { + // Do exclusion search, where we drop ranges that do not match + size_t min_marks_for_seek = roundRowsOrBytesToMarks( settings.merge_tree_min_rows_for_seek, settings.merge_tree_min_bytes_for_seek, @@ -1321,69 +1380,22 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( part->index_granularity_info.index_granularity_bytes); /** There will always be disjoint suspicious segments on the stack, the leftmost one at the top (back). - * At each step, take the left segment and check if it fits. - * If fits, split it into smaller ones and put them on the stack. If not, discard it. - * If the segment is already of one mark length, add it to response and discard it. - */ + * At each step, take the left segment and check if it fits. + * If fits, split it into smaller ones and put them on the stack. If not, discard it. + * If the segment is already of one mark length, add it to response and discard it. + */ std::vector ranges_stack = { {0, marks_count} }; - std::function create_field_ref; - /// If there are no monotonic functions, there is no need to save block reference. - /// Passing explicit field to FieldRef allows to optimize ranges and shows better performance. - const auto & primary_key = metadata_snapshot->getPrimaryKey(); - if (key_condition.hasMonotonicFunctionsChain()) - { - auto index_block = std::make_shared(); - for (size_t i = 0; i < used_key_size; ++i) - index_block->insert({index[i], primary_key.data_types[i], primary_key.column_names[i]}); - - create_field_ref = [index_block](size_t row, size_t column, FieldRef & field) - { - field = {index_block.get(), row, column}; - }; - } - else - { - create_field_ref = [&index](size_t row, size_t column, FieldRef & field) - { - index[column]->get(row, field); - }; - } - - /// NOTE Creating temporary Field objects to pass to KeyCondition. 
- std::vector index_left(used_key_size); - std::vector index_right(used_key_size); + size_t steps = 0; while (!ranges_stack.empty()) { MarkRange range = ranges_stack.back(); ranges_stack.pop_back(); - bool may_be_true; - if (range.end == marks_count && !has_final_mark) - { - for (size_t i = 0; i < used_key_size; ++i) - create_field_ref(range.begin, i, index_left[i]); + steps++; - may_be_true = key_condition.mayBeTrueAfter( - used_key_size, index_left.data(), primary_key.data_types); - } - else - { - if (has_final_mark && range.end == marks_count) - range.end -= 1; /// Remove final empty mark. It's useful only for primary key condition. - - for (size_t i = 0; i < used_key_size; ++i) - { - create_field_ref(range.begin, i, index_left[i]); - create_field_ref(range.end, i, index_right[i]); - } - - may_be_true = key_condition.mayBeTrueInRange( - used_key_size, index_left.data(), index_right.data(), primary_key.data_types); - } - - if (!may_be_true) + if (!may_be_true_in_range(range)) continue; if (range.end == range.begin + 1) @@ -1406,6 +1418,76 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( ranges_stack.emplace_back(range.begin, end); } } + + LOG_TRACE(log, "Used generic exclusion search over index for part {} with {} steps", part->name, steps); + } + else + { + // Do inclusion search, where we only look for one range + + size_t steps = 0; + + auto find_leaf = [&](bool left) -> std::optional + { + std::vector stack = {}; + + MarkRange range = {0, marks_count}; + + steps++; + + if (may_be_true_in_range(range)) + stack.emplace_back(range.begin, range.end); + + while (!stack.empty()) + { + range = stack.back(); + stack.pop_back(); + + if (range.end == range.begin + 1) + { + if (left) + return range.begin; + else + return range.end; + } + else + { + std::vector check_order = {}; + + MarkRange left_range = {range.begin, (range.begin + range.end) / 2}; + MarkRange right_range = {(range.begin + range.end) / 2, range.end}; + + if (left) + { + check_order.emplace_back(left_range.begin, left_range.end); + check_order.emplace_back(right_range.begin, right_range.end); + } + else + { + check_order.emplace_back(right_range.begin, right_range.end); + check_order.emplace_back(left_range.begin, left_range.end); + } + + steps++; + + if (may_be_true_in_range(check_order[0])) + { + stack.emplace_back(check_order[0].begin, check_order[0].end); + continue; + } + + stack.emplace_back(check_order[1].begin, check_order[1].end); + } + } + + return std::nullopt; + }; + + auto left_leaf = find_leaf(true); + if (left_leaf) + res.emplace_back(left_leaf.value(), find_leaf(false).value()); + + LOG_TRACE(log, "Used optimized inclusion search over index for part {} with {} steps", part->name, steps); } return res; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index 831b690ec62..52d00546a05 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -95,11 +95,11 @@ private: const KeyCondition & key_condition, const Settings & settings) const; - static MarkRanges markRangesFromPKRange( + MarkRanges markRangesFromPKRange( const MergeTreeData::DataPartPtr & part, const StorageMetadataPtr & metadata_snapshot, const KeyCondition & key_condition, - const Settings & settings); + const Settings & settings) const; MarkRanges filterMarksUsingIndex( MergeTreeIndexPtr index_helper,
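The inclusion search above is the core of the optimization: when the key condition describes one exact continuous range, the answer is a single mark range, so instead of the generic exclusion scan the code binary-descends to the leftmost and rightmost matching marks in O(log marks) predicate checks. A minimal standalone sketch of that descent (simplified: no explicit stack, an assumed toy predicate; not the actual ClickHouse types):

#include <cstdio>
#include <functional>
#include <optional>

struct MarkRange { size_t begin; size_t end; };

std::optional<size_t> findLeaf(size_t marks_count, bool left,
                               const std::function<bool(MarkRange)> & may_be_true_in_range)
{
    MarkRange range{0, marks_count};
    if (!may_be_true_in_range(range))
        return std::nullopt;
    while (range.end - range.begin > 1)
    {
        const size_t middle = (range.begin + range.end) / 2;
        /// Prefer the half closer to the wanted edge; otherwise take the other half.
        MarkRange first = left ? MarkRange{range.begin, middle} : MarkRange{middle, range.end};
        MarkRange second = left ? MarkRange{middle, range.end} : MarkRange{range.begin, middle};
        range = may_be_true_in_range(first) ? first : second;
    }
    return left ? range.begin : range.end;
}

int main()
{
    /// Pretend the condition may hold only in marks [40, 60).
    auto pred = [](MarkRange r) { return r.begin < 60 && r.end > 40; };
    std::printf("[%zu, %zu)\n", *findLeaf(100, true, pred), *findLeaf(100, false, pred));  /// [40, 60)
}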
diff --git a/src/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp index 39fa1d1af70..9cfc906108a 100644 --- a/src/Storages/StorageLog.cpp +++ b/src/Storages/StorageLog.cpp @@ -127,7 +127,12 @@ public: { try { - writeSuffix(); + if (!done) + { + /// Rollback partial writes. + streams.clear(); + storage.file_checker.repair(); + } } catch (...) { @@ -298,7 +303,6 @@ void LogBlockOutputStream::writeSuffix() { if (done) return; - done = true; WrittenStreams written_streams; IDataType::SerializeBinaryBulkSettings settings; @@ -323,9 +327,12 @@ void LogBlockOutputStream::writeSuffix() column_files.push_back(storage.files[name_stream.first].data_file_path); column_files.push_back(storage.marks_file_path); - storage.file_checker.update(column_files.begin(), column_files.end()); + for (const auto & file : column_files) + storage.file_checker.update(file); + storage.file_checker.save(); streams.clear(); + done = true; } @@ -427,6 +434,7 @@ StorageLog::StorageLog( const StorageID & table_id_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, + bool attach, size_t max_compress_block_size_) : IStorage(table_id_) , disk(std::move(disk_)) @@ -442,13 +450,31 @@ StorageLog::StorageLog( if (relative_path_.empty()) throw Exception("Storage " + getName() + " requires data path", ErrorCodes::INCORRECT_FILE_NAME); - /// create directories if they do not exist - disk->createDirectories(table_path); + if (!attach) + { + /// create directories if they do not exist + disk->createDirectories(table_path); + } + else + { + try + { + file_checker.repair(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } for (const auto & column : storage_metadata.getColumns().getAllPhysical()) addFiles(column.name, *column.type); marks_file_path = table_path + DBMS_STORAGE_LOG_MARKS_FILE_NAME; + + if (!attach) + for (const auto & file : files) + file_checker.setEmpty(file.second.data_file_path); } @@ -655,7 +681,7 @@ void registerStorageLog(StorageFactory & factory) return StorageLog::create( disk, args.relative_data_path, args.table_id, args.columns, args.constraints, - args.context.getSettings().max_compress_block_size); + args.attach, args.context.getSettings().max_compress_block_size); }, features); } diff --git a/src/Storages/StorageLog.h b/src/Storages/StorageLog.h index d020f906609..96acb1668e2 100644 --- a/src/Storages/StorageLog.h +++ b/src/Storages/StorageLog.h @@ -54,6 +54,7 @@ protected: const StorageID & table_id_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, + bool attach, size_t max_compress_block_size_); private: diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 4f7889d63fd..e822e0a25e7 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -4498,7 +4498,11 @@ void StorageReplicatedMergeTree::getReplicaDelays(time_t & out_absolute_delay, t } -void StorageReplicatedMergeTree::fetchPartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, const String & from_, const Context & query_context) +void StorageReplicatedMergeTree::fetchPartition( + const ASTPtr & partition, + const StorageMetadataPtr & metadata_snapshot, + const String & from_, + const Context & query_context) { String partition_id = getPartitionIDFromQuery(partition, query_context); diff --git a/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp index e55cc190f80..ae8162d5f1b 100644 --- a/src/Storages/StorageStripeLog.cpp +++ b/src/Storages/StorageStripeLog.cpp @@ -161,11 +161,12 @@
public: , lock(storage.rwlock) , data_out_file(storage.table_path + "data.bin") , data_out_compressed(storage.disk->writeFile(data_out_file, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append)) - , data_out(*data_out_compressed, CompressionCodecFactory::instance().getDefaultCodec(), storage.max_compress_block_size) + , data_out(std::make_unique( + *data_out_compressed, CompressionCodecFactory::instance().getDefaultCodec(), storage.max_compress_block_size)) , index_out_file(storage.table_path + "index.mrk") , index_out_compressed(storage.disk->writeFile(index_out_file, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append)) - , index_out(*index_out_compressed) - , block_out(data_out, 0, metadata_snapshot->getSampleBlock(), false, &index_out, storage.disk->getFileSize(data_out_file)) + , index_out(std::make_unique(*index_out_compressed)) + , block_out(*data_out, 0, metadata_snapshot->getSampleBlock(), false, index_out.get(), storage.disk->getFileSize(data_out_file)) { } @@ -173,7 +174,16 @@ public: { try { - writeSuffix(); + if (!done) + { + /// Rollback partial writes. + data_out.reset(); + data_out_compressed.reset(); + index_out.reset(); + index_out_compressed.reset(); + + storage.file_checker.repair(); + } } catch (...) { @@ -194,13 +204,14 @@ public: return; block_out.writeSuffix(); - data_out.next(); + data_out->next(); data_out_compressed->next(); - index_out.next(); + index_out->next(); index_out_compressed->next(); storage.file_checker.update(data_out_file); storage.file_checker.update(index_out_file); + storage.file_checker.save(); done = true; } @@ -212,10 +223,10 @@ private: String data_out_file; std::unique_ptr data_out_compressed; - CompressedWriteBuffer data_out; + std::unique_ptr data_out; String index_out_file; std::unique_ptr index_out_compressed; - CompressedWriteBuffer index_out; + std::unique_ptr index_out; NativeBlockOutputStream block_out; bool done = false; @@ -249,6 +260,20 @@ StorageStripeLog::StorageStripeLog( { /// create directories if they do not exist disk->createDirectories(table_path); + + file_checker.setEmpty(table_path + "data.bin"); + file_checker.setEmpty(table_path + "index.mrk"); + } + else + { + try + { + file_checker.repair(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } } } diff --git a/src/Storages/StorageTinyLog.cpp b/src/Storages/StorageTinyLog.cpp index ef8c30cacbe..b68ac6ae5f1 100644 --- a/src/Storages/StorageTinyLog.cpp +++ b/src/Storages/StorageTinyLog.cpp @@ -118,7 +118,12 @@ public: { try { - writeSuffix(); + if (!done) + { + /// Rollback partial writes. + streams.clear(); + storage.file_checker.repair(); + } } catch (...) { @@ -277,11 +282,13 @@ void TinyLogBlockOutputStream::writeSuffix() { if (done) return; - done = true; /// If nothing was written - leave the table in initial state. if (streams.empty()) + { + done = true; return; + } WrittenStreams written_streams; IDataType::SerializeBinaryBulkSettings settings; @@ -303,9 +310,12 @@ void TinyLogBlockOutputStream::writeSuffix() for (auto & pair : streams) column_files.push_back(storage.files[pair.first].data_file_path); - storage.file_checker.update(column_files.begin(), column_files.end()); + for (const auto & file : column_files) + storage.file_checker.update(file); + storage.file_checker.save(); streams.clear(); + done = true; } @@ -352,9 +362,24 @@ StorageTinyLog::StorageTinyLog( /// create directories if they do not exist disk->createDirectories(table_path); } + else + { + try + { + file_checker.repair(); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } for (const auto & col : storage_metadata.getColumns().getAllPhysical()) addFiles(col.name, *col.type); + + if (!attach) + for (const auto & file : files) + file_checker.setEmpty(file.second.data_file_path); } diff --git a/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in b/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in index 3c3b96b9cff..785e2cf5d29 100644 --- a/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in @@ -55,6 +55,7 @@ const char * auto_config_build[] "USE_HYPERSCAN", "@ENABLE_HYPERSCAN@", "USE_SIMDJSON", "@USE_SIMDJSON@", "USE_GRPC", "@USE_GRPC@", + "USE_LDAP", "@USE_LDAP@", nullptr, nullptr }; diff --git a/src/Storages/System/StorageSystemUsers.cpp b/src/Storages/System/StorageSystemUsers.cpp index 7f3fe058d9e..95a27c9ecf3 100644 --- a/src/Storages/System/StorageSystemUsers.cpp +++ b/src/Storages/System/StorageSystemUsers.cpp @@ -12,6 +12,10 @@ #include #include #include +#include +#include +#include +#include namespace DB @@ -35,7 +39,7 @@ NamesAndTypesList StorageSystemUsers::getNamesAndTypes() {"id", std::make_shared()}, {"storage", std::make_shared()}, {"auth_type", std::make_shared(getAuthenticationTypeEnumValues())}, - {"auth_params", std::make_shared(std::make_shared())}, + {"auth_params", std::make_shared()}, {"host_ip", std::make_shared(std::make_shared())}, {"host_names", std::make_shared(std::make_shared())}, {"host_names_regexp", std::make_shared(std::make_shared())}, @@ -59,8 +63,7 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, const Context & auto & column_id = assert_cast(*res_columns[column_index++]).getData(); auto & column_storage = assert_cast(*res_columns[column_index++]); auto & column_auth_type = assert_cast(*res_columns[column_index++]).getData(); - auto & column_auth_params = assert_cast(assert_cast(*res_columns[column_index]).getData()); - auto & column_auth_params_offsets = assert_cast(*res_columns[column_index++]).getOffsets(); + auto & column_auth_params = assert_cast(*res_columns[column_index++]); auto & column_host_ip = assert_cast(assert_cast(*res_columns[column_index]).getData()); auto & column_host_ip_offsets = assert_cast(*res_columns[column_index++]).getOffsets(); auto & column_host_names = assert_cast(assert_cast(*res_columns[column_index]).getData()); @@ -86,7 +89,24 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, const Context & column_id.push_back(id); column_storage.insertData(storage_name.data(), storage_name.length()); column_auth_type.push_back(static_cast(authentication.getType())); - column_auth_params_offsets.push_back(column_auth_params.size()); + + if (authentication.getType() == Authentication::Type::LDAP_SERVER) + { + Poco::JSON::Object auth_params_json; + + auth_params_json.set("server", authentication.getServerName()); + + std::ostringstream oss; + Poco::JSON::Stringifier::stringify(auth_params_json, oss); + const auto str = oss.str(); + + column_auth_params.insertData(str.data(), str.size()); + } + else + { + static constexpr std::string_view empty_json{"{}"}; + column_auth_params.insertData(empty_json.data(), empty_json.length()); + } if (allowed_hosts.containsAnyHost()) { diff --git a/src/Storages/tests/gtest_storage_log.cpp b/src/Storages/tests/gtest_storage_log.cpp index c97adaf118d..13c96fbab54 100644 --- a/src/Storages/tests/gtest_storage_log.cpp +++ b/src/Storages/tests/gtest_storage_log.cpp @@ -31,7 +31,7 @@ 
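The StorageLog, StorageStripeLog and StorageTinyLog hunks above all adopt the same crash-safety protocol: a new table registers every data file with the file checker as empty (setEmpty), ATTACH repairs files back to their last committed sizes (repair), and a write is committed only in writeSuffix (update each file, then save). If the output stream is destroyed before writeSuffix completes, the destructor drops the buffers and calls repair to roll back the partial append. Before the unit-test updates that follow, here is a minimal Python sketch of that protocol; the names are illustrative, and this is not ClickHouse's actual FileChecker API.

import json, os

class MiniFileChecker:
    """Toy model of the committed-sizes pattern above (not the real FileChecker)."""
    def __init__(self, path):
        self.path = path
        self.sizes = {}
        if os.path.exists(path):
            with open(path) as f:
                self.sizes = json.load(f)

    def set_empty(self, file_path):
        # A freshly created file is committed at size zero.
        self.sizes[file_path] = 0

    def update(self, file_path):
        # Remember the current on-disk size as the new committed size.
        self.sizes[file_path] = os.path.getsize(file_path)

    def save(self):
        # Persist the committed sizes (ClickHouse keeps them next to the data).
        with open(self.path, "w") as f:
            json.dump(self.sizes, f)

    def repair(self):
        # Truncate any file that grew past its committed size (a partial write).
        for file_path, size in self.sizes.items():
            if os.path.exists(file_path) and os.path.getsize(file_path) > size:
                with open(file_path, "r+b") as f:
                    f.truncate(size)

def append_block(checker, data_path, block):
    done = False
    with open(data_path, "ab") as f:
        try:
            f.write(block)            # may die half-way (e.g. on a memory limit)
            f.flush()
            checker.update(data_path) # "writeSuffix": commit the new size...
            checker.save()            # ...and persist it
            done = True
        finally:
            if not done:
                checker.repair()      # destructor path: roll back the partial append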
DB::StoragePtr createStorage(DB::DiskPtr & disk) names_and_types.emplace_back("a", std::make_shared()); StoragePtr table = StorageLog::create( - disk, "table/", StorageID("test", "test"), ColumnsDescription{names_and_types}, ConstraintsDescription{}, 1048576); + disk, "table/", StorageID("test", "test"), ColumnsDescription{names_and_types}, ConstraintsDescription{}, false, 1048576); table->startup(); @@ -100,6 +100,7 @@ std::string writeData(int rows, DB::StoragePtr & table, const DB::Context & cont BlockOutputStreamPtr out = table->write({}, metadata_snapshot, context); out->write(block); + out->writeSuffix(); return data; } @@ -115,7 +116,8 @@ std::string readData(DB::StoragePtr & table, const DB::Context & context) QueryProcessingStage::Enum stage = table->getQueryProcessingStage(context); - BlockInputStreamPtr in = std::make_shared(std::move(table->read(column_names, metadata_snapshot, {}, context, stage, 8192, 1)[0])); + BlockInputStreamPtr in = std::make_shared( + std::move(table->read(column_names, metadata_snapshot, {}, context, stage, 8192, 1)[0])); Block sample; { diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 07fe681c2c8..12629628007 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -202,9 +202,8 @@ def run_tests_array(all_tests_with_params): (name, ext) = os.path.splitext(case) try: + sys.stdout.flush() sys.stdout.write("{0:72}".format(name + ": ")) - if run_total == 1: - sys.stdout.flush() if args.skip and any(s in name for s in args.skip): print(MSG_SKIPPED + " - skip") @@ -598,16 +597,14 @@ def main(args): if jobs > run_total: run_total = jobs + batch_size = max(1, len(all_tests) // jobs) # integer division: a float or zero batch size would break range() all_tests_array = [] - for n in range(1, 1 + int(run_total)): - start = int(tests_n / run_total * (n - 1)) - end = int(tests_n / run_total * n) - all_tests_array.append([all_tests[start : end], suite, suite_dir, suite_tmp_dir, run_total]) + for i in range(0, len(all_tests), batch_size): + all_tests_array.append((all_tests[i:i+batch_size], suite, suite_dir, suite_tmp_dir, run_total)) if jobs > 1: with closing(multiprocessing.Pool(processes=jobs)) as pool: pool.map(run_tests_array, all_tests_array) - pool.terminate() else: run_tests_array(all_tests_array[int(run_n)-1]) diff --git a/tests/integration/test_backward_compatability/__init__.py b/tests/integration/test_backward_compatibility/__init__.py similarity index 100% rename from tests/integration/test_backward_compatability/__init__.py rename to tests/integration/test_backward_compatibility/__init__.py diff --git a/tests/integration/test_backward_compatability/test.py b/tests/integration/test_backward_compatibility/test.py similarity index 100% rename from tests/integration/test_backward_compatability/test.py rename to tests/integration/test_backward_compatibility/test.py diff --git a/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py b/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py new file mode 100644 index 00000000000..c9f3acc2e2e --- /dev/null +++ b/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py @@ -0,0 +1,52 @@ +import pytest + +import helpers.client as client +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', + with_zookeeper=False, image='yandex/clickhouse-server:19.16.9.37', stay_alive=True, with_installed_binary=True) +node2 = cluster.add_instance('node2', + with_zookeeper=False, image='yandex/clickhouse-server:19.16.9.37',
stay_alive=True, with_installed_binary=True) +node3 = cluster.add_instance('node3', with_zookeeper=False) +node4 = cluster.add_instance('node4', with_zookeeper=False) + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + +# We will test that serialization of internal state of "avg" function is compatible between different versions. +# TODO Implement versioning of serialization format for aggregate function states. +# NOTE This test is too ad-hoc. + +def test_backward_compatability(start_cluster): + node1.query("create table tab (x UInt64) engine = Memory") + node2.query("create table tab (x UInt64) engine = Memory") + node3.query("create table tab (x UInt64) engine = Memory") + node4.query("create table tab (x UInt64) engine = Memory") + + node1.query("INSERT INTO tab VALUES (1)") + node2.query("INSERT INTO tab VALUES (2)") + node3.query("INSERT INTO tab VALUES (3)") + node4.query("INSERT INTO tab VALUES (4)") + + assert(node1.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == '2.5\n') + assert(node2.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == '2.5\n') + assert(node3.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == '2.5\n') + assert(node4.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == '2.5\n') + + # Also check with persisted aggregate function state + + node1.query("create table state (x AggregateFunction(avg, UInt64)) engine = Log") + node1.query("INSERT INTO state SELECT avgState(arrayJoin(CAST([1, 2, 3, 4] AS Array(UInt64))))") + + assert(node1.query("SELECT avgMerge(x) FROM state") == '2.5\n') + + node1.restart_with_latest_version() + + assert(node1.query("SELECT avgMerge(x) FROM state") == '2.5\n') diff --git a/tests/integration/test_backward_compatability/test_short_strings_aggregation.py b/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py similarity index 100% rename from tests/integration/test_backward_compatability/test_short_strings_aggregation.py rename to tests/integration/test_backward_compatibility/test_short_strings_aggregation.py diff --git a/tests/integration/test_grant_and_revoke/test.py b/tests/integration/test_grant_and_revoke/test.py index cec85a62ef0..9900be4704e 100644 --- a/tests/integration/test_grant_and_revoke/test.py +++ b/tests/integration/test_grant_and_revoke/test.py @@ -200,8 +200,8 @@ def test_introspection(): assert expected_access2 in instance.query("SHOW ACCESS") assert instance.query("SELECT name, storage, auth_type, auth_params, host_ip, host_names, host_names_regexp, host_names_like, default_roles_all, default_roles_list, default_roles_except from system.users WHERE name IN ('A', 'B') ORDER BY name") ==\ - TSV([[ "A", "disk", "no_password", "[]", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ], - [ "B", "disk", "no_password", "[]", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ]]) + TSV([[ "A", "disk", "no_password", "{}", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ], + [ "B", "disk", "no_password", "{}", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ]]) assert instance.query("SELECT * from system.grants WHERE user_name IN ('A', 'B') ORDER BY user_name, access_type, grant_option") ==\ TSV([[ "A", "\N", "SELECT", "test", "table", "\N", 0, 0 ], diff --git a/tests/integration/test_backward_compatability/test_string_aggregation.py b/tests/integration/test_profile_events_s3/__init__.py similarity index 100% rename from 
tests/integration/test_backward_compatability/test_string_aggregation.py rename to tests/integration/test_profile_events_s3/__init__.py
diff --git a/tests/integration/test_profile_events_s3/configs/config.d/storage_conf.xml b/tests/integration/test_profile_events_s3/configs/config.d/storage_conf.xml
new file mode 100644
index 00000000000..b32770095fc
--- /dev/null
+++ b/tests/integration/test_profile_events_s3/configs/config.d/storage_conf.xml
@@ -0,0 +1,21 @@
+<yandex>
+    <storage_configuration>
+        <disks>
+            <s3>
+                <type>s3</type>
+                <endpoint>http://minio1:9001/root/data/</endpoint>
+                <access_key_id>minio</access_key_id>
+                <secret_access_key>minio123</secret_access_key>
+            </s3>
+        </disks>
+        <policies>
+            <s3>
+                <volumes>
+                    <main>
+                        <disk>s3</disk>
+                    </main>
+                </volumes>
+            </s3>
+        </policies>
+    </storage_configuration>
+</yandex>
diff --git a/tests/integration/test_profile_events_s3/configs/config.xml b/tests/integration/test_profile_events_s3/configs/config.xml
new file mode 100644
index 00000000000..b83cbbac0a7
--- /dev/null
+++ b/tests/integration/test_profile_events_s3/configs/config.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<yandex>
+    <logger>
+        <level>trace</level>
+        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
+        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
+        <size>1000M</size>
+        <count>10</count>
+    </logger>
+
+    <query_log>
+        <database>system</database>
+        <table>query_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>1000</flush_interval_milliseconds>
+    </query_log>
+
+    <tcp_port>9000</tcp_port>
+    <listen_host>127.0.0.1</listen_host>
+
+    <openSSL>
+        <client>
+            <cacheSessions>true</cacheSessions>
+            <verificationMode>none</verificationMode>
+            <invalidCertificateHandler>
+                <name>AcceptCertificateHandler</name>
+            </invalidCertificateHandler>
+        </client>
+    </openSSL>
+
+    <max_concurrent_queries>500</max_concurrent_queries>
+    <mark_cache_size>5368709120</mark_cache_size>
+    <path>./clickhouse/</path>
+    <users_config>users.xml</users_config>
+</yandex>
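The files above give the test instance an s3 disk and a matching s3 storage policy backed by MinIO, plus a query_log that flushes every second. The test.py that follows measures each query twice, once through ClickHouse's cumulative S3 ProfileEvents and once through MinIO's Prometheus counters, and asserts that the per-query deltas agree. A small self-contained sketch of that snapshot-delta idea (the numbers are made up):

def delta(after, before):
    # Both ProfileEvents and MinIO counters are cumulative, so every check
    # compares two snapshots taken around the statement being measured.
    return {key: after[key] - before.get(key, 0) for key in after}

before = {"S3WriteRequestsCount": 10, "S3WriteBytes": 4096}
after = {"S3WriteRequestsCount": 13, "S3WriteBytes": 6144}
assert delta(after, before) == {"S3WriteRequestsCount": 3, "S3WriteBytes": 2048}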
diff --git a/tests/integration/test_profile_events_s3/configs/users.xml b/tests/integration/test_profile_events_s3/configs/users.xml
new file mode 100644
index 00000000000..95c83bf3dfe
--- /dev/null
+++ b/tests/integration/test_profile_events_s3/configs/users.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<yandex>
+    <profiles>
+        <default>
+        </default>
+    </profiles>
+
+    <users>
+        <default>
+            <password></password>
+            <networks incl="networks" replace="replace">
+                <ip>::/0</ip>
+            </networks>
+            <profile>default</profile>
+            <quota>default</quota>
+            <access_management>1</access_management>
+        </default>
+    </users>
+
+    <quotas>
+        <default>
+        </default>
+    </quotas>
+</yandex>
diff --git a/tests/integration/test_profile_events_s3/test.py b/tests/integration/test_profile_events_s3/test.py new file mode 100644 index 00000000000..f98505757bf --- /dev/null +++ b/tests/integration/test_profile_events_s3/test.py @@ -0,0 +1,160 @@ +import logging +import random +import string +import time +import re +import requests + +import pytest +from helpers.cluster import ClickHouseCluster + +logging.getLogger().setLevel(logging.INFO) +logging.getLogger().addHandler(logging.StreamHandler()) + + +@pytest.fixture(scope="module") +def cluster(): + try: + cluster = ClickHouseCluster(__file__) + + cluster.add_instance("node", config_dir="configs", with_minio=True) + + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + cluster.shutdown() + + +init_list = { + "S3ReadMicroseconds" : 0, + "S3ReadBytes" : 0, + "S3ReadRequestsCount" : 0, + "S3ReadRequestsErrorsTotal" : 0, + "S3ReadRequestsErrors503" : 0, + "S3ReadRequestsRedirects" : 0, + "S3WriteMicroseconds" : 0, + "S3WriteBytes" : 0, + "S3WriteRequestsCount" : 0, + "S3WriteRequestsErrorsTotal" : 0, + "S3WriteRequestsErrors503" : 0, + "S3WriteRequestsRedirects" : 0, +} + +def get_s3_events(instance): + result = init_list.copy() + events = instance.query("SELECT event,value FROM system.events WHERE event LIKE 'S3%'").split("\n") + for event in events: + ev = event.split("\t") + if len(ev) == 2: + result[ev[0]] = int(ev[1]) + return result + + +def get_minio_stat(cluster): + result = { + "get_requests" : 0, + "set_requests" : 0, + "errors" : 0, + "rx_bytes" : 0, + "tx_bytes" : 0, + } + stat = requests.get(url="http://{}:{}/minio/prometheus/metrics".format("localhost", cluster.minio_port)).text.split("\n") + # MinIO reports rx/tx from its own side, so its rx ("received") is ClickHouse's tx and vice versa. + for line in stat: + x = re.search(r"s3_requests_total(\{.*\})?\s(\d+)(\s.*)?", line) + if x is not None: + y = re.search(r".*api=\"(get|list|head|select).*", x.group(1) or "") # group(1) is None when the metric has no labels + if y is not None: + result["get_requests"] += int(x.group(2)) + else: + result["set_requests"] += int(x.group(2)) + x = re.search(r"s3_errors_total(\{.*\})?\s(\d+)(\s.*)?", line) + if x is not None: + result["errors"] += int(x.group(2)) + x = re.search(r"s3_rx_bytes_total(\{.*\})?\s([\d\.e\+\-]+)(\s.*)?", line) + if x is not None: + result["tx_bytes"] += float(x.group(2)) + x = re.search(r"s3_tx_bytes_total(\{.*\})?\s([\d\.e\+\-]+)(\s.*)?", line) + if x is not None: + result["rx_bytes"] += float(x.group(2)) + return result + + +def get_query_stat(instance, hint): + result = init_list.copy() + instance.query("SYSTEM FLUSH LOGS") + events = instance.query(''' + SELECT ProfileEvents.Names, ProfileEvents.Values + FROM system.query_log + ARRAY JOIN ProfileEvents + WHERE type != 1 AND query LIKE '%{}%' + '''.format(hint.replace("'", "\\'"))).split("\n") + for event in events: + ev = event.split("\t") + if len(ev) == 2: + if ev[0].startswith("S3"): + result[ev[0]] += int(ev[1]) + return result + + +def get_minio_size(cluster): + minio = cluster.minio_client + size = 0 + for obj in minio.list_objects(cluster.minio_bucket, 'data/'): + size += obj.size + return size + + +def test_profile_events(cluster): + instance = cluster.instances["node"] + + instance.query("SYSTEM FLUSH LOGS") + +
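# Illustrative annotation, not part of the original test: the preamble below
+    # flushes system.query_log first, so that later get_query_stat() deltas
+    # reflect only this test's queries, and then recreates test_s3 from
+    # scratch to give the metric snapshots a clean baseline.
+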
instance.query("DROP TABLE IF EXISTS test_s3.test_s3") + instance.query("DROP DATABASE IF EXISTS test_s3") + instance.query("CREATE DATABASE IF NOT EXISTS test_s3") + + metrics0 = get_s3_events(instance) + minio0 = get_minio_stat(cluster) + + query1 = "CREATE TABLE test_s3.test_s3 (key UInt32, value UInt32) ENGINE=MergeTree PRIMARY KEY key ORDER BY key SETTINGS storage_policy='s3'" + instance.query(query1) + + size1 = get_minio_size(cluster) + metrics1 = get_s3_events(instance) + minio1 = get_minio_stat(cluster) + + assert metrics1["S3ReadRequestsCount"] - metrics0["S3ReadRequestsCount"] == minio1["get_requests"] - minio0["get_requests"] - 1 # 1 from get_minio_size + assert metrics1["S3WriteRequestsCount"] - metrics0["S3WriteRequestsCount"] == minio1["set_requests"] - minio0["set_requests"] + stat1 = get_query_stat(instance, query1) + for metric in stat1: + assert stat1[metric] == metrics1[metric] - metrics0[metric] + assert metrics1["S3WriteBytes"] - metrics0["S3WriteBytes"] == size1 + + query2 = "INSERT INTO test_s3.test_s3 FORMAT Values" + instance.query(query2 + " (1,1)") + + size2 = get_minio_size(cluster) + metrics2 = get_s3_events(instance) + minio2 = get_minio_stat(cluster) + + assert metrics2["S3ReadRequestsCount"] - metrics1["S3ReadRequestsCount"] == minio2["get_requests"] - minio1["get_requests"] - 1 # 1 from get_minio_size + assert metrics2["S3WriteRequestsCount"] - metrics1["S3WriteRequestsCount"] == minio2["set_requests"] - minio1["set_requests"] + stat2 = get_query_stat(instance, query2) + for metric in stat2: + assert stat2[metric] == metrics2[metric] - metrics1[metric] + assert metrics2["S3WriteBytes"] - metrics1["S3WriteBytes"] == size2 - size1 + + query3 = "SELECT * from test_s3.test_s3" + assert instance.query(query3) == "1\t1\n" + + metrics3 = get_s3_events(instance) + minio3 = get_minio_stat(cluster) + + assert metrics3["S3ReadRequestsCount"] - metrics2["S3ReadRequestsCount"] == minio3["get_requests"] - minio2["get_requests"] + assert metrics3["S3WriteRequestsCount"] - metrics2["S3WriteRequestsCount"] == minio3["set_requests"] - minio2["set_requests"] + stat3 = get_query_stat(instance, query3) + for metric in stat3: + assert stat3[metric] == metrics3[metric] - metrics2[metric] diff --git a/tests/performance/decimal_aggregates.xml b/tests/performance/decimal_aggregates.xml index fa9f947abc9..33a9ad9e4e7 100644 --- a/tests/performance/decimal_aggregates.xml +++ b/tests/performance/decimal_aggregates.xml @@ -1,16 +1,13 @@ - - - definitely_no_such_table - - - 20G + 30G + 0 + 0 CREATE TABLE t (x UInt64, d32 Decimal32(3), d64 Decimal64(4), d128 Decimal128(5)) ENGINE = Memory - INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(200000000) SETTINGS max_threads = 8 + INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(500000000) SETTINGS max_threads = 8 DROP TABLE IF EXISTS t SELECT min(d32), max(d32), argMin(x, d32), argMax(x, d32) FROM t @@ -21,23 +18,23 @@ SELECT avg(d64), sum(d64), sumWithOverflow(d64) FROM t SELECT avg(d128), sum(d128), sumWithOverflow(d128) FROM t - SELECT uniq(d32), uniqCombined(d32), uniqExact(d32), uniqHLL12(d32) FROM t LIMIT 100000 - SELECT uniq(d64), uniqCombined(d64), uniqExact(d64), uniqHLL12(d64) FROM t LIMIT 100000 - SELECT uniq(d128), uniqCombined(d128), uniqExact(d128), uniqHLL12(d128) FROM t LIMIT 100000 + SELECT uniq(d32), uniqCombined(d32), uniqExact(d32), uniqHLL12(d32) FROM (SELECT * FROM t LIMIT 10000000) + SELECT uniq(d64), uniqCombined(d64), uniqExact(d64), 
uniqHLL12(d64) FROM (SELECT * FROM t LIMIT 10000000) + SELECT uniq(d128), uniqCombined(d128), uniqExact(d128), uniqHLL12(d128) FROM (SELECT * FROM t LIMIT 1000000) - SELECT median(d32), medianExact(d32), medianExactWeighted(d32, 2) FROM t LIMIT 100000 - SELECT median(d64), medianExact(d64), medianExactWeighted(d64, 2) FROM t LIMIT 100000 - SELECT median(d128), medianExact(d128), medianExactWeighted(d128, 2) FROM t LIMIT 100000 + SELECT median(d32), medianExact(d32), medianExactWeighted(d32, 2) FROM (SELECT * FROM t LIMIT 10000000) + SELECT median(d64), medianExact(d64), medianExactWeighted(d64, 2) FROM (SELECT * FROM t LIMIT 1000000) + SELECT median(d128), medianExact(d128), medianExactWeighted(d128, 2) FROM (SELECT * FROM t LIMIT 1000000) - SELECT quantile(d32), quantileExact(d32), quantileExactWeighted(d32, 2) FROM t LIMIT 100000 - SELECT quantile(d64), quantileExact(d64), quantileExactWeighted(d64, 2) FROM t LIMIT 100000 - SELECT quantile(d128), quantileExact(d128), quantileExactWeighted(d128, 2) FROM t LIMIT 100000 + SELECT quantile(d32), quantileExact(d32), quantileExactWeighted(d32, 2) FROM (SELECT * FROM t LIMIT 10000000) + SELECT quantile(d64), quantileExact(d64), quantileExactWeighted(d64, 2) FROM (SELECT * FROM t LIMIT 1000000) + SELECT quantile(d128), quantileExact(d128), quantileExactWeighted(d128, 2) FROM (SELECT * FROM t LIMIT 1000000) - SELECT quantilesExact(0.1, 0.9)(d32), quantilesExactWeighted(0.1, 0.9)(d32, 2) FROM t LIMIT 100000 - SELECT quantilesExact(0.1, 0.9)(d64), quantilesExactWeighted(0.1, 0.9)(d64, 2) FROM t LIMIT 100000 - SELECT quantilesExact(0.1, 0.9)(d128), quantilesExactWeighted(0.1, 0.9)(d128, 2) FROM t LIMIT 100000 + SELECT quantilesExact(0.1, 0.9)(d32), quantilesExactWeighted(0.1, 0.9)(d32, 2) FROM (SELECT * FROM t LIMIT 10000000) + SELECT quantilesExact(0.1, 0.9)(d64), quantilesExactWeighted(0.1, 0.9)(d64, 2) FROM (SELECT * FROM t LIMIT 1000000) + SELECT quantilesExact(0.1, 0.9)(d128), quantilesExactWeighted(0.1, 0.9)(d128, 2) FROM (SELECT * FROM t LIMIT 1000000) SELECT varPop(d32), varSamp(d32), stddevPop(d32) FROM t - SELECT varPop(d64), varSamp(d64), stddevPop(d64) FROM t - SELECT varPop(d128), varSamp(d128), stddevPop(d128) FROM t + SELECT varPop(d64), varSamp(d64), stddevPop(d64) FROM (SELECT * FROM t LIMIT 1000000) + SELECT varPop(d128), varSamp(d128), stddevPop(d128) FROM (SELECT * FROM t LIMIT 1000000) diff --git a/tests/performance/monotonous_order_by.xml b/tests/performance/monotonous_order_by.xml new file mode 100644 index 00000000000..1c58c4e9d0a --- /dev/null +++ b/tests/performance/monotonous_order_by.xml @@ -0,0 +1,9 @@ + + + hits_10m_single + + + SELECT * FROM (SELECT CounterID, EventDate FROM hits_10m_single) ORDER BY toFloat32(toFloat64(toFloat32(toFloat64(CounterID)))) FORMAT Null + SELECT * FROM (SELECT CounterID, EventDate FROM hits_10m_single) ORDER BY toFloat32(toFloat64(toFloat32(toFloat64(CounterID)))) DESC, toFloat32(toFloat64(toFloat32(toFloat64(EventDate)))) ASC FORMAT Null + + diff --git a/tests/queries/0_stateless/00626_replace_partition_from_table.sql b/tests/queries/0_stateless/00626_replace_partition_from_table.sql index c6479a94060..7224224334e 100644 --- a/tests/queries/0_stateless/00626_replace_partition_from_table.sql +++ b/tests/queries/0_stateless/00626_replace_partition_from_table.sql @@ -62,7 +62,7 @@ SELECT count(), sum(d) FROM dst; SELECT 'OPTIMIZE'; SELECT count(), sum(d), uniqExact(_part) FROM dst; -SYSTEM START MERGES; +SYSTEM START MERGES dst; SET optimize_throw_if_noop=1; OPTIMIZE TABLE dst; SELECT count(), 
sum(d), uniqExact(_part) FROM dst; diff --git a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh index e52610f03ba..bb248b5f4e1 100755 --- a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh +++ b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh @@ -25,9 +25,15 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE fixed_string_test_table (val FixedStr ${CLICKHOUSE_CLIENT} --query="CREATE TABLE signed_integer_test_table (val Int32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE unsigned_integer_test_table (val UInt32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE enum_test_table (val Enum16('hello' = 1, 'world' = 2, 'yandex' = 256, 'clickhouse' = 257)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;" + ${CLICKHOUSE_CLIENT} --query="CREATE TABLE date_test_table (val Date) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;" -${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES;" +${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES string_test_table;" +${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES fixed_string_test_table;" +${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES signed_integer_test_table;" +${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES unsigned_integer_test_table;" +${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES enum_test_table;" +${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES date_test_table;" ${CLICKHOUSE_CLIENT} --query="INSERT INTO string_test_table VALUES ('0'), ('2'), ('2');" ${CLICKHOUSE_CLIENT} --query="INSERT INTO fixed_string_test_table VALUES ('0'), ('2'), ('2');" @@ -80,5 +86,3 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS signed_integer_test_table;" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS unsigned_integer_test_table;" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS enum_test_table;" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS date_test_table;" - -${CLICKHOUSE_CLIENT} --query="SYSTEM START MERGES;" diff --git a/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql b/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql index f9ccd9623e1..6efeb2e6ef5 100644 --- a/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql +++ b/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql @@ -1,13 +1,13 @@ DROP TABLE IF EXISTS sites; CREATE TABLE sites (Domain UInt8, `Users.UserID` Array(UInt64), `Users.Dates` Array(Array(Date))) ENGINE = MergeTree ORDER BY Domain SETTINGS vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0; -SYSTEM STOP MERGES; +SYSTEM STOP MERGES sites; INSERT INTO sites VALUES (1,[1],[[]]); INSERT INTO sites VALUES (2,[1],[['2018-06-22']]); SELECT count(), countArray(Users.Dates), countArrayArray(Users.Dates) FROM sites; -SYSTEM START MERGES; +SYSTEM START MERGES sites; OPTIMIZE TABLE sites FINAL; SELECT count(), countArray(Users.Dates), countArrayArray(Users.Dates) FROM sites; diff --git a/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py b/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py index fb603a43f9e..c6a5251fbee 100755 --- a/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py +++ 
b/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py @@ -30,6 +30,7 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo client1.send('CREATE LIVE VIEW test.lv WITH TIMEOUT AS SELECT sum(a) FROM test.mt') client1.expect(prompt) client1.send('WATCH test.lv') + client1.expect('_version') client1.expect(r'0.*1' + end_of_block) client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') client1.expect(r'6.*2' + end_of_block) diff --git a/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled b/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled index 69722d9161c..525e7022156 100755 --- a/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled +++ b/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled @@ -30,6 +30,7 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo client1.send('CREATE LIVE VIEW test.lv WITH TIMEOUT 1 AS SELECT sum(a) FROM test.mt') client1.expect(prompt) client1.send('WATCH test.lv') + client1.expect('_version') client1.expect(r'0.*1' + end_of_block) client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') client2.expect(prompt) diff --git a/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py b/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py index 08d94a2341d..a20c9c3919c 100755 --- a/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py +++ b/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py @@ -32,6 +32,7 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo client1.send('CREATE LIVE VIEW test.lv WITH TIMEOUT AS SELECT sum(a) FROM test.mt') client1.expect(prompt) client1.send('WATCH test.lv') + client1.expect('_version') client1.expect(r'0.*1' + end_of_block) client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') client1.expect(r'6.*2' + end_of_block) diff --git a/tests/queries/0_stateless/00979_live_view_watch_continuous_aggregates.py b/tests/queries/0_stateless/00979_live_view_watch_continuous_aggregates.py index d184fea7183..3a67226da80 100755 --- a/tests/queries/0_stateless/00979_live_view_watch_continuous_aggregates.py +++ b/tests/queries/0_stateless/00979_live_view_watch_continuous_aggregates.py @@ -29,8 +29,8 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo client1.expect(prompt) client1.send('CREATE LIVE VIEW test.lv AS SELECT toStartOfDay(time) AS day, location, avg(temperature) FROM test.mt GROUP BY day, location ORDER BY day, location') client1.expect(prompt) - client1.send('WATCH test.lv FORMAT CSV') - client1.expect(r'0.*1' + end_of_block) + client1.send('WATCH test.lv FORMAT CSVWithNames') + client1.expect(r'_version') client2.send("INSERT INTO test.mt VALUES ('2019-01-01 00:00:00','New York',60),('2019-01-01 00:10:00','New York',70)") client2.expect(prompt) client1.expect(r'"2019-01-01 00:00:00","New York",65') diff --git a/tests/queries/0_stateless/00979_live_view_watch_live.py b/tests/queries/0_stateless/00979_live_view_watch_live.py index 784afa14498..04ca070c969 100755 --- a/tests/queries/0_stateless/00979_live_view_watch_live.py +++ b/tests/queries/0_stateless/00979_live_view_watch_live.py @@ -30,6 +30,7 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') client1.expect(prompt) client1.send('WATCH test.lv') + client1.expect('_version') client1.expect(r'0.*1' + 
end_of_block) client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') client1.expect(r'6.*2' + end_of_block) diff --git a/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py b/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py index 30d5e6d67b3..ccc824c4d20 100755 --- a/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py +++ b/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py @@ -30,6 +30,7 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a)/2 FROM (SELECT a, id FROM ( SELECT a, id FROM test.mt ORDER BY id DESC LIMIT 2 ) ORDER BY id DESC LIMIT 2)') client1.expect(prompt) client1.send('WATCH test.lv') + client1.expect('_version') client1.expect(r'0.*1' + end_of_block) client2.send('INSERT INTO test.mt VALUES (1, 1),(2, 2),(3, 3)') client1.expect(r'2\.5.*2' + end_of_block) diff --git a/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py b/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py index 44c923d75d8..809b8b0342a 100755 --- a/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py +++ b/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py @@ -30,6 +30,7 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo client1.send('CREATE LIVE VIEW test.lv AS SELECT * FROM ( SELECT sum(A.a) FROM (SELECT * FROM test.mt) AS A )') client1.expect(prompt) client1.send('WATCH test.lv') + client1.expect('_version') client1.expect(r'0.*1' + end_of_block) client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') client1.expect(r'6.*2' + end_of_block) diff --git a/tests/queries/0_stateless/00988_parallel_parts_removal.sql b/tests/queries/0_stateless/00988_parallel_parts_removal.sql index 0dccd3df048..bff9bbe6d8d 100644 --- a/tests/queries/0_stateless/00988_parallel_parts_removal.sql +++ b/tests/queries/0_stateless/00988_parallel_parts_removal.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS mt; CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS max_part_removal_threads = 16, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, old_parts_lifetime = 1, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000; -SYSTEM STOP MERGES; +SYSTEM STOP MERGES mt; SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; INSERT INTO mt SELECT * FROM numbers(1000); @@ -10,7 +10,7 @@ SET max_block_size = 65536; SELECT count(), sum(x) FROM mt; -SYSTEM START MERGES; +SYSTEM START MERGES mt; OPTIMIZE TABLE mt FINAL; SELECT count(), sum(x) FROM mt; diff --git a/tests/queries/0_stateless/00989_parallel_parts_loading.sql b/tests/queries/0_stateless/00989_parallel_parts_loading.sql index 5e0011483b3..0b4c0501669 100644 --- a/tests/queries/0_stateless/00989_parallel_parts_loading.sql +++ b/tests/queries/0_stateless/00989_parallel_parts_loading.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS mt; CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS max_part_loading_threads = 16, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000; -SYSTEM STOP MERGES; +SYSTEM STOP MERGES mt; SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; INSERT INTO mt SELECT * FROM numbers(1000); @@ -15,5 +15,5 @@ ATTACH TABLE mt; SELECT count(), sum(x) FROM mt; -SYSTEM START MERGES; +SYSTEM START MERGES mt; DROP TABLE mt; diff --git 
a/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql index c2d0333bf46..cdefdd9de8a 100644 --- a/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql +++ b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql @@ -76,7 +76,9 @@ AS GROUP BY id; -- This query has effect only for existing tables, so it must be located after CREATE. -SYSTEM STOP MERGES; +SYSTEM STOP MERGES target_table; +SYSTEM STOP MERGES checkouts; +SYSTEM STOP MERGES logins; -- feed with some initial values INSERT INTO logins SELECT number as id, '2000-01-01 08:00:00' from numbers(50000); @@ -126,5 +128,3 @@ DROP TABLE IF EXISTS mv_logins2target; DROP TABLE IF EXISTS checkouts; DROP TABLE IF EXISTS mv_checkouts2target; DROP TABLE target_table; - -SYSTEM START MERGES; diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh index d7b8ea3262d..009b400ee7b 100755 --- a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh @@ -24,7 +24,7 @@ $CLICKHOUSE_CLIENT --query "INSERT INTO table_for_rename_replicated SELECT toDat $CLICKHOUSE_CLIENT --query "SELECT value1 FROM table_for_rename_replicated WHERE key = 1;" -$CLICKHOUSE_CLIENT --query "SYSTEM STOP MERGES;" +$CLICKHOUSE_CLIENT --query "SYSTEM STOP MERGES table_for_rename_replicated;" $CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE table_for_rename_replicated;" @@ -49,7 +49,7 @@ $CLICKHOUSE_CLIENT --query "SELECT renamed_value1 FROM table_for_rename_replicat $CLICKHOUSE_CLIENT --query "SELECT * FROM table_for_rename_replicated WHERE key = 1 FORMAT TSVWithNames;" -$CLICKHOUSE_CLIENT --query "SYSTEM START MERGES;" +$CLICKHOUSE_CLIENT --query "SYSTEM START MERGES table_for_rename_replicated;" $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA table_for_rename_replicated;" diff --git a/tests/queries/0_stateless/01246_insert_into_watch_live_view.py b/tests/queries/0_stateless/01246_insert_into_watch_live_view.py index ee417c68897..7f65a7135d5 100755 --- a/tests/queries/0_stateless/01246_insert_into_watch_live_view.py +++ b/tests/queries/0_stateless/01246_insert_into_watch_live_view.py @@ -38,8 +38,8 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo client3.send('CREATE LIVE VIEW test.lv_sums AS SELECT * FROM test.sums ORDER BY version') client3.expect(prompt) - client3.send("WATCH test.lv_sums FORMAT CSV") - client3.expect(r'0.*1' + end_of_block) + client3.send("WATCH test.lv_sums FORMAT CSVWithNames") + client3.expect('_version') client1.send('INSERT INTO test.sums WATCH test.lv') client1.expect(r'INSERT INTO') diff --git a/tests/queries/0_stateless/01282_system_parts_ttl_info.sql b/tests/queries/0_stateless/01282_system_parts_ttl_info.sql index 3a1b1cc79ce..0caf64bac8d 100644 --- a/tests/queries/0_stateless/01282_system_parts_ttl_info.sql +++ b/tests/queries/0_stateless/01282_system_parts_ttl_info.sql @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS ttl; CREATE TABLE ttl (d DateTime) ENGINE = MergeTree ORDER BY tuple() TTL d + INTERVAL 10 DAY; -SYSTEM STOP MERGES; +SYSTEM STOP MERGES ttl; INSERT INTO ttl VALUES ('2000-01-01 01:02:03'), ('2000-02-03 04:05:06'); SELECT rows, delete_ttl_info_min, delete_ttl_info_max, move_ttl_info.expression, move_ttl_info.min, move_ttl_info.max FROM system.parts WHERE database = 
currentDatabase() AND table = 'ttl'; -SYSTEM START MERGES; +SYSTEM START MERGES ttl; OPTIMIZE TABLE ttl FINAL; SELECT rows, delete_ttl_info_min, delete_ttl_info_max, move_ttl_info.expression, move_ttl_info.min, move_ttl_info.max FROM system.parts WHERE database = currentDatabase() AND table = 'ttl' AND active; DROP TABLE ttl; diff --git a/tests/queries/0_stateless/01292_create_user.reference b/tests/queries/0_stateless/01292_create_user.reference index 555bd99bd94..922ee54bef4 100644 --- a/tests/queries/0_stateless/01292_create_user.reference +++ b/tests/queries/0_stateless/01292_create_user.reference @@ -95,10 +95,10 @@ CREATE USER u2_01292 DEFAULT ROLE r1_01292, r2_01292 SETTINGS readonly = 1 CREATE USER u3_01292 HOST LIKE \'%.%.myhost.com\' DEFAULT ROLE r1_01292, r2_01292 CREATE USER u4_01292 HOST LIKE \'%.%.myhost.com\' DEFAULT ROLE r1_01292, r2_01292 -- system.users -u1_01292 disk plaintext_password [] [] ['localhost'] [] [] 1 [] [] -u2_01292 disk no_password [] [] [] [] ['%.%.myhost.com'] 0 [] [] -u3_01292 disk sha256_password [] ['192.169.1.1','192.168.0.0/16'] ['localhost'] [] [] 0 ['r1_01292'] [] -u4_01292 disk double_sha1_password [] ['::/0'] [] [] [] 1 [] ['r1_01292'] +u1_01292 disk plaintext_password {} [] ['localhost'] [] [] 1 [] [] +u2_01292 disk no_password {} [] [] [] ['%.%.myhost.com'] 0 [] [] +u3_01292 disk sha256_password {} ['192.169.1.1','192.168.0.0/16'] ['localhost'] [] [] 0 ['r1_01292'] [] +u4_01292 disk double_sha1_password {} ['::/0'] [] [] [] 1 [] ['r1_01292'] -- system.settings_profile_elements \N u1_01292 \N 0 readonly 1 \N \N \N \N \N u2_01292 \N 0 \N \N \N \N \N default diff --git a/tests/queries/0_stateless/01321_monotonous_functions_in_order_by.reference b/tests/queries/0_stateless/01321_monotonous_functions_in_order_by.reference new file mode 100644 index 00000000000..e8e7d754ed9 --- /dev/null +++ b/tests/queries/0_stateless/01321_monotonous_functions_in_order_by.reference @@ -0,0 +1,168 @@ +0 +1 +2 +0 +1 +2 +0 +1 +2 +2 +1 +0 +0 +1 +2 +0 +1 +2 +0 +1 +2 +2 +1 +0 +2 +1 +0 +2 +1 +0 +0 +1 +2 +2 +1 +0 +2 +1 +0 +SELECT number +FROM numbers(3) +ORDER BY number ASC +SELECT number +FROM numbers(3) +ORDER BY abs(toFloat32(number)) ASC +SELECT number +FROM numbers(3) +ORDER BY toFloat32(abs(number)) ASC +SELECT number +FROM numbers(3) +ORDER BY number DESC +SELECT number +FROM numbers(3) +ORDER BY exp(number) ASC +SELECT roundToExp2(number) AS x +FROM numbers(3) +ORDER BY + number ASC, + number ASC +SELECT number AS x +FROM numbers(3) +ORDER BY + number ASC, + number ASC +SELECT number +FROM numbers(3) +ORDER BY number DESC +SELECT number +FROM numbers(3) +ORDER BY abs(toFloat32(number)) DESC +SELECT number +FROM numbers(3) +ORDER BY toFloat32(abs(number)) DESC +SELECT number +FROM numbers(3) +ORDER BY number ASC +SELECT number +FROM numbers(3) +ORDER BY exp(number) DESC +SELECT roundToExp2(number) AS x +FROM numbers(3) +ORDER BY + number DESC, + number DESC +0 +1 +2 +0 +1 +2 +0 +1 +2 +2 +1 +0 +0 +1 +2 +0 +1 +2 +0 +1 +2 +2 +1 +0 +2 +1 +0 +2 +1 +0 +0 +1 +2 +2 +1 +0 +2 +1 +0 +SELECT number +FROM numbers(3) +ORDER BY toFloat32(toFloat64(number)) ASC +SELECT number +FROM numbers(3) +ORDER BY abs(toFloat32(number)) ASC +SELECT number +FROM numbers(3) +ORDER BY toFloat32(abs(number)) ASC +SELECT number +FROM numbers(3) +ORDER BY -number ASC +SELECT number +FROM numbers(3) +ORDER BY exp(number) ASC +SELECT roundToExp2(number) AS x +FROM numbers(3) +ORDER BY + x ASC, + toFloat32(x) ASC +SELECT number AS x +FROM numbers(3) +ORDER BY + toFloat32(x) AS k ASC, + toFloat64(k) ASC 
+SELECT number +FROM numbers(3) +ORDER BY toFloat32(toFloat64(number)) DESC +SELECT number +FROM numbers(3) +ORDER BY abs(toFloat32(number)) DESC +SELECT number +FROM numbers(3) +ORDER BY toFloat32(abs(number)) DESC +SELECT number +FROM numbers(3) +ORDER BY -number DESC +SELECT number +FROM numbers(3) +ORDER BY exp(number) DESC +SELECT roundToExp2(number) AS x +FROM numbers(3) +ORDER BY + x DESC, + toFloat32(x) DESC diff --git a/tests/queries/0_stateless/01321_monotonous_functions_in_order_by.sql b/tests/queries/0_stateless/01321_monotonous_functions_in_order_by.sql new file mode 100644 index 00000000000..7f4b5881104 --- /dev/null +++ b/tests/queries/0_stateless/01321_monotonous_functions_in_order_by.sql @@ -0,0 +1,59 @@ +SET enable_debug_queries = 1; +SET optimize_monotonous_functions_in_order_by = 1; + +SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)); +SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)); +SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)); +SELECT number FROM numbers(3) ORDER BY -number; +SELECT number FROM numbers(3) ORDER BY exp(number); +SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x); +SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k); +SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC; +SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC; +SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC; +SELECT number FROM numbers(3) ORDER BY -number DESC; +SELECT number FROM numbers(3) ORDER BY exp(number) DESC; +SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC; +analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)); +analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)); +analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)); +analyze SELECT number FROM numbers(3) ORDER BY -number; +analyze SELECT number FROM numbers(3) ORDER BY exp(number); +analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x); +analyze SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k); +analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC; +analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC; +analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC; +analyze SELECT number FROM numbers(3) ORDER BY -number DESC; +analyze SELECT number FROM numbers(3) ORDER BY exp(number) DESC; +analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC; + +SET optimize_monotonous_functions_in_order_by = 0; + +SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)); +SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)); +SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)); +SELECT number FROM numbers(3) ORDER BY -number; +SELECT number FROM numbers(3) ORDER BY exp(number); +SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x); +SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k); +SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC; +SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC; +SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC; +SELECT number FROM numbers(3) ORDER BY -number DESC; +SELECT number FROM numbers(3) ORDER BY exp(number) DESC; +SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, 
toFloat32(x) DESC; +analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)); +analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)); +analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)); +analyze SELECT number FROM numbers(3) ORDER BY -number; +analyze SELECT number FROM numbers(3) ORDER BY exp(number); +analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x); +analyze SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k); +analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC; +analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC; +analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC; +analyze SELECT number FROM numbers(3) ORDER BY -number DESC; +analyze SELECT number FROM numbers(3) ORDER BY exp(number) DESC; +analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC; +-- TODO: exp() should be monotonous function diff --git a/tests/queries/0_stateless/01322_monotonous_order_by_with_different_variables.reference b/tests/queries/0_stateless/01322_monotonous_order_by_with_different_variables.reference new file mode 100644 index 00000000000..cf2935a40bf --- /dev/null +++ b/tests/queries/0_stateless/01322_monotonous_order_by_with_different_variables.reference @@ -0,0 +1,32 @@ +1 4 3 +1 3 3 +2 5 4 +2 2 4 +1 3 3 +1 4 3 +2 2 4 +2 5 4 +2 +1 +2 +1 3 3 +1 4 3 +2 5 4 +2 2 4 +2 +1 4 3 +1 3 3 +2 5 4 +2 2 4 +1 3 3 +1 4 3 +2 2 4 +2 5 4 +2 +1 +2 +1 3 3 +1 4 3 +2 5 4 +2 2 4 +2 diff --git a/tests/queries/0_stateless/01322_monotonous_order_by_with_different_variables.sql b/tests/queries/0_stateless/01322_monotonous_order_by_with_different_variables.sql new file mode 100644 index 00000000000..6fda42cbed1 --- /dev/null +++ b/tests/queries/0_stateless/01322_monotonous_order_by_with_different_variables.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (x Int8, y Int8, z Int8) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test VALUES (1, 3, 3), (1, 4, 3), (2, 5, 4), (2, 2, 4); + +SET optimize_monotonous_functions_in_order_by = 1; +SELECT * FROM test ORDER BY toFloat32(x), -y, -z DESC; +SELECT * FROM test ORDER BY toFloat32(x), -(-y), -z DESC; +SELECT max(x) as k FROM test ORDER BY k; +SELECT roundToExp2(x) as k FROM test GROUP BY k ORDER BY k; +SELECT roundToExp2(x) as k, y, z FROM test WHERE k >= 1 ORDER BY k; +SELECT max(x) as k FROM test HAVING k > 0 ORDER BY k; + +SET optimize_monotonous_functions_in_order_by = 0; +SELECT * FROM test ORDER BY toFloat32(x), -y, -z DESC; +SELECT * FROM test ORDER BY toFloat32(x), -(-y), -z DESC; +SELECT max(x) as k FROM test ORDER BY k; +SELECT roundToExp2(x) as k From test GROUP BY k ORDER BY k; +SELECT roundToExp2(x) as k, y, z FROM test WHERE k >= 1 ORDER BY k; +SELECT max(x) as k FROM test HAVING k > 0 ORDER BY k; + +DROP TABLE test; diff --git a/tests/queries/0_stateless/01332_join_type_syntax_position.reference b/tests/queries/0_stateless/01332_join_type_syntax_position.reference new file mode 100644 index 00000000000..66f4ca4a5a8 --- /dev/null +++ b/tests/queries/0_stateless/01332_join_type_syntax_position.reference @@ -0,0 +1,12 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 diff --git a/tests/queries/0_stateless/01332_join_type_syntax_position.sql b/tests/queries/0_stateless/01332_join_type_syntax_position.sql new file mode 100644 index 00000000000..bb87c7eb425 --- /dev/null +++ b/tests/queries/0_stateless/01332_join_type_syntax_position.sql @@ -0,0 
+1,31 @@ +select * from numbers(1) t1 left outer join numbers(1) t2 using number; +select * from numbers(1) t1 right outer join numbers(1) t2 using number; + +select * from numbers(1) t1 left any join numbers(1) t2 using number; +select * from numbers(1) t1 right any join numbers(1) t2 using number; + +select * from numbers(1) t1 left semi join numbers(1) t2 using number; +select * from numbers(1) t1 right semi join numbers(1) t2 using number; + +select * from numbers(1) t1 left anti join numbers(1) t2 using number; +select * from numbers(1) t1 right anti join numbers(1) t2 using number; + +select * from numbers(1) t1 asof join numbers(1) t2 using number; -- { serverError 62 } +select * from numbers(1) t1 left asof join numbers(1) t2 using number; -- { serverError 62 } + +-- legacy + +select * from numbers(1) t1 all left join numbers(1) t2 using number; +select * from numbers(1) t1 all right join numbers(1) t2 using number; + +select * from numbers(1) t1 any left join numbers(1) t2 using number; +select * from numbers(1) t1 any right join numbers(1) t2 using number; + +select * from numbers(1) t1 semi left join numbers(1) t2 using number; +select * from numbers(1) t1 semi right join numbers(1) t2 using number; + +select * from numbers(1) t1 anti left join numbers(1) t2 using number; +select * from numbers(1) t1 anti right join numbers(1) t2 using number; + +select * from numbers(1) t1 asof join numbers(1) t2 using number; -- { serverError 62 } +select * from numbers(1) t1 asof left join numbers(1) t2 using number; -- { serverError 62 } diff --git a/tests/queries/0_stateless/01373_summing_merge_tree_exclude_partition_key.sql b/tests/queries/0_stateless/01373_summing_merge_tree_exclude_partition_key.sql index 60c988a2e2f..790fbca6b73 100644 --- a/tests/queries/0_stateless/01373_summing_merge_tree_exclude_partition_key.sql +++ b/tests/queries/0_stateless/01373_summing_merge_tree_exclude_partition_key.sql @@ -4,7 +4,7 @@ CREATE TABLE tt_01373 (a Int64, d Int64, val Int64) ENGINE = SummingMergeTree PARTITION BY (a) ORDER BY (d); -SYSTEM STOP MERGES; +SYSTEM STOP MERGES tt_01373; INSERT INTO tt_01373 SELECT number%13, number%17, 1 from numbers(1000000); @@ -17,7 +17,7 @@ SELECT count(*) FROM tt_01373 FINAL; SELECT '---'; SELECT a, count() FROM tt_01373 FINAL GROUP BY a ORDER BY a; -SYSTEM START MERGES; +SYSTEM START MERGES tt_01373; OPTIMIZE TABLE tt_01373 FINAL; SELECT '---'; diff --git a/tests/queries/0_stateless/01383_log_broken_table.reference b/tests/queries/0_stateless/01383_log_broken_table.reference new file mode 100644 index 00000000000..1bc7c914e46 --- /dev/null +++ b/tests/queries/0_stateless/01383_log_broken_table.reference @@ -0,0 +1,3 @@ +Testing TinyLog +Testing StripeLog +Testing Log diff --git a/tests/queries/0_stateless/01383_log_broken_table.sh b/tests/queries/0_stateless/01383_log_broken_table.sh new file mode 100755 index 00000000000..2afac11e7c2 --- /dev/null +++ b/tests/queries/0_stateless/01383_log_broken_table.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none +. 
$CURDIR/../shell_config.sh + + +function test() +{ + ENGINE=$1 + MAX_MEM=4096 + + echo "Testing $ENGINE" + + $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS log"; + $CLICKHOUSE_CLIENT --query "CREATE TABLE log (x UInt64, y UInt64, z UInt64) ENGINE = $ENGINE"; + + while true; do + MAX_MEM=$((2 * $MAX_MEM)) + + $CLICKHOUSE_CLIENT --query "INSERT INTO log SELECT number, number, number FROM numbers(1000000)" --max_memory_usage $MAX_MEM > ${CLICKHOUSE_TMP}/insert_result 2>&1 + + grep -o -F 'Memory limit' ${CLICKHOUSE_TMP}/insert_result || cat ${CLICKHOUSE_TMP}/insert_result + + $CLICKHOUSE_CLIENT --query "SELECT count(), sum(x + y + z) FROM log" > ${CLICKHOUSE_TMP}/select_result 2>&1; + + grep -o -F 'File not found' ${CLICKHOUSE_TMP}/select_result || cat ${CLICKHOUSE_TMP}/select_result + + [[ $MAX_MEM -gt 200000000 ]] && break; + done + + $CLICKHOUSE_CLIENT --query "DROP TABLE log"; +} + +test TinyLog | grep -v -P '^(Memory limit|0\t0|File not found|[1-9]000000\t)' +test StripeLog | grep -v -P '^(Memory limit|0\t0|File not found|[1-9]000000\t)' +test Log | grep -v -P '^(Memory limit|0\t0|File not found|[1-9]000000\t)' + +rm "${CLICKHOUSE_TMP}/insert_result" +rm "${CLICKHOUSE_TMP}/select_result" diff --git a/tests/queries/0_stateless/01391_join_on_dict_crash.sql b/tests/queries/0_stateless/01391_join_on_dict_crash.sql index 03d98e0cdda..998e0e21745 100644 --- a/tests/queries/0_stateless/01391_join_on_dict_crash.sql +++ b/tests/queries/0_stateless/01391_join_on_dict_crash.sql @@ -12,15 +12,16 @@ CREATE TABLE d_src (id UInt64, country_id UInt8, name String) Engine = Memory; INSERT INTO t VALUES (0, 0); INSERT INTO d_src VALUES (0, 0, 'n'); -CREATE DICTIONARY d (id UInt32, country_id UInt8, name String) PRIMARY KEY id -SOURCE(CLICKHOUSE(host 'localhost' port 9000 user 'default' password '' db 'db_01391' table 'd_src')) -LIFETIME(MIN 300 MAX 360) +CREATE DICTIONARY d (id UInt32, country_id UInt8, name String) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' DB 'db_01391' table 'd_src')) +LIFETIME(MIN 1 MAX 1) LAYOUT(HASHED()); select click_country_id from t cc left join d on toUInt32(d.id) = cc.click_city_id; +DROP DICTIONARY d; DROP TABLE t; DROP TABLE d_src; -DROP DICTIONARY d; DROP DATABASE IF EXISTS db_01391; diff --git a/tests/queries/0_stateless/01397_in_bad_arguments.reference b/tests/queries/0_stateless/01397_in_bad_arguments.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01397_in_bad_arguments.sql b/tests/queries/0_stateless/01397_in_bad_arguments.sql new file mode 100644 index 00000000000..4854abad091 --- /dev/null +++ b/tests/queries/0_stateless/01397_in_bad_arguments.sql @@ -0,0 +1,4 @@ +select in((1, 1, 1, 1)); -- { serverError 42 } +select in(1); -- { serverError 42 } +select in(); -- { serverError 42 } +select in(1, 2, 3); -- { serverError 42 } diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt index 55c011e4884..bc0dd67519a 100644 --- a/tests/queries/0_stateless/arcadia_skip_list.txt +++ b/tests/queries/0_stateless/arcadia_skip_list.txt @@ -131,3 +131,4 @@ 01370_client_autocomplete_word_break_characters 01319_optimize_skip_unused_shards_nesting 01376_GROUP_BY_injective_elimination_dictGet +01391_join_on_dict_crash diff --git a/tests/testflows/helpers/cluster.py b/tests/testflows/helpers/cluster.py index e087b3b5b9d..9f86d44124c 100644 --- a/tests/testflows/helpers/cluster.py +++ b/tests/testflows/helpers/cluster.py @@ -167,6 +167,16 @@ class 
Cluster(object): self.docker_compose += f" --project-directory \"{docker_compose_project_dir}\" --file \"{docker_compose_file_path}\"" self.lock = threading.Lock() + def shell(self, node): + """Returns unique shell terminal to be used. + """ + if node is None: + return Shell() + + return Shell(command=[ + "/bin/bash", "--noediting", "-c", f"{self.docker_compose} exec {node} bash --noediting" + ], name=node) + def bash(self, node, timeout=60): """Returns thread-local bash terminal to a specific node. diff --git a/tests/testflows/ldap/__init__.py b/tests/testflows/ldap/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/testflows/ldap/configs/CA/ca.crt b/tests/testflows/ldap/configs/CA/ca.crt new file mode 100644 index 00000000000..8c71e3afc91 --- /dev/null +++ b/tests/testflows/ldap/configs/CA/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDlTCCAn2gAwIBAgIUJBqw2dHM2DDCZjYSkPOESlvDH6swDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAk9OMQ8wDQYDVQQHDAZPdHRhd2Ex +ETAPBgNVBAoMCEFsdGluaXR5MQswCQYDVQQLDAJRQTENMAsGA1UEAwwEcm9vdDAe +Fw0yMDA2MTExOTAzNDhaFw0zMDA2MDkxOTAzNDhaMFoxCzAJBgNVBAYTAkNBMQsw +CQYDVQQIDAJPTjEPMA0GA1UEBwwGT3R0YXdhMREwDwYDVQQKDAhBbHRpbml0eTEL +MAkGA1UECwwCUUExDTALBgNVBAMMBHJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQC9Irr0zGV+HCI2fZ0ht4hR5It4Sbjz4RwZV8ENRP/+TEz8l9eK +J6ygxhKX7SMYzIs/jS9Gsq4plX1r2ujW1qRf8yLpR4+dGLP+jBRi1drj0XjZXosT +SERjWzgPauWxL9LN8+l26eBAqz6fw5e0W8WRSTgf5iGiCcKOTmaATIUjP0CdfWKK +qpktI4vhe++CXZFJ3usR+8KZ/FwwbCLJM/3J2HnbcXfcaYPYvr1tfqLudKSTbG9H +M3+AVwjctdesc/0sbd51Zsm0ClQptMbuKnDCYauGg61kNkgbgPgRmH9Pzo67DtxF +/WW+PtOzq8xLOifciQ9Piboy9QBSQZGwf4wzAgMBAAGjUzBRMB0GA1UdDgQWBBSi +njya0RDozx3OZTLYFpwqYnlpIDAfBgNVHSMEGDAWgBSinjya0RDozx3OZTLYFpwq +YnlpIDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBAD7VyFg7F +U1C25KFvtauchAOjCW6w7U/b3z1dVZvcQ88/kH1VsLUcfGixlSilUEfPTJsi7OA0 +R5BQdh2GGcjUJv4iqEFGU05KvMVmRRKn08P62+ZhJxKMxG26VzcliRZzCMkI6d0W +lFwI6nM45yeqdHVh5k4xbuJzqpbD9BtXXLI+/Ra9Fx8S9ETA3GdidpZLU5P1VLxq +UuedfqyAVWZXpr6TAURGxouRmRzul9yFzbSUex+MLEIPrstjtEwV3+tBQZJz9xAS +TVPj+Nv3LO7GCq54bdwkq1ioWbSL2hEmABkj6kdW/JwmfhGHf/2rirDVMzrTYw07 +dFJfAZC+FEsv +-----END CERTIFICATE----- diff --git a/tests/testflows/ldap/configs/CA/ca.key b/tests/testflows/ldap/configs/CA/ca.key new file mode 100644 index 00000000000..e7a7f664dcf --- /dev/null +++ b/tests/testflows/ldap/configs/CA/ca.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-256-CBC,D06B9754A2069EBB4E77065DC9B605A1 + +FJT794Z6AUuUB5Vp5W2iR6zzCvQUg2dtKoE+xhFdbgC7lmSfA2W/O9fx15Il67Yj +Bbpm9Y6yteUSDQpJrvBdkhXeBkYEa5K1CA+0Jdx98nzwP3KBhHNxVVrTWRc5kniB +LMV3iBQEbAafxgL7gN+EWr3eV7w7ZSqT7D5br/mlBALU62gv2UzwTXLu1CgyNWMC +HIPjIX50Zga+BnhZhtQvM4Yj1gOsn+X6AaEZ3KjTfCDqthYQf2ldswW4gAlPAq83 ++INq9Spx+QG97Z+1XO2DmmGTZL0z+OFLT+3y26/UcftM26ODY09Dcf3gt0n6RIUV +0KsD1qQL0ppu4CHVnbIkOKMBe86qBl+kG8FVmyhgZ8D9ULlF1tpyTVKvHR82V2B5 +ztbc5EY1Fhb+r7OVVJlbCeo/bWmWybODZrpN49x5gGZpM3+8ApaHupGZ+cRFkQKG +rDpqC5gflT3WwFNxtP5noWcV+Gzb3riXNM3c8G5aIpLZwmmaTLK9ahKqMcq4Ljf+ +hir8kuCMqIKt3m7Ceoj4wAHSP8xO0y/cc1WYNb3CI0Emk795aR6IPUw4vDEXHG27 +OLoCJTvl/JKRWJGkdQx8wKAs/uw/qwtbhYoQJccTjfvy4NXH3tpSgxCE8OTWuEch +TAN8ra1PDGAUu+1MeT5gZ9uI1BEU6hXMME4mVRpJdcmw9MVy3V+B6rkUqX3kFAfR +e2ueF5qgIp+A4/UlVe5cKdWAQxu4BnUESLooA7cbgcLypdao9bRx9bXH8S3aNgxW +IdgICpc/v8wAX2yqMe191KgR9Vh1p0RCw/kEGVgWfY/IaQpsaYuq5quZbvr/fN5T +d++ySAMaPysaCadLUdZJLw56uk4Y+PYzR+ygjTX9dCCHedrAU8RYM55FJ/fyD3bQ +Hn9/n7PZyWy6u/TYt6dhlcYxaS3Opzw4eAQB8tGZJRYQ3AKpHpTEC57lXoMnUPKo 
++nBmb0+YulylMZdns0WIBJlcv6qzIaNhDMrjyi18n1ezzPIGH7ivUjoXy2FL23q5
+f3aqJK4UUDEDkC8IeZkS+ykYxnohjFDhUyBe5gjryLqdMdy9EerehCWPf425AztX
+c/EWPzDl46qmxWhugOlz3Fiw95VlYu0MUDRayHuZiYPplgJypChuU4EHJ+q8V2z3
+BwjSo1bD4nfc8f68qEOtdZ1u/ClcolMwlZQYDJz/DiE4JOcd2Gx4QSF5vaInm0/4
+mMj/ZWna4DAYFbH8IGh7xUPDqeIWhBYlgrD69ajKyay5Vu3La/d2QW20BhX35Ro2
+ZJVR+lfioMmxn4y481H2pv+5gOlGwh02Oa8qLhZBb8W+DvFShNk6mk87eCForFFT
+CDgmvfsC/cS2wZkcFTecq6vbjFlt+OF13NCKlcO3wCm44D+bwVPeMrU6HycCVQw7
+SASrnP/th5sJbv11byb2lKgVdVHWk090bqnDwB9H2hGIb9JnPC9ZpaL/mocYyzTi
+H9fcBrMYkL09FJGr3Uff7qEY4XQTMlLadXue3iKd19PRgV8cRyKp37MYI9/3iLwv
+eYHLtMfrifZahf1ksOPeBphnlfzWo9qqfooUCaGxfSlNPUHhrHZ4aMiRyTE8Xeh2
+-----END RSA PRIVATE KEY-----
diff --git a/tests/testflows/ldap/configs/CA/ca.srl b/tests/testflows/ldap/configs/CA/ca.srl
new file mode 100644
index 00000000000..66feb9c8a35
--- /dev/null
+++ b/tests/testflows/ldap/configs/CA/ca.srl
@@ -0,0 +1 @@
+227B125D27B6B1A4B5955361365DF8EC2D7098C1
diff --git a/tests/testflows/ldap/configs/CA/dhparam.pem b/tests/testflows/ldap/configs/CA/dhparam.pem
new file mode 100644
index 00000000000..0a96faffd62
--- /dev/null
+++ b/tests/testflows/ldap/configs/CA/dhparam.pem
@@ -0,0 +1,5 @@
+-----BEGIN DH PARAMETERS-----
+MIGHAoGBAJitt2hhnpDViQ5ko2ipBMdjy+bZ6FR/WdZ987R7lQvBkKehPXmxtEyV
+AO6ofv5CZSDJokc5bUeBOAtg0EhMTCH82uPdwQvt58jRXcxXBg4JTjkx+oW9LBv2
+FdZsbaX8+SYivmiZ0Jp8T/HBm/4DA9VBS0O5GFRS4C7dHhmSTPfDAgEC
+-----END DH PARAMETERS-----
diff --git a/tests/testflows/ldap/configs/CA/passphrase.txt b/tests/testflows/ldap/configs/CA/passphrase.txt
new file mode 100644
index 00000000000..2cf58b2364c
--- /dev/null
+++ b/tests/testflows/ldap/configs/CA/passphrase.txt
@@ -0,0 +1 @@
+altinity
diff --git a/tests/testflows/ldap/configs/clickhouse/common.xml b/tests/testflows/ldap/configs/clickhouse/common.xml
new file mode 100644
index 00000000000..df952b28c82
--- /dev/null
+++ b/tests/testflows/ldap/configs/clickhouse/common.xml
@@ -0,0 +1,6 @@
+<yandex>
+    <timezone>Europe/Moscow</timezone>
+    <listen_host>0.0.0.0</listen_host>
+    <path>/var/lib/clickhouse/</path>
+    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
+</yandex>
diff --git a/tests/testflows/ldap/configs/clickhouse/config.d/logs.xml b/tests/testflows/ldap/configs/clickhouse/config.d/logs.xml
new file mode 100644
index 00000000000..bdf1bbc11c1
--- /dev/null
+++ b/tests/testflows/ldap/configs/clickhouse/config.d/logs.xml
@@ -0,0 +1,17 @@
+<yandex>
+    <shutdown_wait_unfinished>3</shutdown_wait_unfinished>
+    <logger>
+        <level>trace</level>
+        <log>/var/log/clickhouse-server/log.log</log>
+        <errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
+        <size>1000M</size>
+        <count>10</count>
+        <stderr>/var/log/clickhouse-server/stderr.log</stderr>
+        <stdout>/var/log/clickhouse-server/stdout.log</stdout>
+    </logger>
+    <part_log>
+        <database>system</database>
+        <table>part_log</table>
+        <flush_interval_milliseconds>500</flush_interval_milliseconds>
+    </part_log>
+</yandex>
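The part_log override above is easy to sanity-check once a node is running. A minimal sketch, assuming the clickhouse1 container from the docker-compose files later in this patch (the query itself is illustrative and not part of the patch):

    # With flush_interval_milliseconds set to 500 in logs.xml, part events from
    # any MergeTree insert should show up in system.part_log within about a second.
    docker-compose exec clickhouse1 clickhouse client --query \
        "SELECT event_type, table, rows FROM system.part_log ORDER BY event_time DESC LIMIT 5"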
diff --git a/tests/testflows/ldap/configs/clickhouse/config.d/ports.xml b/tests/testflows/ldap/configs/clickhouse/config.d/ports.xml new file mode 100644 index 00000000000..fbc6cea74c0 --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse/config.d/ports.xml @@ -0,0 +1,5 @@ + + + 8443 + 9440 + \ No newline at end of file diff --git a/tests/testflows/ldap/configs/clickhouse/config.d/remote.xml b/tests/testflows/ldap/configs/clickhouse/config.d/remote.xml new file mode 100644 index 00000000000..51be2a6e8e3 --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse/config.d/remote.xml @@ -0,0 +1,107 @@ + + + + + + true + + clickhouse1 + 9000 + + + clickhouse2 + 9000 + + + clickhouse3 + 9000 + + + + + + + true + + clickhouse1 + 9440 + 1 + + + clickhouse2 + 9440 + 1 + + + clickhouse3 + 9440 + 1 + + + + + + + clickhouse1 + 9000 + + + + + clickhouse2 + 9000 + + + + + clickhouse3 + 9000 + + + + + + + clickhouse1 + 9440 + 1 + + + + + clickhouse2 + 9440 + 1 + + + + + clickhouse3 + 9440 + 1 + + + + + diff --git a/tests/testflows/ldap/configs/clickhouse/config.d/ssl.xml b/tests/testflows/ldap/configs/clickhouse/config.d/ssl.xml new file mode 100644 index 00000000000..ca65ffd5e04 --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse/config.d/ssl.xml @@ -0,0 +1,17 @@ + + + + /etc/clickhouse-server/ssl/server.crt + /etc/clickhouse-server/ssl/server.key + none + true + + + true + none + + AcceptCertificateHandler + + + + diff --git a/tests/testflows/ldap/configs/clickhouse/config.d/storage.xml b/tests/testflows/ldap/configs/clickhouse/config.d/storage.xml new file mode 100644 index 00000000000..618fd6b6d24 --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse/config.d/storage.xml @@ -0,0 +1,20 @@ + + + + + + 1024 + + + + + + + default + + + + + + + diff --git a/tests/testflows/ldap/configs/clickhouse/config.d/zookeeper.xml b/tests/testflows/ldap/configs/clickhouse/config.d/zookeeper.xml new file mode 100644 index 00000000000..96270e7b645 --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse/config.d/zookeeper.xml @@ -0,0 +1,10 @@ + + + + + zookeeper + 2181 + + 15000 + + diff --git a/tests/testflows/ldap/configs/clickhouse/config.xml b/tests/testflows/ldap/configs/clickhouse/config.xml new file mode 100644 index 00000000000..d34d2c35253 --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse/config.xml @@ -0,0 +1,436 @@ + + + + + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + + + + 8123 + 9000 + + + + + + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + + + + + + + 9009 + + + + + + + + + + + + + + + + + + + + 4096 + 3 + + + 100 + + + + + + 8589934592 + + + 5368709120 + + + + /var/lib/clickhouse/ + + + /var/lib/clickhouse/tmp/ + + + /var/lib/clickhouse/user_files/ + + + /var/lib/clickhouse/access/ + + + users.xml + + + default + + + + + + default + + + + + + + + + false + + + + + + + + localhost + 9000 + + + + + + + localhost + 9000 + + + + + localhost + 9000 + + + + + + + localhost + 9440 + 1 + + + + + + + localhost + 9000 + + + + + localhost + 1 + + + + + + + + + + + + + + + + + 3600 + + + + 3600 + + + 60 + + + + + + + + + + system + query_log
+ + toYYYYMM(event_date) + + 7500 +
+ + + + system + trace_log
+ + toYYYYMM(event_date) + 7500 +
+ + + + system + query_thread_log
+ toYYYYMM(event_date) + 7500 +
+ + + + + + + + + + + + + + + + *_dictionary.xml + + + + + + + + + + /clickhouse/task_queue/ddl + + + + + + + + + + + + + + + + click_cost + any + + 0 + 3600 + + + 86400 + 60 + + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + + + + /var/lib/clickhouse/format_schemas/ + + + +
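The main config.xml above routes query_log, trace_log, and query_thread_log into the system database, partitioned by toYYYYMM(event_date) and flushed every 7500 ms. A rough way to observe the flush behaviour, again assuming the clickhouse1 container name from the compose files later in this patch (the marker string and the 8-second sleep are arbitrary choices, just longer than one flush period):

    # Run a marker query, wait past the 7500 ms flush, then find it in query_log.
    docker-compose exec clickhouse1 clickhouse client --query "SELECT 'flush_marker_01' FORMAT Null"
    sleep 8
    docker-compose exec clickhouse1 clickhouse client --query \
        "SELECT count() FROM system.query_log WHERE query LIKE '%flush_marker_01%'"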
diff --git a/tests/testflows/ldap/configs/clickhouse/ssl/dhparam.pem b/tests/testflows/ldap/configs/clickhouse/ssl/dhparam.pem new file mode 100644 index 00000000000..2e6cee0798d --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse/ssl/dhparam.pem @@ -0,0 +1,8 @@ +-----BEGIN DH PARAMETERS----- +MIIBCAKCAQEAua92DDli13gJ+//ZXyGaggjIuidqB0crXfhUlsrBk9BV1hH3i7fR +XGP9rUdk2ubnB3k2ejBStL5oBrkHm9SzUFSQHqfDjLZjKoUpOEmuDc4cHvX1XTR5 +Pr1vf5cd0yEncJWG5W4zyUB8k++SUdL2qaeslSs+f491HBLDYn/h8zCgRbBvxhxb +9qeho1xcbnWeqkN6Kc9bgGozA16P9NLuuLttNnOblkH+lMBf42BSne/TWt3AlGZf +slKmmZcySUhF8aKfJnLKbkBCFqOtFRh8zBA9a7g+BT/lSANATCDPaAk1YVih2EKb +dpc3briTDbRsiqg2JKMI7+VdULY9bh3EawIBAg== +-----END DH PARAMETERS----- diff --git a/tests/testflows/ldap/configs/clickhouse/ssl/server.crt b/tests/testflows/ldap/configs/clickhouse/ssl/server.crt new file mode 100644 index 00000000000..7ade2d96273 --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse/ssl/server.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIC/TCCAeWgAwIBAgIJANjx1QSR77HBMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWxvY2FsaG9zdDAgFw0xODA3MzAxODE2MDhaGA8yMjkyMDUxNDE4MTYwOFow +FDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAs9uSo6lJG8o8pw0fbVGVu0tPOljSWcVSXH9uiJBwlZLQnhN4SFSFohfI +4K8U1tBDTnxPLUo/V1K9yzoLiRDGMkwVj6+4+hE2udS2ePTQv5oaMeJ9wrs+5c9T +4pOtlq3pLAdm04ZMB1nbrEysceVudHRkQbGHzHp6VG29Fw7Ga6YpqyHQihRmEkTU +7UCYNA+Vk7aDPdMS/khweyTpXYZimaK9f0ECU3/VOeG3fH6Sp2X6FN4tUj/aFXEj +sRmU5G2TlYiSIUMF2JPdhSihfk1hJVALrHPTU38SOL+GyyBRWdNcrIwVwbpvsvPg +pryMSNxnpr0AK0dFhjwnupIv5hJIOQIDAQABo1AwTjAdBgNVHQ4EFgQUjPLb3uYC +kcamyZHK4/EV8jAP0wQwHwYDVR0jBBgwFoAUjPLb3uYCkcamyZHK4/EV8jAP0wQw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAM/ocuDvfPus/KpMVD51j +4IdlU8R0vmnYLQ+ygzOAo7+hUWP5j0yvq4ILWNmQX6HNvUggCgFv9bjwDFhb/5Vr +85ieWfTd9+LTjrOzTw4avdGwpX9G+6jJJSSq15tw5ElOIFb/qNA9O4dBiu8vn03C +L/zRSXrARhSqTW5w/tZkUcSTT+M5h28+Lgn9ysx4Ff5vi44LJ1NnrbJbEAIYsAAD ++UA+4MBFKx1r6hHINULev8+lCfkpwIaeS8RL+op4fr6kQPxnULw8wT8gkuc8I4+L +P9gg/xDHB44T3ADGZ5Ib6O0DJaNiToO6rnoaaxs0KkotbvDWvRoxEytSbXKoYjYp +0g== +-----END CERTIFICATE----- diff --git a/tests/testflows/ldap/configs/clickhouse/ssl/server.key b/tests/testflows/ldap/configs/clickhouse/ssl/server.key new file mode 100644 index 00000000000..f0fb61ac443 --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse/ssl/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCz25KjqUkbyjyn +DR9tUZW7S086WNJZxVJcf26IkHCVktCeE3hIVIWiF8jgrxTW0ENOfE8tSj9XUr3L +OguJEMYyTBWPr7j6ETa51LZ49NC/mhox4n3Cuz7lz1Pik62WreksB2bThkwHWdus +TKxx5W50dGRBsYfMenpUbb0XDsZrpimrIdCKFGYSRNTtQJg0D5WTtoM90xL+SHB7 +JOldhmKZor1/QQJTf9U54bd8fpKnZfoU3i1SP9oVcSOxGZTkbZOViJIhQwXYk92F +KKF+TWElUAusc9NTfxI4v4bLIFFZ01ysjBXBum+y8+CmvIxI3GemvQArR0WGPCe6 +ki/mEkg5AgMBAAECggEATrbIBIxwDJOD2/BoUqWkDCY3dGevF8697vFuZKIiQ7PP +TX9j4vPq0DfsmDjHvAPFkTHiTQXzlroFik3LAp+uvhCCVzImmHq0IrwvZ9xtB43f +7Pkc5P6h1l3Ybo8HJ6zRIY3TuLtLxuPSuiOMTQSGRL0zq3SQ5DKuGwkz+kVjHXUN +MR2TECFwMHKQ5VLrC+7PMpsJYyOMlDAWhRfUalxC55xOXTpaN8TxNnwQ8K2ISVY5 +212Jz/a4hn4LdwxSz3Tiu95PN072K87HLWx3EdT6vW4Ge5P/A3y+smIuNAlanMnu +plHBRtpATLiTxZt/n6npyrfQVbYjSH7KWhB8hBHtaQKBgQDh9Cq1c/KtqDtE0Ccr +/r9tZNTUwBE6VP+3OJeKdEdtsfuxjOCkS1oAjgBJiSDOiWPh1DdoDeVZjPKq6pIu +Mq12OE3Doa8znfCXGbkSzEKOb2unKZMJxzrz99kXt40W5DtrqKPNb24CNqTiY8Aa +CjtcX+3weat82VRXvph6U8ltMwKBgQDLxjiQQzNoY7qvg7CwJCjf9qq8jmLK766g +1FHXopqS+dTxDLM8eJSRrpmxGWJvNeNc1uPhsKsKgotqAMdBUQTf7rSTbt4MyoH5 +bUcRLtr+0QTK9hDWMOOvleqNXha68vATkohWYfCueNsC60qD44o8RZAS6UNy3ENq 
+cM1cxqe84wKBgQDKkHutWnooJtajlTxY27O/nZKT/HA1bDgniMuKaz4R4Gr1PIez +on3YW3V0d0P7BP6PWRIm7bY79vkiMtLEKdiKUGWeyZdo3eHvhDb/3DCawtau8L2K +GZsHVp2//mS1Lfz7Qh8/L/NedqCQ+L4iWiPnZ3THjjwn3CoZ05ucpvrAMwKBgB54 +nay039MUVq44Owub3KDg+dcIU62U+cAC/9oG7qZbxYPmKkc4oL7IJSNecGHA5SbU +2268RFdl/gLz6tfRjbEOuOHzCjFPdvAdbysanpTMHLNc6FefJ+zxtgk9sJh0C4Jh +vxFrw9nTKKzfEl12gQ1SOaEaUIO0fEBGbe8ZpauRAoGAMAlGV+2/K4ebvAJKOVTa +dKAzQ+TD2SJmeR1HZmKDYddNqwtZlzg3v4ZhCk4eaUmGeC1Bdh8MDuB3QQvXz4Dr +vOIP4UVaOr+uM+7TgAgVnP4/K6IeJGzUDhX93pmpWhODfdu/oojEKVcpCojmEmS1 +KCBtmIrQLqzMpnBpLNuSY+Q= +-----END PRIVATE KEY----- diff --git a/tests/testflows/ldap/configs/clickhouse/users.xml b/tests/testflows/ldap/configs/clickhouse/users.xml new file mode 100644 index 00000000000..86b2cd9e1e3 --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse/users.xml @@ -0,0 +1,133 @@ + + + + + + + + 10000000000 + + + 0 + + + random + + + + + 1 + + + + + + + + + + + + + ::/0 + + + + default + + + default + + + 1 + + + + + + + + + + + + + + + + + 3600 + + + 0 + 0 + 0 + 0 + 0 + + + + diff --git a/tests/testflows/ldap/configs/clickhouse1/config.d/macros.xml b/tests/testflows/ldap/configs/clickhouse1/config.d/macros.xml new file mode 100644 index 00000000000..6cdcc1b440c --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse1/config.d/macros.xml @@ -0,0 +1,8 @@ + + + + clickhouse1 + 01 + 01 + + diff --git a/tests/testflows/ldap/configs/clickhouse2/config.d/macros.xml b/tests/testflows/ldap/configs/clickhouse2/config.d/macros.xml new file mode 100644 index 00000000000..a114a9ce4ab --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse2/config.d/macros.xml @@ -0,0 +1,8 @@ + + + + clickhouse2 + 01 + 02 + + diff --git a/tests/testflows/ldap/configs/clickhouse3/config.d/macros.xml b/tests/testflows/ldap/configs/clickhouse3/config.d/macros.xml new file mode 100644 index 00000000000..904a27b0172 --- /dev/null +++ b/tests/testflows/ldap/configs/clickhouse3/config.d/macros.xml @@ -0,0 +1,8 @@ + + + + clickhouse3 + 01 + 03 + + diff --git a/tests/testflows/ldap/configs/ldap1/config/export.ldif b/tests/testflows/ldap/configs/ldap1/config/export.ldif new file mode 100644 index 00000000000..621dd32ca0c --- /dev/null +++ b/tests/testflows/ldap/configs/ldap1/config/export.ldif @@ -0,0 +1,64 @@ +# LDIF Export for dc=company,dc=com +# Server: openldap (openldap) +# Search Scope: sub +# Search Filter: (objectClass=*) +# Total Entries: 7 +# +# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on May 22, 2020 5:51 pm +# Version: 1.2.5 + +# Entry 1: dc=company,dc=com +#dn: dc=company,dc=com +#dc: company +#o: company +#objectclass: top +#objectclass: dcObject +#objectclass: organization + +# Entry 2: cn=admin,dc=company,dc=com +#dn: cn=admin,dc=company,dc=com +#cn: admin +#description: LDAP administrator +#objectclass: simpleSecurityObject +#objectclass: organizationalRole +#userpassword: {SSHA}eUEupkQCTvq9SkrxfWGSe5rX+orrjVbF + +# Entry 3: ou=groups,dc=company,dc=com +dn: ou=groups,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: groups + +# Entry 4: cn=admin,ou=groups,dc=company,dc=com +dn: cn=admin,ou=groups,dc=company,dc=com +cn: admin +gidnumber: 500 +objectclass: posixGroup +objectclass: top + +# Entry 5: cn=users,ou=groups,dc=company,dc=com +dn: cn=users,ou=groups,dc=company,dc=com +cn: users +gidnumber: 501 +objectclass: posixGroup +objectclass: top + +# Entry 6: ou=users,dc=company,dc=com +dn: ou=users,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: users + +# Entry 7: 
cn=user1,ou=users,dc=company,dc=com +dn: cn=user1,ou=users,dc=company,dc=com +cn: user1 +gidnumber: 501 +givenname: John +homedirectory: /home/users/user1 +objectclass: inetOrgPerson +objectclass: posixAccount +objectclass: top +sn: User +uid: user1 +uidnumber: 1101 +userpassword: user1 diff --git a/tests/testflows/ldap/configs/ldap2/certs/ca.crt b/tests/testflows/ldap/configs/ldap2/certs/ca.crt new file mode 100644 index 00000000000..8c71e3afc91 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap2/certs/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDlTCCAn2gAwIBAgIUJBqw2dHM2DDCZjYSkPOESlvDH6swDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAk9OMQ8wDQYDVQQHDAZPdHRhd2Ex +ETAPBgNVBAoMCEFsdGluaXR5MQswCQYDVQQLDAJRQTENMAsGA1UEAwwEcm9vdDAe +Fw0yMDA2MTExOTAzNDhaFw0zMDA2MDkxOTAzNDhaMFoxCzAJBgNVBAYTAkNBMQsw +CQYDVQQIDAJPTjEPMA0GA1UEBwwGT3R0YXdhMREwDwYDVQQKDAhBbHRpbml0eTEL +MAkGA1UECwwCUUExDTALBgNVBAMMBHJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQC9Irr0zGV+HCI2fZ0ht4hR5It4Sbjz4RwZV8ENRP/+TEz8l9eK +J6ygxhKX7SMYzIs/jS9Gsq4plX1r2ujW1qRf8yLpR4+dGLP+jBRi1drj0XjZXosT +SERjWzgPauWxL9LN8+l26eBAqz6fw5e0W8WRSTgf5iGiCcKOTmaATIUjP0CdfWKK +qpktI4vhe++CXZFJ3usR+8KZ/FwwbCLJM/3J2HnbcXfcaYPYvr1tfqLudKSTbG9H +M3+AVwjctdesc/0sbd51Zsm0ClQptMbuKnDCYauGg61kNkgbgPgRmH9Pzo67DtxF +/WW+PtOzq8xLOifciQ9Piboy9QBSQZGwf4wzAgMBAAGjUzBRMB0GA1UdDgQWBBSi +njya0RDozx3OZTLYFpwqYnlpIDAfBgNVHSMEGDAWgBSinjya0RDozx3OZTLYFpwq +YnlpIDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBAD7VyFg7F +U1C25KFvtauchAOjCW6w7U/b3z1dVZvcQ88/kH1VsLUcfGixlSilUEfPTJsi7OA0 +R5BQdh2GGcjUJv4iqEFGU05KvMVmRRKn08P62+ZhJxKMxG26VzcliRZzCMkI6d0W +lFwI6nM45yeqdHVh5k4xbuJzqpbD9BtXXLI+/Ra9Fx8S9ETA3GdidpZLU5P1VLxq +UuedfqyAVWZXpr6TAURGxouRmRzul9yFzbSUex+MLEIPrstjtEwV3+tBQZJz9xAS +TVPj+Nv3LO7GCq54bdwkq1ioWbSL2hEmABkj6kdW/JwmfhGHf/2rirDVMzrTYw07 +dFJfAZC+FEsv +-----END CERTIFICATE----- diff --git a/tests/testflows/ldap/configs/ldap2/certs/dhparam.pem b/tests/testflows/ldap/configs/ldap2/certs/dhparam.pem new file mode 100644 index 00000000000..0a96faffd62 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap2/certs/dhparam.pem @@ -0,0 +1,5 @@ +-----BEGIN DH PARAMETERS----- +MIGHAoGBAJitt2hhnpDViQ5ko2ipBMdjy+bZ6FR/WdZ987R7lQvBkKehPXmxtEyV +AO6ofv5CZSDJokc5bUeBOAtg0EhMTCH82uPdwQvt58jRXcxXBg4JTjkx+oW9LBv2 +FdZsbaX8+SYivmiZ0Jp8T/HBm/4DA9VBS0O5GFRS4C7dHhmSTPfDAgEC +-----END DH PARAMETERS----- diff --git a/tests/testflows/ldap/configs/ldap2/certs/ldap.crt b/tests/testflows/ldap/configs/ldap2/certs/ldap.crt new file mode 100644 index 00000000000..9167cbf861d --- /dev/null +++ b/tests/testflows/ldap/configs/ldap2/certs/ldap.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQDCCAigCFCJ7El0ntrGktZVTYTZd+OwtcJjBMA0GCSqGSIb3DQEBCwUAMFox +CzAJBgNVBAYTAkNBMQswCQYDVQQIDAJPTjEPMA0GA1UEBwwGT3R0YXdhMREwDwYD +VQQKDAhBbHRpbml0eTELMAkGA1UECwwCUUExDTALBgNVBAMMBHJvb3QwHhcNMjAw +NjExMTkxMTQzWhcNMzAwNjA5MTkxMTQzWjBfMQswCQYDVQQGEwJDQTELMAkGA1UE +CAwCT04xDzANBgNVBAcMBk90dGF3YTERMA8GA1UECgwIQWx0aW5pdHkxCzAJBgNV +BAsMAlFBMRIwEAYDVQQDDAlvcGVubGRhcDIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQC0Mbn//U56URavMgXm82FWP6vBdKuRydFX/L0M5XLlnAtk/IXG +/T+4t7nOBJxWmTp/xpsPtSMALE4eFJpEUEqlpVbG5DfBzVWcYOWoMeRAcHWCDkzr +PkB6I0dfF0Mm5hoaDhn+ZXjBWvoh/IlJdAnPg5mlejflJBQ7xtFC9eN6WjldXuRO +vyntGNuMfVLgITHwXuH2yZ98G0mFO6TU/9dRY/Z3D6RTSzKdb17Yk/VnG+ry92u2 +0sgXIBvhuJuC3ksWLArwwFoMl8DVa05D4O2H76goGdCcQ0KzqBV8RPXAh3UcgP2e +Zu90p2EGIhIk+sZTCkPd4dorxjL9nkRR86HdAgMBAAEwDQYJKoZIhvcNAQELBQAD +ggEBAJWiCxJaTksv/BTsh/etxlDY5eHwqStqIuiovEQ8bhGAcKJ3bfWd/YTb8DUS 
+hrLvXrXdOVC+U8PqPFXBpdOqcm5Dc233z52VgUCb+0EKv3lAzgKXRIo32h52skdK +NnRrCHDeDzgfEIXR4MEJ99cLEaxWyXQhremmTYWHYznry9/4NYz40gCDxHn9dJAi +KxFyDNxhtuKs58zp4PrBoo+542JurAoLPtRGOhdXpU2RkQVU/ho38HsAXDStAB5D +vAoSxPuMHKgo17ffrb0oqU3didwaA9fIsz7Mr6RxmI7X03s7hLzNBq9FCqu0U3RR +CX4zWGFNJu/ieSGVWLYKQzbYxp8= +-----END CERTIFICATE----- diff --git a/tests/testflows/ldap/configs/ldap2/certs/ldap.csr b/tests/testflows/ldap/configs/ldap2/certs/ldap.csr new file mode 100644 index 00000000000..bf569f727d6 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap2/certs/ldap.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICpDCCAYwCAQAwXzELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAk9OMQ8wDQYDVQQH +DAZPdHRhd2ExETAPBgNVBAoMCEFsdGluaXR5MQswCQYDVQQLDAJRQTESMBAGA1UE +AwwJb3BlbmxkYXAyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtDG5 +//1OelEWrzIF5vNhVj+rwXSrkcnRV/y9DOVy5ZwLZPyFxv0/uLe5zgScVpk6f8ab +D7UjACxOHhSaRFBKpaVWxuQ3wc1VnGDlqDHkQHB1gg5M6z5AeiNHXxdDJuYaGg4Z +/mV4wVr6IfyJSXQJz4OZpXo35SQUO8bRQvXjelo5XV7kTr8p7RjbjH1S4CEx8F7h +9smffBtJhTuk1P/XUWP2dw+kU0synW9e2JP1Zxvq8vdrttLIFyAb4bibgt5LFiwK +8MBaDJfA1WtOQ+Dth++oKBnQnENCs6gVfET1wId1HID9nmbvdKdhBiISJPrGUwpD +3eHaK8Yy/Z5EUfOh3QIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAEzIjZQOT5R7 +mEJg+RFpCSIoPn3xJ4/VMMyWqA3bTGZKpb4S6GxgsierY/87kPL7jZrMdGYB4Dc3 +2M3VWZGXlYo8vctH1zLE9VW6CzosUpl20lhdgydoCMz3RQqdJyK8aGeFTeLtk7G/ +TRCCUFUE6jaA+VtaCPCnOJSff3jUf76xguEu7dgTZgCKV7dtBqald8gIzF3D+AJJ +7pEN2UrC3UR0xpe2cj2GhndQJ+WsIyft3zpNFzAO13j8ZPibuVP7oDWcW3ixNCWC +213aeRVplJGof8Eo6llDxP+6Fwp1YmOoQmwB1Xm3t4ADn7FLJ14LONLB7q40KviG +RyLyqu3IVOI= +-----END CERTIFICATE REQUEST----- diff --git a/tests/testflows/ldap/configs/ldap2/certs/ldap.key b/tests/testflows/ldap/configs/ldap2/certs/ldap.key new file mode 100644 index 00000000000..5ab3a3f8b59 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap2/certs/ldap.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAtDG5//1OelEWrzIF5vNhVj+rwXSrkcnRV/y9DOVy5ZwLZPyF +xv0/uLe5zgScVpk6f8abD7UjACxOHhSaRFBKpaVWxuQ3wc1VnGDlqDHkQHB1gg5M +6z5AeiNHXxdDJuYaGg4Z/mV4wVr6IfyJSXQJz4OZpXo35SQUO8bRQvXjelo5XV7k +Tr8p7RjbjH1S4CEx8F7h9smffBtJhTuk1P/XUWP2dw+kU0synW9e2JP1Zxvq8vdr +ttLIFyAb4bibgt5LFiwK8MBaDJfA1WtOQ+Dth++oKBnQnENCs6gVfET1wId1HID9 +nmbvdKdhBiISJPrGUwpD3eHaK8Yy/Z5EUfOh3QIDAQABAoIBADugMMIKWcuTxYPX +c6iGZHEbxIPRTWyCcalB0nTQAAMGbabPAJ1l8432DZ+kWu806OybFXhPIfPOtVKy +0pFEWE8TtPE/V0vj3C5Qye2sBLFmBRwyCzXUdZV00wseMXRPs9dnTyalAR5KMnbI +j80kfpKSI2dkV9aU57UYBuq3Xrx/TCGItwL769D4ZZW9BvbpiTZApQQFZ0gwUFFn +btPXGU9Ti8H4mfBuZWL+5CaZdqOo76+CXvMPaUK0F9MJp4yX3XxQLRNH3qz/Tyn7 +h7QOOo0XTqoUmzRw0N9QRVH5LRdSE5yq3aF9aFKjNW59exz+62pufOFadngzkpkn +OKCzgWkCgYEA4mOWWMzdYwMn3GtfG7whqlqy7wOmMkNb81zTDQejHBV98dnj0AHr +deurfKWzHrAh3DXo6tFeqUIgXabhBPS/0dEx/S5sgLFmuUZP05EUYahfWBgzzmM9 +C6Oe5xIMLzxsZCJczolsfkEsoFe4o0vkvuLYoQrQL7InzewcDy8cUxsCgYEAy8Na +YCnanSNDY03Bulcni+5sF+opaHseeki1pv3nlw8TwsWuZF9ApS+yL7ck9jJjxBRR +RC3KGmpoqIr0vTmUYS946ngQWXPE90zfuhJfM+NRv/q0oCjH0qAcxRbTkls5On9v +oxJ8rO7gD6K85eHqasWdbCVzdZrobOXzay37tmcCgYBfyUUmw190cjReZauzH3Gb +E48b5A5gu/Fe0cqWe8G+szU7rDZgnz9SAGnpbm6QMHPTKZgoKngD42+wUFhq8Wdr +zjh5aDgOZ4EQKTjDSmI2Q7g7nNnmnESK9SrZl+BB6C3wXD2qQaj+7nKEUTlVFlpt +jaucz+dwFtASp7Djl8pDOwKBgEtr2c3ycArt/ImLRIP2spqm+7e2YvFbcSKOOz6+ +iLRvTj8v8KcSYtlB2FC1F6dRa4AujQ4RbNduP6LzHDfWUkfOzJDtNBAIPAXVnJJB +LqAEKkRHRghqT9x0i3GgS1vHDF3MwcO4mhFgserXr9ffUWeIEgbvrdcAKbv1Oa6Y +bK1NAoGAGPm8ISmboDJynjBl9wMrkcy23Pwg9kmyocdWUHh0zMLDKriZNKYB6u/U +C+/RTfkohPoHPzkeqWiHp7z3JhMItYUfTkNW6vMCxEGc0NEN6ZyMIjtiDPGN1n6O +E7jmODFmj1AQICQGdV5SHp+yKvKyb0YHKyDwETbs4SZBXxVvjEw= +-----END RSA PRIVATE KEY----- diff --git 
a/tests/testflows/ldap/configs/ldap2/config/export.ldif b/tests/testflows/ldap/configs/ldap2/config/export.ldif new file mode 100644 index 00000000000..6766aaae6f1 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap2/config/export.ldif @@ -0,0 +1,64 @@ +# LDIF Export for dc=company,dc=com +# Server: openldap (openldap) +# Search Scope: sub +# Search Filter: (objectClass=*) +# Total Entries: 7 +# +# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on May 22, 2020 5:51 pm +# Version: 1.2.5 + +# Entry 1: dc=company,dc=com +#dn: dc=company,dc=com +#dc: company +#o: company +#objectclass: top +#objectclass: dcObject +#objectclass: organization + +# Entry 2: cn=admin,dc=company,dc=com +#dn: cn=admin,dc=company,dc=com +#cn: admin +#description: LDAP administrator +#objectclass: simpleSecurityObject +#objectclass: organizationalRole +#userpassword: {SSHA}eUEupkQCTvq9SkrxfWGSe5rX+orrjVbF + +# Entry 3: ou=groups,dc=company,dc=com +dn: ou=groups,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: groups + +# Entry 4: cn=admin,ou=groups,dc=company,dc=com +dn: cn=admin,ou=groups,dc=company,dc=com +cn: admin +gidnumber: 500 +objectclass: posixGroup +objectclass: top + +# Entry 5: cn=users,ou=groups,dc=company,dc=com +dn: cn=users,ou=groups,dc=company,dc=com +cn: users +gidnumber: 501 +objectclass: posixGroup +objectclass: top + +# Entry 6: ou=users,dc=company,dc=com +dn: ou=users,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: users + +# Entry 7: cn=user2,ou=users,dc=company,dc=com +dn: cn=user2,ou=users,dc=company,dc=com +cn: user2 +gidnumber: 501 +givenname: John +homedirectory: /home/users/user2 +objectclass: inetOrgPerson +objectclass: posixAccount +objectclass: top +sn: User +uid: user2 +uidnumber: 1002 +userpassword: user2 diff --git a/tests/testflows/ldap/configs/ldap3/certs/ca.crt b/tests/testflows/ldap/configs/ldap3/certs/ca.crt new file mode 100644 index 00000000000..8c71e3afc91 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap3/certs/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDlTCCAn2gAwIBAgIUJBqw2dHM2DDCZjYSkPOESlvDH6swDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAk9OMQ8wDQYDVQQHDAZPdHRhd2Ex +ETAPBgNVBAoMCEFsdGluaXR5MQswCQYDVQQLDAJRQTENMAsGA1UEAwwEcm9vdDAe +Fw0yMDA2MTExOTAzNDhaFw0zMDA2MDkxOTAzNDhaMFoxCzAJBgNVBAYTAkNBMQsw +CQYDVQQIDAJPTjEPMA0GA1UEBwwGT3R0YXdhMREwDwYDVQQKDAhBbHRpbml0eTEL +MAkGA1UECwwCUUExDTALBgNVBAMMBHJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQC9Irr0zGV+HCI2fZ0ht4hR5It4Sbjz4RwZV8ENRP/+TEz8l9eK +J6ygxhKX7SMYzIs/jS9Gsq4plX1r2ujW1qRf8yLpR4+dGLP+jBRi1drj0XjZXosT +SERjWzgPauWxL9LN8+l26eBAqz6fw5e0W8WRSTgf5iGiCcKOTmaATIUjP0CdfWKK +qpktI4vhe++CXZFJ3usR+8KZ/FwwbCLJM/3J2HnbcXfcaYPYvr1tfqLudKSTbG9H +M3+AVwjctdesc/0sbd51Zsm0ClQptMbuKnDCYauGg61kNkgbgPgRmH9Pzo67DtxF +/WW+PtOzq8xLOifciQ9Piboy9QBSQZGwf4wzAgMBAAGjUzBRMB0GA1UdDgQWBBSi +njya0RDozx3OZTLYFpwqYnlpIDAfBgNVHSMEGDAWgBSinjya0RDozx3OZTLYFpwq +YnlpIDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBAD7VyFg7F +U1C25KFvtauchAOjCW6w7U/b3z1dVZvcQ88/kH1VsLUcfGixlSilUEfPTJsi7OA0 +R5BQdh2GGcjUJv4iqEFGU05KvMVmRRKn08P62+ZhJxKMxG26VzcliRZzCMkI6d0W +lFwI6nM45yeqdHVh5k4xbuJzqpbD9BtXXLI+/Ra9Fx8S9ETA3GdidpZLU5P1VLxq +UuedfqyAVWZXpr6TAURGxouRmRzul9yFzbSUex+MLEIPrstjtEwV3+tBQZJz9xAS +TVPj+Nv3LO7GCq54bdwkq1ioWbSL2hEmABkj6kdW/JwmfhGHf/2rirDVMzrTYw07 +dFJfAZC+FEsv +-----END CERTIFICATE----- diff --git a/tests/testflows/ldap/configs/ldap3/certs/dhparam.pem b/tests/testflows/ldap/configs/ldap3/certs/dhparam.pem new file mode 100644 index 00000000000..0a96faffd62 
--- /dev/null +++ b/tests/testflows/ldap/configs/ldap3/certs/dhparam.pem @@ -0,0 +1,5 @@ +-----BEGIN DH PARAMETERS----- +MIGHAoGBAJitt2hhnpDViQ5ko2ipBMdjy+bZ6FR/WdZ987R7lQvBkKehPXmxtEyV +AO6ofv5CZSDJokc5bUeBOAtg0EhMTCH82uPdwQvt58jRXcxXBg4JTjkx+oW9LBv2 +FdZsbaX8+SYivmiZ0Jp8T/HBm/4DA9VBS0O5GFRS4C7dHhmSTPfDAgEC +-----END DH PARAMETERS----- diff --git a/tests/testflows/ldap/configs/ldap3/certs/ldap.crt b/tests/testflows/ldap/configs/ldap3/certs/ldap.crt new file mode 100644 index 00000000000..9167cbf861d --- /dev/null +++ b/tests/testflows/ldap/configs/ldap3/certs/ldap.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQDCCAigCFCJ7El0ntrGktZVTYTZd+OwtcJjBMA0GCSqGSIb3DQEBCwUAMFox +CzAJBgNVBAYTAkNBMQswCQYDVQQIDAJPTjEPMA0GA1UEBwwGT3R0YXdhMREwDwYD +VQQKDAhBbHRpbml0eTELMAkGA1UECwwCUUExDTALBgNVBAMMBHJvb3QwHhcNMjAw +NjExMTkxMTQzWhcNMzAwNjA5MTkxMTQzWjBfMQswCQYDVQQGEwJDQTELMAkGA1UE +CAwCT04xDzANBgNVBAcMBk90dGF3YTERMA8GA1UECgwIQWx0aW5pdHkxCzAJBgNV +BAsMAlFBMRIwEAYDVQQDDAlvcGVubGRhcDIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQC0Mbn//U56URavMgXm82FWP6vBdKuRydFX/L0M5XLlnAtk/IXG +/T+4t7nOBJxWmTp/xpsPtSMALE4eFJpEUEqlpVbG5DfBzVWcYOWoMeRAcHWCDkzr +PkB6I0dfF0Mm5hoaDhn+ZXjBWvoh/IlJdAnPg5mlejflJBQ7xtFC9eN6WjldXuRO +vyntGNuMfVLgITHwXuH2yZ98G0mFO6TU/9dRY/Z3D6RTSzKdb17Yk/VnG+ry92u2 +0sgXIBvhuJuC3ksWLArwwFoMl8DVa05D4O2H76goGdCcQ0KzqBV8RPXAh3UcgP2e +Zu90p2EGIhIk+sZTCkPd4dorxjL9nkRR86HdAgMBAAEwDQYJKoZIhvcNAQELBQAD +ggEBAJWiCxJaTksv/BTsh/etxlDY5eHwqStqIuiovEQ8bhGAcKJ3bfWd/YTb8DUS +hrLvXrXdOVC+U8PqPFXBpdOqcm5Dc233z52VgUCb+0EKv3lAzgKXRIo32h52skdK +NnRrCHDeDzgfEIXR4MEJ99cLEaxWyXQhremmTYWHYznry9/4NYz40gCDxHn9dJAi +KxFyDNxhtuKs58zp4PrBoo+542JurAoLPtRGOhdXpU2RkQVU/ho38HsAXDStAB5D +vAoSxPuMHKgo17ffrb0oqU3didwaA9fIsz7Mr6RxmI7X03s7hLzNBq9FCqu0U3RR +CX4zWGFNJu/ieSGVWLYKQzbYxp8= +-----END CERTIFICATE----- diff --git a/tests/testflows/ldap/configs/ldap3/certs/ldap.csr b/tests/testflows/ldap/configs/ldap3/certs/ldap.csr new file mode 100644 index 00000000000..bf569f727d6 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap3/certs/ldap.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICpDCCAYwCAQAwXzELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAk9OMQ8wDQYDVQQH +DAZPdHRhd2ExETAPBgNVBAoMCEFsdGluaXR5MQswCQYDVQQLDAJRQTESMBAGA1UE +AwwJb3BlbmxkYXAyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtDG5 +//1OelEWrzIF5vNhVj+rwXSrkcnRV/y9DOVy5ZwLZPyFxv0/uLe5zgScVpk6f8ab +D7UjACxOHhSaRFBKpaVWxuQ3wc1VnGDlqDHkQHB1gg5M6z5AeiNHXxdDJuYaGg4Z +/mV4wVr6IfyJSXQJz4OZpXo35SQUO8bRQvXjelo5XV7kTr8p7RjbjH1S4CEx8F7h +9smffBtJhTuk1P/XUWP2dw+kU0synW9e2JP1Zxvq8vdrttLIFyAb4bibgt5LFiwK +8MBaDJfA1WtOQ+Dth++oKBnQnENCs6gVfET1wId1HID9nmbvdKdhBiISJPrGUwpD +3eHaK8Yy/Z5EUfOh3QIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAEzIjZQOT5R7 +mEJg+RFpCSIoPn3xJ4/VMMyWqA3bTGZKpb4S6GxgsierY/87kPL7jZrMdGYB4Dc3 +2M3VWZGXlYo8vctH1zLE9VW6CzosUpl20lhdgydoCMz3RQqdJyK8aGeFTeLtk7G/ +TRCCUFUE6jaA+VtaCPCnOJSff3jUf76xguEu7dgTZgCKV7dtBqald8gIzF3D+AJJ +7pEN2UrC3UR0xpe2cj2GhndQJ+WsIyft3zpNFzAO13j8ZPibuVP7oDWcW3ixNCWC +213aeRVplJGof8Eo6llDxP+6Fwp1YmOoQmwB1Xm3t4ADn7FLJ14LONLB7q40KviG +RyLyqu3IVOI= +-----END CERTIFICATE REQUEST----- diff --git a/tests/testflows/ldap/configs/ldap3/certs/ldap.key b/tests/testflows/ldap/configs/ldap3/certs/ldap.key new file mode 100644 index 00000000000..5ab3a3f8b59 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap3/certs/ldap.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAtDG5//1OelEWrzIF5vNhVj+rwXSrkcnRV/y9DOVy5ZwLZPyF +xv0/uLe5zgScVpk6f8abD7UjACxOHhSaRFBKpaVWxuQ3wc1VnGDlqDHkQHB1gg5M +6z5AeiNHXxdDJuYaGg4Z/mV4wVr6IfyJSXQJz4OZpXo35SQUO8bRQvXjelo5XV7k 
+Tr8p7RjbjH1S4CEx8F7h9smffBtJhTuk1P/XUWP2dw+kU0synW9e2JP1Zxvq8vdr +ttLIFyAb4bibgt5LFiwK8MBaDJfA1WtOQ+Dth++oKBnQnENCs6gVfET1wId1HID9 +nmbvdKdhBiISJPrGUwpD3eHaK8Yy/Z5EUfOh3QIDAQABAoIBADugMMIKWcuTxYPX +c6iGZHEbxIPRTWyCcalB0nTQAAMGbabPAJ1l8432DZ+kWu806OybFXhPIfPOtVKy +0pFEWE8TtPE/V0vj3C5Qye2sBLFmBRwyCzXUdZV00wseMXRPs9dnTyalAR5KMnbI +j80kfpKSI2dkV9aU57UYBuq3Xrx/TCGItwL769D4ZZW9BvbpiTZApQQFZ0gwUFFn +btPXGU9Ti8H4mfBuZWL+5CaZdqOo76+CXvMPaUK0F9MJp4yX3XxQLRNH3qz/Tyn7 +h7QOOo0XTqoUmzRw0N9QRVH5LRdSE5yq3aF9aFKjNW59exz+62pufOFadngzkpkn +OKCzgWkCgYEA4mOWWMzdYwMn3GtfG7whqlqy7wOmMkNb81zTDQejHBV98dnj0AHr +deurfKWzHrAh3DXo6tFeqUIgXabhBPS/0dEx/S5sgLFmuUZP05EUYahfWBgzzmM9 +C6Oe5xIMLzxsZCJczolsfkEsoFe4o0vkvuLYoQrQL7InzewcDy8cUxsCgYEAy8Na +YCnanSNDY03Bulcni+5sF+opaHseeki1pv3nlw8TwsWuZF9ApS+yL7ck9jJjxBRR +RC3KGmpoqIr0vTmUYS946ngQWXPE90zfuhJfM+NRv/q0oCjH0qAcxRbTkls5On9v +oxJ8rO7gD6K85eHqasWdbCVzdZrobOXzay37tmcCgYBfyUUmw190cjReZauzH3Gb +E48b5A5gu/Fe0cqWe8G+szU7rDZgnz9SAGnpbm6QMHPTKZgoKngD42+wUFhq8Wdr +zjh5aDgOZ4EQKTjDSmI2Q7g7nNnmnESK9SrZl+BB6C3wXD2qQaj+7nKEUTlVFlpt +jaucz+dwFtASp7Djl8pDOwKBgEtr2c3ycArt/ImLRIP2spqm+7e2YvFbcSKOOz6+ +iLRvTj8v8KcSYtlB2FC1F6dRa4AujQ4RbNduP6LzHDfWUkfOzJDtNBAIPAXVnJJB +LqAEKkRHRghqT9x0i3GgS1vHDF3MwcO4mhFgserXr9ffUWeIEgbvrdcAKbv1Oa6Y +bK1NAoGAGPm8ISmboDJynjBl9wMrkcy23Pwg9kmyocdWUHh0zMLDKriZNKYB6u/U +C+/RTfkohPoHPzkeqWiHp7z3JhMItYUfTkNW6vMCxEGc0NEN6ZyMIjtiDPGN1n6O +E7jmODFmj1AQICQGdV5SHp+yKvKyb0YHKyDwETbs4SZBXxVvjEw= +-----END RSA PRIVATE KEY----- diff --git a/tests/testflows/ldap/configs/ldap3/config/export.ldif b/tests/testflows/ldap/configs/ldap3/config/export.ldif new file mode 100644 index 00000000000..6ac9a995efd --- /dev/null +++ b/tests/testflows/ldap/configs/ldap3/config/export.ldif @@ -0,0 +1,64 @@ +# LDIF Export for dc=company,dc=com +# Server: openldap (openldap) +# Search Scope: sub +# Search Filter: (objectClass=*) +# Total Entries: 7 +# +# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on May 22, 2020 5:51 pm +# Version: 1.2.5 + +# Entry 1: dc=company,dc=com +#dn: dc=company,dc=com +#dc: company +#o: company +#objectclass: top +#objectclass: dcObject +#objectclass: organization + +# Entry 2: cn=admin,dc=company,dc=com +#dn: cn=admin,dc=company,dc=com +#cn: admin +#description: LDAP administrator +#objectclass: simpleSecurityObject +#objectclass: organizationalRole +#userpassword: {SSHA}eUEupkQCTvq9SkrxfWGSe5rX+orrjVbF + +# Entry 3: ou=groups,dc=company,dc=com +dn: ou=groups,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: groups + +# Entry 4: cn=admin,ou=groups,dc=company,dc=com +dn: cn=admin,ou=groups,dc=company,dc=com +cn: admin +gidnumber: 500 +objectclass: posixGroup +objectclass: top + +# Entry 5: cn=users,ou=groups,dc=company,dc=com +dn: cn=users,ou=groups,dc=company,dc=com +cn: users +gidnumber: 501 +objectclass: posixGroup +objectclass: top + +# Entry 6: ou=users,dc=company,dc=com +dn: ou=users,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: users + +# Entry 7: cn=user3,ou=users,dc=company,dc=com +dn: cn=user3,ou=users,dc=company,dc=com +cn: user3 +gidnumber: 501 +givenname: John +homedirectory: /home/users/user3 +objectclass: inetOrgPerson +objectclass: posixAccount +objectclass: top +sn: User +uid: user3 +uidnumber: 1003 +userpassword: user3 diff --git a/tests/testflows/ldap/configs/ldap4/config/export.ldif b/tests/testflows/ldap/configs/ldap4/config/export.ldif new file mode 100644 index 00000000000..36afdb4e350 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap4/config/export.ldif @@ -0,0 +1,64 
@@ +# LDIF Export for dc=company,dc=com +# Server: openldap (openldap) +# Search Scope: sub +# Search Filter: (objectClass=*) +# Total Entries: 7 +# +# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on May 22, 2020 5:51 pm +# Version: 1.2.5 + +# Entry 1: dc=company,dc=com +#dn: dc=company,dc=com +#dc: company +#o: company +#objectclass: top +#objectclass: dcObject +#objectclass: organization + +# Entry 2: cn=admin,dc=company,dc=com +#dn: cn=admin,dc=company,dc=com +#cn: admin +#description: LDAP administrator +#objectclass: simpleSecurityObject +#objectclass: organizationalRole +#userpassword: {SSHA}eUEupkQCTvq9SkrxfWGSe5rX+orrjVbF + +# Entry 3: ou=groups,dc=company,dc=com +dn: ou=groups,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: groups + +# Entry 4: cn=admin,ou=groups,dc=company,dc=com +dn: cn=admin,ou=groups,dc=company,dc=com +cn: admin +gidnumber: 500 +objectclass: posixGroup +objectclass: top + +# Entry 5: cn=users,ou=groups,dc=company,dc=com +dn: cn=users,ou=groups,dc=company,dc=com +cn: users +gidnumber: 501 +objectclass: posixGroup +objectclass: top + +# Entry 6: ou=users,dc=company,dc=com +dn: ou=users,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: users + +# Entry 7: cn=user4,ou=users,dc=company,dc=com +dn: cn=user4,ou=users,dc=company,dc=com +cn: user4 +gidnumber: 501 +givenname: John +homedirectory: /home/users/user4 +objectclass: inetOrgPerson +objectclass: posixAccount +objectclass: top +sn: User +uid: user4 +uidnumber: 1004 +userpassword: user4 diff --git a/tests/testflows/ldap/configs/ldap5/config/export.ldif b/tests/testflows/ldap/configs/ldap5/config/export.ldif new file mode 100644 index 00000000000..bc3d2ff75fc --- /dev/null +++ b/tests/testflows/ldap/configs/ldap5/config/export.ldif @@ -0,0 +1,64 @@ +# LDIF Export for dc=company,dc=com +# Server: openldap (openldap) +# Search Scope: sub +# Search Filter: (objectClass=*) +# Total Entries: 7 +# +# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on May 22, 2020 5:51 pm +# Version: 1.2.5 + +# Entry 1: dc=company,dc=com +#dn: dc=company,dc=com +#dc: company +#o: company +#objectclass: top +#objectclass: dcObject +#objectclass: organization + +# Entry 2: cn=admin,dc=company,dc=com +#dn: cn=admin,dc=company,dc=com +#cn: admin +#description: LDAP administrator +#objectclass: simpleSecurityObject +#objectclass: organizationalRole +#userpassword: {SSHA}eUEupkQCTvq9SkrxfWGSe5rX+orrjVbF + +# Entry 3: ou=groups,dc=company,dc=com +dn: ou=groups,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: groups + +# Entry 4: cn=admin,ou=groups,dc=company,dc=com +dn: cn=admin,ou=groups,dc=company,dc=com +cn: admin +gidnumber: 500 +objectclass: posixGroup +objectclass: top + +# Entry 5: cn=users,ou=groups,dc=company,dc=com +dn: cn=users,ou=groups,dc=company,dc=com +cn: users +gidnumber: 501 +objectclass: posixGroup +objectclass: top + +# Entry 6: ou=users,dc=company,dc=com +dn: ou=users,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: users + +# Entry 7: cn=user5,ou=users,dc=company,dc=com +dn: cn=user5,ou=users,dc=company,dc=com +cn: user5 +gidnumber: 501 +givenname: John +homedirectory: /home/users/user5 +objectclass: inetOrgPerson +objectclass: posixAccount +objectclass: top +sn: User +uid: user5 +uidnumber: 1005 +userpassword: user5 diff --git a/tests/testflows/ldap/configs/ldap5/ldap2/certs/ca.crt b/tests/testflows/ldap/configs/ldap5/ldap2/certs/ca.crt new file mode 100644 index 00000000000..8c71e3afc91 
--- /dev/null +++ b/tests/testflows/ldap/configs/ldap5/ldap2/certs/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDlTCCAn2gAwIBAgIUJBqw2dHM2DDCZjYSkPOESlvDH6swDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAk9OMQ8wDQYDVQQHDAZPdHRhd2Ex +ETAPBgNVBAoMCEFsdGluaXR5MQswCQYDVQQLDAJRQTENMAsGA1UEAwwEcm9vdDAe +Fw0yMDA2MTExOTAzNDhaFw0zMDA2MDkxOTAzNDhaMFoxCzAJBgNVBAYTAkNBMQsw +CQYDVQQIDAJPTjEPMA0GA1UEBwwGT3R0YXdhMREwDwYDVQQKDAhBbHRpbml0eTEL +MAkGA1UECwwCUUExDTALBgNVBAMMBHJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQC9Irr0zGV+HCI2fZ0ht4hR5It4Sbjz4RwZV8ENRP/+TEz8l9eK +J6ygxhKX7SMYzIs/jS9Gsq4plX1r2ujW1qRf8yLpR4+dGLP+jBRi1drj0XjZXosT +SERjWzgPauWxL9LN8+l26eBAqz6fw5e0W8WRSTgf5iGiCcKOTmaATIUjP0CdfWKK +qpktI4vhe++CXZFJ3usR+8KZ/FwwbCLJM/3J2HnbcXfcaYPYvr1tfqLudKSTbG9H +M3+AVwjctdesc/0sbd51Zsm0ClQptMbuKnDCYauGg61kNkgbgPgRmH9Pzo67DtxF +/WW+PtOzq8xLOifciQ9Piboy9QBSQZGwf4wzAgMBAAGjUzBRMB0GA1UdDgQWBBSi +njya0RDozx3OZTLYFpwqYnlpIDAfBgNVHSMEGDAWgBSinjya0RDozx3OZTLYFpwq +YnlpIDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBAD7VyFg7F +U1C25KFvtauchAOjCW6w7U/b3z1dVZvcQ88/kH1VsLUcfGixlSilUEfPTJsi7OA0 +R5BQdh2GGcjUJv4iqEFGU05KvMVmRRKn08P62+ZhJxKMxG26VzcliRZzCMkI6d0W +lFwI6nM45yeqdHVh5k4xbuJzqpbD9BtXXLI+/Ra9Fx8S9ETA3GdidpZLU5P1VLxq +UuedfqyAVWZXpr6TAURGxouRmRzul9yFzbSUex+MLEIPrstjtEwV3+tBQZJz9xAS +TVPj+Nv3LO7GCq54bdwkq1ioWbSL2hEmABkj6kdW/JwmfhGHf/2rirDVMzrTYw07 +dFJfAZC+FEsv +-----END CERTIFICATE----- diff --git a/tests/testflows/ldap/configs/ldap5/ldap2/certs/dhparam.pem b/tests/testflows/ldap/configs/ldap5/ldap2/certs/dhparam.pem new file mode 100644 index 00000000000..0a96faffd62 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap5/ldap2/certs/dhparam.pem @@ -0,0 +1,5 @@ +-----BEGIN DH PARAMETERS----- +MIGHAoGBAJitt2hhnpDViQ5ko2ipBMdjy+bZ6FR/WdZ987R7lQvBkKehPXmxtEyV +AO6ofv5CZSDJokc5bUeBOAtg0EhMTCH82uPdwQvt58jRXcxXBg4JTjkx+oW9LBv2 +FdZsbaX8+SYivmiZ0Jp8T/HBm/4DA9VBS0O5GFRS4C7dHhmSTPfDAgEC +-----END DH PARAMETERS----- diff --git a/tests/testflows/ldap/configs/ldap5/ldap2/certs/ldap.crt b/tests/testflows/ldap/configs/ldap5/ldap2/certs/ldap.crt new file mode 100644 index 00000000000..9167cbf861d --- /dev/null +++ b/tests/testflows/ldap/configs/ldap5/ldap2/certs/ldap.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQDCCAigCFCJ7El0ntrGktZVTYTZd+OwtcJjBMA0GCSqGSIb3DQEBCwUAMFox +CzAJBgNVBAYTAkNBMQswCQYDVQQIDAJPTjEPMA0GA1UEBwwGT3R0YXdhMREwDwYD +VQQKDAhBbHRpbml0eTELMAkGA1UECwwCUUExDTALBgNVBAMMBHJvb3QwHhcNMjAw +NjExMTkxMTQzWhcNMzAwNjA5MTkxMTQzWjBfMQswCQYDVQQGEwJDQTELMAkGA1UE +CAwCT04xDzANBgNVBAcMBk90dGF3YTERMA8GA1UECgwIQWx0aW5pdHkxCzAJBgNV +BAsMAlFBMRIwEAYDVQQDDAlvcGVubGRhcDIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQC0Mbn//U56URavMgXm82FWP6vBdKuRydFX/L0M5XLlnAtk/IXG +/T+4t7nOBJxWmTp/xpsPtSMALE4eFJpEUEqlpVbG5DfBzVWcYOWoMeRAcHWCDkzr +PkB6I0dfF0Mm5hoaDhn+ZXjBWvoh/IlJdAnPg5mlejflJBQ7xtFC9eN6WjldXuRO +vyntGNuMfVLgITHwXuH2yZ98G0mFO6TU/9dRY/Z3D6RTSzKdb17Yk/VnG+ry92u2 +0sgXIBvhuJuC3ksWLArwwFoMl8DVa05D4O2H76goGdCcQ0KzqBV8RPXAh3UcgP2e +Zu90p2EGIhIk+sZTCkPd4dorxjL9nkRR86HdAgMBAAEwDQYJKoZIhvcNAQELBQAD +ggEBAJWiCxJaTksv/BTsh/etxlDY5eHwqStqIuiovEQ8bhGAcKJ3bfWd/YTb8DUS +hrLvXrXdOVC+U8PqPFXBpdOqcm5Dc233z52VgUCb+0EKv3lAzgKXRIo32h52skdK +NnRrCHDeDzgfEIXR4MEJ99cLEaxWyXQhremmTYWHYznry9/4NYz40gCDxHn9dJAi +KxFyDNxhtuKs58zp4PrBoo+542JurAoLPtRGOhdXpU2RkQVU/ho38HsAXDStAB5D +vAoSxPuMHKgo17ffrb0oqU3didwaA9fIsz7Mr6RxmI7X03s7hLzNBq9FCqu0U3RR +CX4zWGFNJu/ieSGVWLYKQzbYxp8= +-----END CERTIFICATE----- diff --git a/tests/testflows/ldap/configs/ldap5/ldap2/certs/ldap.csr b/tests/testflows/ldap/configs/ldap5/ldap2/certs/ldap.csr 
new file mode 100644 index 00000000000..bf569f727d6 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap5/ldap2/certs/ldap.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICpDCCAYwCAQAwXzELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAk9OMQ8wDQYDVQQH +DAZPdHRhd2ExETAPBgNVBAoMCEFsdGluaXR5MQswCQYDVQQLDAJRQTESMBAGA1UE +AwwJb3BlbmxkYXAyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtDG5 +//1OelEWrzIF5vNhVj+rwXSrkcnRV/y9DOVy5ZwLZPyFxv0/uLe5zgScVpk6f8ab +D7UjACxOHhSaRFBKpaVWxuQ3wc1VnGDlqDHkQHB1gg5M6z5AeiNHXxdDJuYaGg4Z +/mV4wVr6IfyJSXQJz4OZpXo35SQUO8bRQvXjelo5XV7kTr8p7RjbjH1S4CEx8F7h +9smffBtJhTuk1P/XUWP2dw+kU0synW9e2JP1Zxvq8vdrttLIFyAb4bibgt5LFiwK +8MBaDJfA1WtOQ+Dth++oKBnQnENCs6gVfET1wId1HID9nmbvdKdhBiISJPrGUwpD +3eHaK8Yy/Z5EUfOh3QIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAEzIjZQOT5R7 +mEJg+RFpCSIoPn3xJ4/VMMyWqA3bTGZKpb4S6GxgsierY/87kPL7jZrMdGYB4Dc3 +2M3VWZGXlYo8vctH1zLE9VW6CzosUpl20lhdgydoCMz3RQqdJyK8aGeFTeLtk7G/ +TRCCUFUE6jaA+VtaCPCnOJSff3jUf76xguEu7dgTZgCKV7dtBqald8gIzF3D+AJJ +7pEN2UrC3UR0xpe2cj2GhndQJ+WsIyft3zpNFzAO13j8ZPibuVP7oDWcW3ixNCWC +213aeRVplJGof8Eo6llDxP+6Fwp1YmOoQmwB1Xm3t4ADn7FLJ14LONLB7q40KviG +RyLyqu3IVOI= +-----END CERTIFICATE REQUEST----- diff --git a/tests/testflows/ldap/configs/ldap5/ldap2/certs/ldap.key b/tests/testflows/ldap/configs/ldap5/ldap2/certs/ldap.key new file mode 100644 index 00000000000..5ab3a3f8b59 --- /dev/null +++ b/tests/testflows/ldap/configs/ldap5/ldap2/certs/ldap.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAtDG5//1OelEWrzIF5vNhVj+rwXSrkcnRV/y9DOVy5ZwLZPyF +xv0/uLe5zgScVpk6f8abD7UjACxOHhSaRFBKpaVWxuQ3wc1VnGDlqDHkQHB1gg5M +6z5AeiNHXxdDJuYaGg4Z/mV4wVr6IfyJSXQJz4OZpXo35SQUO8bRQvXjelo5XV7k +Tr8p7RjbjH1S4CEx8F7h9smffBtJhTuk1P/XUWP2dw+kU0synW9e2JP1Zxvq8vdr +ttLIFyAb4bibgt5LFiwK8MBaDJfA1WtOQ+Dth++oKBnQnENCs6gVfET1wId1HID9 +nmbvdKdhBiISJPrGUwpD3eHaK8Yy/Z5EUfOh3QIDAQABAoIBADugMMIKWcuTxYPX +c6iGZHEbxIPRTWyCcalB0nTQAAMGbabPAJ1l8432DZ+kWu806OybFXhPIfPOtVKy +0pFEWE8TtPE/V0vj3C5Qye2sBLFmBRwyCzXUdZV00wseMXRPs9dnTyalAR5KMnbI +j80kfpKSI2dkV9aU57UYBuq3Xrx/TCGItwL769D4ZZW9BvbpiTZApQQFZ0gwUFFn +btPXGU9Ti8H4mfBuZWL+5CaZdqOo76+CXvMPaUK0F9MJp4yX3XxQLRNH3qz/Tyn7 +h7QOOo0XTqoUmzRw0N9QRVH5LRdSE5yq3aF9aFKjNW59exz+62pufOFadngzkpkn +OKCzgWkCgYEA4mOWWMzdYwMn3GtfG7whqlqy7wOmMkNb81zTDQejHBV98dnj0AHr +deurfKWzHrAh3DXo6tFeqUIgXabhBPS/0dEx/S5sgLFmuUZP05EUYahfWBgzzmM9 +C6Oe5xIMLzxsZCJczolsfkEsoFe4o0vkvuLYoQrQL7InzewcDy8cUxsCgYEAy8Na +YCnanSNDY03Bulcni+5sF+opaHseeki1pv3nlw8TwsWuZF9ApS+yL7ck9jJjxBRR +RC3KGmpoqIr0vTmUYS946ngQWXPE90zfuhJfM+NRv/q0oCjH0qAcxRbTkls5On9v +oxJ8rO7gD6K85eHqasWdbCVzdZrobOXzay37tmcCgYBfyUUmw190cjReZauzH3Gb +E48b5A5gu/Fe0cqWe8G+szU7rDZgnz9SAGnpbm6QMHPTKZgoKngD42+wUFhq8Wdr +zjh5aDgOZ4EQKTjDSmI2Q7g7nNnmnESK9SrZl+BB6C3wXD2qQaj+7nKEUTlVFlpt +jaucz+dwFtASp7Djl8pDOwKBgEtr2c3ycArt/ImLRIP2spqm+7e2YvFbcSKOOz6+ +iLRvTj8v8KcSYtlB2FC1F6dRa4AujQ4RbNduP6LzHDfWUkfOzJDtNBAIPAXVnJJB +LqAEKkRHRghqT9x0i3GgS1vHDF3MwcO4mhFgserXr9ffUWeIEgbvrdcAKbv1Oa6Y +bK1NAoGAGPm8ISmboDJynjBl9wMrkcy23Pwg9kmyocdWUHh0zMLDKriZNKYB6u/U +C+/RTfkohPoHPzkeqWiHp7z3JhMItYUfTkNW6vMCxEGc0NEN6ZyMIjtiDPGN1n6O +E7jmODFmj1AQICQGdV5SHp+yKvKyb0YHKyDwETbs4SZBXxVvjEw= +-----END RSA PRIVATE KEY----- diff --git a/tests/testflows/ldap/configs/ldap5/ldap2/config/export.ldif b/tests/testflows/ldap/configs/ldap5/ldap2/config/export.ldif new file mode 100644 index 00000000000..c6470176a5e --- /dev/null +++ b/tests/testflows/ldap/configs/ldap5/ldap2/config/export.ldif @@ -0,0 +1,64 @@ +# LDIF Export for dc=company,dc=com +# Server: openldap (openldap) +# Search Scope: sub +# Search Filter: (objectClass=*) +# Total Entries: 
7 +# +# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on May 22, 2020 5:51 pm +# Version: 1.2.5 + +# Entry 1: dc=company,dc=com +#dn: dc=company,dc=com +#dc: company +#o: company +#objectclass: top +#objectclass: dcObject +#objectclass: organization + +# Entry 2: cn=admin,dc=company,dc=com +#dn: cn=admin,dc=company,dc=com +#cn: admin +#description: LDAP administrator +#objectclass: simpleSecurityObject +#objectclass: organizationalRole +#userpassword: {SSHA}eUEupkQCTvq9SkrxfWGSe5rX+orrjVbF + +# Entry 3: ou=groups,dc=company,dc=com +dn: ou=groups,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: groups + +# Entry 4: cn=admin,ou=groups,dc=company,dc=com +dn: cn=admin,ou=groups,dc=company,dc=com +cn: admin +gidnumber: 500 +objectclass: posixGroup +objectclass: top + +# Entry 5: cn=users,ou=groups,dc=company,dc=com +dn: cn=users,ou=groups,dc=company,dc=com +cn: users +gidnumber: 501 +objectclass: posixGroup +objectclass: top + +# Entry 6: ou=users,dc=company,dc=com +dn: ou=users,dc=company,dc=com +objectclass: organizationalUnit +objectclass: top +ou: users + +# Entry 7: cn=user1,ou=users,dc=company,dc=com +dn: cn=user1,ou=users,dc=company,dc=com +cn: user1 +gidnumber: 501 +givenname: John1 +homedirectory: /home/users/user1 +objectclass: inetOrgPerson +objectclass: posixAccount +objectclass: top +sn: User1 +uid: user1 +uidnumber: 1001 +userpassword: user1 diff --git a/tests/testflows/ldap/docker-compose/clickhouse-service.yml b/tests/testflows/ldap/docker-compose/clickhouse-service.yml new file mode 100644 index 00000000000..9787b37abbb --- /dev/null +++ b/tests/testflows/ldap/docker-compose/clickhouse-service.yml @@ -0,0 +1,28 @@ +version: '2.3' + +services: + clickhouse: + image: yandex/clickhouse-integration-test + expose: + - "9000" + - "9009" + - "8123" + volumes: + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml" + - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse" + - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge" + entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log" + healthcheck: + test: clickhouse client --query='select 1' + interval: 3s + timeout: 2s + retries: 40 + start_period: 2s + cap_add: + - SYS_PTRACE + security_opt: + - label:disable diff --git a/tests/testflows/ldap/docker-compose/docker-compose.yml b/tests/testflows/ldap/docker-compose/docker-compose.yml new file mode 100644 index 00000000000..c8ff683df58 --- /dev/null +++ b/tests/testflows/ldap/docker-compose/docker-compose.yml @@ -0,0 +1,162 @@ +version: '2.3' + +services: + openldap1: + # plain text + extends: + file: openldap-service.yml + service: openldap + volumes: + - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom" + + openldap2: + # TLS - never + extends: + file: openldap-service.yml + service: openldap + environment: + LDAP_TLS: "true" + LDAP_TLS_CRT_FILENAME: "ldap.crt" + 
LDAP_TLS_KEY_FILENAME: "ldap.key" + LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem" + LDAP_TLS_CA_CRT_FILENAME: "ca.crt" + LDAP_TLS_ENFORCE: "false" + LDAP_TLS_VERIFY_CLIENT: "never" + volumes: + - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom" + - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/" + + openldap3: + # plain text - custom port + extends: + file: openldap-service.yml + service: openldap + expose: + - "3089" + environment: + LDAP_PORT: "3089" + volumes: + - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom" + + openldap4: + # TLS - never custom port + extends: + file: openldap-service.yml + service: openldap + expose: + - "3089" + - "6036" + environment: + LDAP_PORT: "3089" + LDAPS_PORT: "6036" + LDAP_TLS: "true" + LDAP_TLS_CRT_FILENAME: "ldap.crt" + LDAP_TLS_KEY_FILENAME: "ldap.key" + LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem" + LDAP_TLS_CA_CRT_FILENAME: "ca.crt" + LDAP_TLS_ENFORCE: "false" + LDAP_TLS_VERIFY_CLIENT: "never" + LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC" + volumes: + - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom" + - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/" + + openldap5: + # TLS - try + extends: + file: openldap-service.yml + service: openldap + environment: + LDAP_TLS: "true" + LDAP_TLS_CRT_FILENAME: "ldap.crt" + LDAP_TLS_KEY_FILENAME: "ldap.key" + LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem" + LDAP_TLS_CA_CRT_FILENAME: "ca.crt" + LDAP_TLS_ENFORCE: "false" + LDAP_TLS_VERIFY_CLIENT: "try" + volumes: + - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom" + - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/" + + phpldapadmin: + extends: + file: openldap-service.yml + service: phpldapadmin + environment: + PHPLDAPADMIN_LDAP_HOSTS: "openldap1" + depends_on: + openldap1: + condition: service_healthy + + zookeeper: + extends: + file: zookeeper-service.yml + service: zookeeper + + clickhouse1: + extends: + file: clickhouse-service.yml + service: clickhouse + hostname: clickhouse1 + volumes: + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/" + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d" + depends_on: + zookeeper: + condition: service_healthy + + clickhouse2: + extends: + file: clickhouse-service.yml + service: clickhouse + hostname: clickhouse2 + volumes: + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/" + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d:/etc/clickhouse-server/config.d" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/users.d:/etc/clickhouse-server/users.d" + depends_on: + zookeeper: + condition: service_healthy + + clickhouse3: + extends: + file: clickhouse-service.yml + service: clickhouse + hostname: clickhouse3 + volumes: + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/" + - 
"${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d:/etc/clickhouse-server/config.d" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/users.d:/etc/clickhouse-server/users.d" + depends_on: + zookeeper: + condition: service_healthy + + # dummy service which does nothing, but allows to postpone + # 'docker-compose up -d' till all dependecies will go healthy + all_services_ready: + image: hello-world + depends_on: + clickhouse1: + condition: service_healthy + clickhouse2: + condition: service_healthy + clickhouse3: + condition: service_healthy + zookeeper: + condition: service_healthy + openldap1: + condition: service_healthy + openldap2: + condition: service_healthy + openldap3: + condition: service_healthy + openldap4: + condition: service_healthy + openldap5: + condition: service_healthy + phpldapadmin: + condition: service_healthy diff --git a/tests/testflows/ldap/docker-compose/openldap-service.yml b/tests/testflows/ldap/docker-compose/openldap-service.yml new file mode 100644 index 00000000000..56690285756 --- /dev/null +++ b/tests/testflows/ldap/docker-compose/openldap-service.yml @@ -0,0 +1,40 @@ +version: '2.3' + +services: + openldap: + image: osixia/openldap:1.4.0 + command: "--copy-service --loglevel debug" + environment: + LDAP_ORGANIZATION: "company" + LDAP_DOMAIN: "company.com" + LDAP_ADMIN_PASSWORD: "admin" + LDAP_TLS: "false" + expose: + - "389" + - "636" + healthcheck: + test: echo 1 + interval: 3s + timeout: 2s + retries: 5 + start_period: 2s + security_opt: + - label:disable + + + phpldapadmin: + image: osixia/phpldapadmin:0.9.0 + container_name: phpldapadmin + environment: + PHPLDAPADMIN_HTTPS=false: + ports: + - "8080:80" + healthcheck: + test: echo 1 + interval: 3s + timeout: 2s + retries: 5 + start_period: 2s + security_opt: + - label:disable + diff --git a/tests/testflows/ldap/docker-compose/zookeeper-service.yml b/tests/testflows/ldap/docker-compose/zookeeper-service.yml new file mode 100644 index 00000000000..f3df33358be --- /dev/null +++ b/tests/testflows/ldap/docker-compose/zookeeper-service.yml @@ -0,0 +1,18 @@ +version: '2.3' + +services: + zookeeper: + image: zookeeper:3.4.12 + expose: + - "2181" + environment: + ZOO_TICK_TIME: 500 + ZOO_MY_ID: 1 + healthcheck: + test: echo stat | nc localhost 2181 + interval: 3s + timeout: 2s + retries: 5 + start_period: 2s + security_opt: + - label:disable diff --git a/tests/testflows/ldap/regression.py b/tests/testflows/ldap/regression.py new file mode 100755 index 00000000000..567807fc0a8 --- /dev/null +++ b/tests/testflows/ldap/regression.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +import sys +from testflows.core import * + +append_path(sys.path, "..") + +from helpers.cluster import Cluster +from helpers.argparser import argparser +from ldap.requirements import * + +# Cross-outs of known fails +xfails = { + "connection protocols/tls/tls_require_cert='try'": + [(Fail, "can't be tested with self-signed certificates")], + "connection protocols/tls/tls_require_cert='demand'": + [(Fail, "can't be tested with self-signed certificates")], + "connection protocols/starttls/tls_require_cert='try'": + [(Fail, "can't be tested with self-signed certificates")], + "connection protocols/starttls/tls_require_cert='demand'": + [(Fail, "can't be tested with self-signed certificates")], + "connection protocols/tls require cert default demand": + [(Fail, "can't be tested with self-signed certificates")], + "connection protocols/starttls with custom port": 
+ [(Fail, "it seems that starttls is not enabled by default on custom plain-text ports in LDAP server")], + "connection protocols/tls cipher suite": + [(Fail, "can't get it to work")] +} + +@TestFeature +@Name("ldap authentication") +@ArgumentParser(argparser) +@Requirements( + RQ_SRS_007_LDAP_Authentication("1.0") +) +@XFails(xfails) +def regression(self, local, clickhouse_binary_path): + """ClickHouse integration with LDAP regression module. + """ + nodes = { + "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"), + } + + with Cluster(local, clickhouse_binary_path, nodes=nodes) as cluster: + self.context.cluster = cluster + + Scenario(run=load("ldap.tests.sanity", "scenario")) + Scenario(run=load("ldap.tests.multiple_servers", "scenario")) + Feature(run=load("ldap.tests.connections", "feature")) + Feature(run=load("ldap.tests.server_config", "feature")) + Feature(run=load("ldap.tests.user_config", "feature")) + Feature(run=load("ldap.tests.authentications", "feature")) + +if main(): + regression() diff --git a/tests/testflows/ldap/requirements/__init__.py b/tests/testflows/ldap/requirements/__init__.py new file mode 100644 index 00000000000..02f7d430154 --- /dev/null +++ b/tests/testflows/ldap/requirements/__init__.py @@ -0,0 +1 @@ +from .requirements import * diff --git a/tests/testflows/ldap/requirements/requirements.py b/tests/testflows/ldap/requirements/requirements.py new file mode 100644 index 00000000000..92491f4318b --- /dev/null +++ b/tests/testflows/ldap/requirements/requirements.py @@ -0,0 +1,928 @@ +# These requirements were auto generated +# from software requirements specification (SRS) +# document by TestFlows v1.6.200623.1103543. +# Do not edit by hand but re-generate instead +# using 'tfs requirements generate' command. +from testflows.core import Requirement + +RQ_SRS_007_LDAP_Authentication = Requirement( + name='RQ.SRS-007.LDAP.Authentication', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support user authentication via an [LDAP] server.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_MultipleServers = Requirement( + name='RQ.SRS-007.LDAP.Authentication.MultipleServers', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying multiple [LDAP] servers that can be used to authenticate\n' + 'users.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_Protocol_PlainText = Requirement( + name='RQ.SRS-007.LDAP.Authentication.Protocol.PlainText', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support user authentication using plain text `ldap://` non secure protocol.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_Protocol_TLS = Requirement( + name='RQ.SRS-007.LDAP.Authentication.Protocol.TLS', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support user authentication using `SSL/TLS` `ldaps://` secure protocol.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_Protocol_StartTLS = Requirement( + name='RQ.SRS-007.LDAP.Authentication.Protocol.StartTLS', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support user authentication using legacy `StartTLS` protocol which is a\n' + 'plain text `ldap://` protocol that is upgraded to [TLS].\n' + ), + link=None + ) + 
+RQ_SRS_007_LDAP_Authentication_TLS_Certificate_Validation = Requirement( + name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.Validation', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support certificate validation used for [TLS] connections.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_TLS_Certificate_SelfSigned = Requirement( + name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SelfSigned', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support self-signed certificates for [TLS] connections.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_TLS_Certificate_SpecificCertificationAuthority = Requirement( + name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SpecificCertificationAuthority', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support certificates signed by specific Certification Authority for [TLS] connections.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Server_Configuration_Invalid = Requirement( + name='RQ.SRS-007.LDAP.Server.Configuration.Invalid', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server configuration is not valid.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_User_Configuration_Invalid = Requirement( + name='RQ.SRS-007.LDAP.User.Configuration.Invalid', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL return an error and prohibit user login if user configuration is not valid.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_Mechanism_Anonymous = Requirement( + name='RQ.SRS-007.LDAP.Authentication.Mechanism.Anonymous', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL return an error and prohibit authentication using [Anonymous Authentication Mechanism of Simple Bind]\n' + 'authentication mechanism.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_Mechanism_Unauthenticated = Requirement( + name='RQ.SRS-007.LDAP.Authentication.Mechanism.Unauthenticated', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL return an error and prohibit authentication using [Unauthenticated Authentication Mechanism of Simple Bind]\n' + 'authentication mechanism.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_Mechanism_NamePassword = Requirement( + name='RQ.SRS-007.LDAP.Authentication.Mechanism.NamePassword', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL allow authentication using only [Name/Password Authentication Mechanism of Simple Bind]\n' + 'authentication mechanism.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_Valid = Requirement( + name='RQ.SRS-007.LDAP.Authentication.Valid', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL only allow user authentication using [LDAP] server if and only if\n' + 'user name and password match [LDAP] server records for the user.\n' + ), + link=None + ) + +RQ_SRS_007_LDAP_Authentication_Invalid = Requirement( + name='RQ.SRS-007.LDAP.Authentication.Invalid', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL return an error 
and prohibit authentication if either user name or password\n'
+        'do not match [LDAP] server records for the user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_Invalid_DeletedUser = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.Invalid.DeletedUser',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL return an error and prohibit authentication if the user\n'
+        'has been deleted from the [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_UsernameChanged = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.UsernameChanged',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL return an error and prohibit authentication if the username is changed\n'
+        'on the [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_PasswordChanged = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.PasswordChanged',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL return an error and prohibit authentication if the password\n'
+        'for the user is changed on the [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_LDAPServerRestart = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.LDAPServerRestart',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support authenticating users after [LDAP] server is restarted.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_ClickHouseServerRestart = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.ClickHouseServerRestart',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support authenticating users after server is restarted.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_Parallel = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.Parallel',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support parallel authentication of users using [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_Parallel_ValidAndInvalid = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.Parallel.ValidAndInvalid',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support authentication of valid users and\n'
+        'prohibit authentication of invalid users using [LDAP] server\n'
+        'in parallel without having invalid attempts affecting valid authentications.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_UnreachableServer = Requirement(
+    name='RQ.SRS-007.LDAP.UnreachableServer',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server is unreachable.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_Name = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.Name',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not support empty string as a server name.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_Host = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.Host',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<host>` parameter to specify [LDAP]\n'
+        'server hostname or IP, this parameter SHALL be mandatory and SHALL not be empty.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_Port = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.Port',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<port>` parameter to specify [LDAP] server port.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_Port_Default = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.Port.Default',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL use default port number `636` if `enable_tls` is set to `yes` or `389` otherwise.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Prefix = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Prefix',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<auth_dn_prefix>` parameter to specify the prefix\n'
+        'of value used to construct the DN to bind to during authentication via [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Suffix = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Suffix',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<auth_dn_suffix>` parameter to specify the suffix\n'
+        'of value used to construct the DN to bind to during authentication via [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Value = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL construct DN as `auth_dn_prefix + escape(user_name) + auth_dn_suffix` string.\n'
+        '\n'
+        "> This implies that auth_dn_suffix should usually have comma ',' as its first non-space character.\n"
+        ),
+    link=None
+    )
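+
+# Informative example: applying RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value
+# with the illustrative values auth_dn_prefix='cn=' and
+# auth_dn_suffix=',ou=users,dc=example,dc=com', the DN constructed for a user
+# named 'alice' would be:
+#
+#     cn=alice,ou=users,dc=example,dc=com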
+
+RQ_SRS_007_LDAP_Configuration_Server_EnableTLS = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<enable_tls>` parameter to trigger the use of secure connection to the [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Default = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Default',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL use `yes` value as the default for `<enable_tls>` parameter\n'
+        'to enable SSL/TLS `ldaps://` protocol.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_No = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.No',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying `no` as the value of `<enable_tls>` parameter to enable\n'
+        'plain text `ldap://` protocol.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Yes = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Yes',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying `yes` as the value of `<enable_tls>` parameter to enable\n'
+        'SSL/TLS `ldaps://` protocol.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_StartTLS = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.StartTLS',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying `starttls` as the value of `<enable_tls>` parameter to enable\n'
+        'legacy `StartTLS` protocol that uses plain text `ldap://` protocol, upgraded to [TLS].\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<tls_minimum_protocol_version>` parameter to specify\n'
+        'the minimum protocol version of SSL/TLS.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Values = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Values',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, and `tls1.2`\n'
+        'as a value of the `<tls_minimum_protocol_version>` parameter.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Default = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Default',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL set `tls1.2` as the default value of the `<tls_minimum_protocol_version>` parameter.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<tls_require_cert>` parameter to specify [TLS] peer\n'
+        'certificate verification behavior.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Default = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Default',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL use `demand` value as the default for the `<tls_require_cert>` parameter.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Demand = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Demand',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying `demand` as the value of `<tls_require_cert>` parameter to\n'
+        'enable requesting of client certificate. If no certificate is provided, or a bad certificate is\n'
+        'provided, the session SHALL be immediately terminated.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Allow = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Allow',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying `allow` as the value of `<tls_require_cert>` parameter to\n'
+        'enable requesting of client certificate. If no\n'
+        'certificate is provided, the session SHALL proceed normally.\n'
+        'If a bad certificate is provided, it SHALL be ignored and the session SHALL proceed normally.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Try = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Try',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying `try` as the value of `<tls_require_cert>` parameter to\n'
+        'enable requesting of client certificate. If no certificate is provided, the session\n'
+        'SHALL proceed normally. If a bad certificate is provided, the session SHALL be\n'
+        'immediately terminated.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Never = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Never',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying `never` as the value of `<tls_require_cert>` parameter to\n'
+        'disable requesting of client certificate.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSCertFile = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSCertFile',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<tls_cert_file>` to specify the path to certificate file used by\n'
+        '[ClickHouse] to establish connection with the [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSKeyFile = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSKeyFile',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<tls_key_file>` to specify the path to key file for the certificate\n'
+        'specified by the `<tls_cert_file>` parameter.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSCACertDir = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<tls_ca_cert_dir>` parameter to specify the path to\n'
+        'the directory containing [CA] certificates used to verify certificates provided by the [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSCACertFile = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `<tls_ca_cert_file>` parameter to specify a path to a specific\n'
+        '[CA] certificate file used to verify certificates provided by the [LDAP] server.\n'
+        ),
+    link=None
+    )
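+
+# Informative example: the TLS file parameters above are typically specified
+# together inside a server entry (paths are illustrative):
+#
+#     <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>
+#     <tls_key_file>/path/to/tls_key_file</tls_key_file>
+#     <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>
+#     <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>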
+
+RQ_SRS_007_LDAP_Configuration_Server_TLSCipherSuite = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `tls_cipher_suite` parameter to specify allowed cipher suites.\n'
+        'The value SHALL use the same format as the `ciphersuites` in the [OpenSSL Ciphers].\n'
+        '\n'
+        'For example,\n'
+        '\n'
+        '```xml\n'
+        '<tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>\n'
+        '```\n'
+        '\n'
+        'The available suites SHALL depend on the [OpenSSL] library version and variant used to build\n'
+        '[ClickHouse] and therefore might change.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_Server_Syntax = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.Server.Syntax',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml`\n'
+        'configuration file or any configuration file inside the `config.d` directory.\n'
+        '\n'
+        '```xml\n'
+        '<yandex>\n'
+        '    <my_ldap_server>\n'
+        '        <host>localhost</host>\n'
+        '        <port>636</port>\n'
+        '        <auth_dn_prefix>cn=</auth_dn_prefix>\n'
+        '        <auth_dn_suffix>, ou=users, dc=example, dc=com</auth_dn_suffix>\n'
+        '        <enable_tls>yes</enable_tls>\n'
+        '        <tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>\n'
+        '        <tls_require_cert>demand</tls_require_cert>\n'
+        '        <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>\n'
+        '        <tls_key_file>/path/to/tls_key_file</tls_key_file>\n'
+        '        <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>\n'
+        '        <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>\n'
+        '        <tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>\n'
+        '    </my_ldap_server>\n'
+        '</yandex>\n'
+        '```\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_User_Syntax = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.User.Syntax',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support the following example syntax to create a user that is authenticated using\n'
+        'an [LDAP] server inside the `users.xml` file or any configuration file inside the `users.d` directory.\n'
+        '\n'
+        '```xml\n'
+        '<yandex>\n'
+        '    <users>\n'
+        '        <user_name>\n'
+        '            <ldap>\n'
+        '                <server>my_ldap_server</server>\n'
+        '            </ldap>\n'
+        '        </user_name>\n'
+        '    </users>\n'
+        '</yandex>\n'
+        '```\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_User_Name_Empty = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.User.Name.Empty',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not support empty string as a user name.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_User_BothPasswordAndLDAP = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL throw an error if `<ldap>` is specified for the user and at the same\n'
+        'time user configuration contains any of the `<password*>` entries.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_NotDefined = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL throw an error during any authentication attempt\n'
+        'if the name of the [LDAP] server used inside the `<ldap>` entry\n'
+        'is not defined in the `<ldap_servers>` section.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_Empty = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL throw an error during any authentication attempt\n'
+        'if the name of the [LDAP] server used inside the `<ldap>` entry\n'
+        'is empty.\n'
+        ),
+    link=None
+    )
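+
+# Informative example: a user entry that would violate
+# RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty, i.e. an
+# `<ldap>` section whose `<server>` value is empty:
+#
+#     <ldap>
+#         <server></server>
+#     </ldap>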
+
+RQ_SRS_007_LDAP_Configuration_User_OnlyOneServer = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying only one [LDAP] server for a given user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_User_Name_Long = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.User.Name.Long',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support long user names of at least 256 bytes\n'
+        'to specify users that can be authenticated using an [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Configuration_User_Name_UTF8 = Requirement(
+    name='RQ.SRS-007.LDAP.Configuration.User.Name.UTF8',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support user names that contain [UTF-8] characters.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_Username_Empty = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.Username.Empty',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not support authenticating users with empty username.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_Username_Long = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.Username.Long',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support authenticating users with a long username of at least 256 bytes.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_Username_UTF8 = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.Username.UTF8',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support authenticating users with a username that contains [UTF-8] characters.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_Password_Empty = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.Password.Empty',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not support authenticating users with empty passwords\n'
+        'even if an empty password is valid for the user and\n'
+        'is allowed by the [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_Password_Long = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.Password.Long',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support long password of at least 256 bytes\n'
+        'that can be used to authenticate users using an [LDAP] server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_007_LDAP_Authentication_Password_UTF8 = Requirement(
+    name='RQ.SRS-007.LDAP.Authentication.Password.UTF8',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support [UTF-8] characters in passwords\n'
+        'used to authenticate users using an [LDAP] server.\n'
+        ),
+    link=None
+    )
diff --git a/tests/testflows/ldap/tests/authentications.py b/tests/testflows/ldap/tests/authentications.py
new file mode 100644
index 00000000000..4b054fb694e
--- /dev/null
+++ b/tests/testflows/ldap/tests/authentications.py
@@ -0,0 +1,464 @@
+# -*- coding: utf-8 -*-
+import random
+
+from multiprocessing.dummy import Pool
+from testflows.core import *
+from testflows.asserts import error
+from ldap.tests.common import *
+from
ldap.requirements import * + +servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }, + "openldap2": { + "host": "openldap2", + "port": "636", + "enable_tls": "yes", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "never", + } +} + +@TestStep(When) +@Name("I login as {username} and execute query") +def login_and_execute_query(self, username, password, exitcode=None, message=None, steps=True): + self.context.node.query("SELECT 1", + settings=[("user", username), ("password", password)], + exitcode=exitcode or 0, + message=message, steps=steps) + +@TestScenario +def add_user_to_ldap_and_login(self, server, user=None, ch_user=None, login=None, exitcode=None, message=None): + """Add user to LDAP and ClickHouse and then try to login.""" + self.context.ldap_node = self.context.cluster.node(server) + + if ch_user is None: + ch_user = {} + if login is None: + login = {} + if user is None: + user = {"cn": "myuser", "userpassword": "myuser"} + + with ldap_user(**user) as user: + ch_user["username"] = ch_user.get("username", user["cn"]) + ch_user["server"] = ch_user.get("server", user["_server"]) + + with ldap_authenticated_users(ch_user, config_file=f"ldap_users_{getuid()}.xml", restart=True): + username = login.get("username", user["cn"]) + password = login.get("password", user["userpassword"]) + login_and_execute_query(username=username, password=password, exitcode=exitcode, message=message) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Parallel("1.0"), + RQ_SRS_007_LDAP_Authentication_Parallel_ValidAndInvalid("1.0") +) +def parallel_login(self, server, user_count=10, timeout=200): + """Check that login of valid and invalid LDAP authenticated users works in parallel.""" + self.context.ldap_node = self.context.cluster.node(server) + user = None + + users = [{"cn": f"parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)] + + with ldap_users(*users): + with ldap_authenticated_users(*[{"username": user["cn"], "server": server} for user in users]): + + def login_with_valid_username_and_password(users, i, iterations=10): + with When(f"valid users try to login #{i}"): + for i in range(iterations): + random_user = users[random.randint(0, len(users)-1)] + login_and_execute_query(username=random_user["cn"], password=random_user["userpassword"], steps=False) + + def login_with_valid_username_and_invalid_password(users, i, iterations=10): + with When(f"users try to login with valid username and invalid password #{i}"): + for i in range(iterations): + random_user = users[random.randint(0, len(users)-1)] + login_and_execute_query(username=random_user["cn"], + password=(random_user["userpassword"] + randomword(1)), + exitcode=4, + message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name", + steps=False) + + def login_with_invalid_username_and_valid_password(users, i, iterations=10): + with When(f"users try to login with invalid username and valid password #{i}"): + for i in range(iterations): + random_user = dict(users[random.randint(0, len(users)-1)]) + random_user["cn"] += randomword(1) + login_and_execute_query(username=random_user["cn"], + password=random_user["userpassword"], + exitcode=4, + message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name", + 
steps=False) + + with When("I login in parallel"): + p = Pool(15) + tasks = [] + for i in range(5): + tasks.append(p.apply_async(login_with_valid_username_and_password, (users, i, 50,))) + tasks.append(p.apply_async(login_with_valid_username_and_invalid_password, (users, i, 50,))) + tasks.append(p.apply_async(login_with_invalid_username_and_valid_password, (users, i, 50,))) + + with Then("it should work"): + for task in tasks: + task.get(timeout=timeout) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Invalid("1.0"), + RQ_SRS_007_LDAP_Authentication_Invalid_DeletedUser("1.0") +) +def login_after_user_is_deleted_from_ldap(self, server): + """Check that login fails after user is deleted from LDAP.""" + self.context.ldap_node = self.context.cluster.node(server) + user = None + + try: + with Given(f"I add user to LDAP"): + user = {"cn": "myuser", "userpassword": "myuser"} + user = add_user_to_ldap(**user) + + with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml", restart=True): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with When("I delete this user from LDAP"): + delete_user_from_ldap(user) + + with Then("when I try to login again it should fail"): + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=4, + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + ) + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Invalid("1.0"), + RQ_SRS_007_LDAP_Authentication_PasswordChanged("1.0") +) +def login_after_user_password_changed_in_ldap(self, server): + """Check that login fails after user password is changed in LDAP.""" + self.context.ldap_node = self.context.cluster.node(server) + user = None + + try: + with Given(f"I add user to LDAP"): + user = {"cn": "myuser", "userpassword": "myuser"} + user = add_user_to_ldap(**user) + + with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml", restart=True): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with When("I change user password in LDAP"): + change_user_password_in_ldap(user, "newpassword") + + with Then("when I try to login again it should fail"): + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=4, + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + ) + + with And("when I try to login with the new password it should work"): + login_and_execute_query(username=user["cn"], password="newpassword") + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Invalid("1.0"), + RQ_SRS_007_LDAP_Authentication_UsernameChanged("1.0") +) +def login_after_user_cn_changed_in_ldap(self, server): + """Check that login fails after user cn is changed in LDAP.""" + self.context.ldap_node = self.context.cluster.node(server) + user = None + + try: + with Given(f"I add user to LDAP"): + user = {"cn": "myuser", "userpassword": "myuser"} + user = add_user_to_ldap(**user) + + with ldap_authenticated_users({"username": user["cn"], "server": server}, 
config_file=f"ldap_users_{getuid()}.xml", restart=True):
+            login_and_execute_query(username=user["cn"], password=user["userpassword"])
+
+            with When("I change user cn in LDAP"):
+                change_user_cn_in_ldap(user, "myuser2")
+
+            with Then("when I try to login again it should fail"):
+                login_and_execute_query(username=user["cn"], password=user["userpassword"],
+                    exitcode=4,
+                    message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name"
+                )
+    finally:
+        with Finally("I make sure LDAP user is deleted"):
+            if user is not None:
+                delete_user_from_ldap(user, exitcode=None)
+
+@TestScenario
+@Requirements(
+    RQ_SRS_007_LDAP_Authentication_Valid("1.0"),
+    RQ_SRS_007_LDAP_Authentication_LDAPServerRestart("1.0")
+)
+def login_after_ldap_server_is_restarted(self, server, timeout=60):
+    """Check that login succeeds after LDAP server is restarted."""
+    self.context.ldap_node = self.context.cluster.node(server)
+    user = None
+
+    try:
+        with Given(f"I add user to LDAP"):
+            user = {"cn": "myuser", "userpassword": getuid()}
+            user = add_user_to_ldap(**user)
+
+        with ldap_authenticated_users({"username": user["cn"], "server": server}):
+            login_and_execute_query(username=user["cn"], password=user["userpassword"])
+
+            with When("I restart LDAP server"):
+                self.context.ldap_node.restart()
+
+            with Then("I try to login until it works", description=f"timeout {timeout} sec"):
+                started = time.time()
+                while True:
+                    r = self.context.node.query("SELECT 1",
+                        settings=[("user", user["cn"]), ("password", user["userpassword"])],
+                        no_checks=True)
+                    if r.exitcode == 0:
+                        break
+                    assert time.time() - started < timeout, error(r.output)
+    finally:
+        with Finally("I make sure LDAP user is deleted"):
+            if user is not None:
+                delete_user_from_ldap(user, exitcode=None)
+
+@TestScenario
+@Requirements(
+    RQ_SRS_007_LDAP_Authentication_Valid("1.0"),
+    RQ_SRS_007_LDAP_Authentication_ClickHouseServerRestart("1.0")
+)
+def login_after_clickhouse_server_is_restarted(self, server, timeout=60):
+    """Check that login succeeds after ClickHouse server is restarted."""
+    self.context.ldap_node = self.context.cluster.node(server)
+    user = None
+
+    try:
+        with Given(f"I add user to LDAP"):
+            user = {"cn": "myuser", "userpassword": getuid()}
+            user = add_user_to_ldap(**user)
+
+        with ldap_authenticated_users({"username": user["cn"], "server": server}):
+            login_and_execute_query(username=user["cn"], password=user["userpassword"])
+
+            with When("I restart ClickHouse server"):
+                self.context.node.restart()
+
+            with Then("I try to login until it works", description=f"timeout {timeout} sec"):
+                started = time.time()
+                while True:
+                    r = self.context.node.query("SELECT 1",
+                        settings=[("user", user["cn"]), ("password", user["userpassword"])],
+                        no_checks=True)
+                    if r.exitcode == 0:
+                        break
+                    assert time.time() - started < timeout, error(r.output)
+    finally:
+        with Finally("I make sure LDAP user is deleted"):
+            if user is not None:
+                delete_user_from_ldap(user, exitcode=None)
+
+@TestScenario
+@Requirements(
+    RQ_SRS_007_LDAP_Authentication_Invalid("1.0"),
+    RQ_SRS_007_LDAP_Authentication_Password_Empty("1.0")
+)
+def valid_username_with_valid_empty_password(self, server):
+    """Check that we can't login using valid username that has empty password."""
+    user = {"cn": "empty_password", "userpassword": ""}
+    exitcode = 4
+    message = f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name"
+
+    add_user_to_ldap_and_login(user=user,
exitcode=exitcode, message=message, server=server) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Invalid("1.0"), + RQ_SRS_007_LDAP_Authentication_Password_Empty("1.0") +) +def valid_username_and_invalid_empty_password(self, server): + """Check that we can't login using valid username but invalid empty password.""" + username = "user_non_empty_password" + user = {"cn": username, "userpassword": username} + login = {"password": ""} + + exitcode = 4 + message = f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Valid("1.0") +) +def valid_username_and_password(self, server): + """Check that we can login using valid username and password.""" + username = "valid_username_and_password" + user = {"cn": username, "userpassword": username} + + with When(f"I add user {username} to LDAP and try to login"): + add_user_to_ldap_and_login(user=user, server=server) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Invalid("1.0") +) +def valid_username_and_password_invalid_server(self, server=None): + """Check that we can't login using valid username and valid + password but for a different server.""" + self.context.ldap_node = self.context.cluster.node("openldap1") + + user = {"username": "user2", "userpassword": "user2", "server": "openldap1"} + + exitcode = 4 + message = f"DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name" + + with ldap_authenticated_users(user, config_file=f"ldap_users_{getuid()}.xml", restart=True): + login_and_execute_query(username="user2", password="user2", exitcode=exitcode, message=message) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Valid("1.0"), + RQ_SRS_007_LDAP_Authentication_Username_Long("1.0"), + RQ_SRS_007_LDAP_Configuration_User_Name_Long("1.0") +) +def valid_long_username_and_short_password(self, server): + """Check that we can login using valid very long username and short password.""" + username = "long_username_12345678901234567890123456789012345678901234567890123456789012345678901234567890" + user = {"cn": username, "userpassword": "long_username"} + + add_user_to_ldap_and_login(user=user, server=server) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Invalid("1.0") +) +def invalid_long_username_and_valid_short_password(self, server): + """Check that we can't login using slightly invalid long username but valid password.""" + username = "long_username_12345678901234567890123456789012345678901234567890123456789012345678901234567890" + user = {"cn": username, "userpassword": "long_username"} + login = {"username": f"{username}?"} + + exitcode = 4 + message=f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Valid("1.0"), + RQ_SRS_007_LDAP_Authentication_Password_Long("1.0") +) +def valid_short_username_and_long_password(self, server): + """Check that we can login using valid short username with very long password.""" + username = "long_password" + user = {"cn": username, "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890"} 
+ add_user_to_ldap_and_login(user=user, server=server) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Invalid("1.0") +) +def valid_short_username_and_invalid_long_password(self, server): + """Check that we can't login using valid short username and invalid long password.""" + username = "long_password" + user = {"cn": username, "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890"} + login = {"password": user["userpassword"] + "1"} + + exitcode = 4 + message=f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Invalid("1.0") +) +def valid_username_and_invalid_password(self, server): + """Check that we can't login using valid username and invalid password.""" + username = "valid_username_and_invalid_password" + user = {"cn": username, "userpassword": username} + login = {"password": user["userpassword"] + "1"} + + exitcode = 4 + message=f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Invalid("1.0") +) +def invalid_username_and_valid_password(self, server): + """Check that we can't login using slightly invalid username but valid password.""" + username = "invalid_username_and_valid_password" + user = {"cn": username, "userpassword": username} + login = {"username": user["cn"] + "1"} + + exitcode = 4 + message=f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Valid("1.0"), + RQ_SRS_007_LDAP_Authentication_Username_UTF8("1.0"), + RQ_SRS_007_LDAP_Configuration_User_Name_UTF8("1.0") +) +def valid_utf8_username_and_ascii_password(self, server): + """Check that we can login using valid utf-8 username with ascii password.""" + username = "utf8_username_Gãńdåłf_Thê_Gręât" + user = {"cn": username, "userpassword": "utf8_username"} + + add_user_to_ldap_and_login(user=user, server=server) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Valid("1.0"), + RQ_SRS_007_LDAP_Authentication_Password_UTF8("1.0") +) +def valid_ascii_username_and_utf8_password(self, server): + """Check that we can login using valid ascii username with utf-8 password.""" + username = "utf8_password" + user = {"cn": username, "userpassword": "utf8_password_Gãńdåłf_Thê_Gręât"} + + add_user_to_ldap_and_login(user=user, server=server) + +@TestScenario +def empty_username_and_empty_password(self, server=None): + """Check that we can login using empty username and empty password as + it will use the default user and that has an empty password.""" + login_and_execute_query(username="", password="") + +@TestFeature +@Name("user authentications") +@Requirements( + RQ_SRS_007_LDAP_Authentication_Mechanism_NamePassword("1.0") +) +def feature(self, servers=None, node="clickhouse1"): + self.context.node = self.context.cluster.node(node) + + if servers is None: + servers = globals()["servers"] + + with ldap_servers(servers): + for scenario in 
loads(current_module(), Scenario):
+            scenario(server="openldap1")
diff --git a/tests/testflows/ldap/tests/common.py b/tests/testflows/ldap/tests/common.py
new file mode 100644
index 00000000000..a1f823550f3
--- /dev/null
+++ b/tests/testflows/ldap/tests/common.py
@@ -0,0 +1,378 @@
+import os
+import uuid
+import time
+import string
+import random
+import textwrap
+import xml.etree.ElementTree as xmltree
+
+from collections import namedtuple
+from contextlib import contextmanager
+
+import testflows.settings as settings
+
+from testflows.core import *
+from testflows.asserts import error
+
+def getuid():
+    return str(uuid.uuid1()).replace('-', '_')
+
+xml_with_utf8 = '<?xml version="1.0" encoding="utf-8"?>\n'
+
+def xml_indent(elem, level=0, by="  "):
+    i = "\n" + level * by
+    if len(elem):
+        if not elem.text or not elem.text.strip():
+            elem.text = i + by
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+        for elem in elem:
+            xml_indent(elem, level + 1)
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+    else:
+        if level and (not elem.tail or not elem.tail.strip()):
+            elem.tail = i
+
+def xml_append(root, tag, text):
+    element = xmltree.Element(tag)
+    element.text = text
+    root.append(element)
+    return element
+
+Config = namedtuple("Config", "content path name uid preprocessed_name")
+
+ASCII_CHARS = string.ascii_lowercase + string.ascii_uppercase + string.digits
+
+def randomword(length, chars=ASCII_CHARS):
+    return ''.join(random.choice(chars) for i in range(length))
+
+def add_config(config, timeout=20, restart=False):
+    """Add dynamic configuration file to ClickHouse.
+
+    :param config: configuration file description
+    :param timeout: timeout, default: 20 sec
+    :param restart: restart ClickHouse to apply the config, default: False
+    """
+    node = current().context.node
+    try:
+        with Given(f"{config.name}"):
+            if settings.debug:
+                with When("I output the content of the config"):
+                    debug(config.content)
+
+            with node.cluster.shell(node.name) as bash:
+                bash.expect(bash.prompt)
+                bash.send("tail -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
+
+                with When("I add the config", description=config.path):
+                    command = f"cat << HEREDOC > {config.path}\n{config.content}\nHEREDOC"
+                    node.command(command, steps=False, exitcode=0)
+
+                with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
+                    started = time.time()
+                    command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}"
+                    while time.time() - started < timeout:
+                        exitcode = node.command(command, steps=False).exitcode
+                        if exitcode == 0:
+                            break
+                        time.sleep(1)
+                    assert exitcode == 0, error()
+
+                if restart:
+                    bash.close()
+                    logsize = node.command("ls -s --block-size=1 /var/log/clickhouse-server/clickhouse-server.log").output.split(" ")[0].strip()
+                    with When("I restart ClickHouse to apply the config changes"):
+                        node.restart(safe=False)
+                        bash.prompt = bash.__class__.prompt
+                        bash.open()
+                        bash.send(f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log")
+
+                with When("I wait for config to be loaded"):
+                    if restart:
+                        bash.expect(f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration", timeout=timeout)
+                    else:
+                        bash.expect(f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration", timeout=timeout)
+        yield
+    finally:
+        with Finally(f"I remove {config.name}"):
+            with node.cluster.shell(node.name) as bash:
+                bash.expect(bash.prompt)
+                bash.send("tail -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
+
+                with By("removing the config file", description=config.path):
+                    node.command(f"rm -rf {config.path}", exitcode=0)
+
+                with Then(f"{config.preprocessed_name} should be updated"):
+                    started = time.time()
+                    command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep '{config.uid}'{' > /dev/null' if not settings.debug else ''}"
+                    while time.time() - started < timeout:
+                        exitcode = node.command(command, steps=False).exitcode
+                        if exitcode == 1:
+                            break
+                        time.sleep(1)
+                    assert exitcode == 1, error()
+
+                with When("I wait for config to be loaded"):
+                    started = time.time()
+                    bash.expect(f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration", timeout=timeout)
+
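+# Usage sketch (illustrative values): add_config() is a generator and is meant
+# to be consumed through the @contextmanager wrappers defined below, e.g.
+#
+#     with ldap_servers({"my_server": {"host": "localhost"}}):
+#         ...  # the LDAP server entry exists in config.d only inside this block
+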
+def create_ldap_servers_config_content(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml"):
+    """Create LDAP servers configuration content.
+    """
+    uid = getuid()
+    path = os.path.join(config_d_dir, config_file)
+    name = config_file
+
+    root = xmltree.fromstring("<yandex><ldap_servers></ldap_servers></yandex>")
+    xml_servers = root.find("ldap_servers")
+    xml_servers.append(xmltree.Comment(text=f"LDAP servers {uid}"))
+
+    for _name, server in servers.items():
+        xml_server = xmltree.Element(_name)
+        for key, value in server.items():
+            xml_append(xml_server, key, value)
+        xml_servers.append(xml_server)
+
+    xml_indent(root)
+    content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8")
+
+    return Config(content, path, name, uid, "config.xml")
+
+@contextmanager
+def ldap_servers(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml",
+        timeout=20, restart=False):
+    """Add LDAP servers configuration.
+    """
+    config = create_ldap_servers_config_content(servers, config_d_dir, config_file)
+    return add_config(config, restart=restart)
+
+def create_ldap_users_config_content(*users, config_d_dir="/etc/clickhouse-server/users.d", config_file="ldap_users.xml"):
+    """Create LDAP users configuration file content.
+    """
+    uid = getuid()
+    path = os.path.join(config_d_dir, config_file)
+    name = config_file
+
+    root = xmltree.fromstring("<yandex><users></users></yandex>")
+    xml_users = root.find("users")
+    xml_users.append(xmltree.Comment(text=f"LDAP users {uid}"))
+
+    for user in users:
+        xml_user = xmltree.Element(user['username'])
+        xml_user_server = xmltree.Element("ldap")
+        xml_append(xml_user_server, "server", user["server"])
+        xml_user.append(xml_user_server)
+        xml_users.append(xml_user)
+
+    xml_indent(root)
+    content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8")
+
+    return Config(content, path, name, uid, "users.xml")
+
+@contextmanager
+def ldap_authenticated_users(*users, config_d_dir="/etc/clickhouse-server/users.d",
+        config_file=None, timeout=20, restart=True, config=None):
+    """Add LDAP authenticated user configuration.
+    """
+    if config_file is None:
+        config_file = f"ldap_users_{getuid()}.xml"
+    if config is None:
+        config = create_ldap_users_config_content(*users, config_d_dir=config_d_dir, config_file=config_file)
+    return add_config(config, restart=restart)
+
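+# Usage sketch (illustrative values, mirroring the calls in authentications.py):
+#
+#     with ldap_authenticated_users({"username": "myuser", "server": "openldap1"},
+#                                   restart=True):
+#         ...  # 'myuser' can now authenticate through the 'openldap1' server
+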
+def invalid_server_config(servers, message=None, tail=13, timeout=20):
+    """Check that ClickHouse errors when trying to load invalid LDAP servers configuration file.
+    """
+    node = current().context.node
+    if message is None:
+        message = "Exception: Failed to merge config with '/etc/clickhouse-server/config.d/ldap_servers.xml'"
+
+    config = create_ldap_servers_config_content(servers)
+    try:
+        node.command("echo -e \"%s\" > /var/log/clickhouse-server/clickhouse-server.err.log" % ("-\\n" * tail))
+
+        with When("I add the config", description=config.path):
+            command = f"cat << HEREDOC > {config.path}\n{config.content}\nHEREDOC"
+            node.command(command, steps=False, exitcode=0)
+
+        with Then("server shall fail to merge the new config"):
+            started = time.time()
+            command = f"tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep \"{message}\""
+            while time.time() - started < timeout:
+                exitcode = node.command(command, steps=False).exitcode
+                if exitcode == 0:
+                    break
+                time.sleep(1)
+            assert exitcode == 0, error()
+    finally:
+        with Finally(f"I remove {config.name}"):
+            with By("removing the config file", description=config.path):
+                node.command(f"rm -rf {config.path}", exitcode=0)
+
+def invalid_user_config(servers, config, message=None, tail=13, timeout=20):
+    """Check that ClickHouse errors when trying to load invalid LDAP users configuration file.
+    """
+    node = current().context.node
+    if message is None:
+        message = "Exception: Failed to merge config with '/etc/clickhouse-server/users.d/ldap_users.xml'"
+
+    with ldap_servers(servers):
+        try:
+            node.command("echo -e \"%s\" > /var/log/clickhouse-server/clickhouse-server.err.log" % ("\\n" * tail))
+            with When("I add the config", description=config.path):
+                command = f"cat << HEREDOC > {config.path}\n{config.content}\nHEREDOC"
+                node.command(command, steps=False, exitcode=0)
+
+            with Then("server shall fail to merge the new config"):
+                started = time.time()
+                command = f"tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep \"{message}\""
+                while time.time() - started < timeout:
+                    exitcode = node.command(command, steps=False).exitcode
+                    if exitcode == 0:
+                        break
+                    time.sleep(1)
+                assert exitcode == 0, error()
+        finally:
+            with Finally(f"I remove {config.name}"):
+                with By("removing the config file", description=config.path):
+                    node.command(f"rm -rf {config.path}", exitcode=0)
+
+def add_user_to_ldap(cn, userpassword, givenname=None, homedirectory=None, sn=None, uid=None, uidnumber=None, node=None):
+    """Add user entry to LDAP."""
+    if node is None:
+        node = current().context.ldap_node
+    if uid is None:
+        uid = cn
+    if givenname is None:
+        givenname = "John"
+    if homedirectory is None:
+        homedirectory = "/home/users"
+    if sn is None:
+        sn = "User"
+    if uidnumber is None:
+        uidnumber = 2000
+
+    user = {
+        "dn": f"cn={cn},ou=users,dc=company,dc=com",
+        "cn": cn,
+        "gidnumber": 501,
+        "givenname": givenname,
+        "homedirectory": homedirectory,
+        "objectclass": ["inetOrgPerson", "posixAccount", "top"],
+        "sn": sn,
+        "uid": uid,
+        "uidnumber": uidnumber,
+        "userpassword": userpassword,
+        "_server": node.name
+    }
+
+    lines = []
+    for key, value in user.items():
+        if key.startswith("_"):
+            continue
+        elif key == "objectclass":
+            for cls in value:
+                lines.append(f"objectclass: {cls}")
+        else:
+            lines.append(f"{key}: {value}")
+
+    ldif = "\n".join(lines)
+
+    r = node.command(
+        f"echo -e \"{ldif}\" | ldapadd -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin")
+    assert r.exitcode == 0, error()
+
+    return user
+
+def delete_user_from_ldap(user, node=None, exitcode=0):
+    """Delete user entry from LDAP."""
+    if node is None:
+        node = current().context.ldap_node
+    r = node.command(
f"ldapdelete -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin \"{user['dn']}\"") + if exitcode is not None: + assert r.exitcode == exitcode, error() + +def change_user_password_in_ldap(user, new_password, node=None, exitcode=0): + """Change user password in LDAP.""" + if node is None: + node = current().context.ldap_node + + ldif = (f"dn: {user['dn']}\n" + "changetype: modify\n" + "replace: userpassword\n" + f"userpassword: {new_password}") + + r = node.command( + f"echo -e \"{ldif}\" | ldapmodify -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin") + + if exitcode is not None: + assert r.exitcode == exitcode, error() + +def change_user_cn_in_ldap(user, new_cn, node=None, exitcode=0): + """Change user password in LDAP.""" + if node is None: + node = current().context.ldap_node + + new_user = dict(user) + new_user['dn'] = f"cn={new_cn},ou=users,dc=company,dc=com" + new_user['cn'] = new_cn + + ldif = ( + f"dn: {user['dn']}\n" + "changetype: modrdn\n" + f"newrdn: cn = {new_user['cn']}\n" + f"deleteoldrdn: 1\n" + ) + + r = node.command( + f"echo -e \"{ldif}\" | ldapmodify -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin") + + if exitcode is not None: + assert r.exitcode == exitcode, error() + + return new_user + +@contextmanager +def ldap_user(cn, userpassword, givenname=None, homedirectory=None, sn=None, uid=None, uidnumber=None, node=None): + """Add new user to the LDAP server.""" + try: + user = None + with Given(f"I add user {cn} to LDAP"): + user = add_user_to_ldap(cn, userpassword, givenname, homedirectory, sn, uid, uidnumber, node=node) + yield user + finally: + with Finally(f"I delete user {cn} from LDAP"): + if user is not None: + delete_user_from_ldap(user, node=node) + +@contextmanager +def ldap_users(*users, node=None): + """Add multiple new users to the LDAP server.""" + try: + _users = [] + with Given("I add users to LDAP"): + for user in users: + with By(f"adding user {user['cn']}"): + _users.append(add_user_to_ldap(**user, node=node)) + yield _users + finally: + with Finally(f"I delete users from LDAP"): + for _user in _users: + delete_user_from_ldap(_user, node=node) + +def login(servers, *users, config=None): + """Configure LDAP server and LDAP authenticated users and + try to login and execute a query""" + with ldap_servers(servers): + with ldap_authenticated_users(*users, restart=True, config=config): + for user in users: + if user.get("login", False): + with When(f"I login as {user['username']} and execute query"): + current().context.node.query("SELECT 1", + settings=[("user", user["username"]), ("password", user["password"])], + exitcode=user.get("exitcode", None), + message=user.get("message", None)) + diff --git a/tests/testflows/ldap/tests/connections.py b/tests/testflows/ldap/tests/connections.py new file mode 100644 index 00000000000..410298ba5e1 --- /dev/null +++ b/tests/testflows/ldap/tests/connections.py @@ -0,0 +1,285 @@ +from testflows.core import * +from testflows.asserts import error + +from ldap.tests.common import login +from ldap.requirements import * + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Protocol_PlainText("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_EnableTLS("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_No("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_Port_Default("1.0") +) +def plain_text(self): + """Check that we can perform LDAP user authentication using `plain text` connection protocol. 
+ """ + servers = { + "openldap1": { + "host": "openldap1", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + } + } + users = [ + {"server": "openldap1", "username": "user1", "password": "user1", "login": True} + ] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Protocol_PlainText("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_Port("1.0") +) +def plain_text_with_custom_port(self): + """Check that we can perform LDAP user authentication using `plain text` connection protocol + with the server that uses custom port. + """ + servers = { + "openldap3": { + "host": "openldap3", + "port": "3089", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + } + } + users = [ + {"server": "openldap3", "username": "user3", "password": "user3", "login": True} + ] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Protocol_TLS("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_Port("1.0") +) +def tls_with_custom_port(self): + """Check that we can perform LDAP user authentication using `TLS` connection protocol + with the server that uses custom port. + """ + servers = { + "openldap4": { + "host": "openldap4", + "port": "6036", + "tls_require_cert": "never", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + } + } + users = [ + {"server": "openldap4", "username": "user4", "password": "user4", "login": True} + ] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Authentication_Protocol_StartTLS("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_Port("1.0") +) +def starttls_with_custom_port(self): + """Check that we can perform LDAP user authentication using `StartTLS` connection protocol + with the server that uses custom port. 
+ """ + servers = { + "openldap4": { + "host": "openldap4", + "port": "3089", + "enable_tls": "starttls", + "tls_require_cert": "never", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + } + } + users = [ + {"server": "openldap4", "username": "user4", "password": "user4", "login": True} + ] + login(servers, *users) + +def tls_connection(enable_tls, tls_require_cert): + """Try to login using LDAP user authentication over a TLS connection.""" + servers = { + "openldap2": { + "host": "openldap2", + "enable_tls": enable_tls, + "tls_require_cert": tls_require_cert, + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + } + } + users = [ + {"server": "openldap2", "username": "user2", "password": "user2", "login": True} + ] + + requirements = [] + + if tls_require_cert == "never": + requirements = [RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Never("1.0")] + elif tls_require_cert == "allow": + requirements = [RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Allow("1.0")] + elif tls_require_cert == "try": + requirements = [RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Try("1.0")] + elif tls_require_cert == "demand": + requirements = [RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Demand("1.0")] + + with Example(name=f"tls_require_cert='{tls_require_cert}'", requirements=requirements): + login(servers, *users) + +@TestScenario +@Examples("enable_tls tls_require_cert", [ + ("yes", "never"), + ("yes", "allow"), + ("yes", "try"), + ("yes", "demand") +]) +@Requirements( + RQ_SRS_007_LDAP_Authentication_Protocol_TLS("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_EnableTLS("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Yes("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_Port_Default("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Default("1.0") +) +def tls(self): + """Check that we can perform LDAP user authentication using `TLS` connection protocol. 
+ """ + for example in self.examples: + tls_connection(*example) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Default("1.0") +) +def tls_enable_tls_default_yes(self): + """Check that the default value for the `enable_tls` is set to `yes`.""" + servers = { + "openldap2": { + "host": "openldap2", + "tls_require_cert": "never", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + } + } + users = [ + {"server": "openldap2", "username": "user2", "password": "user2", "login": True} + ] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Default("1.0") +) +def tls_require_cert_default_demand(self): + """Check that the default value for the `tls_require_cert` is set to `demand`.""" + servers = { + "openldap2": { + "host": "openldap2", + "enable_tls": "yes", + "port": "636", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + } + } + users = [ + {"server": "openldap2", "username": "user2", "password": "user2", "login": True} + ] + login(servers, *users) + +@TestScenario +@Examples("enable_tls tls_require_cert", [ + ("starttls", "never"), + ("starttls", "allow"), + ("starttls", "try"), + ("starttls", "demand") +]) +@Requirements( + RQ_SRS_007_LDAP_Authentication_Protocol_StartTLS("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_EnableTLS("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_StartTLS("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_Port_Default("1.0") +) +def starttls(self): + """Check that we can perform LDAP user authentication using legacy `StartTLS` connection protocol. + """ + for example in self.examples: + tls_connection(*example) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_TLSCipherSuite("1.0") +) +def tls_cipher_suite(self): + """Check that `tls_cipher_suite` parameter can be used specify allowed cipher suites.""" + servers = { + "openldap4": { + "host": "openldap4", + "port": "6036", + "tls_require_cert": "never", + "tls_cipher_suite": "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC", + "tls_minimum_protocol_version": "tls1.2", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + } + } + users = [ + {"server": "openldap4", "username": "user4", "password": "user4", "login": True} + ] + login(servers, *users) + +@TestOutline(Scenario) +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Values("1.0") +) +@Examples("version exitcode message", [ + ("ssl2", None, None), + ("ssl3", None, None), + ("tls1.0", None, None), + ("tls1.1", None, None), + ("tls1.2", None, None), + ("tls1.3", 36, "DB::Exception: LDAP server 'openldap4' is not configured") +]) +def tls_minimum_protocol_version(self, version, exitcode, message): + """Check that `tls_minimum_protocol_version` parameter can be used specify + to specify the minimum protocol version of SSL/TLS.""" + + servers = { + "openldap4": { + "host": "openldap4", + "port": "6036", + "tls_require_cert": "never", + "tls_minimum_protocol_version": version, + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + } + } + + users = [{ + "server": "openldap4", "username": "user4", "password": "user4", + "login": True, "exitcode": int(exitcode) if exitcode is not None else None, "message": message + }] + + # Note: this code was an attempt to produce a 
negative case but did not work + # ldap_node = self.context.cluster.node("openldap4") + # ldif = ( + # "dn: cn=config\n" + # "changetype: modify\n" + # "replace: olcTLSProtocolMin\n" + # "olcTLSProtocolMin: 3.5" + # ) + # + # r = ldap_node.command( + # f"echo -e \"{ldif}\" | ldapmodify -x -H ldaps://localhost:6036 -D \"cn=admin,cn=config\" -w config") + # + # ldap_node.restart() + + login(servers, *users) + +@TestFeature +@Name("connection protocols") +def feature(self, node="clickhouse1"): + self.context.node = self.context.cluster.node(node) + + for scenario in loads(current_module(), Scenario): + scenario() diff --git a/tests/testflows/ldap/tests/multiple_servers.py b/tests/testflows/ldap/tests/multiple_servers.py new file mode 100644 index 00000000000..aefc0116fa2 --- /dev/null +++ b/tests/testflows/ldap/tests/multiple_servers.py @@ -0,0 +1,38 @@ +from testflows.core import * +from testflows.asserts import error + +from ldap.tests.common import login +from ldap.requirements import RQ_SRS_007_LDAP_Authentication_MultipleServers + +@TestScenario +@Name("multiple servers") +@Requirements( + RQ_SRS_007_LDAP_Authentication_MultipleServers("1.0") +) +def scenario(self, node="clickhouse1"): + """Check that multiple LDAP servers can be used to + authenticate users. + """ + self.context.node = self.context.cluster.node(node) + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }, + "openldap2": { + "host": "openldap2", + "port": "636", + "enable_tls": "yes", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "never", + }, + } + users = [ + {"server": "openldap1", "username": "user1", "password": "user1", "login": True}, + {"server": "openldap2", "username": "user2", "password": "user2", "login": True} + ] + login(servers, *users) diff --git a/tests/testflows/ldap/tests/sanity.py b/tests/testflows/ldap/tests/sanity.py new file mode 100644 index 00000000000..9e5d8a2ddd7 --- /dev/null +++ b/tests/testflows/ldap/tests/sanity.py @@ -0,0 +1,42 @@ +from testflows.core import * +from testflows.asserts import error + +from ldap.tests.common import add_user_to_ldap, delete_user_from_ldap + +@TestScenario +@Name("sanity") +def scenario(self, server="openldap1"): + """Check that LDAP server is up and running by + executing ldapsearch, ldapadd, and ldapdelete commands. 
+ """ + self.context.ldap_node = self.context.cluster.node(server) + + with When("I search LDAP database"): + r = self.context.ldap_node.command( + "ldapsearch -x -H ldap://localhost -b \"dc=company,dc=com\" -D \"cn=admin,dc=company,dc=com\" -w admin") + assert r.exitcode == 0, error() + + with Then("I should find an entry for user1"): + assert "dn: cn=user1,ou=users,dc=company,dc=com" in r.output, error() + + with When("I add new user to LDAP"): + user = add_user_to_ldap(cn="myuser", userpassword="myuser") + + with And("I search LDAP database again"): + r = self.context.ldap_node.command( + "ldapsearch -x -H ldap://localhost -b \"dc=company,dc=com\" -D \"cn=admin,dc=company,dc=com\" -w admin") + assert r.exitcode == 0, error() + + with Then("I should find an entry for the new user"): + assert f"dn: {user['dn']}" in r.output, error() + + with When("I delete user from LDAP"): + delete_user_from_ldap(user) + + with And("I search LDAP database again"): + r = self.context.ldap_node.command( + "ldapsearch -x -H ldap://localhost -b \"dc=company,dc=com\" -D \"cn=admin,dc=company,dc=com\" -w admin") + assert r.exitcode == 0, error() + + with Then("I should not find an entry for the deleted user"): + assert f"dn: {user['dn']}" not in r.output, error() diff --git a/tests/testflows/ldap/tests/server_config.py b/tests/testflows/ldap/tests/server_config.py new file mode 100644 index 00000000000..f3d03434afe --- /dev/null +++ b/tests/testflows/ldap/tests/server_config.py @@ -0,0 +1,263 @@ +from testflows.core import * + +from ldap.tests.common import * +from ldap.requirements import * + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_Name("1.0") +) +def empty_server_name(self, timeout=20): + """Check that empty string as a server name is not allowed. + """ + servers = {"": {"host": "foo", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }} + invalid_server_config(servers, timeout=timeout) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0"), + RQ_SRS_007_LDAP_UnreachableServer("1.0") +) +def invalid_host(self): + """Check that server returns an error when LDAP server + host name is invalid. + """ + servers = {"foo": {"host": "foo", "port": "389", "enable_tls": "no"}} + users = [{ + "server": "foo", "username": "user1", "password": "user1", "login": True, + "exitcode": 20, "message": "DB::Exception: Can't contact LDAP server" + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_Host("1.0") +) +def empty_host(self): + """Check that server returns an error when LDAP server + host value is empty. + """ + servers = {"foo": {"host": "", "port": "389", "enable_tls": "no"}} + users = [{ + "server": "foo", "username": "user1", "password": "user1", "login": True, + "exitcode": 36, "message": "DB::Exception: LDAP server 'foo' is not configured." + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_Host("1.0") +) +def missing_host(self): + """Check that server returns an error when LDAP server + host is missing. 
+ """ + servers = {"foo": {"port": "389", "enable_tls": "no"}} + users = [{ + "server": "foo", "username": "user1", "password": "user1", "login": True, + "exitcode": 36, "message": "DB::Exception: LDAP server 'foo' is not configured." + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") +) +def invalid_port(self): + """Check that server returns an error when LDAP server + port is not valid. + """ + servers = {"openldap1": {"host": "openldap1", "port": "3890", "enable_tls": "no"}} + users = [{ + "server": "openldap1", "username": "user1", "password": "user1", "login": True, + "exitcode": 20, "message": "DB::Exception: Can't contact LDAP server." + }] + login(servers, *users) + + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") +) +def invalid_auth_dn_prefix(self): + """Check that server returns an error when LDAP server + port is not valid. + """ + servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "foo=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }} + users = [{ + "server": "openldap1", "username": "user1", "password": "user1", "login": True, + "exitcode": 20, "message": "DB::Exception: Invalid DN syntax: invalid DN" + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") +) +def invalid_auth_dn_suffix(self): + """Check that server returns an error when LDAP server + port is not valid. + """ + servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",foo=users,dc=company,dc=com" + }} + users = [{ + "server": "openldap1", "username": "user1", "password": "user1", "login": True, + "exitcode": 20, "message": "DB::Exception: Invalid DN syntax: invalid DN" + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") +) +def invalid_enable_tls_value(self): + """Check that server returns an error when enable_tls + option has invalid value. + """ + servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "foo", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }} + users = [{ + "server": "openldap1", "username": "user1", "password": "user1", "login": True, + "exitcode": 36, "message": "DB::Exception: LDAP server 'openldap1' is not configured" + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") +) +def invalid_tls_require_cert_value(self): + """Check that server returns an error when tls_require_cert + option has invalid value. + """ + servers = {"openldap2": { + "host": "openldap2", "port": "636", "enable_tls": "yes", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "foo", + "ca_cert_dir": "/container/service/slapd/assets/certs/", + "ca_cert_file": "/container/service/slapd/assets/certs/ca.crt" + }} + users = [{ + "server": "openldap2", "username": "user2", "password": "user2", "login": True, + "exitcode": 36, "message": "DB::Exception: LDAP server 'openldap2' is not configured" + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") +) +def empty_ca_cert_dir(self): + """Check that server returns an error when ca_cert_dir is empty. 
+ """ + servers = {"openldap2": {"host": "openldap2", "port": "636", "enable_tls": "yes", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "demand", + "ca_cert_dir": "", + "ca_cert_file": "/container/service/slapd/assets/certs/ca.crt" + }} + users = [{ + "server": "openldap2", "username": "user2", "password": "user2", "login": True, + "exitcode": 20, + "message": "DB::Exception: Can't contact LDAP server: error:14000086:SSL routines::certificate verify failed (self signed certificate in certificate chain" + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") +) +def empty_ca_cert_file(self): + """Check that server returns an error when ca_cert_file is empty. + """ + servers = {"openldap2": {"host": "openldap2", "port": "636", "enable_tls": "yes", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "demand", + "ca_cert_dir": "/container/service/slapd/assets/certs/", + "ca_cert_file": "" + }} + users = [{ + "server": "openldap2", "username": "user2", "password": "user2", "login": True, + "exitcode": 20, + "message": "Received from localhost:9000. DB::Exception: Can't contact LDAP server: error:14000086:SSL routines::certificate verify failed (self signed certificate in certificate chain)" + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Value("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Prefix("1.0"), + RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Suffix("1.0") +) +def auth_dn_value(self): + """Check that server configuration can properly define the `dn` value of the user.""" + servers = { + "openldap1": { + "host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }} + user = {"server": "openldap1", "username": "user1", "password": "user1", "login": True} + + login(servers, user) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_Syntax("1.0") +) +def syntax(self): + """Check that server configuration with valid syntax can be loaded. + ```xml + + + localhost + 636 + cn= + , ou=users, dc=example, dc=com + yes + tls1.2 + demand + /path/to/tls_cert_file + /path/to/tls_key_file + /path/to/tls_ca_cert_file + /path/to/tls_ca_cert_dir + ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384 + + + ``` + """ + servers = { + "openldap2": { + "host": "openldap2", + "port": "389", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "enable_tls": "yes", + "tls_minimum_protocol_version": "tls1.2" , + "tls_require_cert": "demand", + "tls_cert_file": "/container/service/slapd/assets/certs/ldap.crt", + "tls_key_file": "/container/service/slapd/assets/certs/ldap.key", + "tls_ca_cert_file": "/container/service/slapd/assets/certs/ca.crt", + "tls_ca_cert_dir": "/container/service/slapd/assets/certs/", + "tls_cipher_suite": "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384" + } + } + with ldap_servers(servers): + pass + +@TestFeature +@Name("server config") +def feature(self, node="clickhouse1"): + """Check that LDAP server configuration. 
+ """ + self.context.node = self.context.cluster.node(node) + for scenario in loads(current_module(), Scenario): + scenario() diff --git a/tests/testflows/ldap/tests/user_config.py b/tests/testflows/ldap/tests/user_config.py new file mode 100644 index 00000000000..edc85a5877e --- /dev/null +++ b/tests/testflows/ldap/tests/user_config.py @@ -0,0 +1,162 @@ +import xml.etree.ElementTree as xmltree + +from testflows.core import * + +from ldap.tests.common import * +from ldap.requirements import * + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_User_Configuration_Invalid("1.0"), + RQ_SRS_007_LDAP_Configuration_User_Name_Empty("1.0") +) +def empty_user_name(self, timeout=20): + """Check that empty string as a user name is not allowed. + """ + servers = {"openldap1": { + "host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }} + users = [{"server": "openldap1", "username": "", "password": "user1", "login": True}] + config = create_ldap_users_config_content(*users) + invalid_user_config(servers, config, timeout=timeout) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_User_Configuration_Invalid("1.0"), + RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_Empty("1.0") +) +def empty_server_name(self, timeout=20): + """Check that if server name is an empty string then login is not allowed. + """ + servers = {"openldap1": { + "host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }} + users = [{"server": "", "username": "user1", "password": "user1", "login": True, + "errorcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_User_Configuration_Invalid("1.0"), + RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_NotDefined("1.0") +) +def empty_server_not_defined(self, timeout=20): + """Check that if server is not defined then login is not allowed. 
+ """ + servers = {"openldap1": { + "host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }} + users = [{"server": "foo", "username": "user1", "password": "user1", "login": True, + "errorcode": 36, + "message": "DB::Exception: LDAP server 'foo' is not configured" + }] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Configuration_User_Syntax("1.0") +) +def valid_user_config(self): + """Check syntax of valid user configuration of LDAP authenticated user.""" + servers = {"openldap1": { + "host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }} + users = [{"server": "openldap1", "username": "user1", "password": "user1", "login": True}] + login(servers, *users) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Configuration_User_OnlyOneServer("1.0") +) +def multiple_servers(self, timeout=20): + """Check that user configuration allows to specify only one LDAP server for a given user + and if multiple servers are specified then the first one is used.""" + servers = { + "openldap1": { + "host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }, + "openldap2": { + "host": "openldap2", "enable_tls": "yes", "tls_require_cert": "never", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }, + } + user = {"server": "openldap1", "username": "user1", "password": "user1", "login": True} + + with When("I first create regular user configuration file"): + config = create_ldap_users_config_content(user) + + with And("I modify it to add another server"): + root = xmltree.fromstring(config.content) + xml_users = root.find("users") + xml_users.append(xmltree.Comment(text=f"LDAP users {config.uid}")) + xml_user_ldap = xml_users.find(user["username"]).find("ldap") + xml_append(xml_user_ldap, "server", "openldap2") + xml_indent(root) + content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8") + + new_config = Config(content, config.path, config.name, config.uid, config.preprocessed_name) + + with Then("I login and expect it to work as the first server shall be used"): + login(servers, user, config=new_config) + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Configuration_User_BothPasswordAndLDAP("1.0") +) +def ldap_and_password(self): + """Check that user can't be authenticated if both `ldap` and `password` + is specified for the same user. We expect an error message to be present in the log + and login attempt to fail. 
+ """ + node = self.context.node + servers = { + "openldap1": { + "host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }, + } + user = { + "server": "openldap1", "username": "user1", "password": "user1", "login": True, + "errorcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" + } + + with When("I first create regular user configuration file"): + config = create_ldap_users_config_content(user) + + with And("I modify it to add explicit password"): + root = xmltree.fromstring(config.content) + xml_users = root.find("users") + xml_users.append(xmltree.Comment(text=f"LDAP users {config.uid}")) + xml_user = xml_users.find(user["username"]) + xml_append(xml_user, "password", "hellothere") + xml_indent(root) + content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8") + + new_config = Config(content, config.path, config.name, config.uid, config.preprocessed_name) + + error_message = "DB::Exception: More than one field of 'password'" + + with Then("I expect an error when I try to load the configuration file", description=error_message): + invalid_user_config(servers, new_config, message=error_message, tail=16) + + with And("I expect the authentication to fail when I try to login"): + login(servers, user, config=new_config) + +@TestFeature +@Name("user config") +def feature(self, node="clickhouse1"): + """Check that server returns an error and prohibits + user login if LDAP users configuration is not valid. + """ + self.context.node = self.context.cluster.node(node) + + for scenario in loads(current_module(), Scenario): + scenario() diff --git a/tests/testflows/regression.py b/tests/testflows/regression.py index 0f74b6e82cd..850c0be2433 100755 --- a/tests/testflows/regression.py +++ b/tests/testflows/regression.py @@ -12,8 +12,10 @@ from helpers.argparser import argparser def regression(self, local, clickhouse_binary_path): """ClickHouse regression. 
""" - Feature(test=load("example.regression", "regression"))( - local=local, clickhouse_binary_path=clickhouse_binary_path) + args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path} + + Feature(test=load("example.regression", "regression"))(**args) + Feature(test=load("ldap.regression", "regression"))(**args) if main(): regression() diff --git a/utils/ci/jobs/quick-build/run.sh b/utils/ci/jobs/quick-build/run.sh index 10da06f7414..3d755625c8d 100755 --- a/utils/ci/jobs/quick-build/run.sh +++ b/utils/ci/jobs/quick-build/run.sh @@ -21,7 +21,7 @@ BUILD_TARGETS=clickhouse BUILD_TYPE=Debug ENABLE_EMBEDDED_COMPILER=0 -CMAKE_FLAGS="-D CMAKE_C_FLAGS_ADD=-g0 -D CMAKE_CXX_FLAGS_ADD=-g0 -D ENABLE_JEMALLOC=0 -D ENABLE_CAPNP=0 -D ENABLE_RDKAFKA=0 -D ENABLE_UNWIND=0 -D ENABLE_ICU=0 -D ENABLE_POCO_MONGODB=0 -D ENABLE_POCO_REDIS=0 -D ENABLE_POCO_NETSSL=0 -D ENABLE_ODBC=0 -D ENABLE_MYSQL=0 -D ENABLE_SSL=0 -D ENABLE_POCO_NETSSL=0 -D ENABLE_CASSANDRA=0" +CMAKE_FLAGS="-D CMAKE_C_FLAGS_ADD=-g0 -D CMAKE_CXX_FLAGS_ADD=-g0 -D ENABLE_JEMALLOC=0 -D ENABLE_CAPNP=0 -D ENABLE_RDKAFKA=0 -D ENABLE_UNWIND=0 -D ENABLE_ICU=0 -D ENABLE_POCO_MONGODB=0 -D ENABLE_POCO_REDIS=0 -D ENABLE_POCO_NETSSL=0 -D ENABLE_ODBC=0 -D ENABLE_MYSQL=0 -D ENABLE_SSL=0 -D ENABLE_POCO_NETSSL=0 -D ENABLE_CASSANDRA=0 -D ENABLE_LDAP=0" [[ $(uname) == "FreeBSD" ]] && COMPILER_PACKAGE_VERSION=devel && export COMPILER_PATH=/usr/local/bin diff --git a/website/benchmark/hardware/index.html b/website/benchmark/hardware/index.html index 88ddb2d0868..8db3a28ba3d 100644 --- a/website/benchmark/hardware/index.html +++ b/website/benchmark/hardware/index.html @@ -65,7 +65,9 @@ Results for AMD EPYC 7702 are from Peng Gao in sina.com.
Results for Intel NUC are from Alexander Zaitsev, Altinity.<br/>
Xeon Gold 6230 server is using 4 x SAMSUNG datacenter class SSD in RAID-10.<br/>
Results for Yandex Managed ClickHouse for "cold cache" are biased and should not be compared, because cache was not flushed for every next query.<br/>
-Results for AWS Lightsail is from Vamsi Krishna B.
+Results for AWS Lightsail are from Vamsi Krishna B.<br/>
+Results for Dell XPS laptop and Google Pixel phone are from Alexander Kuzmenkov.<br/>
+Results for Android phones for "cold cache" are done without cache flushing, so they are not "cold" and cannot be compared.<br/>

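Each results file below is a JSON list with one object per system; its "result" field holds per-query run times, with null marking a query that did not finish. A minimal sketch of how the two new files could be compared (file names as in this diff; the geometric mean of best-run ratios is an assumed methodology, not necessarily what the benchmark page itself uses):

```python
import json
import math

def load_results(path):
    """Read the per-query run times from a hardware benchmark results file."""
    with open(path) as f:
        return json.load(f)[0]["result"]

def relative_slowdown(baseline, other):
    """Geometric mean of per-query best-run ratios, skipping queries
    that did not complete (JSON null -> Python None) on either system."""
    ratios = []
    for base_runs, other_runs in zip(baseline, other):
        if None in base_runs or None in other_runs:
            continue  # e.g. the phone ran out of memory on this query
        best_base, best_other = min(base_runs), min(other_runs)
        if best_base > 0 and best_other > 0:
            ratios.append(best_other / best_base)
    return math.exp(sum(math.log(r) for r in ratios) / len(ratios))

laptop = load_results("047_dell_xps.json")
phone = load_results("048_pixel_3a.json")
print(f"Pixel 3a vs XPS 15: ~{relative_slowdown(laptop, phone):.1f}x slower")
```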
diff --git a/website/benchmark/hardware/results/047_dell_xps.json b/website/benchmark/hardware/results/047_dell_xps.json new file mode 100644 index 00000000000..828f743cbc9 --- /dev/null +++ b/website/benchmark/hardware/results/047_dell_xps.json @@ -0,0 +1,56 @@ +[ + { + "system": "Dell XPS 15", + "system_full": "Dell XPS 15, 6 cores, 32 GB RAM", + "cpu_vendor": "Intel", + "cpu_model": "i7-8750H CPU @ 2.20GHz", + "time": "2020-07-15 00:00:00", + "kind": "laptop", + "result": + [ + [0.026, 0.029, 0.001], + [0.087, 0.055, 0.020], + [0.106, 0.065, 0.063], + [0.163, 0.110, 0.107], + [0.312, 0.268, 0.262], + [0.481, 0.449, 0.436], + [0.037, 0.030, 0.024], + [0.024, 0.028, 0.019], + [0.691, 0.622, 0.629], + [0.865, 0.744, 0.742], + [0.364, 0.361, 0.330], + [0.375, 0.341, 0.334], + [1.119, 1.045, 1.055], + [1.445, 1.350, 1.329], + [1.129, 1.040, 1.081], + [1.272, 1.187, 1.239], + [2.854, 2.655, 2.659], + [2.073, 1.673, 1.635], + [5.835, 5.486, 5.675], + [0.220, 0.113, 0.119], + [1.901, 1.753, 1.752], + [2.206, 2.023, 1.996], + [4.556, 4.155, 4.143], + [3.025, 2.424, 2.411], + [0.550, 0.436, 0.427], + [0.418, 0.371, 0.353], + [0.556, 0.438, 0.426], + [2.275, 2.122, 2.094], + [2.594, 2.548, 2.494], + [3.739, 3.788, 3.783], + [1.086, 0.956, 0.962], + [1.908, 1.564, 1.542], + [8.627, 8.810, 8.552], + [6.310, 5.995, 6.061], + [6.197, 6.148, 6.098], + [1.995, 1.916, 1.924], + [0.025, 0.012, 0.006], + [0.005, 0.013, 0.006], + [0.006, 0.004, 0.004], + [0.008, 0.011, 0.005], + [0.009, 0.007, 0.006], + [0.005, 0.007, 0.004], + [0.005, 0.007, 0.011] + ] + } +] diff --git a/website/benchmark/hardware/results/048_pixel_3a.json b/website/benchmark/hardware/results/048_pixel_3a.json new file mode 100644 index 00000000000..d219f41f785 --- /dev/null +++ b/website/benchmark/hardware/results/048_pixel_3a.json @@ -0,0 +1,57 @@ +[ + { + "system": "Google Pixel 3a", + "system_full": "Google Pixel 3a, 8 cores, 4 GB RAM", + "cpu_vendor": "Qualcomm", + "cpu_model": "Snapdragon 670", + "time": "2020-07-15 00:00:00", + "kind": "phone", + "result": + [ + [0.027, 0.032, 0.023], + [0.214, 0.243, 0.272], + [0.498, 0.369, 0.427], + [0.796, 0.549, 0.546], + [1.032, 1.056, 1.073], + [1.961, 1.913, 1.997], + [0.424, 0.403, 0.405], + [0.275, 0.464, 0.245], + [2.622, 2.620, 2.515], + [3.044, 3.038, 3.014], + [1.788, 1.874, 1.917], + [1.639, 1.664, 1.652], + [null, null, null], + [null, null, null], + [5.833, 5.118, 5.167], + [5.307, 5.238, 5.350], + [null, null, null], + [null, null, null], + [null, null, null], + [0.818, 0.509, 0.609], + [6.339, 6.158, 6.137], + [7.109, 6.939, 6.970], + [null, null, null], + [null, null, null], + [2.112, 1.781, 1.724], + [1.493, 1.447, 1.470], + [1.874, 1.792, 1.777], + [6.916, 6.621, 6.670], + [9.732, 9.943, 9.433], + [null, null, null], + [4.333, 3.707, 3.740], + [6.902, 7.630, 7.492], + [null, null, null], + [null, null, null], + [null, null, null], + [null, null, null], + [0.077, 0.039, 0.034], + [0.047, 0.038, 0.041], + [0.040, 0.037, 0.037], + [0.069, 0.047, 0.043], + [0.055, 0.040, 0.046], + [0.053, 0.038, 0.047], + [0.048, 0.031, 0.034] + ] + } +] + diff --git a/website/blog/en/2020/pixel-benchmark.md b/website/blog/en/2020/pixel-benchmark.md new file mode 100644 index 00000000000..b9be7638c38 --- /dev/null +++ b/website/blog/en/2020/pixel-benchmark.md @@ -0,0 +1,84 @@ +--- +title: 'Running ClickHouse on an Android phone' +image: 'https://blog-images.clickhouse.tech/en/2020/pixel-benchmark/main.jpg' +date: '2020-07-16' +author: '[Alexander Kuzmenkov](https://github.com/akuzm)' +tags: 
['Android', 'benchmark', 'experiment']
+---
+
+
+This is a brief description of my experiments with building ClickHouse on Android. If this is your first time hearing about ClickHouse, it is a surprisingly fast columnar SQL DBMS for real-time reporting. It's normally used in AdTech and the like, deployed on clusters of hundreds of machines, holding up to petabytes of data. But ClickHouse is straightforward to use on a smaller scale as well — your laptop will do, and don't be surprised if you are able to process several gigabytes of data per second on this hardware. There is another kind of small-scale, though pretty powerful, platform that is ubiquitous now — smartphones. The conclusion inevitably follows: you must be able to run ClickHouse on your smartphone as well. Also, I can't help but chuckle at the idea of setting up a high performance mobile OLAP cluster using a dozen phones. Or at the idea of seeing the nostalgic `Segmentation fault (core dumped)` on the lovely OLED screen, but I digress. Let's get it going.
+
+## First cheap attempt
+
+I heard somewhere that Android uses the Linux kernel, and I can already run a familiar UNIX-like shell and tools using [Termux](https://termux.com/). And ClickHouse already supports the ARM platform and even publishes a binary built for 64-bit ARM. This binary also doesn't have a lot of dependencies — only a pretty old version of `glibc`. Maybe I can just download a ClickHouse binary from CI to the phone and run it?
+
+Turns out it's not that simple.
+
+* The first thing we'll see after trying to run is an absurd error message: `./clickhouse: file is not found`. But it's right there! `strace` helps: what cannot be found is `/lib64/ld-linux-x86-64.so.2`, the linker specified in the ClickHouse binary. The linker, in this case, is a system program that initially loads the application binary and its dependencies before passing control to the application. Android uses a different linker located at another path, which is why we get the error. This problem can be overcome if we call the linker explicitly, e.g. `/system/bin/linker64 $(readlink -f ./clickhouse)`.
+
+* Immediately we encounter another problem: the linker complains that the binary has the wrong type `ET_EXEC`. What does this mean? Android binaries must support dynamic relocation, so that they can be loaded at any address, probably for ASLR purposes. ClickHouse binaries do not normally use position-independent code, because we have measured that it gives a small performance penalty of about 1%. After tweaking compilation and linking flags to include `-fPIC` as much as possible, and battling some really weird linker errors, we finally arrive at a relocatable binary that has the correct type `ET_DYN`.
+
+* But it only gets worse. Now it complains about the TLS section offset being wrong. After reading some mail archives where I could barely understand a word, I concluded that Android uses a different memory layout for the section of the executable that holds thread-local variables, and `clang` from the Android toolchain is patched to account for this. After that, I had to accept I won't be able to use familiar tools, and reluctantly turned to the Android toolchain.
+
+## Using the Android toolchain
+
+Surprisingly, it's rather simple to set up. Our build system uses CMake and already supports cross-compilation — we have CI configurations that cross-compile for Mac, AArch64 Linux and FreeBSD.
The Android NDK also has integration with CMake and a [manual](https://developer.android.com/ndk/guides/cmake) on how to set it up. Download the Android NDK, add some flags to your `cmake` invocation: `-DCMAKE_TOOLCHAIN_FILE=~/android-ndk-r21d/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=28`, and you're done. It (almost) builds. What obstacles do we have this time?
+
+* Our `glibc` compatibility layer has a lot of compilation errors. It borrows `musl` code to provide functions that are absent from older versions of `glibc`, so that we can run the same binary on a wide range of distros. Being heavily dependent on system headers, it runs into all kinds of differences between Linux and Android, such as the limited scope of `pthread` support or just subtly different API variants. Thankfully we're building for a particular version of Android, so we can just disable this and use all needed functions straight from the system `libc`.
+* Some third-party libraries and our CMake files are broken in various unimaginative ways. Just disable everything we can and fix everything we can't.
+* Some of our code uses `#if defined(__linux__)` to check for the Linux platform. This doesn't always work, because Android also exports `__linux__` but there are some API differences.
+* `std::filesystem` is still not fully supported in NDK r21. The support went into r22, which is scheduled for Q3 2020, but I want it right now... Good that we bundle our own forks of `libcxx` and `libcxxabi` to reduce dependencies, and they are fresh enough to fully support C++20. After enabling them, everything works.
+* Weird twenty-screen errors in `std::map` or something like that, which are also resolved by using our `libcxx`.
+
+## On the device
+
+At last, we have a binary we can actually run. Copy it to the phone, `chmod +x`, `./clickhouse server --config-path db/config.xml`, run some queries, it works!
+
+
+
+Feels so good to see my favorite message.
+
+It's a full-fledged development environment here in Termux, so let's install `gdb` and attach it to see where the segfault happens. Run `gdb clickhouse --ex run '--config-path ....'`, wait for it to launch for a minute, only to see how Android kills Termux because it is out of memory. Are 4 GB of RAM not enough, after all? Looking at the `clickhouse` binary, its size is a whopping 1.1 GB. The major part of the bloat is due to the fact that some of our computational code is heavily specialized for particular data types (mostly via C++ templates), and also the fact that we build and link a lot of third-party libraries statically. A non-essential part of the binary is debug symbols, which help to produce good stack traces in error messages. We can remove them with `strip -s ./clickhouse` right here on the phone, and after that, the size becomes more manageable, about 400 MB. Finally we can run `gdb` and see that the segfault is somewhere in `unw_backtrace`:
+
+```
+Thread 60 "ConfigReloader" received signal SIGSEGV, Segmentation fault.
+[Switching to LWP 21873]
+0x000000556a73f740 in ?? ()
+
+(gdb) whe 20
+#0 0x000000556a73f740 in ?? ()
+#1 0x000000556a744028 in ?? ()
+#2 0x000000556a73e5a0 in ?? ()
+#3 0x000000556a73d250 in unw_init_local ()
+#4 0x000000556a73deb8 in unw_backtrace ()
+#5 0x0000005562aabb54 in StackTrace::tryCapture() ()
+#6 0x0000005562aabb10 in StackTrace::StackTrace() ()
+#7 0x0000005562a8d73c in MemoryTracker::alloc(long) ()
+#8 0x0000005562a8db38 in MemoryTracker::alloc(long) ()
+#9 0x0000005562a8e8bc in CurrentMemoryTracker::alloc(long) ()
+#10 0x0000005562a8b88c in operator new[](unsigned long) ()
+#11 0x0000005569c35f08 in Poco::XML::NamePool::NamePool(unsigned long) ()
+...
+```
+
+What is this function, and why do we need it? In this particular stack trace, we're out of memory, and about to throw an exception for that. `unw_backtrace` is called to produce a backtrace for the exception message. But there is another interesting context where we call it. Believe it or not, ClickHouse has a built-in `perf`-like sampling profiler that can save stack traces for CPU time and real time, and also memory allocations. The data is saved into a `system.trace_log` table, so you can build flame graphs of what your query was doing as simply as piping the output of an SQL query into `flamegraph.pl`. This is an interesting feature, but what is relevant now is that it sends signals to all threads of the server to interrupt them at some random time and save their current backtraces, using the same `unw_backtrace` function that we know to segfault. We expect the query profiler to be used in production environments, so it is enabled by default. After disabling it, we have a functioning ClickHouse server running on Android.
+
+## Is your phone good enough?
+
+There is a beaten genre of using data sets and queries of a varying degree of syntheticity to prove that a particular DBMS you work on has performance superior to other, less advanced, DBMSes. We've moved past that, and instead use the DBMS we love as a benchmark of hardware. For this benchmark we use a small 100M-row obfuscated data set from Yandex.Metrica, about 12 GB compressed, and some queries representative of Metrica dashboards. There is [this page](https://clickhouse.tech/benchmark/hardware/) with crowdsourced results for various cloud and traditional servers and even some laptops, but how do the phones compare? Let's find out. Following [the manual](https://clickhouse.tech/docs/en/operations/performance-test/) to download the necessary data to the phone and run the benchmark was pretty straightforward. One problem was that some queries can't run because they use too much memory and the server gets killed by Android, so I had to script around that, as sketched below. Also, I'm not sure how to reset the file system cache on Android, so the 'cold run' data is not correct. The results look pretty good:
+
+
+
+My phone is a Google Pixel 3a, and it is only 5 times slower on average than my Dell XPS 15 work laptop. The queries where the data doesn't fit into memory and has to go to disk (the flash, I mean) are noticeably slower, up to 20 times, but mostly they don't complete because the server gets killed — it only has about 3 GB of memory available. Overall I think the results look pretty good for the phone. High-end models should be even more performant, reaching performance comparable to some smaller laptops.
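+
+The "scripting around" for queries that get the server killed might look roughly like the sketch below (an illustration, not the exact script I used; it assumes the benchmark queries sit in a `queries.sql` file, and relies on the client's `--time` flag, which prints the elapsed seconds to stderr; failed runs are recorded as `None`, which is where the `null` entries in the published JSON results come from):
+
+```python
+import subprocess
+
+def run_query(query, runs=3, timeout=600):
+    """Time one benchmark query a few times, recording None when the
+    query fails, times out, or the server gets killed by Android."""
+    times = []
+    for _ in range(runs):
+        try:
+            r = subprocess.run(
+                ["./clickhouse", "client", "--time", "--query", query],
+                capture_output=True, text=True, timeout=timeout)
+            if r.returncode != 0:
+                times.append(None)  # a real script would also restart the server here
+                continue
+            # With --time, clickhouse client prints the elapsed seconds to stderr.
+            times.append(float(r.stderr.strip().splitlines()[-1]))
+        except (subprocess.TimeoutExpired, ValueError, IndexError):
+            times.append(None)
+    return times
+
+with open("queries.sql") as f:
+    results = [run_query(q) for q in f.read().split(";") if q.strip()]
+```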
+
+## Conclusion
+
+This was a rather enjoyable exercise. Running a server on your phone is a nice way to give a demo, so we should probably publish a Termux package for ClickHouse. For this, we have to debug and fix the `unw_backtrace` segfault (I have my fingers crossed that it will be gone after adding `-fno-omit-frame-pointer`), and also fix some quirks that are just commented out for now. Most of the changes required for the Android build are already merged into our master branch.
+
+Building for Android turned out to be relatively simple — all these experiments and writing took me about four days, and it was the first time I ever did any Android-related programming. The NDK was simple to use, and our code was cross-platform enough that I only had to make minor modifications. If we didn't routinely build for AArch64 and had a hard dependency on SSE 4.2 or something, it would have been a different story.
+
+But the most important takeaway is that now you don't have to obsess over choosing a new phone — just benchmark it with ClickHouse.
+
+
+_2020-07-16 [Alexander Kuzmenkov](https://github.com/akuzm)_
diff --git a/website/css/docs.css b/website/css/docs.css
index 14c72f1894b..ecd03a02422 100644
--- a/website/css/docs.css
+++ b/website/css/docs.css
@@ -1,5 +1,6 @@
 details {
     background: #444451;
+    color: #eee;
     padding: 1rem;
     margin-bottom: 1rem;
     margin-top: 1rem;
@@ -7,7 +8,7 @@ details {
 
 summary {
     font-weight: bold;
-    color: #fff;
+    color: #eee;
 }
 
 #sidebar {