Merge branch 'master' into add-test-for-lost-replicas-cleanup

Alexey Milovidov 2020-07-17 08:42:35 +03:00
commit 601c45f4a5
182 changed files with 6519 additions and 372 deletions

View File

@ -7,7 +7,7 @@
#
# Sets values of:
# OPENLDAP_FOUND - TRUE if found
# OPENLDAP_INCLUDE_DIR - path to the include directory
# OPENLDAP_INCLUDE_DIRS - paths to the include directories
# OPENLDAP_LIBRARIES - paths to the libldap and liblber libraries
# OPENLDAP_LDAP_LIBRARY - paths to the libldap library
# OPENLDAP_LBER_LIBRARY - paths to the liblber library
@ -28,11 +28,11 @@ if(OPENLDAP_USE_REENTRANT_LIBS)
endif()
if(OPENLDAP_ROOT_DIR)
find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH)
find_path(OPENLDAP_INCLUDE_DIRS NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH)
find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH)
find_library(OPENLDAP_LBER_LIBRARY NAMES "lber" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH)
else()
find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h")
find_path(OPENLDAP_INCLUDE_DIRS NAMES "ldap.h" "lber.h")
find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}")
find_library(OPENLDAP_LBER_LIBRARY NAMES "lber")
endif()
@ -44,10 +44,10 @@ set(OPENLDAP_LIBRARIES ${OPENLDAP_LDAP_LIBRARY} ${OPENLDAP_LBER_LIBRARY})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
OpenLDAP DEFAULT_MSG
OPENLDAP_INCLUDE_DIR OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY
OPENLDAP_INCLUDE_DIRS OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY
)
mark_as_advanced(OPENLDAP_INCLUDE_DIR OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY)
mark_as_advanced(OPENLDAP_INCLUDE_DIRS OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY)
if(OPENLDAP_USE_STATIC_LIBS)
set(CMAKE_FIND_LIBRARY_SUFFIXES ${_orig_CMAKE_FIND_LIBRARY_SUFFIXES})
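For reference, a minimal sketch of how a consumer project might use the variables this module exports after this change; the target name below is a placeholder, not part of this commit:

``` cmake
# Hypothetical consumer; 'my_target' is a placeholder.
set(OPENLDAP_USE_REENTRANT_LIBS 1)
find_package(OpenLDAP)
if(OPENLDAP_FOUND)
    target_include_directories(my_target SYSTEM PRIVATE ${OPENLDAP_INCLUDE_DIRS})
    target_link_libraries(my_target PRIVATE ${OPENLDAP_LIBRARIES})
endif()
```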

View File

@ -1,4 +1,5 @@
option(ENABLE_AMQPCPP "Enable AMQP-CPP" ${ENABLE_LIBRARIES})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/CMakeLists.txt")
message (WARNING "submodule contrib/AMQP-CPP is missing. to fix try run: \n git submodule update --init --recursive")
set (ENABLE_AMQPCPP 0)

View File

@ -1,3 +1,7 @@
option (ENABLE_GTEST_LIBRARY "Enable gtest library" ${ENABLE_LIBRARIES})
if (ENABLE_GTEST_LIBRARY)
option (USE_INTERNAL_GTEST_LIBRARY "Set to FALSE to use system Google Test instead of bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest/CMakeLists.txt")
@ -28,4 +32,6 @@ if((GTEST_INCLUDE_DIRS AND GTEST_BOTH_LIBRARIES) OR GTEST_SRC_DIR)
set(USE_GTEST 1)
endif()
endif()
message (STATUS "Using gtest=${USE_GTEST}: ${GTEST_INCLUDE_DIRS} : ${GTEST_BOTH_LIBRARIES} : ${GTEST_SRC_DIR}")

View File

@ -16,11 +16,16 @@ if (ENABLE_LDAP)
set (OPENLDAP_USE_REENTRANT_LIBS 1)
if (NOT USE_INTERNAL_LDAP_LIBRARY)
if (APPLE AND NOT OPENLDAP_ROOT_DIR)
set (OPENLDAP_ROOT_DIR "/usr/local/opt/openldap")
endif ()
if (OPENLDAP_USE_STATIC_LIBS)
message (WARNING "Unable to use external static OpenLDAP libraries, falling back to the bundled version.")
set (USE_INTERNAL_LDAP_LIBRARY 1)
else ()
if (APPLE AND NOT OPENLDAP_ROOT_DIR)
set (OPENLDAP_ROOT_DIR "/usr/local/opt/openldap")
endif ()
find_package (OpenLDAP)
find_package (OpenLDAP)
endif ()
endif ()
if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY)
@ -54,7 +59,10 @@ if (ENABLE_LDAP)
else ()
set (USE_INTERNAL_LDAP_LIBRARY 1)
set (OPENLDAP_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap")
set (OPENLDAP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap/include")
set (OPENLDAP_INCLUDE_DIRS
"${ClickHouse_SOURCE_DIR}/contrib/openldap-cmake/${_system_name}_${_system_processor}/include"
"${ClickHouse_SOURCE_DIR}/contrib/openldap/include"
)
# Below, 'ldap'/'ldap_r' and 'lber' will be resolved to
# the targets defined in contrib/openldap-cmake/CMakeLists.txt
if (OPENLDAP_USE_REENTRANT_LIBS)
@ -73,4 +81,4 @@ if (ENABLE_LDAP)
endif ()
endif ()
message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIR} : ${OPENLDAP_LIBRARIES}")
message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIRS} : ${OPENLDAP_LIBRARIES}")

View File

@ -1,3 +1,7 @@
option(ENABLE_GSASL_LIBRARY "Enable gsasl library" ${ENABLE_LIBRARIES})
if (ENABLE_GSASL_LIBRARY)
option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src/gsasl.h")
@ -24,4 +28,6 @@ if(LIBGSASL_LIBRARY AND LIBGSASL_INCLUDE_DIR)
set (USE_LIBGSASL 1)
endif()
endif()
message (STATUS "Using libgsasl=${USE_LIBGSASL}: ${LIBGSASL_INCLUDE_DIR} : ${LIBGSASL_LIBRARY}")

View File

@ -1,3 +1,7 @@
option (ENABLE_MSGPACK "Enable msgpack library" ${ENABLE_LIBRARIES})
if (ENABLE_MSGPACK)
option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library instead of bundled" ${NOT_UNBUNDLED})
if (USE_INTERNAL_MSGPACK_LIBRARY)
@ -14,4 +18,10 @@ else()
find_path(MSGPACK_INCLUDE_DIR NAMES msgpack.hpp PATHS ${MSGPACK_INCLUDE_PATHS})
endif()
message(STATUS "Using msgpack: ${MSGPACK_INCLUDE_DIR}")
if (MSGPACK_INCLUDE_DIR)
set(USE_MSGPACK 1)
endif()
endif()
message(STATUS "Using msgpack=${USE_MSGPACK}: ${MSGPACK_INCLUDE_DIR}")

View File

@ -22,7 +22,7 @@ elseif (COMPILER_CLANG)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${APPLE_CLANG_MINIMUM_VERSION})
message (FATAL_ERROR "AppleClang compiler version must be at least ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).")
elseif (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0.0)
# char8_t is available staring (upstream vanilla) Clang 7, but prior to Clang 8,
# char8_t is available starting (upstream vanilla) Clang 7, but prior to Clang 8,
# it is not enabled by -std=c++20 and can be enabled with an explicit -fchar8_t.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fchar8_t")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fchar8_t")
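As a quick illustration of why the flag matters, the following C++20 snippet uses the char8_t type; on upstream Clang 7, it compiles only when -fchar8_t is passed explicitly:

``` cpp
#include <cstdio>

int main()
{
    // In C++20, u8 literals have type char8_t, a distinct type from char.
    char8_t c = u8'a';
    std::printf("%d\n", static_cast<int>(c));
    return 0;
}
```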

View File

@ -102,7 +102,7 @@ if (USE_INTERNAL_SSL_LIBRARY)
add_library(OpenSSL::SSL ALIAS ${OPENSSL_SSL_LIBRARY})
endif ()
if (ENABLE_LDAP AND USE_INTERNAL_LDAP_LIBRARY)
if (USE_INTERNAL_LDAP_LIBRARY)
add_subdirectory (openldap-cmake)
endif ()

View File

@ -88,6 +88,10 @@
"name": "yandex/clickhouse-testflows-runner",
"dependent": []
},
"docker/test/fasttest": {
"name": "yandex/clickhouse-fasttest",
"dependent": []
},
"docker/test/integration/s3_proxy": {
"name": "yandex/clickhouse-s3-proxy",
"dependent": []
@ -96,4 +100,5 @@
"name": "yandex/clickhouse-python-bottle",
"dependent": []
}
}

View File

@ -0,0 +1,65 @@
# docker build -t yandex/clickhouse-fasttest .
FROM ubuntu:19.10
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
ENV COMMIT_SHA=''
ENV PULL_REQUEST_NUMBER=''
RUN apt-get --allow-unauthenticated update -y && apt-get install --yes wget gnupg
RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
RUN echo "deb [trusted=yes] http://apt.llvm.org/eoan/ llvm-toolchain-eoan-10 main" >> /etc/apt/sources.list
RUN apt-get --allow-unauthenticated update -y \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get --allow-unauthenticated install --yes --no-install-recommends \
bash \
fakeroot \
ccache \
software-properties-common \
apt-transport-https \
ca-certificates \
wget \
bash \
fakeroot \
cmake \
ccache \
llvm-10 \
clang-10 \
lld-10 \
clang-tidy-10 \
ninja-build \
gperf \
git \
tzdata \
gperf \
rename \
build-essential \
expect \
python \
python-lxml \
python-termcolor \
python-requests \
unixodbc \
qemu-user-static \
sudo \
moreutils \
curl \
brotli
RUN mkdir -p /tmp/clickhouse-odbc-tmp \
&& wget --quiet -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
&& cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib/ \
&& odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
&& odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
&& rm -rf /tmp/clickhouse-odbc-tmp
# This symlink is required by gcc to find the lld linker
RUN ln -s /usr/bin/lld-10 /usr/bin/ld.lld
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]

docker/test/fasttest/run.sh Executable file
View File

@ -0,0 +1,97 @@
#!/bin/bash
set -x -e
ls -la
git clone https://github.com/ClickHouse/ClickHouse.git | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/clone_log.txt
cd ClickHouse
CLICKHOUSE_DIR=`pwd`
if [ "$PULL_REQUEST_NUMBER" != "0" ]; then
if git fetch origin "+refs/pull/$PULL_REQUEST_NUMBER/merge"; then
git checkout FETCH_HEAD
echo 'Cloned merge head'
else
git fetch
git checkout $COMMIT_SHA
echo 'Checked out to commit'
fi
else
if [ "$COMMIT_SHA" != "" ]; then
git checkout $COMMIT_SHA
fi
fi
SUBMODULES_TO_UPDATE="contrib/boost contrib/zlib-ng contrib/libxml2 contrib/poco contrib/libunwind contrib/ryu contrib/fmtlib contrib/base64 contrib/cctz contrib/libcpuid contrib/double-conversion contrib/libcxx contrib/libcxxabi contrib/libc-headers contrib/lz4 contrib/zstd contrib/fastops contrib/rapidjson contrib/re2 contrib/sparsehash-c11"
git submodule update --init --recursive $SUBMODULES_TO_UPDATE | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/submodule_log.txt
export CMAKE_LIBS_CONFIG="-DENABLE_LIBRARIES=0 -DENABLE_TESTS=0 -DENABLE_UTILS=0 -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_THINLTO=0 -DUSE_UNWIND=1"
export CCACHE_DIR=/ccache
export CCACHE_BASEDIR=/ClickHouse
export CCACHE_NOHASHDIR=true
export CCACHE_COMPILERCHECK=content
export CCACHE_MAXSIZE=15G
ccache --show-stats ||:
ccache --zero-stats ||:
mkdir build
cd build
CLICKHOUSE_BUILD_DIR=`pwd`
cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 $CMAKE_LIBS_CONFIG | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/cmake_log.txt
ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/build_log.txt
ninja install | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/install_log.txt
ccache --show-stats ||:
mkdir -p /etc/clickhouse-server
mkdir -p /etc/clickhouse-client
mkdir -p /etc/clickhouse-server/config.d
mkdir -p /etc/clickhouse-server/users.d
mkdir -p /var/log/clickhouse-server
cp $CLICKHOUSE_DIR/programs/server/config.xml /etc/clickhouse-server/
cp $CLICKHOUSE_DIR/programs/server/users.xml /etc/clickhouse-server/
mkdir -p /etc/clickhouse-server/dict_examples
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
#ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
until clickhouse-client --query "SELECT 1"
do
sleep 0.1
done
TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having"
clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip $TESTS_TO_SKIP 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt
mv /var/log/clickhouse-server/* /test_output
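The image is driven entirely by environment variables and the two mounted directories referenced above; a hypothetical invocation (volume paths and the SHA are placeholders) might look like:

``` bash
docker run \
    --volume=/path/to/ccache:/ccache \
    --volume=/path/to/results:/test_output \
    -e PULL_REQUEST_NUMBER=0 \
    -e COMMIT_SHA=0123abc \
    yandex/clickhouse-fasttest
```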

View File

@ -55,18 +55,21 @@ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-serv
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/;
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
# Retain any pre-existing config and allow ClickHouse to load those if required
ln -s --backup=simple --suffix=_original.xml \
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
service zookeeper start

View File

@ -17,7 +17,6 @@ ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
@ -33,6 +32,10 @@ ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
# Retain any pre-existing config and allow ClickHouse to load it if required
ln -s --backup=simple --suffix=_original.xml \
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then
ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/
fi

View File

@ -46,27 +46,30 @@ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-serv
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
# Retain any pre-existing config and allow ClickHouse to load it if required
ln -s --backup=simple --suffix=_original.xml \
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
service zookeeper start
sleep 5

View File

@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN pip3 install urllib3 testflows==1.6.24 docker-compose docker dicttoxml kazoo tzlocal
RUN pip3 install urllib3 testflows==1.6.39 docker-compose docker dicttoxml kazoo tzlocal
ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce

View File

@ -24,7 +24,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/statem
**Engine Parameters**
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` clause that the `JOIN` operation is made with.
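For example, a minimal Join-engine table with strictness `ANY`, type `LEFT`, and key column `id` (table and column names are illustrative):

``` sql
CREATE TABLE id_val_join (`id` UInt32, `val` UInt8)
ENGINE = Join(ANY, LEFT, id);
```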

View File

@ -471,7 +471,7 @@ Default value: 0.
See also:
- [JOIN strictness](../../sql-reference/statements/select/join.md#select-join-strictness)
- [JOIN strictness](../../sql-reference/statements/select/join.md#join-settings)
## temporary\_files\_codec {#temporary_files_codec}

View File

@ -11,7 +11,7 @@ Syntax:
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -33,17 +33,13 @@ Additional join types available in ClickHouse:
- `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product.
- `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Strictness {#select-join-strictness}
Modifies how matching by “join keys” is performed
- `ALL` — The standard `JOIN` behavior in SQL as described above. The default.
- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Setting {#join-settings}
!!! note "Note"
The default strictness value can be overriden using [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
The default join type can be overriden using [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
Also the behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.
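To illustrate the revised grammar, `ANY` and `ASOF` now appear in the same position as the other join types; the table and column names below are placeholders:

``` sql
-- ANY disables the cartesian product for matching rows of the right table:
SELECT * FROM t1 ANY LEFT JOIN t2 USING (id);

-- ASOF joins on the closest non-exact match of t (equality on id is still required):
SELECT a.id, a.t, b.t FROM t1 AS a
ASOF LEFT JOIN t2 AS b ON a.id = b.id AND a.t >= b.t;
```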

View File

@ -23,7 +23,7 @@ Vea la descripción detallada del [CREATE TABLE](../../../sql-reference/statemen
**Parámetros del motor**
- `join_strictness` [ÚNETE a la rigurosidad](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [ÚNETE a la rigurosidad](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [Tipo de unión](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` cláusula que el `JOIN` operación se hace con.

View File

@ -12,7 +12,7 @@ Sintaxis:
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -34,14 +34,10 @@ Tipos de unión adicionales disponibles en ClickHouse:
- `LEFT SEMI JOIN` y `RIGHT SEMI JOIN`, una lista blanca en “join keys”, sin producir un producto cartesiano.
- `LEFT ANTI JOIN` y `RIGHT ANTI JOIN`, una lista negra sobre “join keys”, sin producir un producto cartesiano.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Rigor {#select-join-strictness}
Modifica cómo coincidir por “join keys” se realiza
- `ALL` — The standard `JOIN` comportamiento en SQL como se describió anteriormente. Predeterminado.
- `ANY` — Partially (for opposite side of `LEFT` y `RIGHT`) o completamente (para `INNER` y `FULL`) deshabilita el producto cartesiano para `JOIN` tipo.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` el uso se describe a continuación.
## Setting {#join-settings}
!!! note "Nota"
El valor de rigor predeterminado se puede anular usando [Por favor, introduzca su dirección de correo electrónico](../../../operations/settings/settings.md#settings-join_default_strictness) configuración.

View File

@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
**پارامترهای موتور**
- `join_strictness` [پیوستن به سختی](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [پیوستن به سختی](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [پیوستن به نوع](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` بند که `JOIN` عملیات با ساخته شده.

View File

@ -12,7 +12,7 @@ machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -34,15 +34,12 @@ FROM <left_table>
- `LEFT SEMI JOIN` و `RIGHT SEMI JOIN`, یک لیست سفید در “join keys”, بدون تولید محصول دکارتی.
- `LEFT ANTI JOIN` و `RIGHT ANTI JOIN`, لیست سیاه در “join keys”, بدون تولید محصول دکارتی.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` و `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` و `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## سختی {#select-join-strictness}
## Setting {#join-settings}
تغییر چگونگی تطبیق توسط “join keys” انجام شده است
- `ALL` — The standard `JOIN` رفتار در گذاشتن همانطور که در بالا توضیح. به طور پیش فرض.
- `ANY` — Partially (for opposite side of `LEFT` و `RIGHT`) یا به طور کامل (برای `INNER` و `FULL`) غیر فعال محصول دکارتی برای استاندارد `JOIN` انواع.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` استفاده در زیر توضیح داده شده است.
!!! note "یادداشت"
مقدار سختگیرانه پیش فرض را می توان با استفاده از لغو [بررسی اجمالی](../../../operations/settings/settings.md#settings-join_default_strictness) تنظیمات.

View File

@ -23,7 +23,7 @@ Voir la description détaillée de la [CREATE TABLE](../../../sql-reference/stat
**Les Paramètres Du Moteur**
- `join_strictness` [ADHÉRER à la rigueur](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [ADHÉRER à la rigueur](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [Type de jointure](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` la clause que l' `JOIN` l'opération est faite avec de la.

View File

@ -12,7 +12,7 @@ Syntaxe:
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -34,14 +34,10 @@ Autres types de jointure disponibles dans ClickHouse:
- `LEFT SEMI JOIN` et `RIGHT SEMI JOIN` une liste blanche sur “join keys”, sans produire un produit cartésien.
- `LEFT ANTI JOIN` et `RIGHT ANTI JOIN` une liste noire sur “join keys”, sans produire un produit cartésien.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` et `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` et `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## Rigueur {#select-join-strictness}
Modifie la façon dont la correspondance par “join keys” est effectué
- `ALL` — The standard `JOIN` comportement en SQL comme décrit ci-dessus. Défaut.
- `ANY` — Partially (for opposite side of `LEFT` et `RIGHT`) ou complètement (pour `INNER` et `FULL`) désactive le produit cartésien de la norme `JOIN` type.
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` l'utilisation est décrite ci-dessous.
## Setting {#join-settings}
!!! note "Note"
La valeur de rigueur par défaut peut être remplacée à l'aide [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) paramètre.

View File

@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
**エンジン変数**
- `join_strictness` [厳密に結合する](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [厳密に結合する](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [結合タイプ](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` 句は、 `JOIN` 操作はでなされる。

View File

@ -16,7 +16,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
**Параметры движка**
- `join_strictness` [строгость JOIN](../../../engines/table-engines/special/join.md#select-join-strictness).
- `join_strictness` [строгость JOIN](../../../engines/table-engines/special/join.md#select-join-types).
- `join_type` [тип JOIN](../../../engines/table-engines/special/join.md#select-join-types).
- `k1[, k2, ...]` ключевые столбцы секции `USING` с которыми выполняется операция `JOIN`.

View File

@ -7,7 +7,7 @@ Join создаёт новую таблицу путем объединения
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -29,18 +29,14 @@ FROM <left_table>
- `LEFT SEMI JOIN` и `RIGHT SEMI JOIN`, белый список по ключам соединения, не производит декартово произведение.
- `LEFT ANTI JOIN` и `RIGHT ANTI JOIN`, черный список по ключам соединения, не производит декартово произведение.
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` и `INNER ANY JOIN`, Частично (для противоположных сторон `LEFT` и `RIGHT`) или полностью (для `INNER` и `FULL`) отключает декартово произведение для стандартых видов `JOIN`.
- `ASOF JOIN` и `LEFT ASOF JOIN`, Для соединения последовательностей по нечеткому совпадению. Использование `ASOF JOIN` описано ниже.
## Строгость {#select-join-strictness}
Изменяет способ сопоставления по ключам соединения:
- `ALL` — стандартное поведение `JOIN` в SQL, как описано выше. По умолчанию.
- `ANY` — Частично (для противоположных сторон `LEFT` и `RIGHT`) или полностью (для `INNER` и `FULL`) отключает декартово произведение для стандартых видов `JOIN`.
- `ASOF` — Для соединения последовательностей по нечеткому совпадению. Использование `ASOF JOIN` описано ниже.
## Настройки {#join-settings}
!!! note "Примечание"
Значение строгости по умолчанию может быть переопределено с помощью настройки [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness).
### Использование ASOF JOIN {#asof-join-usage}
`ASOF JOIN` применим в том случае, когда необходимо объединять записи, которые не имеют точного совпадения.

View File

@ -4,8 +4,11 @@
# This script deploys ClickHouse website to your personal test subdomain.
#
# Before first use of this script:
# 1) Create https://github.com/GIT_USER/clickhouse.github.io repo (replace GIT_USER with your GitHub login)
# 2) Send email on address from https://clickhouse.tech/#contacts asking to create GIT_USER-test.clickhouse.tech domain
# 1) Set up building documentation according to https://github.com/ClickHouse/ClickHouse/tree/master/docs/tools#use-buildpy-use-build-py
# 2) Create https://github.com/GIT_USER/clickhouse.github.io repo (replace GIT_USER with your GitHub login)
# 3) Enable GitHub Pages in settings of this repo
# 4) Add file named CNAME in root of this repo with "GIT_USER-test.clickhouse.tech" content (without quotes)
# 5) Send email on address from https://clickhouse.tech/#contacts asking to create GIT_USER-test.clickhouse.tech domain
#
set -ex

View File

@ -23,7 +23,7 @@ Ayrıntılııklamasına bakın [CREATE TABLE](../../../sql-reference/statem
**Motor Parametreleri**
- `join_strictness` [Katılık katılın](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [Katılık katılın](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [Birleştirme türü](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` Key columns from the `USING` fık thera: `JOIN` işlemi yapılmamaktadır.

View File

@ -24,7 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
**引擎参数**
- `join_strictness` [JOIN 限制](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` [JOIN 限制](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` [JOIN 类型](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` 进行`JOIN` 操作时 `USING`语句用到的key列

View File

@ -13,7 +13,7 @@ Join通过使用一个或多个表的公共值合并来自一个或多个表的
``` sql
SELECT <expr_list>
FROM <left_table>
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ...
```
@ -35,14 +35,10 @@ ClickHouse中提供的其他联接类型:
- `LEFT SEMI JOIN``RIGHT SEMI JOIN`,白名单 “join keys”而不产生笛卡尔积。
- `LEFT ANTI JOIN``RIGHT ANTI JOIN`,黑名单 “join keys”而不产生笛卡尔积。
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
## 严格 {#select-join-strictness}
修改如何匹配 “join keys” 执行
- `ALL` — The standard `JOIN` sql中的行为如上所述。 默认值。
- `ANY` — Partially (for opposite side of `LEFT``RIGHT`)或完全(为 `INNER``FULL`)禁用笛卡尔积为标准 `JOIN` 类型。
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` 用法描述如下。
## 严格 {#join-settings}
!!! note "注"
可以使用以下方式复盖默认的严格性值 [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) 设置。

View File

@ -215,6 +215,9 @@ try
/// Skip networking
/// Sets external authenticators config (LDAP).
context->setExternalAuthenticatorsConfig(config());
setupUsers();
/// Limit on total number of concurrently executing queries.

View File

@ -295,7 +295,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
#endif
/** Context contains all that query execution is dependent:
* settings, available functions, data types, aggregate functions, databases...
* settings, available functions, data types, aggregate functions, databases, ...
*/
auto shared_context = Context::createShared();
auto global_context = std::make_unique<Context>(Context::createGlobal(shared_context.get()));
@ -543,6 +543,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
//buildLoggers(*config, logger());
global_context->setClustersConfig(config);
global_context->setMacros(std::make_unique<Macros>(*config, "macros"));
global_context->setExternalAuthenticatorsConfig(*config);
/// Setup protection to avoid accidental DROP for big tables (that are greater than 50 GB by default)
if (config->has("max_table_size_to_drop"))

View File

@ -215,6 +215,47 @@
<!-- Path to folder where users and roles created by SQL commands are stored. -->
<access_control_path>/var/lib/clickhouse/access/</access_control_path>
<!-- External user directories (LDAP). -->
<ldap_servers>
<!-- List LDAP servers with their connection parameters here to later use them as authenticators for dedicated users,
who have 'ldap' authentication mechanism specified instead of 'password'.
Parameters:
host - LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
port - LDAP server port, default is 636 if enable_tls is set to true, 389 otherwise.
auth_dn_prefix, auth_dn_suffix - prefix and suffix used to construct the DN to bind to.
Effectively, the resulting DN will be constructed as auth_dn_prefix + escape(user_name) + auth_dn_suffix string.
Note, that this implies that auth_dn_suffix should usually have comma ',' as its first non-space character.
enable_tls - flag to trigger use of secure connection to the LDAP server.
Specify 'no' for plain text (ldap://) protocol (not recommended).
Specify 'yes' for LDAP over SSL/TLS (ldaps://) protocol (recommended, the default).
Specify 'starttls' for legacy StartTLS protocol (plain text (ldap://) protocol, upgraded to TLS).
tls_minimum_protocol_version - the minimum protocol version of SSL/TLS.
Accepted values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2' (the default).
tls_require_cert - SSL/TLS peer certificate verification behavior.
Accepted values are: 'never', 'allow', 'try', 'demand' (the default).
tls_cert_file - path to certificate file.
tls_key_file - path to certificate key file.
tls_ca_cert_file - path to CA certificate file.
tls_ca_cert_dir - path to the directory containing CA certificates.
tls_cipher_suite - allowed cipher suite.
Example:
<my_ldap_server>
<host>localhost</host>
<port>636</port>
<auth_dn_prefix>uid=</auth_dn_prefix>
<auth_dn_suffix>,ou=users,dc=example,dc=com</auth_dn_suffix>
<enable_tls>yes</enable_tls>
<tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>
<tls_require_cert>demand</tls_require_cert>
<tls_cert_file>/path/to/tls_cert_file</tls_cert_file>
<tls_key_file>/path/to/tls_key_file</tls_key_file>
<tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>
<tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>
<tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>
</my_ldap_server>
-->
</ldap_servers>
<!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
<users_config>users.xml</users_config>

View File

@ -44,6 +44,9 @@
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
If you want to specify a previously defined LDAP server (see 'ldap_servers' in main config) for authentication, place its name in 'server' element inside 'ldap' element.
Example: <ldap><server>my_ldap_server</server></ldap>
How to generate a decent password:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
The first line of output is the password; the second is the corresponding SHA256 hash.
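Put together, a sketch of a user entry that authenticates through such a server; the user name, server name, and surrounding defaults are placeholders and assume the standard users.xml layout:

``` xml
<yandex>
    <users>
        <my_ldap_user>
            <!-- Delegates password verification to the 'my_ldap_server' entry
                 defined under <ldap_servers> in the main config. -->
            <ldap>
                <server>my_ldap_server</server>
            </ldap>
            <networks><ip>::/0</ip></networks>
            <profile>default</profile>
            <quota>default</quota>
        </my_ldap_user>
    </users>
</yandex>
```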

View File

@ -9,6 +9,7 @@
#include <Access/QuotaCache.h>
#include <Access/QuotaUsage.h>
#include <Access/SettingsProfilesCache.h>
#include <Access/ExternalAuthenticators.h>
#include <Core/Settings.h>
#include <Poco/ExpireCache.h>
#include <mutex>
@ -64,7 +65,8 @@ AccessControlManager::AccessControlManager()
role_cache(std::make_unique<RoleCache>(*this)),
row_policy_cache(std::make_unique<RowPolicyCache>(*this)),
quota_cache(std::make_unique<QuotaCache>(*this)),
settings_profiles_cache(std::make_unique<SettingsProfilesCache>(*this))
settings_profiles_cache(std::make_unique<SettingsProfilesCache>(*this)),
external_authenticators(std::make_unique<ExternalAuthenticators>())
{
}
@ -79,6 +81,12 @@ void AccessControlManager::setLocalDirectory(const String & directory_path)
}
void AccessControlManager::setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config)
{
external_authenticators->setConfig(config, getLogger());
}
void AccessControlManager::setUsersConfig(const Poco::Util::AbstractConfiguration & users_config)
{
auto & users_config_access_storage = dynamic_cast<UsersConfigAccessStorage &>(getStorageByIndex(USERS_CONFIG_ACCESS_STORAGE_INDEX));
@ -163,4 +171,9 @@ std::shared_ptr<const SettingsChanges> AccessControlManager::getProfileSettings(
return settings_profiles_cache->getProfileSettings(profile_name);
}
const ExternalAuthenticators & AccessControlManager::getExternalAuthenticators() const
{
return *external_authenticators;
}
}

View File

@ -37,6 +37,7 @@ class EnabledSettings;
class SettingsProfilesCache;
class SettingsProfileElements;
class ClientInfo;
class ExternalAuthenticators;
struct Settings;
@ -48,6 +49,7 @@ public:
~AccessControlManager();
void setLocalDirectory(const String & directory);
void setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config);
void setUsersConfig(const Poco::Util::AbstractConfiguration & users_config);
void setDefaultProfileName(const String & default_profile_name);
@ -85,6 +87,8 @@ public:
std::shared_ptr<const SettingsChanges> getProfileSettings(const String & profile_name) const;
const ExternalAuthenticators & getExternalAuthenticators() const;
private:
class ContextAccessCache;
std::unique_ptr<ContextAccessCache> context_access_cache;
@ -92,6 +96,7 @@ private:
std::unique_ptr<RowPolicyCache> row_policy_cache;
std::unique_ptr<QuotaCache> quota_cache;
std::unique_ptr<SettingsProfilesCache> settings_profiles_cache;
std::unique_ptr<ExternalAuthenticators> external_authenticators;
};
}

View File

@ -1,4 +1,6 @@
#include <Access/Authentication.h>
#include <Access/ExternalAuthenticators.h>
#include <Access/LDAPClient.h>
#include <Common/Exception.h>
#include <Poco/SHA1Engine.h>
@ -37,6 +39,9 @@ Authentication::Digest Authentication::getPasswordDoubleSHA1() const
case DOUBLE_SHA1_PASSWORD:
return password_hash;
case LDAP_SERVER:
throw Exception("Cannot get password double SHA1 for user with 'LDAP_SERVER' authentication.", ErrorCodes::BAD_ARGUMENTS);
case MAX_TYPE:
break;
}
@ -44,7 +49,7 @@ Authentication::Digest Authentication::getPasswordDoubleSHA1() const
}
bool Authentication::isCorrectPassword(const String & password_) const
bool Authentication::isCorrectPassword(const String & password_, const String & user_, const ExternalAuthenticators & external_authenticators) const
{
switch (type)
{
@ -75,6 +80,16 @@ bool Authentication::isCorrectPassword(const String & password_) const
return encodeSHA1(first_sha1) == password_hash;
}
case LDAP_SERVER:
{
auto ldap_server_params = external_authenticators.getLDAPServerParams(server_name);
ldap_server_params.user = user_;
ldap_server_params.password = password_;
LDAPSimpleAuthClient ldap_client(ldap_server_params);
return ldap_client.check();
}
case MAX_TYPE:
break;
}

View File

@ -18,6 +18,7 @@ namespace ErrorCodes
extern const int NOT_IMPLEMENTED;
}
class ExternalAuthenticators;
/// Authentication type and encrypted password for checking when a user logs in.
class Authentication
@ -38,6 +39,9 @@ public:
/// This kind of hash is used by the `mysql_native_password` authentication plugin.
DOUBLE_SHA1_PASSWORD,
/// Password is checked by a [remote] LDAP server. Connection will be made at each authentication attempt.
LDAP_SERVER,
MAX_TYPE,
};
@ -78,8 +82,14 @@ public:
/// Allowed to use for Type::NO_PASSWORD, Type::PLAINTEXT_PASSWORD, Type::DOUBLE_SHA1_PASSWORD.
Digest getPasswordDoubleSHA1() const;
/// Sets an external authentication server name.
/// When authentication type is LDAP_SERVER, server name is expected to be the name of a preconfigured LDAP server.
const String & getServerName() const;
void setServerName(const String & server_name_);
/// Checks if the provided password is correct. Returns false if not.
bool isCorrectPassword(const String & password) const;
/// User name and external authenticators' info are used only by some specific authentication type (e.g., LDAP_SERVER).
bool isCorrectPassword(const String & password_, const String & user_, const ExternalAuthenticators & external_authenticators) const;
friend bool operator ==(const Authentication & lhs, const Authentication & rhs) { return (lhs.type == rhs.type) && (lhs.password_hash == rhs.password_hash); }
friend bool operator !=(const Authentication & lhs, const Authentication & rhs) { return !(lhs == rhs); }
@ -93,6 +103,7 @@ private:
Type type = Type::NO_PASSWORD;
Digest password_hash;
String server_name;
};
@ -127,6 +138,11 @@ inline const Authentication::TypeInfo & Authentication::TypeInfo::get(Type type_
static const auto info = make_info("DOUBLE_SHA1_PASSWORD");
return info;
}
case LDAP_SERVER:
{
static const auto info = make_info("LDAP_SERVER");
return info;
}
case MAX_TYPE: break;
}
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type_)), ErrorCodes::LOGICAL_ERROR);
@ -176,6 +192,9 @@ inline void Authentication::setPassword(const String & password_)
case DOUBLE_SHA1_PASSWORD:
return setPasswordHashBinary(encodeDoubleSHA1(password_));
case LDAP_SERVER:
throw Exception("Cannot specify password for the 'LDAP_SERVER' authentication type", ErrorCodes::LOGICAL_ERROR);
case MAX_TYPE: break;
}
throw Exception("setPassword(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
@ -200,6 +219,8 @@ inline void Authentication::setPasswordHashHex(const String & hash)
inline String Authentication::getPasswordHashHex() const
{
if (type == LDAP_SERVER)
throw Exception("Cannot get password of a user with the 'LDAP_SERVER' authentication type", ErrorCodes::LOGICAL_ERROR);
String hex;
hex.resize(password_hash.size() * 2);
boost::algorithm::hex(password_hash.begin(), password_hash.end(), hex.data());
@ -242,9 +263,22 @@ inline void Authentication::setPasswordHashBinary(const Digest & hash)
return;
}
case LDAP_SERVER:
throw Exception("Cannot specify password for the 'LDAP_SERVER' authentication type", ErrorCodes::LOGICAL_ERROR);
case MAX_TYPE: break;
}
throw Exception("setPasswordHashBinary(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
}
inline const String & Authentication::getServerName() const
{
return server_name;
}
inline void Authentication::setServerName(const String & server_name_)
{
server_name = server_name_;
}
}
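Taken together, a hypothetical caller-side sketch of the LDAP path; the factory function and the surrounding objects `password`, `user_name`, and `manager` are assumptions, not part of the interface above:

``` cpp
// Sketch only: assumes 'auth' was constructed with Type::LDAP_SERVER.
Authentication auth = makeLdapAuthentication();   // hypothetical factory
auth.setServerName("my_ldap_server");             // must match a <ldap_servers> entry

// Unlike the hash-based types, this binds to the LDAP server on every check:
bool ok = auth.isCorrectPassword(password, user_name, manager.getExternalAuthenticators());
```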

View File

@ -293,7 +293,7 @@ bool ContextAccess::isCorrectPassword(const String & password) const
std::lock_guard lock{mutex};
if (!user)
return false;
return user->authentication.isCorrectPassword(password);
return user->authentication.isCorrectPassword(password, user_name, manager->getExternalAuthenticators());
}
bool ContextAccess::isClientHostAllowed() const

View File

@ -0,0 +1,182 @@
#include <Access/ExternalAuthenticators.h>
#include <Common/Exception.h>
#include <Common/quoteString.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <boost/algorithm/string/case_conv.hpp>
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}
namespace
{
auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const String & ldap_server_name)
{
if (ldap_server_name.empty())
throw Exception("LDAP server name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
LDAPServerParams params;
const String ldap_server_config = "ldap_servers." + ldap_server_name;
const bool has_host = config.has(ldap_server_config + ".host");
const bool has_port = config.has(ldap_server_config + ".port");
const bool has_auth_dn_prefix = config.has(ldap_server_config + ".auth_dn_prefix");
const bool has_auth_dn_suffix = config.has(ldap_server_config + ".auth_dn_suffix");
const bool has_enable_tls = config.has(ldap_server_config + ".enable_tls");
const bool has_tls_minimum_protocol_version = config.has(ldap_server_config + ".tls_minimum_protocol_version");
const bool has_tls_require_cert = config.has(ldap_server_config + ".tls_require_cert");
const bool has_tls_cert_file = config.has(ldap_server_config + ".tls_cert_file");
const bool has_tls_key_file = config.has(ldap_server_config + ".tls_key_file");
const bool has_tls_ca_cert_file = config.has(ldap_server_config + ".tls_ca_cert_file");
const bool has_tls_ca_cert_dir = config.has(ldap_server_config + ".tls_ca_cert_dir");
const bool has_tls_cipher_suite = config.has(ldap_server_config + ".tls_cipher_suite");
if (!has_host)
throw Exception("Missing 'host' entry", ErrorCodes::BAD_ARGUMENTS);
params.host = config.getString(ldap_server_config + ".host");
if (params.host.empty())
throw Exception("Empty 'host' entry", ErrorCodes::BAD_ARGUMENTS);
if (has_auth_dn_prefix)
params.auth_dn_prefix = config.getString(ldap_server_config + ".auth_dn_prefix");
if (has_auth_dn_suffix)
params.auth_dn_suffix = config.getString(ldap_server_config + ".auth_dn_suffix");
if (has_enable_tls)
{
String enable_tls_lc_str = config.getString(ldap_server_config + ".enable_tls");
boost::to_lower(enable_tls_lc_str);
if (enable_tls_lc_str == "starttls")
params.enable_tls = LDAPServerParams::TLSEnable::YES_STARTTLS;
else if (config.getBool(ldap_server_config + ".enable_tls"))
params.enable_tls = LDAPServerParams::TLSEnable::YES;
else
params.enable_tls = LDAPServerParams::TLSEnable::NO;
}
if (has_tls_minimum_protocol_version)
{
String tls_minimum_protocol_version_lc_str = config.getString(ldap_server_config + ".tls_minimum_protocol_version");
boost::to_lower(tls_minimum_protocol_version_lc_str);
if (tls_minimum_protocol_version_lc_str == "ssl2")
params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::SSL2;
else if (tls_minimum_protocol_version_lc_str == "ssl3")
params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::SSL3;
else if (tls_minimum_protocol_version_lc_str == "tls1.0")
params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::TLS1_0;
else if (tls_minimum_protocol_version_lc_str == "tls1.1")
params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::TLS1_1;
else if (tls_minimum_protocol_version_lc_str == "tls1.2")
params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::TLS1_2;
else
throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS);
}
if (has_tls_require_cert)
{
String tls_require_cert_lc_str = config.getString(ldap_server_config + ".tls_require_cert");
boost::to_lower(tls_require_cert_lc_str);
if (tls_require_cert_lc_str == "never")
params.tls_require_cert = LDAPServerParams::TLSRequireCert::NEVER;
else if (tls_require_cert_lc_str == "allow")
params.tls_require_cert = LDAPServerParams::TLSRequireCert::ALLOW;
else if (tls_require_cert_lc_str == "try")
params.tls_require_cert = LDAPServerParams::TLSRequireCert::TRY;
else if (tls_require_cert_lc_str == "demand")
params.tls_require_cert = LDAPServerParams::TLSRequireCert::DEMAND;
else
throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS);
}
if (has_tls_cert_file)
params.tls_cert_file = config.getString(ldap_server_config + ".tls_cert_file");
if (has_tls_key_file)
params.tls_key_file = config.getString(ldap_server_config + ".tls_key_file");
if (has_tls_ca_cert_file)
params.tls_ca_cert_file = config.getString(ldap_server_config + ".tls_ca_cert_file");
if (has_tls_ca_cert_dir)
params.tls_ca_cert_dir = config.getString(ldap_server_config + ".tls_ca_cert_dir");
if (has_tls_cipher_suite)
params.tls_cipher_suite = config.getString(ldap_server_config + ".tls_cipher_suite");
if (has_port)
{
const auto port = config.getInt64(ldap_server_config + ".port");
if (port < 0 || port > 65535)
throw Exception("Bad value for 'port' entry", ErrorCodes::BAD_ARGUMENTS);
params.port = port;
}
else
params.port = (params.enable_tls == LDAPServerParams::TLSEnable::YES ? 636 : 389);
return params;
}
void parseAndAddLDAPServers(ExternalAuthenticators & external_authenticators, const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
{
Poco::Util::AbstractConfiguration::Keys ldap_server_names;
config.keys("ldap_servers", ldap_server_names);
for (const auto & ldap_server_name : ldap_server_names)
{
try
{
external_authenticators.setLDAPServerParams(ldap_server_name, parseLDAPServer(config, ldap_server_name));
}
catch (...)
{
tryLogCurrentException(log, "Could not parse LDAP server " + backQuote(ldap_server_name));
}
}
}
}
void ExternalAuthenticators::reset()
{
std::scoped_lock lock(mutex);
ldap_server_params.clear();
}
void ExternalAuthenticators::setConfig(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
{
std::scoped_lock lock(mutex);
reset();
parseAndAddLDAPServers(*this, config, log);
}
void ExternalAuthenticators::setLDAPServerParams(const String & server, const LDAPServerParams & params)
{
std::scoped_lock lock(mutex);
ldap_server_params.erase(server);
ldap_server_params[server] = params;
}
LDAPServerParams ExternalAuthenticators::getLDAPServerParams(const String & server) const
{
std::scoped_lock lock(mutex);
auto it = ldap_server_params.find(server);
if (it == ldap_server_params.end())
throw Exception("LDAP server '" + server + "' is not configured", ErrorCodes::BAD_ARGUMENTS);
return it->second;
}
}

View File

@ -0,0 +1,39 @@
#pragma once
#include <Access/LDAPParams.h>
#include <Core/Types.h>
#include <map>
#include <memory>
#include <mutex>
namespace Poco
{
class Logger;
namespace Util
{
class AbstractConfiguration;
}
}
namespace DB
{
class ExternalAuthenticators
{
public:
void reset();
void setConfig(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log);
void setLDAPServerParams(const String & server, const LDAPServerParams & params);
LDAPServerParams getLDAPServerParams(const String & server) const;
private:
mutable std::recursive_mutex mutex;
std::map<String, LDAPServerParams> ldap_server_params;
};
}
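A minimal usage sketch of this interface, assuming `config` is a loaded Poco configuration:

``` cpp
ExternalAuthenticators authenticators;

// Parses every entry under <ldap_servers>; malformed servers are logged and skipped.
authenticators.setConfig(config, &Poco::Logger::get("ExternalAuthenticators"));

// Throws BAD_ARGUMENTS if no server with this name was configured.
LDAPServerParams params = authenticators.getLDAPServerParams("my_ldap_server");
```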

src/Access/LDAPClient.cpp Normal file
View File

@ -0,0 +1,331 @@
#include <Access/LDAPClient.h>
#include <Common/Exception.h>
#include <ext/scope_guard.h>
#include <cstring>
#include <sys/time.h>
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME;
extern const int LDAP_ERROR;
}
LDAPClient::LDAPClient(const LDAPServerParams & params_)
: params(params_)
{
}
LDAPClient::~LDAPClient()
{
closeConnection();
}
void LDAPClient::openConnection()
{
const bool graceful_bind_failure = false;
diag(openConnection(graceful_bind_failure));
}
#if USE_LDAP
namespace
{
auto escapeForLDAP(const String & src)
{
String dest;
dest.reserve(src.size() * 2);
for (auto ch : src)
{
switch (ch)
{
case ',':
case '\\':
case '#':
case '+':
case '<':
case '>':
case ';':
case '"':
case '=':
dest += '\\';
break;
}
dest += ch;
}
return dest;
}
}
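The escaping above prepends a backslash to DN metacharacters so that a user-controlled name cannot alter the structure of the bind DN. A self-contained illustration of the same transformation (escape here is rewritten for the example, not imported from the code above):
#include <cassert>
#include <string>
std::string escape(const std::string & src)
{
    static const std::string specials = ",\\#+<>;\"="; /// same character set as escapeForLDAP above
    std::string dest;
    dest.reserve(src.size() * 2);
    for (char ch : src)
    {
        if (specials.find(ch) != std::string::npos)
            dest += '\\';
        dest += ch;
    }
    return dest;
}
int main()
{
    /// Once escaped, "cn=admin,dc=example" can no longer terminate an RDN early:
    assert(escape("cn=admin,dc=example") == "cn\\=admin\\,dc\\=example");
}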
void LDAPClient::diag(const int rc)
{
if (rc != LDAP_SUCCESS)
{
String text;
const char * raw_err_str = ldap_err2string(rc);
if (raw_err_str)
text = raw_err_str;
if (handle)
{
String message;
char * raw_message = nullptr;
ldap_get_option(handle, LDAP_OPT_DIAGNOSTIC_MESSAGE, &raw_message);
if (raw_message)
{
message = raw_message;
ldap_memfree(raw_message);
raw_message = nullptr;
}
if (!message.empty())
{
if (!text.empty())
text += ": ";
text += message;
}
}
throw Exception(text, ErrorCodes::LDAP_ERROR);
}
}
int LDAPClient::openConnection(const bool graceful_bind_failure)
{
closeConnection();
{
LDAPURLDesc url;
std::memset(&url, 0, sizeof(url));
url.lud_scheme = const_cast<char *>(params.enable_tls == LDAPServerParams::TLSEnable::YES ? "ldaps" : "ldap");
url.lud_host = const_cast<char *>(params.host.c_str());
url.lud_port = params.port;
url.lud_scope = LDAP_SCOPE_DEFAULT;
auto * uri = ldap_url_desc2str(&url);
if (!uri)
throw Exception("ldap_url_desc2str() failed", ErrorCodes::LDAP_ERROR);
SCOPE_EXIT({ ldap_memfree(uri); });
diag(ldap_initialize(&handle, uri));
if (!handle)
throw Exception("ldap_initialize() failed", ErrorCodes::LDAP_ERROR);
}
{
int value = 0;
switch (params.protocol_version)
{
case LDAPServerParams::ProtocolVersion::V2: value = LDAP_VERSION2; break;
case LDAPServerParams::ProtocolVersion::V3: value = LDAP_VERSION3; break;
}
diag(ldap_set_option(handle, LDAP_OPT_PROTOCOL_VERSION, &value));
}
diag(ldap_set_option(handle, LDAP_OPT_RESTART, LDAP_OPT_ON));
#ifdef LDAP_OPT_KEEPCONN
diag(ldap_set_option(handle, LDAP_OPT_KEEPCONN, LDAP_OPT_ON));
#endif
#ifdef LDAP_OPT_TIMEOUT
{
::timeval operation_timeout;
operation_timeout.tv_sec = params.operation_timeout.count();
operation_timeout.tv_usec = 0;
diag(ldap_set_option(handle, LDAP_OPT_TIMEOUT, &operation_timeout));
}
#endif
#ifdef LDAP_OPT_NETWORK_TIMEOUT
{
::timeval network_timeout;
network_timeout.tv_sec = params.network_timeout.count();
network_timeout.tv_usec = 0;
diag(ldap_set_option(handle, LDAP_OPT_NETWORK_TIMEOUT, &network_timeout));
}
#endif
{
const int search_timeout = params.search_timeout.count();
diag(ldap_set_option(handle, LDAP_OPT_TIMELIMIT, &search_timeout));
}
{
const int size_limit = params.search_limit;
diag(ldap_set_option(handle, LDAP_OPT_SIZELIMIT, &size_limit));
}
#ifdef LDAP_OPT_X_TLS_PROTOCOL_MIN
{
int value = 0;
switch (params.tls_minimum_protocol_version)
{
case LDAPServerParams::TLSProtocolVersion::SSL2: value = LDAP_OPT_X_TLS_PROTOCOL_SSL2; break;
case LDAPServerParams::TLSProtocolVersion::SSL3: value = LDAP_OPT_X_TLS_PROTOCOL_SSL3; break;
case LDAPServerParams::TLSProtocolVersion::TLS1_0: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_0; break;
case LDAPServerParams::TLSProtocolVersion::TLS1_1: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_1; break;
case LDAPServerParams::TLSProtocolVersion::TLS1_2: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2; break;
}
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_PROTOCOL_MIN, &value));
}
#endif
#ifdef LDAP_OPT_X_TLS_REQUIRE_CERT
{
int value = 0;
switch (params.tls_require_cert)
{
case LDAPServerParams::TLSRequireCert::NEVER: value = LDAP_OPT_X_TLS_NEVER; break;
case LDAPServerParams::TLSRequireCert::ALLOW: value = LDAP_OPT_X_TLS_ALLOW; break;
case LDAPServerParams::TLSRequireCert::TRY: value = LDAP_OPT_X_TLS_TRY; break;
case LDAPServerParams::TLSRequireCert::DEMAND: value = LDAP_OPT_X_TLS_DEMAND; break;
}
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_REQUIRE_CERT, &value));
}
#endif
#ifdef LDAP_OPT_X_TLS_CERTFILE
if (!params.tls_cert_file.empty())
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CERTFILE, params.tls_cert_file.c_str()));
#endif
#ifdef LDAP_OPT_X_TLS_KEYFILE
if (!params.tls_key_file.empty())
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_KEYFILE, params.tls_key_file.c_str()));
#endif
#ifdef LDAP_OPT_X_TLS_CACERTFILE
if (!params.tls_ca_cert_file.empty())
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTFILE, params.tls_ca_cert_file.c_str()));
#endif
#ifdef LDAP_OPT_X_TLS_CACERTDIR
if (!params.tls_ca_cert_dir.empty())
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTDIR, params.tls_ca_cert_dir.c_str()));
#endif
#ifdef LDAP_OPT_X_TLS_CIPHER_SUITE
if (!params.tls_cipher_suite.empty())
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CIPHER_SUITE, params.tls_cipher_suite.c_str()));
#endif
#ifdef LDAP_OPT_X_TLS_NEWCTX
{
const int i_am_a_server = 0;
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_NEWCTX, &i_am_a_server));
}
#endif
if (params.enable_tls == LDAPServerParams::TLSEnable::YES_STARTTLS)
diag(ldap_start_tls_s(handle, nullptr, nullptr));
int rc = LDAP_OTHER;
switch (params.sasl_mechanism)
{
case LDAPServerParams::SASLMechanism::SIMPLE:
{
const String dn = params.auth_dn_prefix + escapeForLDAP(params.user) + params.auth_dn_suffix;
::berval cred;
cred.bv_val = const_cast<char *>(params.password.c_str());
cred.bv_len = params.password.size();
rc = ldap_sasl_bind_s(handle, dn.c_str(), LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr);
if (!graceful_bind_failure)
diag(rc);
break;
}
}
return rc;
}
void LDAPClient::closeConnection() noexcept
{
if (!handle)
return;
ldap_unbind_ext_s(handle, nullptr, nullptr);
handle = nullptr;
}
bool LDAPSimpleAuthClient::check()
{
if (params.user.empty())
throw Exception("LDAP authentication of a user with an empty name is not allowed", ErrorCodes::BAD_ARGUMENTS);
if (params.password.empty())
return false; // Silently reject the authentication attempt if the password is empty, as if it didn't match.
SCOPE_EXIT({ closeConnection(); });
const bool graceful_bind_failure = true;
const auto rc = openConnection(graceful_bind_failure);
bool result = false;
switch (rc)
{
case LDAP_SUCCESS:
{
result = true;
break;
}
case LDAP_INVALID_CREDENTIALS:
{
result = false;
break;
}
default:
{
result = false;
diag(rc);
break;
}
}
return result;
}
#else // USE_LDAP
void LDAPClient::diag(const int)
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
}
int LDAPClient::openConnection(const bool)
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
}
void LDAPClient::closeConnection() noexcept
{
}
bool LDAPSimpleAuthClient::check()
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
}
#endif // USE_LDAP
}
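Stripped of the options handling, the authentication path above reduces to one libldap simple bind whose result is interpreted three ways: success, invalid credentials, or operational error. A self-contained sketch against the plain OpenLDAP C API, assuming <ldap.h> is available; the host, port, and DN are placeholders:
#include <ldap.h>
#include <stdexcept>
#include <string>
bool checkPassword(const std::string & host, const std::string & dn, const std::string & password)
{
    LDAP * handle = nullptr;
    const std::string uri = "ldap://" + host + ":389";
    if (ldap_initialize(&handle, uri.c_str()) != LDAP_SUCCESS || !handle)
        throw std::runtime_error("ldap_initialize() failed");
    int version = LDAP_VERSION3;
    ldap_set_option(handle, LDAP_OPT_PROTOCOL_VERSION, &version);
    berval cred;
    cred.bv_val = const_cast<char *>(password.c_str());
    cred.bv_len = password.size();
    const int rc = ldap_sasl_bind_s(handle, dn.c_str(), LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr);
    ldap_unbind_ext_s(handle, nullptr, nullptr);
    if (rc == LDAP_SUCCESS)
        return true;                 /// bound: the credentials are valid
    if (rc == LDAP_INVALID_CREDENTIALS)
        return false;                /// wrong password: reject without throwing
    throw std::runtime_error(ldap_err2string(rc)); /// anything else is an operational error
}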

55
src/Access/LDAPClient.h Normal file
View File

@ -0,0 +1,55 @@
#pragma once
#if !defined(ARCADIA_BUILD)
# include "config_core.h"
#endif
#include <Access/LDAPParams.h>
#include <Core/Types.h>
#if USE_LDAP
# include <ldap.h>
# define MAYBE_NORETURN
#else
# define MAYBE_NORETURN [[noreturn]]
#endif
namespace DB
{
class LDAPClient
{
public:
explicit LDAPClient(const LDAPServerParams & params_);
~LDAPClient();
LDAPClient(const LDAPClient &) = delete;
LDAPClient(LDAPClient &&) = delete;
LDAPClient & operator= (const LDAPClient &) = delete;
LDAPClient & operator= (LDAPClient &&) = delete;
protected:
MAYBE_NORETURN void diag(const int rc);
MAYBE_NORETURN void openConnection();
int openConnection(const bool graceful_bind_failure = false);
void closeConnection() noexcept;
protected:
const LDAPServerParams params;
#if USE_LDAP
LDAP * handle = nullptr;
#endif
};
class LDAPSimpleAuthClient
: private LDAPClient
{
public:
using LDAPClient::LDAPClient;
bool check();
};
}
#undef MAYBE_NORETURN

76
src/Access/LDAPParams.h Normal file
View File

@ -0,0 +1,76 @@
#pragma once
#include <Core/Types.h>
#include <chrono>
namespace DB
{
struct LDAPServerParams
{
enum class ProtocolVersion
{
V2,
V3
};
enum class TLSEnable
{
NO,
YES_STARTTLS,
YES
};
enum class TLSProtocolVersion
{
SSL2,
SSL3,
TLS1_0,
TLS1_1,
TLS1_2
};
enum class TLSRequireCert
{
NEVER,
ALLOW,
TRY,
DEMAND
};
enum class SASLMechanism
{
SIMPLE
};
ProtocolVersion protocol_version = ProtocolVersion::V3;
String host;
std::uint16_t port = 636;
TLSEnable enable_tls = TLSEnable::YES;
TLSProtocolVersion tls_minimum_protocol_version = TLSProtocolVersion::TLS1_2;
TLSRequireCert tls_require_cert = TLSRequireCert::DEMAND;
String tls_cert_file;
String tls_key_file;
String tls_ca_cert_file;
String tls_ca_cert_dir;
String tls_cipher_suite;
SASLMechanism sasl_mechanism = SASLMechanism::SIMPLE;
String auth_dn_prefix;
String auth_dn_suffix;
String user;
String password;
std::chrono::seconds operation_timeout{40};
std::chrono::seconds network_timeout{30};
std::chrono::seconds search_timeout{20};
std::uint32_t search_limit = 100;
};
}
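The auth_dn_prefix/auth_dn_suffix pair exists so that only the user name varies inside an otherwise fixed DN. A small sketch of how the client assembles the bind DN (all values are hypothetical):
#include <iostream>
#include <string>
int main()
{
    const std::string auth_dn_prefix = "uid=";
    const std::string auth_dn_suffix = ",ou=users,dc=example,dc=com";
    const std::string user = "alice"; /// escaped by the client before concatenation
    std::cout << auth_dn_prefix + user + auth_dn_suffix << '\n';
    /// prints: uid=alice,ou=users,dc=example,dc=com
}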

View File

@ -56,14 +56,15 @@ namespace
bool has_password_plaintext = config.has(user_config + ".password");
bool has_password_sha256_hex = config.has(user_config + ".password_sha256_hex");
bool has_password_double_sha1_hex = config.has(user_config + ".password_double_sha1_hex");
bool has_ldap = config.has(user_config + ".ldap");
size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex;
size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex + has_ldap;
if (num_password_fields > 1)
throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password' are used to specify password for user " + user_name + ". Must be only one of them.",
throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password', 'ldap' are used to specify password for user " + user_name + ". Must be only one of them.",
ErrorCodes::BAD_ARGUMENTS);
if (num_password_fields < 1)
throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' or 'ldap' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
if (has_password_plaintext)
{
@ -80,6 +81,19 @@ namespace
user->authentication = Authentication{Authentication::DOUBLE_SHA1_PASSWORD};
user->authentication.setPasswordHashHex(config.getString(user_config + ".password_double_sha1_hex"));
}
else if (has_ldap)
{
bool has_ldap_server = config.has(user_config + ".ldap.server");
if (!has_ldap_server)
throw Exception("Missing mandatory 'server' in 'ldap', with LDAP server name, for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
const auto ldap_server_name = config.getString(user_config + ".ldap.server");
if (ldap_server_name.empty())
throw Exception("LDAP server name cannot be empty for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
user->authentication = Authentication{Authentication::LDAP_SERVER};
user->authentication.setServerName(ldap_server_name);
}
const auto profile_name_config = user_config + ".profile";
if (config.has(profile_name_config))

View File

@ -17,9 +17,11 @@ SRCS(
EnabledRolesInfo.cpp
EnabledRowPolicies.cpp
EnabledSettings.cpp
ExternalAuthenticators.cpp
GrantedRoles.cpp
IAccessEntity.cpp
IAccessStorage.cpp
LDAPClient.cpp
MemoryAccessStorage.cpp
MultipleAccessStorage.cpp
Quota.cpp

View File

@ -332,7 +332,7 @@ if (OPENSSL_CRYPTO_LIBRARY)
endif ()
if (USE_LDAP)
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${OPENLDAP_INCLUDE_DIR})
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${OPENLDAP_INCLUDE_DIRS})
dbms_target_link_libraries (PRIVATE ${OPENLDAP_LIBRARIES})
endif ()
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR})
@ -370,7 +370,9 @@ endif()
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR})
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR})
if (USE_MSGPACK)
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR})
endif()
if (USE_ORC)
dbms_target_link_libraries(PUBLIC ${ORC_LIBRARIES})

View File

@ -498,6 +498,7 @@ namespace ErrorCodes
extern const int NOT_A_LEADER = 529;
extern const int CANNOT_CONNECT_RABBITMQ = 530;
extern const int CANNOT_FSTAT = 531;
extern const int LDAP_ERROR = 532;
extern const int KEEPER_EXCEPTION = 999;
extern const int POCO_EXCEPTION = 1000;

View File

@ -12,6 +12,12 @@
namespace DB
{
namespace ErrorCodes
{
extern const int UNEXPECTED_END_OF_FILE;
}
FileChecker::FileChecker(DiskPtr disk_, const String & file_info_path_) : disk(std::move(disk_))
{
setPath(file_info_path_);
@ -24,19 +30,15 @@ void FileChecker::setPath(const String & file_info_path_)
tmp_files_info_path = parentPath(files_info_path) + "tmp_" + fileName(files_info_path);
}
void FileChecker::update(const String & file_path)
void FileChecker::update(const String & full_file_path)
{
initialize();
updateImpl(file_path);
save();
map[fileName(full_file_path)] = disk->getFileSize(full_file_path);
}
void FileChecker::update(const Strings::const_iterator & begin, const Strings::const_iterator & end)
void FileChecker::setEmpty(const String & full_file_path)
{
initialize();
for (auto it = begin; it != end; ++it)
updateImpl(*it);
save();
map[fileName(full_file_path)] = 0;
}
CheckResults FileChecker::check() const
@ -73,6 +75,28 @@ CheckResults FileChecker::check() const
return results;
}
void FileChecker::repair()
{
for (const auto & name_size : map)
{
const String & name = name_size.first;
size_t expected_size = name_size.second;
String path = parentPath(files_info_path) + name;
bool exists = disk->exists(path);
auto real_size = exists ? disk->getFileSize(path) : 0; /// No race condition assuming no one else is working with these files.
if (real_size < expected_size)
throw Exception(ErrorCodes::UNEXPECTED_END_OF_FILE, "Size of {} is less than expected. Size is {} but should be {}.",
path, real_size, expected_size);
if (real_size > expected_size)
{
LOG_WARNING(&Poco::Logger::get("FileChecker"), "Will truncate file {} that has size {} to size {}", path, real_size, expected_size);
disk->truncateFile(path, expected_size);
}
}
}
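repair() enforces a simple contract: a file larger than its recorded size holds the tail of an unfinished write and is truncated back, while a smaller file is unrecoverable and is an error. A standalone analogue of the same loop over std::filesystem, with a hypothetical function name:
#include <cstdint>
#include <filesystem>
#include <map>
#include <stdexcept>
#include <string>
void repairFiles(const std::map<std::string, std::uintmax_t> & expected_sizes)
{
    namespace fs = std::filesystem;
    for (const auto & [path, expected] : expected_sizes)
    {
        const std::uintmax_t real = fs::exists(path) ? fs::file_size(path) : 0;
        if (real < expected)
            throw std::runtime_error("Size of " + path + " is less than expected");
        if (real > expected)
            fs::resize_file(path, expected); /// cut off the tail left by an unfinished write
    }
}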
void FileChecker::initialize()
{
if (initialized)
@ -82,11 +106,6 @@ void FileChecker::initialize()
initialized = true;
}
void FileChecker::updateImpl(const String & file_path)
{
map[fileName(file_path)] = disk->getFileSize(file_path);
}
void FileChecker::save() const
{
{

View File

@ -14,19 +14,25 @@ class FileChecker
public:
FileChecker(DiskPtr disk_, const String & file_info_path_);
void setPath(const String & file_info_path_);
void update(const String & file_path);
void update(const Strings::const_iterator & begin, const Strings::const_iterator & end);
void update(const String & full_file_path);
void setEmpty(const String & full_file_path);
void save() const;
/// Check the files whose parameters are specified in sizes.json
CheckResults check() const;
/// Truncate files that have excessive size to the expected size.
/// Throw exception if the file size is less than expected.
/// The purpose of this function is to rollback a group of unfinished writes.
void repair();
private:
/// File name -> size.
using Map = std::map<String, UInt64>;
void initialize();
void updateImpl(const String & file_path);
void save() const;
void load(Map & local_map, const String & path) const;
DiskPtr disk;

View File

@ -10,3 +10,4 @@
#cmakedefine01 USE_INTERNAL_LLVM_LIBRARY
#cmakedefine01 USE_SSL
#cmakedefine01 USE_OPENCL
#cmakedefine01 USE_LDAP

View File

@ -19,6 +19,7 @@ namespace ErrorCodes
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
extern const int PATH_ACCESS_DENIED;
extern const int INCORRECT_DISK_INDEX;
extern const int CANNOT_TRUNCATE_FILE;
}
std::mutex DiskLocal::reservation_mutex;
@ -261,6 +262,13 @@ void DiskLocal::createHardLink(const String & src_path, const String & dst_path)
DB::createHardLink(disk_path + src_path, disk_path + dst_path);
}
void DiskLocal::truncateFile(const String & path, size_t size)
{
int res = truncate((disk_path + path).c_str(), size);
if (-1 == res)
throwFromErrnoWithPath("Cannot truncate file " + path, path, ErrorCodes::CANNOT_TRUNCATE_FILE);
}
void DiskLocal::createFile(const String & path)
{
Poco::File(disk_path + path).createFile();

View File

@ -99,6 +99,8 @@ public:
void createHardLink(const String & src_path, const String & dst_path) override;
void truncateFile(const String & path, size_t size) override;
const String getType() const override { return "local"; }
private:

View File

@ -408,6 +408,17 @@ void DiskMemory::setReadOnly(const String &)
throw Exception("Method setReadOnly is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
}
void DiskMemory::truncateFile(const String & path, size_t size)
{
std::lock_guard lock(mutex);
auto file_it = files.find(path);
if (file_it == files.end())
throw Exception("File '" + path + "' doesn't exist", ErrorCodes::FILE_DOESNT_EXIST);
file_it->second.data.resize(size);
}
using DiskMemoryPtr = std::shared_ptr<DiskMemory>;

View File

@ -90,6 +90,8 @@ public:
void createHardLink(const String & src_path, const String & dst_path) override;
void truncateFile(const String & path, size_t size) override;
const String getType() const override { return "memory"; }
private:

View File

@ -8,6 +8,11 @@
namespace DB
{
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
}
bool IDisk::isDirectoryEmpty(const String & path)
{
return !iterateDirectory(path)->isValid();
@ -42,4 +47,9 @@ void IDisk::copy(const String & from_path, const std::shared_ptr<IDisk> & to_dis
}
}
void IDisk::truncateFile(const String &, size_t)
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Truncate operation is not implemented for disk of type {}", getType());
}
}

View File

@ -172,6 +172,9 @@ public:
/// Create hardlink from `src_path` to `dst_path`.
virtual void createHardLink(const String & src_path, const String & dst_path) = 0;
/// Truncate file to specified size.
virtual void truncateFile(const String & path, size_t size);
/// Return disk type - "local", "s3", etc.
virtual const String getType() const = 0;
};

View File

@ -9,3 +9,5 @@
#cmakedefine01 USE_ORC
#cmakedefine01 USE_ARROW
#cmakedefine01 USE_PROTOBUF
#cmakedefine01 USE_MSGPACK

View File

@ -368,7 +368,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
}
SetPtr prepared_set;
if (functionIsInOrGlobalInOperator(node.name))
if (checkFunctionIsInOrGlobalInOperator(node))
{
/// Let's find the type of the first argument (then getActionsImpl will be called again and will not affect anything).
visit(node.arguments->children.at(0), data);
@ -445,7 +445,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
/// Select the name in the next cycle.
argument_names.emplace_back();
}
else if (functionIsInOrGlobalInOperator(node.name) && arg == 1 && prepared_set)
else if (checkFunctionIsInOrGlobalInOperator(node) && arg == 1 && prepared_set)
{
ColumnWithTypeAndName column;
column.type = std::make_shared<DataTypeSet>();

View File

@ -621,6 +621,7 @@ void Context::setConfig(const ConfigurationPtr & config)
{
auto lock = getLock();
shared->config = config;
shared->access_control_manager.setExternalAuthenticatorsConfig(*shared->config);
}
const Poco::Util::AbstractConfiguration & Context::getConfigRef() const
@ -640,6 +641,11 @@ const AccessControlManager & Context::getAccessControlManager() const
return shared->access_control_manager;
}
void Context::setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config)
{
auto lock = getLock();
shared->access_control_manager.setExternalAuthenticatorsConfig(config);
}
void Context::setUsersConfig(const ConfigurationPtr & config)
{

View File

@ -245,6 +245,9 @@ public:
AccessControlManager & getAccessControlManager();
const AccessControlManager & getAccessControlManager() const;
/// Sets external authenticators config (LDAP).
void setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config);
/** Take the list of users, quotas and configuration profiles from this config.
* The list of users is completely replaced.
* The accumulated quota values are not reset if the quota is not deleted.

View File

@ -180,7 +180,7 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTFunction & func, ASTPtr & as
/// But if an argument is not a subquery, scalar subqueries may still be nested deeper inside, and we need to descend into them.
std::vector<ASTPtr *> out;
if (functionIsInOrGlobalInOperator(func.name))
if (checkFunctionIsInOrGlobalInOperator(func))
{
for (auto & child : ast->children)
{

View File

@ -39,6 +39,7 @@ namespace ErrorCodes
extern const int INCOMPATIBLE_TYPE_OF_JOIN;
extern const int UNSUPPORTED_JOIN_KEYS;
extern const int LOGICAL_ERROR;
extern const int SYNTAX_ERROR;
extern const int SET_SIZE_LIMIT_EXCEEDED;
extern const int TYPE_MISMATCH;
}
@ -174,7 +175,7 @@ HashJoin::HashJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_s
key_columns.pop_back();
if (key_columns.empty())
throw Exception("ASOF join cannot be done without a joining column", ErrorCodes::LOGICAL_ERROR);
throw Exception("ASOF join cannot be done without a joining column", ErrorCodes::SYNTAX_ERROR);
/// this is going to set up the appropriate hash table for the direct lookup part of the join
/// However, this does not depend on the size of the asof join key (as that goes into the BST)

View File

@ -35,7 +35,7 @@ void MarkTableIdentifiersMatcher::visit(ASTTableExpression & table, ASTPtr &, Da
void MarkTableIdentifiersMatcher::visit(const ASTFunction & func, ASTPtr &, Data & data)
{
/// `IN t` can be specified, where t is a table, which is equivalent to `IN (SELECT * FROM t)`.
if (functionIsInOrGlobalInOperator(func.name))
if (checkFunctionIsInOrGlobalInOperator(func))
{
auto & ast = func.arguments->children.at(1);
auto opt_name = tryGetIdentifierName(ast);

View File

@ -233,46 +233,56 @@ void SystemLog<LogElement>::add(const LogElement & element)
/// Otherwise the tests like 01017_uniqCombined_memory_usage.sql will be flaky.
auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock();
std::lock_guard lock(mutex);
/// Should not log messages under mutex.
bool queue_is_half_full = false;
if (is_shutdown)
return;
if (queue.size() == DBMS_SYSTEM_LOG_QUEUE_SIZE / 2)
{
// The queue is more than half full, time to flush.
// We only check for strict equality, because messages are added one
// by one, under exclusive lock, so we will see each message count.
// It is enough to only wake the flushing thread once, after the message
// count increases past half available size.
const uint64_t queue_end = queue_front_index + queue.size();
if (requested_flush_before < queue_end)
requested_flush_before = queue_end;
std::unique_lock lock(mutex);
flush_event.notify_all();
LOG_INFO(log, "Queue is half full for system log '{}'.", demangle(typeid(*this).name()));
}
if (is_shutdown)
return;
if (queue.size() >= DBMS_SYSTEM_LOG_QUEUE_SIZE)
{
// Ignore all further entries until the queue is flushed.
// Log a message about that. Don't spam it -- this might be especially
// problematic in case of trace log. Remember what the front index of the
// queue was when we last logged the message. If it changed, it means the
// queue was flushed, and we can log again.
if (queue_front_index != logged_queue_full_at_index)
if (queue.size() == DBMS_SYSTEM_LOG_QUEUE_SIZE / 2)
{
logged_queue_full_at_index = queue_front_index;
queue_is_half_full = true;
// TextLog sets its logger level to 0, so this log is a noop and
// there is no recursive logging.
LOG_ERROR(log, "Queue is full for system log '{}' at {}", demangle(typeid(*this).name()), queue_front_index);
// The queue is more than half full, time to flush.
// We only check for strict equality, because messages are added one
// by one, under exclusive lock, so we will see each message count.
// It is enough to only wake the flushing thread once, after the message
// count increases past half available size.
const uint64_t queue_end = queue_front_index + queue.size();
if (requested_flush_before < queue_end)
requested_flush_before = queue_end;
flush_event.notify_all();
}
return;
if (queue.size() >= DBMS_SYSTEM_LOG_QUEUE_SIZE)
{
// Ignore all further entries until the queue is flushed.
// Log a message about that. Don't spam it -- this might be especially
// problematic in case of trace log. Remember what the front index of the
// queue was when we last logged the message. If it changed, it means the
// queue was flushed, and we can log again.
if (queue_front_index != logged_queue_full_at_index)
{
logged_queue_full_at_index = queue_front_index;
// TextLog sets its logger level to 0, so this log is a noop and
// there is no recursive logging.
lock.unlock();
LOG_ERROR(log, "Queue is full for system log '{}' at {}", demangle(typeid(*this).name()), queue_front_index);
}
return;
}
queue.push_back(element);
}
queue.push_back(element);
if (queue_is_half_full)
LOG_INFO(log, "Queue is half full for system log '{}'.", demangle(typeid(*this).name()));
}
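The reshuffle above follows one rule: decide what to log while holding the mutex, but emit the message only after releasing it, since the logging path may re-enter this very function. A minimal sketch of that pattern with placeholder types and sizes:
#include <cstddef>
#include <deque>
#include <iostream>
#include <mutex>
std::mutex queue_mutex;
std::deque<int> queue;
constexpr std::size_t max_queue_size = 1024;
void add(int element)
{
    bool queue_is_half_full = false;
    {
        std::lock_guard lock(queue_mutex);
        if (queue.size() == max_queue_size / 2)
            queue_is_half_full = true; /// remember the fact; do not log under the mutex
        if (queue.size() >= max_queue_size)
            return;                    /// drop entries when full (error message elided here)
        queue.push_back(element);
    }
    if (queue_is_half_full)            /// safe to log now: the lock is released
        std::cout << "queue is half full\n";
}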

View File

@ -1,10 +1,16 @@
#pragma once
#include <Common/StringUtils/StringUtils.h>
#include <Parsers/ASTFunction.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
inline bool functionIsInOperator(const std::string & name)
{
return name == "in" || name == "notIn" || name == "nullIn" || name == "notNullIn";
@ -30,4 +36,19 @@ inline bool functionIsDictGet(const std::string & name)
return startsWith(name, "dictGet") || (name == "dictHas") || (name == "dictIsIn");
}
inline bool checkFunctionIsInOrGlobalInOperator(const ASTFunction & func)
{
if (functionIsInOrGlobalInOperator(func.name))
{
size_t num_arguments = func.arguments->children.size();
if (num_arguments != 2)
throw Exception("Wrong number of arguments passed to function in. Expected: 2, passed: " + std::to_string(num_arguments),
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
return true;
}
return false;
}
}

View File

@ -33,27 +33,32 @@ namespace
}
String authentication_type_name = Authentication::TypeInfo::get(authentication_type).name;
std::optional<String> password;
std::optional<String> by_value;
if (show_password)
if (show_password || authentication_type == Authentication::LDAP_SERVER)
{
switch (authentication_type)
{
case Authentication::PLAINTEXT_PASSWORD:
{
password = authentication.getPassword();
by_value = authentication.getPassword();
break;
}
case Authentication::SHA256_PASSWORD:
{
authentication_type_name = "sha256_hash";
password = authentication.getPasswordHashHex();
by_value = authentication.getPasswordHashHex();
break;
}
case Authentication::DOUBLE_SHA1_PASSWORD:
{
authentication_type_name = "double_sha1_hash";
password = authentication.getPasswordHashHex();
by_value = authentication.getPasswordHashHex();
break;
}
case Authentication::LDAP_SERVER:
{
by_value = authentication.getServerName();
break;
}
@ -65,9 +70,9 @@ namespace
settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " IDENTIFIED WITH " << authentication_type_name
<< (settings.hilite ? IAST::hilite_none : "");
if (password)
if (by_value)
settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " BY " << (settings.hilite ? IAST::hilite_none : "")
<< quoteString(*password);
<< quoteString(*by_value);
}

View File

@ -13,14 +13,14 @@ class ASTRolesOrUsersSet;
class ASTSettingsProfileElements;
/** CREATE USER [IF NOT EXISTS | OR REPLACE] name
* [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}]
* [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash|ldap_server}] BY {'password'|'hash'|'server_name'}]
* [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
* [DEFAULT ROLE role [,...]]
* [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
*
* ALTER USER [IF EXISTS] name
* [RENAME TO new_name]
* [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}]
* [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash|ldap_server}] BY {'password'|'hash'|'server_name'}]
* [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
* [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
* [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]

View File

@ -49,6 +49,7 @@ namespace
std::optional<Authentication::Type> type;
bool expect_password = false;
bool expect_hash = false;
bool expect_server_name = false;
if (ParserKeyword{"WITH"}.ignore(pos, expected))
{
@ -57,7 +58,12 @@ namespace
if (ParserKeyword{Authentication::TypeInfo::get(check_type).raw_name}.ignore(pos, expected))
{
type = check_type;
expect_password = (check_type != Authentication::NO_PASSWORD);
if (check_type == Authentication::LDAP_SERVER)
expect_server_name = true;
else if (check_type != Authentication::NO_PASSWORD)
expect_password = true;
break;
}
}
@ -85,21 +91,23 @@ namespace
expect_password = true;
}
String password;
if (expect_password || expect_hash)
String value;
if (expect_password || expect_hash || expect_server_name)
{
ASTPtr ast;
if (!ParserKeyword{"BY"}.ignore(pos, expected) || !ParserStringLiteral{}.parse(pos, ast, expected))
return false;
password = ast->as<const ASTLiteral &>().value.safeGet<String>();
value = ast->as<const ASTLiteral &>().value.safeGet<String>();
}
authentication = Authentication{*type};
if (expect_password)
authentication.setPassword(password);
authentication.setPassword(value);
else if (expect_hash)
authentication.setPasswordHashHex(password);
authentication.setPasswordHashHex(value);
else if (expect_server_name)
authentication.setServerName(value);
return true;
});

View File

@ -7,13 +7,13 @@ namespace DB
{
/** Parses queries like
* CREATE USER [IF NOT EXISTS | OR REPLACE] name
* [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}]
* [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash|ldap_server}] BY {'password'|'hash'|'server_name'}]
* [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
* [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
*
* ALTER USER [IF EXISTS] name
* [RENAME TO new_name]
* [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}]
* [NOT IDENTIFIED | IDENTIFIED [WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash|ldap_server}] BY {'password'|'hash'|'server_name'}]
* [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
* [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
*/

View File

@ -103,6 +103,20 @@ bool ParserArrayJoin::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
}
void ParserTablesInSelectQueryElement::parseJoinStrictness(Pos & pos, ASTTableJoin & table_join)
{
if (ParserKeyword("ANY").ignore(pos))
table_join.strictness = ASTTableJoin::Strictness::Any;
else if (ParserKeyword("ALL").ignore(pos))
table_join.strictness = ASTTableJoin::Strictness::All;
else if (ParserKeyword("ASOF").ignore(pos))
table_join.strictness = ASTTableJoin::Strictness::Asof;
else if (ParserKeyword("SEMI").ignore(pos))
table_join.strictness = ASTTableJoin::Strictness::Semi;
else if (ParserKeyword("ANTI").ignore(pos) || ParserKeyword("ONLY").ignore(pos))
table_join.strictness = ASTTableJoin::Strictness::Anti;
}
bool ParserTablesInSelectQueryElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
auto res = std::make_shared<ASTTablesInSelectQueryElement>();
@ -131,19 +145,12 @@ bool ParserTablesInSelectQueryElement::parseImpl(Pos & pos, ASTPtr & node, Expec
else if (ParserKeyword("LOCAL").ignore(pos))
table_join->locality = ASTTableJoin::Locality::Local;
if (ParserKeyword("ANY").ignore(pos))
table_join->strictness = ASTTableJoin::Strictness::Any;
else if (ParserKeyword("ALL").ignore(pos))
table_join->strictness = ASTTableJoin::Strictness::All;
else if (ParserKeyword("ASOF").ignore(pos))
table_join->strictness = ASTTableJoin::Strictness::Asof;
else if (ParserKeyword("SEMI").ignore(pos))
table_join->strictness = ASTTableJoin::Strictness::Semi;
else if (ParserKeyword("ANTI").ignore(pos) || ParserKeyword("ONLY").ignore(pos))
table_join->strictness = ASTTableJoin::Strictness::Anti;
else
table_join->strictness = ASTTableJoin::Strictness::Unspecified;
table_join->strictness = ASTTableJoin::Strictness::Unspecified;
/// Legacy: allow JOIN type before JOIN kind
parseJoinStrictness(pos, *table_join);
bool no_kind = false;
if (ParserKeyword("INNER").ignore(pos))
table_join->kind = ASTTableJoin::Kind::Inner;
else if (ParserKeyword("LEFT").ignore(pos))
@ -155,6 +162,20 @@ bool ParserTablesInSelectQueryElement::parseImpl(Pos & pos, ASTPtr & node, Expec
else if (ParserKeyword("CROSS").ignore(pos))
table_join->kind = ASTTableJoin::Kind::Cross;
else
no_kind = true;
/// Standard position: JOIN type after JOIN kind
parseJoinStrictness(pos, *table_join);
/// Optional OUTER keyword for outer joins.
if (table_join->kind == ASTTableJoin::Kind::Left
|| table_join->kind == ASTTableJoin::Kind::Right
|| table_join->kind == ASTTableJoin::Kind::Full)
{
ParserKeyword("OUTER").ignore(pos);
}
if (no_kind)
{
/// Use INNER by default as in another DBMS.
if (table_join->strictness == ASTTableJoin::Strictness::Semi ||
@ -172,14 +193,6 @@ bool ParserTablesInSelectQueryElement::parseImpl(Pos & pos, ASTPtr & node, Expec
(table_join->kind != ASTTableJoin::Kind::Left && table_join->kind != ASTTableJoin::Kind::Right))
throw Exception("SEMI|ANTI JOIN should be LEFT or RIGHT.", ErrorCodes::SYNTAX_ERROR);
/// Optional OUTER keyword for outer joins.
if (table_join->kind == ASTTableJoin::Kind::Left
|| table_join->kind == ASTTableJoin::Kind::Right
|| table_join->kind == ASTTableJoin::Kind::Full)
{
ParserKeyword("OUTER").ignore(pos);
}
if (!ParserKeyword("JOIN").ignore(pos, expected))
return false;
}

View File

@ -6,6 +6,8 @@
namespace DB
{
struct ASTTableJoin;
/** List of single or multiple JOIN-ed tables or subqueries in SELECT query, with ARRAY JOINs and SAMPLE, FINAL modifiers.
*/
class ParserTablesInSelectQuery : public IParserBase
@ -27,6 +29,8 @@ protected:
private:
bool is_first;
static void parseJoinStrictness(Pos & pos, ASTTableJoin & table_join);
};

View File

@ -1,5 +1,8 @@
#include <cstdlib>
#include <Processors/Formats/Impl/MsgPackRowInputFormat.h>
#if USE_MSGPACK
#include <cstdlib>
#include <Common/assert_cast.h>
#include <IO/ReadHelpers.h>
@ -209,3 +212,15 @@ void registerInputFormatProcessorMsgPack(FormatFactory & factory)
}
}
#else
namespace DB
{
class FormatFactory;
void registerInputFormatProcessorMsgPack(FormatFactory &)
{
}
}
#endif

View File

@ -1,5 +1,13 @@
#pragma once
#if !defined(ARCADIA_BUILD)
# include "config_formats.h"
# include "config_core.h"
#endif
#if USE_MSGPACK
#include <Processors/Formats/IRowInputFormat.h>
#include <Formats/FormatFactory.h>
#include <IO/PeekableReadBuffer.h>
@ -63,3 +71,5 @@ private:
};
}
#endif

View File

@ -1,4 +1,7 @@
#include <Processors/Formats/Impl/MsgPackRowOutputFormat.h>
#if USE_MSGPACK
#include <Formats/FormatFactory.h>
#include <Common/assert_cast.h>
@ -144,8 +147,10 @@ void MsgPackRowOutputFormat::write(const Columns & columns, size_t row_num)
}
}
void registerOutputFormatProcessorMsgPack(FormatFactory & factory)
{
factory.registerOutputFormatProcessor("MsgPack", [](
WriteBuffer & buf,
const Block & sample,
@ -157,3 +162,15 @@ void registerOutputFormatProcessorMsgPack(FormatFactory & factory)
}
}
#else
namespace DB
{
class FormatFactory;
void registerOutputFormatProcessorMsgPack(FormatFactory &)
{
}
}
#endif

View File

@ -1,5 +1,12 @@
#pragma once
#if !defined(ARCADIA_BUILD)
# include "config_formats.h"
# include "config_core.h"
#endif
#if USE_MSGPACK
#include <Core/Block.h>
#include <IO/WriteBuffer.h>
#include <Processors/Formats/IRowOutputFormat.h>
@ -26,3 +33,5 @@ private:
};
}
#endif

View File

@ -230,7 +230,7 @@ void MySQLHandler::authenticate(const String & user_name, const String & auth_pl
// For compatibility with JavaScript MySQL client, Native41 authentication plugin is used when possible (if password is specified using double SHA1). Otherwise SHA256 plugin is used.
auto user = connection_context.getAccessControlManager().read<User>(user_name);
const DB::Authentication::Type user_auth_type = user->authentication.getType();
if (user_auth_type != DB::Authentication::DOUBLE_SHA1_PASSWORD && user_auth_type != DB::Authentication::PLAINTEXT_PASSWORD && user_auth_type != DB::Authentication::NO_PASSWORD)
if (user_auth_type == DB::Authentication::SHA256_PASSWORD)
{
authPluginSSL();
}

View File

@ -328,8 +328,14 @@ bool StorageLiveView::getNewBlocks()
BlocksPtr new_blocks = std::make_shared<Blocks>();
BlocksMetadataPtr new_blocks_metadata = std::make_shared<BlocksMetadata>();
mergeable_blocks = collectMergeableBlocks(*live_view_context);
Pipes from = blocksToPipes(mergeable_blocks->blocks, mergeable_blocks->sample_block);
/// We must not set mergeable_blocks here or anywhere else outside the writeIntoLiveView function:
/// there is a race condition when a new block has been inserted into the source table
/// by PushingToViewsBlockOutputStream and this method runs before writeIntoLiveView is called,
/// which can add the same block to mergeable_blocks twice and duplicate the inserted data.
auto new_mergeable_blocks = collectMergeableBlocks(*live_view_context);
Pipes from = blocksToPipes(new_mergeable_blocks->blocks, new_mergeable_blocks->sample_block);
BlockInputStreamPtr data = completeQuery(std::move(from));
while (Block block = data->read())

View File

@ -1125,6 +1125,83 @@ std::optional<Range> KeyCondition::applyMonotonicFunctionsChainToRange(
return key_range;
}
// Returns whether the condition is one continuous range of the primary key,
// where every field is matched by a range or a single-element set.
// This allows using a more efficient lookup with no extra reads.
bool KeyCondition::matchesExactContinuousRange() const
{
// Not implemented yet.
if (hasMonotonicFunctionsChain())
return false;
enum Constraint
{
POINT,
RANGE,
UNKNOWN,
};
std::vector<Constraint> column_constraints(key_columns.size(), Constraint::UNKNOWN);
for (const auto & element : rpn)
{
if (element.function == RPNElement::Function::FUNCTION_AND)
{
continue;
}
if (element.function == RPNElement::Function::FUNCTION_IN_SET && element.set_index && element.set_index->size() == 1)
{
column_constraints[element.key_column] = Constraint::POINT;
continue;
}
if (element.function == RPNElement::Function::FUNCTION_IN_RANGE)
{
if (element.range.left == element.range.right)
{
column_constraints[element.key_column] = Constraint::POINT;
}
if (column_constraints[element.key_column] != Constraint::POINT)
{
column_constraints[element.key_column] = Constraint::RANGE;
}
continue;
}
if (element.function == RPNElement::Function::FUNCTION_UNKNOWN)
{
continue;
}
return false;
}
auto min_constraint = column_constraints[0];
if (min_constraint > Constraint::RANGE)
{
return false;
}
for (size_t i = 1; i < key_columns.size(); ++i)
{
if (column_constraints[i] < min_constraint)
{
return false;
}
if (column_constraints[i] == Constraint::RANGE && min_constraint == Constraint::RANGE)
{
return false;
}
min_constraint = column_constraints[i];
}
return true;
}
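In effect the loop accepts only constraint sequences of the shape POINT* RANGE? UNKNOWN* over the key columns: for a PRIMARY KEY (a, b), a = 1 AND b BETWEEN 10 AND 20 qualifies, while a BETWEEN 1 AND 2 AND b = 5 does not. The acceptance check in isolation, as a standalone sketch:
#include <cstddef>
#include <vector>
enum Constraint { POINT, RANGE, UNKNOWN }; /// ordered from strongest to weakest
bool isExactContinuousRange(const std::vector<Constraint> & cols)
{
    Constraint min_constraint = cols.at(0);
    if (min_constraint > RANGE)
        return false;                   /// the first key column must be constrained
    for (std::size_t i = 1; i < cols.size(); ++i)
    {
        if (cols[i] < min_constraint)
            return false;               /// a stronger constraint after a weaker one
        if (cols[i] == RANGE && min_constraint == RANGE)
            return false;               /// at most one RANGE column is allowed
        min_constraint = cols[i];
    }
    return true;                        /// e.g. {POINT, RANGE} -> true, {RANGE, POINT} -> false
}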
BoolMask KeyCondition::checkInHyperrectangle(
const std::vector<Range> & hyperrectangle,
const DataTypes & data_types) const

View File

@ -309,6 +309,8 @@ public:
MonotonicFunctionsChain & functions,
DataTypePtr current_type);
bool matchesExactContinuousRange() const;
private:
/// The expression is stored as Reverse Polish Notation.
struct RPNElement

View File

@ -1292,7 +1292,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(
const MergeTreeData::DataPartPtr & part,
const StorageMetadataPtr & metadata_snapshot,
const KeyCondition & key_condition,
const Settings & settings)
const Settings & settings) const
{
MarkRanges res;
@ -1306,14 +1306,73 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(
/// If index is not used.
if (key_condition.alwaysUnknownOrTrue())
{
LOG_TRACE(log, "Not using index on part {}", part->name);
if (has_final_mark)
res.push_back(MarkRange(0, marks_count - 1));
else
res.push_back(MarkRange(0, marks_count));
return res;
}
size_t used_key_size = key_condition.getMaxKeyColumn() + 1;
std::function<void(size_t, size_t, FieldRef &)> create_field_ref;
/// If there are no monotonic functions, there is no need to save block reference.
/// Passing explicit field to FieldRef allows to optimize ranges and shows better performance.
const auto & primary_key = metadata_snapshot->getPrimaryKey();
if (key_condition.hasMonotonicFunctionsChain())
{
auto index_block = std::make_shared<Block>();
for (size_t i = 0; i < used_key_size; ++i)
index_block->insert({index[i], primary_key.data_types[i], primary_key.column_names[i]});
create_field_ref = [index_block](size_t row, size_t column, FieldRef & field)
{
field = {index_block.get(), row, column};
};
}
else
{
size_t used_key_size = key_condition.getMaxKeyColumn() + 1;
create_field_ref = [&index](size_t row, size_t column, FieldRef & field)
{
index[column]->get(row, field);
};
}
/// NOTE Creating temporary Field objects to pass to KeyCondition.
std::vector<FieldRef> index_left(used_key_size);
std::vector<FieldRef> index_right(used_key_size);
auto may_be_true_in_range = [&](MarkRange & range)
{
if (range.end == marks_count && !has_final_mark)
{
for (size_t i = 0; i < used_key_size; ++i)
create_field_ref(range.begin, i, index_left[i]);
return key_condition.mayBeTrueAfter(
used_key_size, index_left.data(), primary_key.data_types);
}
if (has_final_mark && range.end == marks_count)
range.end -= 1; /// Remove final empty mark. It's useful only for primary key condition.
for (size_t i = 0; i < used_key_size; ++i)
{
create_field_ref(range.begin, i, index_left[i]);
create_field_ref(range.end, i, index_right[i]);
}
return key_condition.mayBeTrueInRange(
used_key_size, index_left.data(), index_right.data(), primary_key.data_types);
};
if (!key_condition.matchesExactContinuousRange())
{
// Do exclusion search, where we drop ranges that do not match
size_t min_marks_for_seek = roundRowsOrBytesToMarks(
settings.merge_tree_min_rows_for_seek,
settings.merge_tree_min_bytes_for_seek,
@ -1321,69 +1380,22 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(
part->index_granularity_info.index_granularity_bytes);
/** There will always be disjoint suspicious segments on the stack, the leftmost one at the top (back).
* At each step, take the left segment and check if it fits.
* If fits, split it into smaller ones and put them on the stack. If not, discard it.
* If the segment is already of one mark length, add it to response and discard it.
*/
std::vector<MarkRange> ranges_stack = { {0, marks_count} };
std::function<void(size_t, size_t, FieldRef &)> create_field_ref;
/// If there are no monotonic functions, there is no need to save block reference.
/// Passing explicit field to FieldRef allows to optimize ranges and shows better performance.
const auto & primary_key = metadata_snapshot->getPrimaryKey();
if (key_condition.hasMonotonicFunctionsChain())
{
auto index_block = std::make_shared<Block>();
for (size_t i = 0; i < used_key_size; ++i)
index_block->insert({index[i], primary_key.data_types[i], primary_key.column_names[i]});
create_field_ref = [index_block](size_t row, size_t column, FieldRef & field)
{
field = {index_block.get(), row, column};
};
}
else
{
create_field_ref = [&index](size_t row, size_t column, FieldRef & field)
{
index[column]->get(row, field);
};
}
/// NOTE Creating temporary Field objects to pass to KeyCondition.
std::vector<FieldRef> index_left(used_key_size);
std::vector<FieldRef> index_right(used_key_size);
size_t steps = 0;
while (!ranges_stack.empty())
{
MarkRange range = ranges_stack.back();
ranges_stack.pop_back();
bool may_be_true;
if (range.end == marks_count && !has_final_mark)
{
for (size_t i = 0; i < used_key_size; ++i)
create_field_ref(range.begin, i, index_left[i]);
steps++;
may_be_true = key_condition.mayBeTrueAfter(
used_key_size, index_left.data(), primary_key.data_types);
}
else
{
if (has_final_mark && range.end == marks_count)
range.end -= 1; /// Remove final empty mark. It's useful only for primary key condition.
for (size_t i = 0; i < used_key_size; ++i)
{
create_field_ref(range.begin, i, index_left[i]);
create_field_ref(range.end, i, index_right[i]);
}
may_be_true = key_condition.mayBeTrueInRange(
used_key_size, index_left.data(), index_right.data(), primary_key.data_types);
}
if (!may_be_true)
if (!may_be_true_in_range(range))
continue;
if (range.end == range.begin + 1)
@ -1406,6 +1418,76 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(
ranges_stack.emplace_back(range.begin, end);
}
}
LOG_TRACE(log, "Used generic exclusion search over index for part {} with {} steps", part->name, steps);
}
else
{
// Do inclusion search, where we only look for one range
size_t steps = 0;
auto find_leaf = [&](bool left) -> std::optional<size_t>
{
std::vector<MarkRange> stack = {};
MarkRange range = {0, marks_count};
steps++;
if (may_be_true_in_range(range))
stack.emplace_back(range.begin, range.end);
while (!stack.empty())
{
range = stack.back();
stack.pop_back();
if (range.end == range.begin + 1)
{
if (left)
return range.begin;
else
return range.end;
}
else
{
std::vector<MarkRange> check_order = {};
MarkRange left_range = {range.begin, (range.begin + range.end) / 2};
MarkRange right_range = {(range.begin + range.end) / 2, range.end};
if (left)
{
check_order.emplace_back(left_range.begin, left_range.end);
check_order.emplace_back(right_range.begin, right_range.end);
}
else
{
check_order.emplace_back(right_range.begin, right_range.end);
check_order.emplace_back(left_range.begin, left_range.end);
}
steps++;
if (may_be_true_in_range(check_order[0]))
{
stack.emplace_back(check_order[0].begin, check_order[0].end);
continue;
}
stack.emplace_back(check_order[1].begin, check_order[1].end);
}
}
return std::nullopt;
};
auto left_leaf = find_leaf(true);
if (left_leaf)
res.emplace_back(left_leaf.value(), find_leaf(false).value());
LOG_TRACE(log, "Used optimized inclusion search over index for part {} with {} steps", part->name, steps);
}
return res;
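Because a condition that passes matchesExactContinuousRange() selects one contiguous block of marks, may_be_true_in_range is monotone enough for binary descent: find the leftmost true mark by preferring the left half and the rightmost by preferring the right, each in O(log marks) predicate calls instead of the generic exclusion scan. A standalone sketch of that edge search, where the predicate is a stand-in for may_be_true_in_range:
#include <cstddef>
#include <functional>
#include <optional>
/// Assumes the predicate is true on exactly one contiguous block of marks.
std::optional<std::size_t> findEdge(std::size_t marks_count, bool left,
    const std::function<bool(std::size_t, std::size_t)> & may_be_true_in_range)
{
    if (marks_count == 0 || !may_be_true_in_range(0, marks_count))
        return std::nullopt;
    std::size_t begin = 0;
    std::size_t end = marks_count;
    while (end - begin > 1)
    {
        const std::size_t mid = (begin + end) / 2;
        if (left)
        {
            if (may_be_true_in_range(begin, mid))
                end = mid;      /// the left edge lies in the left half
            else
                begin = mid;    /// otherwise it must lie in the right half
        }
        else
        {
            if (may_be_true_in_range(mid, end))
                begin = mid;    /// the right edge lies in the right half
            else
                end = mid;
        }
    }
    return left ? begin : end;  /// matches find_leaf: begin of the mark for left, end for right
}
/// Usage mirrors the code above: res.emplace_back(*findEdge(n, true, pred), *findEdge(n, false, pred));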

View File

@ -95,11 +95,11 @@ private:
const KeyCondition & key_condition,
const Settings & settings) const;
static MarkRanges markRangesFromPKRange(
MarkRanges markRangesFromPKRange(
const MergeTreeData::DataPartPtr & part,
const StorageMetadataPtr & metadata_snapshot,
const KeyCondition & key_condition,
const Settings & settings);
const Settings & settings) const;
MarkRanges filterMarksUsingIndex(
MergeTreeIndexPtr index_helper,

View File

@ -127,7 +127,12 @@ public:
{
try
{
writeSuffix();
if (!done)
{
/// Rollback partial writes.
streams.clear();
storage.file_checker.repair();
}
}
catch (...)
{
@ -298,7 +303,6 @@ void LogBlockOutputStream::writeSuffix()
{
if (done)
return;
done = true;
WrittenStreams written_streams;
IDataType::SerializeBinaryBulkSettings settings;
@ -323,9 +327,12 @@ void LogBlockOutputStream::writeSuffix()
column_files.push_back(storage.files[name_stream.first].data_file_path);
column_files.push_back(storage.marks_file_path);
storage.file_checker.update(column_files.begin(), column_files.end());
for (const auto & file : column_files)
storage.file_checker.update(file);
storage.file_checker.save();
streams.clear();
done = true;
}
@ -427,6 +434,7 @@ StorageLog::StorageLog(
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
bool attach,
size_t max_compress_block_size_)
: IStorage(table_id_)
, disk(std::move(disk_))
@ -442,13 +450,31 @@ StorageLog::StorageLog(
if (relative_path_.empty())
throw Exception("Storage " + getName() + " requires data path", ErrorCodes::INCORRECT_FILE_NAME);
/// create directories if they do not exist
disk->createDirectories(table_path);
if (!attach)
{
/// create directories if they do not exist
disk->createDirectories(table_path);
}
else
{
try
{
file_checker.repair();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
for (const auto & column : storage_metadata.getColumns().getAllPhysical())
addFiles(column.name, *column.type);
marks_file_path = table_path + DBMS_STORAGE_LOG_MARKS_FILE_NAME;
if (!attach)
for (const auto & file : files)
file_checker.setEmpty(file.second.data_file_path);
}
@ -655,7 +681,7 @@ void registerStorageLog(StorageFactory & factory)
return StorageLog::create(
disk, args.relative_data_path, args.table_id, args.columns, args.constraints,
args.context.getSettings().max_compress_block_size);
args.attach, args.context.getSettings().max_compress_block_size);
}, features);
}

View File

@ -54,6 +54,7 @@ protected:
const StorageID & table_id_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
bool attach,
size_t max_compress_block_size_);
private:

View File

@ -161,11 +161,12 @@ public:
, lock(storage.rwlock)
, data_out_file(storage.table_path + "data.bin")
, data_out_compressed(storage.disk->writeFile(data_out_file, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append))
, data_out(*data_out_compressed, CompressionCodecFactory::instance().getDefaultCodec(), storage.max_compress_block_size)
, data_out(std::make_unique<CompressedWriteBuffer>(
*data_out_compressed, CompressionCodecFactory::instance().getDefaultCodec(), storage.max_compress_block_size))
, index_out_file(storage.table_path + "index.mrk")
, index_out_compressed(storage.disk->writeFile(index_out_file, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append))
, index_out(*index_out_compressed)
, block_out(data_out, 0, metadata_snapshot->getSampleBlock(), false, &index_out, storage.disk->getFileSize(data_out_file))
, index_out(std::make_unique<CompressedWriteBuffer>(*index_out_compressed))
, block_out(*data_out, 0, metadata_snapshot->getSampleBlock(), false, index_out.get(), storage.disk->getFileSize(data_out_file))
{
}
@ -173,7 +174,16 @@ public:
{
try
{
writeSuffix();
if (!done)
{
/// Rollback partial writes.
data_out.reset();
data_out_compressed.reset();
index_out.reset();
index_out_compressed.reset();
storage.file_checker.repair();
}
}
catch (...)
{
@ -194,13 +204,14 @@ public:
return;
block_out.writeSuffix();
data_out.next();
data_out->next();
data_out_compressed->next();
index_out.next();
index_out->next();
index_out_compressed->next();
storage.file_checker.update(data_out_file);
storage.file_checker.update(index_out_file);
storage.file_checker.save();
done = true;
}
@ -212,10 +223,10 @@ private:
String data_out_file;
std::unique_ptr<WriteBuffer> data_out_compressed;
CompressedWriteBuffer data_out;
std::unique_ptr<CompressedWriteBuffer> data_out;
String index_out_file;
std::unique_ptr<WriteBuffer> index_out_compressed;
CompressedWriteBuffer index_out;
std::unique_ptr<CompressedWriteBuffer> index_out;
NativeBlockOutputStream block_out;
bool done = false;
@ -249,6 +260,20 @@ StorageStripeLog::StorageStripeLog(
{
/// create directories if they do not exist
disk->createDirectories(table_path);
file_checker.setEmpty(table_path + "data.bin");
file_checker.setEmpty(table_path + "index.mrk");
}
else
{
try
{
file_checker.repair();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
}

View File

@ -118,7 +118,12 @@ public:
{
try
{
writeSuffix();
if (!done)
{
/// Rollback partial writes.
streams.clear();
storage.file_checker.repair();
}
}
catch (...)
{
@ -277,11 +282,13 @@ void TinyLogBlockOutputStream::writeSuffix()
{
if (done)
return;
done = true;
/// If nothing was written - leave the table in initial state.
if (streams.empty())
{
done = true;
return;
}
WrittenStreams written_streams;
IDataType::SerializeBinaryBulkSettings settings;
@ -303,9 +310,12 @@ void TinyLogBlockOutputStream::writeSuffix()
for (auto & pair : streams)
column_files.push_back(storage.files[pair.first].data_file_path);
storage.file_checker.update(column_files.begin(), column_files.end());
for (const auto & file : column_files)
storage.file_checker.update(file);
storage.file_checker.save();
streams.clear();
done = true;
}
@ -352,9 +362,24 @@ StorageTinyLog::StorageTinyLog(
/// create directories if they do not exist
disk->createDirectories(table_path);
}
else
{
try
{
file_checker.repair();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
for (const auto & col : storage_metadata.getColumns().getAllPhysical())
addFiles(col.name, *col.type);
if (!attach)
for (const auto & file : files)
file_checker.setEmpty(file.second.data_file_path);
}

View File

@ -55,6 +55,7 @@ const char * auto_config_build[]
"USE_HYPERSCAN", "@ENABLE_HYPERSCAN@",
"USE_SIMDJSON", "@USE_SIMDJSON@",
"USE_GRPC", "@USE_GRPC@",
"USE_LDAP", "@USE_LDAP@",
nullptr, nullptr
};

View File

@ -12,6 +12,10 @@
#include <Access/AccessControlManager.h>
#include <Access/User.h>
#include <Access/AccessFlags.h>
#include <Poco/JSON/JSON.h>
#include <Poco/JSON/Object.h>
#include <Poco/JSON/Stringifier.h>
#include <sstream>
namespace DB
@ -35,7 +39,7 @@ NamesAndTypesList StorageSystemUsers::getNamesAndTypes()
{"id", std::make_shared<DataTypeUUID>()},
{"storage", std::make_shared<DataTypeString>()},
{"auth_type", std::make_shared<DataTypeEnum8>(getAuthenticationTypeEnumValues())},
{"auth_params", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"auth_params", std::make_shared<DataTypeString>()},
{"host_ip", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"host_names", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"host_names_regexp", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
@ -59,8 +63,7 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, const Context &
auto & column_id = assert_cast<ColumnUInt128 &>(*res_columns[column_index++]).getData();
auto & column_storage = assert_cast<ColumnString &>(*res_columns[column_index++]);
auto & column_auth_type = assert_cast<ColumnInt8 &>(*res_columns[column_index++]).getData();
auto & column_auth_params = assert_cast<ColumnString &>(assert_cast<ColumnArray &>(*res_columns[column_index]).getData());
auto & column_auth_params_offsets = assert_cast<ColumnArray &>(*res_columns[column_index++]).getOffsets();
auto & column_auth_params = assert_cast<ColumnString &>(*res_columns[column_index++]);
auto & column_host_ip = assert_cast<ColumnString &>(assert_cast<ColumnArray &>(*res_columns[column_index]).getData());
auto & column_host_ip_offsets = assert_cast<ColumnArray &>(*res_columns[column_index++]).getOffsets();
auto & column_host_names = assert_cast<ColumnString &>(assert_cast<ColumnArray &>(*res_columns[column_index]).getData());
@ -86,7 +89,24 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, const Context &
column_id.push_back(id);
column_storage.insertData(storage_name.data(), storage_name.length());
column_auth_type.push_back(static_cast<Int8>(authentication.getType()));
column_auth_params_offsets.push_back(column_auth_params.size());
if (authentication.getType() == Authentication::Type::LDAP_SERVER)
{
Poco::JSON::Object auth_params_json;
auth_params_json.set("server", authentication.getServerName());
std::ostringstream oss;
Poco::JSON::Stringifier::stringify(auth_params_json, oss);
const auto str = oss.str();
column_auth_params.insertData(str.data(), str.size());
}
else
{
static constexpr std::string_view empty_json{"{}"};
column_auth_params.insertData(empty_json.data(), empty_json.length());
}
if (allowed_hosts.containsAnyHost())
{

View File

@ -31,7 +31,7 @@ DB::StoragePtr createStorage(DB::DiskPtr & disk)
names_and_types.emplace_back("a", std::make_shared<DataTypeUInt64>());
StoragePtr table = StorageLog::create(
disk, "table/", StorageID("test", "test"), ColumnsDescription{names_and_types}, ConstraintsDescription{}, 1048576);
disk, "table/", StorageID("test", "test"), ColumnsDescription{names_and_types}, ConstraintsDescription{}, false, 1048576);
table->startup();
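
The bare false added to the updated call is easy to misread. A sketch of the same call site with argument comments, assuming (per the StorageTinyLog hunk above) that the new parameter is the attach flag threaded through the Log-family constructors in this commit:

StoragePtr table = StorageLog::create(
    disk, "table/", StorageID("test", "test"),
    ColumnsDescription{names_and_types}, ConstraintsDescription{},
    /* attach = */ false,                     // assumed meaning of the new argument
    /* max_compress_block_size = */ 1048576); // assumed meaning of the trailing argument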
@ -100,6 +100,7 @@ std::string writeData(int rows, DB::StoragePtr & table, const DB::Context & cont
BlockOutputStreamPtr out = table->write({}, metadata_snapshot, context);
out->write(block);
out->writeSuffix();
return data;
}
@ -115,7 +116,8 @@ std::string readData(DB::StoragePtr & table, const DB::Context & context)
QueryProcessingStage::Enum stage = table->getQueryProcessingStage(context);
BlockInputStreamPtr in = std::make_shared<TreeExecutorBlockInputStream>(std::move(table->read(column_names, metadata_snapshot, {}, context, stage, 8192, 1)[0]));
BlockInputStreamPtr in = std::make_shared<TreeExecutorBlockInputStream>(
std::move(table->read(column_names, metadata_snapshot, {}, context, stage, 8192, 1)[0]));
Block sample;
{


@ -202,9 +202,8 @@ def run_tests_array(all_tests_with_params):
(name, ext) = os.path.splitext(case)
try:
sys.stdout.flush()
sys.stdout.write("{0:72}".format(name + ": "))
if run_total == 1:
sys.stdout.flush()
if args.skip and any(s in name for s in args.skip):
print(MSG_SKIPPED + " - skip")
@ -598,16 +597,14 @@ def main(args):
if jobs > run_total:
run_total = jobs
batch_size = len(all_tests) // jobs  # integer division: range() below needs an int step
all_tests_array = []
for n in range(1, 1 + int(run_total)):
start = int(tests_n / run_total * (n - 1))
end = int(tests_n / run_total * n)
all_tests_array.append([all_tests[start : end], suite, suite_dir, suite_tmp_dir, run_total])
for i in range(0, len(all_tests), batch_size):
all_tests_array.append((all_tests[i:i+batch_size], suite, suite_dir, suite_tmp_dir, run_total))
if jobs > 1:
with closing(multiprocessing.Pool(processes=jobs)) as pool:
pool.map(run_tests_array, all_tests_array)
pool.terminate()
else:
run_tests_array(all_tests_array[int(run_n)-1])


@ -200,8 +200,8 @@ def test_introspection():
assert expected_access2 in instance.query("SHOW ACCESS")
assert instance.query("SELECT name, storage, auth_type, auth_params, host_ip, host_names, host_names_regexp, host_names_like, default_roles_all, default_roles_list, default_roles_except from system.users WHERE name IN ('A', 'B') ORDER BY name") ==\
TSV([[ "A", "disk", "no_password", "[]", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ],
[ "B", "disk", "no_password", "[]", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ]])
TSV([[ "A", "disk", "no_password", "{}", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ],
[ "B", "disk", "no_password", "{}", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ]])
assert instance.query("SELECT * from system.grants WHERE user_name IN ('A', 'B') ORDER BY user_name, access_type, grant_option") ==\
TSV([[ "A", "\N", "SELECT", "test", "table", "\N", 0, 0 ],


@ -1,16 +1,13 @@
<test>
<preconditions>
<!-- FIXME: this test is disabled until it is fixed -->
<table_exists>definitely_no_such_table</table_exists>
</preconditions>
<settings>
<max_memory_usage>20G</max_memory_usage>
<max_memory_usage>30G</max_memory_usage>
<query_profiler_real_time_period_ns>0</query_profiler_real_time_period_ns>
<query_profiler_cpu_time_period_ns>0</query_profiler_cpu_time_period_ns>
</settings>
<create_query>CREATE TABLE t (x UInt64, d32 Decimal32(3), d64 Decimal64(4), d128 Decimal128(5)) ENGINE = Memory</create_query>
<!-- use fewer threads to save memory -->
<fill_query>INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(200000000) SETTINGS max_threads = 8</fill_query>
<fill_query>INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(500000000) SETTINGS max_threads = 8</fill_query>
<drop_query>DROP TABLE IF EXISTS t</drop_query>
<query>SELECT min(d32), max(d32), argMin(x, d32), argMax(x, d32) FROM t</query>
@ -21,23 +18,23 @@
<query>SELECT avg(d64), sum(d64), sumWithOverflow(d64) FROM t</query>
<query>SELECT avg(d128), sum(d128), sumWithOverflow(d128) FROM t</query>
<query>SELECT uniq(d32), uniqCombined(d32), uniqExact(d32), uniqHLL12(d32) FROM t LIMIT 100000</query>
<query>SELECT uniq(d64), uniqCombined(d64), uniqExact(d64), uniqHLL12(d64) FROM t LIMIT 100000</query>
<query>SELECT uniq(d128), uniqCombined(d128), uniqExact(d128), uniqHLL12(d128) FROM t LIMIT 100000</query>
<query>SELECT uniq(d32), uniqCombined(d32), uniqExact(d32), uniqHLL12(d32) FROM (SELECT * FROM t LIMIT 10000000)</query>
<query>SELECT uniq(d64), uniqCombined(d64), uniqExact(d64), uniqHLL12(d64) FROM (SELECT * FROM t LIMIT 10000000)</query>
<query>SELECT uniq(d128), uniqCombined(d128), uniqExact(d128), uniqHLL12(d128) FROM (SELECT * FROM t LIMIT 1000000)</query>
<query>SELECT median(d32), medianExact(d32), medianExactWeighted(d32, 2) FROM t LIMIT 100000</query>
<query>SELECT median(d64), medianExact(d64), medianExactWeighted(d64, 2) FROM t LIMIT 100000</query>
<query>SELECT median(d128), medianExact(d128), medianExactWeighted(d128, 2) FROM t LIMIT 100000</query>
<query>SELECT median(d32), medianExact(d32), medianExactWeighted(d32, 2) FROM (SELECT * FROM t LIMIT 10000000)</query>
<query>SELECT median(d64), medianExact(d64), medianExactWeighted(d64, 2) FROM (SELECT * FROM t LIMIT 1000000)</query>
<query>SELECT median(d128), medianExact(d128), medianExactWeighted(d128, 2) FROM (SELECT * FROM t LIMIT 1000000)</query>
<query>SELECT quantile(d32), quantileExact(d32), quantileExactWeighted(d32, 2) FROM t LIMIT 100000</query>
<query>SELECT quantile(d64), quantileExact(d64), quantileExactWeighted(d64, 2) FROM t LIMIT 100000</query>
<query>SELECT quantile(d128), quantileExact(d128), quantileExactWeighted(d128, 2) FROM t LIMIT 100000</query>
<query>SELECT quantile(d32), quantileExact(d32), quantileExactWeighted(d32, 2) FROM (SELECT * FROM t LIMIT 10000000)</query>
<query>SELECT quantile(d64), quantileExact(d64), quantileExactWeighted(d64, 2) FROM (SELECT * FROM t LIMIT 1000000)</query>
<query>SELECT quantile(d128), quantileExact(d128), quantileExactWeighted(d128, 2) FROM (SELECT * FROM t LIMIT 1000000)</query>
<query>SELECT quantilesExact(0.1, 0.9)(d32), quantilesExactWeighted(0.1, 0.9)(d32, 2) FROM t LIMIT 100000</query>
<query>SELECT quantilesExact(0.1, 0.9)(d64), quantilesExactWeighted(0.1, 0.9)(d64, 2) FROM t LIMIT 100000</query>
<query>SELECT quantilesExact(0.1, 0.9)(d128), quantilesExactWeighted(0.1, 0.9)(d128, 2) FROM t LIMIT 100000</query>
<query>SELECT quantilesExact(0.1, 0.9)(d32), quantilesExactWeighted(0.1, 0.9)(d32, 2) FROM (SELECT * FROM t LIMIT 10000000)</query>
<query>SELECT quantilesExact(0.1, 0.9)(d64), quantilesExactWeighted(0.1, 0.9)(d64, 2) FROM (SELECT * FROM t LIMIT 1000000)</query>
<query>SELECT quantilesExact(0.1, 0.9)(d128), quantilesExactWeighted(0.1, 0.9)(d128, 2) FROM (SELECT * FROM t LIMIT 1000000)</query>
<query>SELECT varPop(d32), varSamp(d32), stddevPop(d32) FROM t</query>
<query>SELECT varPop(d64), varSamp(d64), stddevPop(d64) FROM t</query>
<query>SELECT varPop(d128), varSamp(d128), stddevPop(d128) FROM t</query>
<query>SELECT varPop(d64), varSamp(d64), stddevPop(d64) FROM (SELECT * FROM t LIMIT 1000000)</query>
<query>SELECT varPop(d128), varSamp(d128), stddevPop(d128) FROM (SELECT * FROM t LIMIT 1000000)</query>
</test>


@ -62,7 +62,7 @@ SELECT count(), sum(d) FROM dst;
SELECT 'OPTIMIZE';
SELECT count(), sum(d), uniqExact(_part) FROM dst;
SYSTEM START MERGES;
SYSTEM START MERGES dst;
SET optimize_throw_if_noop=1;
OPTIMIZE TABLE dst;
SELECT count(), sum(d), uniqExact(_part) FROM dst;


@ -25,9 +25,15 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE fixed_string_test_table (val FixedStr
${CLICKHOUSE_CLIENT} --query="CREATE TABLE signed_integer_test_table (val Int32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE unsigned_integer_test_table (val UInt32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE enum_test_table (val Enum16('hello' = 1, 'world' = 2, 'yandex' = 256, 'clickhouse' = 257)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE date_test_table (val Date) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES;"
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES string_test_table;"
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES fixed_string_test_table;"
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES signed_integer_test_table;"
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES unsigned_integer_test_table;"
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES enum_test_table;"
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES date_test_table;"
${CLICKHOUSE_CLIENT} --query="INSERT INTO string_test_table VALUES ('0'), ('2'), ('2');"
${CLICKHOUSE_CLIENT} --query="INSERT INTO fixed_string_test_table VALUES ('0'), ('2'), ('2');"
@ -80,5 +86,3 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS signed_integer_test_table;"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS unsigned_integer_test_table;"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS enum_test_table;"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS date_test_table;"
${CLICKHOUSE_CLIENT} --query="SYSTEM START MERGES;"


@ -1,13 +1,13 @@
DROP TABLE IF EXISTS sites;
CREATE TABLE sites (Domain UInt8, `Users.UserID` Array(UInt64), `Users.Dates` Array(Array(Date))) ENGINE = MergeTree ORDER BY Domain SETTINGS vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0;
SYSTEM STOP MERGES;
SYSTEM STOP MERGES sites;
INSERT INTO sites VALUES (1,[1],[[]]);
INSERT INTO sites VALUES (2,[1],[['2018-06-22']]);
SELECT count(), countArray(Users.Dates), countArrayArray(Users.Dates) FROM sites;
SYSTEM START MERGES;
SYSTEM START MERGES sites;
OPTIMIZE TABLE sites FINAL;
SELECT count(), countArray(Users.Dates), countArrayArray(Users.Dates) FROM sites;


@ -2,7 +2,6 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
. $CURDIR/mergetree_mutations.lib
$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;"
@ -35,29 +34,25 @@ $CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2;"
$CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 FORMAT JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT -n --query="
ALTER TABLE minmax_idx ADD INDEX idx (i64, u64 * i64) TYPE minmax GRANULARITY 1;"
ALTER TABLE minmax_idx ADD INDEX idx (i64, u64 * i64) TYPE minmax GRANULARITY 1 SETTINGS mutations_sync = 2;"
$CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx MATERIALIZE INDEX idx IN PARTITION 1;"
wait_for_mutation "minmax_idx" "mutation_3.txt" "$CLICKHOUSE_DATABASE"
$CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx MATERIALIZE INDEX idx IN PARTITION 1 SETTINGS mutations_sync = 2;"
$CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2;"
$CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 FORMAT JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx MATERIALIZE INDEX idx IN PARTITION 2;"
wait_for_mutation "minmax_idx" "mutation_4.txt" "$CLICKHOUSE_DATABASE"
$CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx MATERIALIZE INDEX idx IN PARTITION 2 SETTINGS mutations_sync = 2"
$CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2;"
$CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 FORMAT JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx CLEAR INDEX idx IN PARTITION 1;"
$CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx CLEAR INDEX idx IN PARTITION 2;"
sleep 0.5
$CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx CLEAR INDEX idx IN PARTITION 1 SETTINGS mutations_sync = 2"
$CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx CLEAR INDEX idx IN PARTITION 2 SETTINGS mutations_sync = 2"
$CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2;"
$CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 FORMAT JSON" | grep "rows_read"
$CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx MATERIALIZE INDEX idx;"
wait_for_mutation "minmax_idx" "mutation_5.txt" "$CLICKHOUSE_DATABASE"
$CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx MATERIALIZE INDEX idx SETTINGS mutations_sync = 2"
$CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2;"
$CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 FORMAT JSON" | grep "rows_read"


@ -41,10 +41,8 @@ SELECT COUNT(*) FROM mt_with_pk WHERE x > toDateTime('2018-10-01 23:57:57');
SELECT sum(marks) FROM system.parts WHERE table = 'mt_with_pk' AND database = currentDatabase() AND active=1;
SELECT '===test mutation===';
ALTER TABLE mt_with_pk UPDATE w = 0 WHERE 1;
ALTER TABLE mt_with_pk UPDATE y = ['q', 'q', 'q'] WHERE 1;
SELECT sleep(1) FORMAT Null;
ALTER TABLE mt_with_pk UPDATE w = 0 WHERE 1 SETTINGS mutations_sync = 2;
ALTER TABLE mt_with_pk UPDATE y = ['q', 'q', 'q'] WHERE 1 SETTINGS mutations_sync = 2;
SELECT sum(w) FROM mt_with_pk;
SELECT distinct(y) FROM mt_with_pk;
@ -97,9 +95,7 @@ CREATE TABLE alter_update_00806 (d Date, e Enum8('foo'=1, 'bar'=2)) Engine = Mer
INSERT INTO alter_update_00806 (d, e) VALUES ('2018-01-01', 'foo');
INSERT INTO alter_update_00806 (d, e) VALUES ('2018-01-02', 'bar');
ALTER TABLE alter_update_00806 UPDATE e = CAST('foo', 'Enum8(\'foo\' = 1, \'bar\' = 2)') WHERE d='2018-01-02';
SELECT sleep(1) FORMAT Null;
ALTER TABLE alter_update_00806 UPDATE e = CAST('foo', 'Enum8(\'foo\' = 1, \'bar\' = 2)') WHERE d='2018-01-02' SETTINGS mutations_sync = 2;
SELECT e FROM alter_update_00806 ORDER BY d;


@ -30,6 +30,7 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo
client1.send('CREATE LIVE VIEW test.lv WITH TIMEOUT AS SELECT sum(a) FROM test.mt')
client1.expect(prompt)
client1.send('WATCH test.lv')
client1.expect('_version')
client1.expect(r'0.*1' + end_of_block)
client2.send('INSERT INTO test.mt VALUES (1),(2),(3)')
client1.expect(r'6.*2' + end_of_block)


@ -30,6 +30,7 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo
client1.send('CREATE LIVE VIEW test.lv WITH TIMEOUT 1 AS SELECT sum(a) FROM test.mt')
client1.expect(prompt)
client1.send('WATCH test.lv')
client1.expect('_version')
client1.expect(r'0.*1' + end_of_block)
client2.send('INSERT INTO test.mt VALUES (1),(2),(3)')
client2.expect(prompt)
