mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-10 01:25:21 +00:00

Merge branch 'master' into move_docker_images_to_separate_repo

This commit is contained in commit bdd26f2a19
@@ -203,3 +203,5 @@ CheckOptions:
     value: CamelCase
   - key: readability-identifier-naming.UsingCase
     value: CamelCase
+  - key: modernize-loop-convert.UseCxx20ReverseRanges
+    value: false
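For context, a hedged illustration of what the newly disabled option controls (the vector and values below are invented for the example): with `UseCxx20ReverseRanges` enabled, the `modernize-loop-convert` check rewrites reverse-iterator loops into C++20 `std::views::reverse` range loops, which only compiles where the standard library ships `<ranges>`. Setting the value to `false` keeps clang-tidy from suggesting the second form.

```cpp
#include <iostream>
#include <ranges>
#include <vector>

int main()
{
    std::vector<int> values{1, 2, 3};

    // The existing style: a classic reverse-iterator loop.
    for (auto it = values.rbegin(); it != values.rend(); ++it)
        std::cout << *it << '\n';

    // What the check would suggest with UseCxx20ReverseRanges=true.
    for (int value : values | std::views::reverse)
        std::cout << value << '\n';

    return 0;
}
```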
@@ -7,10 +7,22 @@
 #endif
 
 #include <mysqlxx/Pool.h>
 
 #include <common/sleep.h>
 
 #include <Poco/Util/LayeredConfiguration.h>
 
+#include <ctime>
+
+namespace
+{
+
+inline uint64_t clock_gettime_ns(clockid_t clock_type = CLOCK_MONOTONIC)
+{
+    struct timespec ts;
+    clock_gettime(clock_type, &ts);
+    return uint64_t(ts.tv_sec * 1000000000LL + ts.tv_nsec);
+}
+
+}
 
 
 namespace mysqlxx
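A minimal sketch of the deadline arithmetic this helper enables (the helper is copied from the hunk above; the one-second timeout is an arbitrary value for illustration):

```cpp
#include <cstdint>
#include <ctime>

inline uint64_t clock_gettime_ns(clockid_t clock_type = CLOCK_MONOTONIC)
{
    struct timespec ts;
    clock_gettime(clock_type, &ts);
    return uint64_t(ts.tv_sec * 1000000000LL + ts.tv_nsec);
}

int main()
{
    const uint64_t wait_timeout = 1; /// seconds; arbitrary example value
    const uint64_t deadline = clock_gettime_ns() + wait_timeout * 1'000'000'000;

    /// A caller retries its operation until the monotonic clock passes the deadline.
    while (clock_gettime_ns() < deadline)
    {
        /// ... retry and/or sleep here ...
    }
    return 0;
}
```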
@@ -124,10 +136,15 @@ Pool::~Pool()
 }
 
 
-Pool::Entry Pool::get()
+Pool::Entry Pool::get(uint64_t wait_timeout)
 {
     std::unique_lock<std::mutex> lock(mutex);
 
+    uint64_t deadline = 0;
+    /// UINT64_MAX -- wait indefinitely
+    if (wait_timeout && wait_timeout != UINT64_MAX)
+        deadline = clock_gettime_ns() + wait_timeout * 1'000'000'000;
+
     initialize();
     for (;;)
     {

@@ -153,6 +170,12 @@ Pool::Entry Pool::get()
                 logger.trace("(%s): Unable to create a new connection: Max number of connections has been reached.", getDescription());
         }
 
+        if (!wait_timeout)
+            throw Poco::Exception("mysqlxx::Pool is full (wait is disabled, see connection_wait_timeout setting)");
+
+        if (deadline && clock_gettime_ns() >= deadline)
+            throw Poco::Exception("mysqlxx::Pool is full (connection_wait_timeout is exceeded)");
+
         lock.unlock();
         logger.trace("(%s): Sleeping for %d seconds.", getDescription(), MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
         sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
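Taken together, the new parameter gives `Pool::get` three behaviours. A usage sketch, not code from this commit; `pool` is assumed to be an already configured `mysqlxx::Pool`:

```cpp
#include <mysqlxx/Pool.h>
#include <cstdint>

void sketch(mysqlxx::Pool & pool)
{
    /// 0 -- a full pool throws immediately ("wait is disabled").
    mysqlxx::Pool::Entry fail_fast = pool.get(/* wait_timeout = */ 0);

    /// N seconds -- retry until the deadline, then throw "connection_wait_timeout is exceeded".
    mysqlxx::Pool::Entry bounded = pool.get(/* wait_timeout = */ 5);

    /// UINT64_MAX -- wait indefinitely, i.e. the behaviour of the old get().
    mysqlxx::Pool::Entry unbounded = pool.get(/* wait_timeout = */ UINT64_MAX);
}
```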
@@ -189,7 +189,7 @@ public:
     ~Pool();
 
     /// Allocates connection.
-    Entry get();
+    Entry get(uint64_t wait_timeout);
 
     /// Allocates connection.
     /// If database is not accessible, returns empty Entry object.

@@ -21,8 +21,9 @@ PoolWithFailover::PoolWithFailover(
     const unsigned max_connections_,
     const size_t max_tries_)
     : max_tries(max_tries_)
+    , shareable(config_.getBool(config_name_ + ".share_connection", false))
+    , wait_timeout(UINT64_MAX)
 {
-    shareable = config_.getBool(config_name_ + ".share_connection", false);
     if (config_.has(config_name_ + ".replica"))
     {
         Poco::Util::AbstractConfiguration::Keys replica_keys;

@@ -80,9 +81,11 @@ PoolWithFailover::PoolWithFailover(
     const std::string & password,
     unsigned default_connections_,
     unsigned max_connections_,
-    size_t max_tries_)
+    size_t max_tries_,
+    uint64_t wait_timeout_)
     : max_tries(max_tries_)
     , shareable(false)
+    , wait_timeout(wait_timeout_)
 {
     /// Replicas have the same priority, but traversed replicas are moved to the end of the queue.
     for (const auto & [host, port] : addresses)

@@ -101,6 +104,7 @@ PoolWithFailover::PoolWithFailover(
 PoolWithFailover::PoolWithFailover(const PoolWithFailover & other)
     : max_tries{other.max_tries}
     , shareable{other.shareable}
+    , wait_timeout(other.wait_timeout)
 {
     if (shareable)
     {
@@ -140,7 +144,7 @@ PoolWithFailover::Entry PoolWithFailover::get()
 
         try
         {
-            Entry entry = shareable ? pool->get() : pool->tryGet();
+            Entry entry = shareable ? pool->get(wait_timeout) : pool->tryGet();
 
             if (!entry.isNull())
             {
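A sketch contrasting the two branches above: `get(wait_timeout)` either returns a usable entry or throws, while `tryGet()` can return an empty entry that must be checked with `isNull()`, which is exactly what the surrounding code does. `pool` is again assumed to be a configured `mysqlxx::Pool`:

```cpp
#include <mysqlxx/Pool.h>

void sketch(mysqlxx::Pool & pool, bool shareable)
{
    if (shareable)
    {
        /// Waits (bounded by wait_timeout) and throws if no connection becomes free.
        mysqlxx::Pool::Entry entry = pool.get(/* wait_timeout = */ 5);
    }
    else
    {
        /// May return an empty entry instead of throwing; the caller must check it.
        mysqlxx::Pool::Entry entry = pool.tryGet();
        if (entry.isNull())
        {
            /// No free connection and no new one could be created -- fall back or try the next replica.
        }
    }
}
```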
@@ -172,7 +176,7 @@ PoolWithFailover::Entry PoolWithFailover::get()
     if (full_pool)
     {
         app.logger().error("All connections failed, trying to wait on a full pool " + (*full_pool)->getDescription());
-        return (*full_pool)->get();
+        return (*full_pool)->get(wait_timeout);
     }
 
     std::stringstream message;

@@ -80,6 +80,8 @@ namespace mysqlxx
         std::mutex mutex;
         /// Can the Pool be shared
         bool shareable;
+        /// Timeout for waiting free connection.
+        uint64_t wait_timeout = 0;
 
     public:
         using Entry = Pool::Entry;

@@ -96,6 +98,7 @@ namespace mysqlxx
          * default_connections   Number of connection in pool to each replica at start.
          * max_connections       Maximum number of connections in pool to each replica.
          * max_tries_            Max number of connection tries.
+         * wait_timeout_         Timeout for waiting free connection.
          */
         PoolWithFailover(
             const std::string & config_name_,

@@ -117,7 +120,8 @@ namespace mysqlxx
             const std::string & password,
             unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
             unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
-            size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
+            size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES,
+            uint64_t wait_timeout_ = UINT64_MAX);
 
         PoolWithFailover(const PoolWithFailover & other);
 
@@ -6,7 +6,7 @@ if (ENABLE_CLANG_TIDY)
         message(FATAL_ERROR "clang-tidy requires CMake version at least 3.6.")
     endif()
 
-    find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8")
+    find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-12" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8")
 
     if (CLANG_TIDY_PATH)
         message(STATUS

@@ -10,7 +10,7 @@ set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it
 set (CMAKE_AR "/usr/bin/ar" CACHE FILEPATH "" FORCE)
 set (CMAKE_RANLIB "/usr/bin/ranlib" CACHE FILEPATH "" FORCE)
 
-set (LINKER_NAME "lld" CACHE STRING "" FORCE)
+set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)
 
 set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
 set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
@@ -13,7 +13,7 @@ set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_D
 set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
 set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
 
-set (LINKER_NAME "lld" CACHE STRING "" FORCE)
+set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)
 
 set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
 set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")

@@ -79,8 +79,9 @@ endif ()
 
 if (LINKER_NAME)
     if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0))
-        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LINKER_NAME}")
-        set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LINKER_NAME}")
+        find_program (LLD_PATH NAMES ${LINKER_NAME})
+        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LLD_PATH}")
+        set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LLD_PATH}")
     else ()
         set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
         set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
@@ -1,6 +1,6 @@
 FROM ubuntu:20.04
 
-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12
 
 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
 

@@ -4,7 +4,7 @@ set -e
 #ccache -s # uncomment to display CCache statistics
 mkdir -p /server/build_docker
 cd /server/build_docker
-cmake -G Ninja /server "-DCMAKE_C_COMPILER=$(command -v clang-11)" "-DCMAKE_CXX_COMPILER=$(command -v clang++-11)"
+cmake -G Ninja /server "-DCMAKE_C_COMPILER=$(command -v clang-12)" "-DCMAKE_CXX_COMPILER=$(command -v clang++-12)"
 
 # Set the number of build jobs to the half of number of virtual CPU cores (rounded up).
 # By default, ninja use all virtual CPU cores, that leads to very high memory consumption without much improvement in build time.
@@ -1,7 +1,7 @@
 # docker build -t clickhouse/binary-builder .
 FROM ubuntu:20.04
 
-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12
 
 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
 

@@ -39,8 +39,6 @@ RUN apt-get update \
         bash \
         build-essential \
         ccache \
-        clang-11 \
-        clang-tidy-11 \
        cmake \
        curl \
        g++-10 \
@@ -50,9 +48,13 @@ RUN apt-get update \
        gperf \
        libicu-dev \
        libreadline-dev \
-        lld-11 \
-        llvm-11 \
-        llvm-11-dev \
+        clang-12 \
+        clang-tidy-12 \
+        lld-12 \
+        llvm-12 \
+        llvm-12-dev \
+        libicu-dev \
+        libreadline-dev \
        moreutils \
        ninja-build \
        pigz \
@@ -4,7 +4,6 @@ set -x -e
 
 mkdir -p build/cmake/toolchain/darwin-x86_64
 tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
-
 ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64
 
 mkdir -p build/cmake/toolchain/linux-aarch64

@@ -23,6 +22,7 @@ cd build/build_docker
 rm -f CMakeCache.txt
 # Read cmake arguments into array (possibly empty)
 read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
+env
 cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..
 
 ccache --show-config ||:
@@ -1,7 +1,7 @@
 # docker build -t clickhouse/deb-builder .
 FROM ubuntu:20.04
 
-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12
 
 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
 

@@ -37,17 +37,17 @@ RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
 RUN apt-get update \
     && apt-get install \
        alien \
-        clang-11 \
-        clang-tidy-11 \
+        clang-12 \
+        clang-tidy-12 \
        cmake \
        debhelper \
        devscripts \
        gdb \
        git \
        gperf \
-        lld-11 \
-        llvm-11 \
-        llvm-11-dev \
+        lld-12 \
+        llvm-12 \
+        llvm-12-dev \
        moreutils \
        ninja-build \
        perl \
@@ -75,7 +75,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
     # Explicitly use LLD with Clang by default.
     # Don't force linker for cross-compilation.
     if is_clang and not is_cross_compile:
-        cmake_flags.append("-DLINKER_NAME=lld")
+        cmake_flags.append("-DLINKER_NAME=ld.lld")
 
     if is_cross_darwin:
         cc = compiler[:-len(DARWIN_SUFFIX)]

@@ -204,7 +204,8 @@ if __name__ == "__main__":
     parser.add_argument("--output-dir", required=True)
     parser.add_argument("--build-type", choices=("debug", ""), default="")
     parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64",
-                                                "clang-11-freebsd", "gcc-10"), default="clang-11")
+                                                "clang-12", "clang-12-darwin", "clang-12-darwin-aarch64", "clang-12-aarch64",
+                                                "clang-11-freebsd", "clang-12-freebsd", "gcc-10"), default="clang-12")
     parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="")
     parser.add_argument("--unbundled", action="store_true")
     parser.add_argument("--split-binary", action="store_true")
@@ -1,7 +1,7 @@
 # docker build -t clickhouse/test-base .
 FROM ubuntu:20.04
 
-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12
 
 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
 

@@ -11,7 +11,7 @@ RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-9 libl
 # https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b
 RUN git clone https://github.com/ClickHouse-Extras/woboq_codebrowser
 
-RUN cd woboq_codebrowser && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-9 -DCMAKE_C_COMPILER=clang-9 && make -j
+RUN cd woboq_codebrowser && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-12 -DCMAKE_C_COMPILER=clang-12 && make -j
 
 ENV CODEGEN=/woboq_codebrowser/generator/codebrowser_generator
 ENV CODEINDEX=/woboq_codebrowser/indexgenerator/codebrowser_indexgenerator
@@ -24,7 +24,7 @@ ENV SHA=nosha
 ENV DATA="data"
 
 CMD mkdir -p $BUILD_DIRECTORY && cd $BUILD_DIRECTORY && \
-    cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-11 -DCMAKE_C_COMPILER=/usr/bin/clang-11 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \
+    cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-12 -DCMAKE_C_COMPILER=/usr/bin/clang-12 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \
     mkdir -p $HTML_RESULT_DIRECTORY && \
     $CODEGEN -b $BUILD_DIRECTORY -a -o $HTML_RESULT_DIRECTORY -p ClickHouse:$SOURCE_DIRECTORY:$SHA -d $DATA | ts '%Y-%m-%d %H:%M:%S' && \
     cp -r $STATIC_DATA $HTML_RESULT_DIRECTORY/ &&\

@@ -1,7 +1,7 @@
 # docker build -t clickhouse/fasttest .
 FROM ubuntu:20.04
 
-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12
 
 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
 
@@ -9,7 +9,7 @@ trap 'kill $(jobs -pr) ||:' EXIT
 stage=${stage:-}
 
 # Compiler version, normally set by Dockerfile
-export LLVM_VERSION=${LLVM_VERSION:-11}
+export LLVM_VERSION=${LLVM_VERSION:-12}
 
 # A variable to pass additional flags to CMake.
 # Here we explicitly default it to nothing so that bash doesn't complain about

@@ -401,6 +401,9 @@ function run_tests
 
         # depends on Go
         02013_zlib_read_after_eof
+
+        # Accesses CH via mysql table function (which is unavailable)
+        01747_system_session_log_long
     )
 
     time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \
@@ -12,7 +12,7 @@ stage=${stage:-}
 script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 echo "$script_dir"
 repo_dir=ch
-BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-11_debug_none_bundled_unsplitted_disable_False_binary"}
+BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-12_debug_none_bundled_unsplitted_disable_False_binary"}
 
 function clone
 {

@@ -2,7 +2,7 @@
 set -euo pipefail
 
 
-CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"}
+CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-12_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"}
 CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}
 
 
@@ -28,7 +28,7 @@ RUN apt-get update --yes \
 ENV PKG_VERSION="pvs-studio-latest"
 
 RUN set -x \
-    && export PUBKEY_HASHSUM="686e5eb8b3c543a5c54442c39ec876b6c2d912fe8a729099e600017ae53c877dda3368fe38ed7a66024fe26df6b5892a" \
+    && export PUBKEY_HASHSUM="ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516" \
     && wget -nv https://files.viva64.com/etc/pubkey.txt -O /tmp/pubkey.txt \
     && echo "${PUBKEY_HASHSUM} /tmp/pubkey.txt" | sha384sum -c \
     && apt-key add /tmp/pubkey.txt \

@@ -38,7 +38,7 @@ RUN set -x \
     && dpkg -i "${PKG_VERSION}.deb"
 
 CMD echo "Running PVS version $PKG_VERSION" && cd /repo_folder && pvs-studio-analyzer credentials $LICENCE_NAME $LICENCE_KEY -o ./licence.lic \
-    && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF \
+    && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF -DCMAKE_C_COMPILER=clang-12 -DCMAKE_CXX_COMPILER=clang\+\+-12 \
     && ninja re2_st clickhouse_grpc_protos \
     && pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j 4 -l ./licence.lic; \
     cp /repo_folder/pvs-studio.log /test_output; \
@@ -23,7 +23,7 @@ $ sudo apt-get install git cmake python ninja-build
 
 Or cmake3 instead of cmake on older systems.
 
-### Install clang-11 (recommended) {#install-clang-11}
+### Install clang-12 (recommended) {#install-clang-12}
 
 On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))
 

@@ -33,11 +33,11 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
 
 For other Linux distribution - check the availability of the [prebuild packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html).
 
-#### Use clang-11 for Builds
+#### Use clang-12 for Builds
 
 ``` bash
-$ export CC=clang-11
-$ export CXX=clang++-11
+$ export CC=clang-12
+$ export CXX=clang++-12
 ```
 
 Gcc can also be used though it is discouraged.
@@ -31,6 +31,10 @@ ENGINE = MaterializedPostgreSQL('host:port', ['database' | database], 'user', 'p
 
 - [materialized_postgresql_allow_automatic_update](../../operations/settings/settings.md#materialized-postgresql-allow-automatic-update)
 
+- [materialized_postgresql_replication_slot](../../operations/settings/settings.md#materialized-postgresql-replication-slot)
+
+- [materialized_postgresql_snapshot](../../operations/settings/settings.md#materialized-postgresql-snapshot)
+
 ``` sql
 CREATE DATABASE database1
 ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password')

@@ -73,7 +77,7 @@ WHERE oid = 'postgres_table'::regclass;
 
 !!! warning "Warning"
     Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used.
 
 ## Example of Use {#example-of-use}
 
 ``` sql
@@ -82,3 +86,11 @@ ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres
 
 SELECT * FROM postgresql_db.postgres_table;
 ```
+
+## Notes {#notes}
+
+- Failover of the logical replication slot.
+
+Logical replication slots which exist on the primary are not available on standby replicas.
+So if there is a failover, the new primary (the old physical standby) won't be aware of any slots which existed on the old primary. This leads to broken replication from PostgreSQL.
+A solution is to manage replication slots yourself and define a permanent replication slot (some information can be found [here](https://patroni.readthedocs.io/en/latest/SETTINGS.html)). You'll need to pass the slot name via the `materialized_postgresql_replication_slot` setting, and it has to be exported with the `EXPORT SNAPSHOT` option. The snapshot identifier needs to be passed via the `materialized_postgresql_snapshot` setting.
@@ -19,6 +19,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 SETTINGS
     [connection_pool_size=16, ]
     [connection_max_tries=3, ]
+    [connection_wait_timeout=5, ] /* 0 -- do not wait */
     [connection_auto_close=true ]
 ;
 ```
@@ -3436,6 +3436,14 @@ Possible values:
 
 Default value: `0`.
 
+## materialized_postgresql_replication_slot {#materialized-postgresql-replication-slot}
+
+Allows to have user-managed replication slots. Must be used together with `materialized_postgresql_snapshot`.
+
+## materialized_postgresql_snapshot {#materialized-postgresql-snapshot}
+
+A text string identifying a snapshot, from which the initial dump of tables will be performed. Must be used together with `materialized_postgresql_replication_slot`.
+
 ## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
 
 Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md#projections) optimization when processing `SELECT` queries.
@@ -3449,7 +3457,7 @@ Default value: `0`.
 
 ## force_optimize_projection {#force-optimize-projection}
 
 Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
 
 Possible values:
 

@@ -1,44 +0,0 @@
-# system.views {#system-views}
-
-Contains the dependencies of all views and the type to which the view belongs. The metadata of the view comes from the [system.tables](tables.md).
-
-Columns:
-
-- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the view is in.
-
-- `name` ([String](../../sql-reference/data-types/string.md)) — Name of the view.
-
-- `main_dependency_database` ([String](../../sql-reference/data-types/string.md)) — The name of the database on which the view depends.
-
-- `main_dependency_table` ([String](../../sql-reference/data-types/string.md)) — The name of the table on which the view depends.
-
-- `view_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of the view. Values:
-    - `'Default' = 1` — [Default views](../../sql-reference/statements/create/view.md#normal). Should not appear in this log.
-    - `'Materialized' = 2` — [Materialized views](../../sql-reference/statements/create/view.md#materialized).
-    - `'Live' = 3` — [Live views](../../sql-reference/statements/create/view.md#live-view).
-
-**Example**
-
-```sql
-SELECT * FROM system.views LIMIT 2 FORMAT Vertical;
-```
-
-```text
-Row 1:
-──────
-database:                 default
-name:                     live_view
-main_dependency_database: default
-main_dependency_table:    view_source_tb
-view_type:                Live
-
-Row 2:
-──────
-database:                 default
-name:                     materialized_view
-main_dependency_database: default
-main_dependency_table:    view_source_tb
-view_type:                Materialized
-```
-
-[Original article](https://clickhouse.tech/docs/en/operations/system-tables/views) <!--hide-->
@@ -393,6 +393,7 @@ void LocalServer::processQueries()
 
     auto context = session.makeQueryContext();
+    context->makeSessionContext(); /// initial_create_query requires a session context to be set.
     context->setCurrentQueryId("");
 
     applyCmdSettings(context);
 
     /// Use the same query_id (and thread group) for all queries
@@ -45,16 +45,14 @@
 #include <IO/UseSSL.h>
 #include <Interpreters/AsynchronousMetrics.h>
 #include <Interpreters/DDLWorker.h>
+#include <Interpreters/DNSCacheUpdater.h>
+#include <Interpreters/DatabaseCatalog.h>
 #include <Interpreters/ExternalDictionariesLoader.h>
 #include <Interpreters/ExternalModelsLoader.h>
 #include <Interpreters/ProcessList.h>
 #include <Interpreters/loadMetadata.h>
-#include <Interpreters/DatabaseCatalog.h>
-#include <Interpreters/DNSCacheUpdater.h>
-#include <Interpreters/ExternalLoaderXMLConfigRepository.h>
-#include <Interpreters/InterserverCredentials.h>
-#include <Interpreters/UserDefinedObjectsLoader.h>
 #include <Interpreters/JIT/CompiledExpressionCache.h>
+#include <Interpreters/UserDefinedObjectsLoader.h>
 #include <Access/AccessControlManager.h>
 #include <Storages/StorageReplicatedMergeTree.h>
 #include <Storages/System/attachSystemTables.h>
@@ -965,6 +965,14 @@
         <flush_interval_milliseconds>1000</flush_interval_milliseconds>
     </crash_log>
 
+    <!-- Session log. Stores user log in (successful or not) and log out events. -->
+    <session_log>
+        <database>system</database>
+        <table>session_log</table>
+
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </session_log>
+
     <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
          See https://clickhouse.yandex/docs/en/dicts/internal_dicts/

programs/server/users.d/session_log_test.xml (new symbolic link, 1 line)
@@ -0,0 +1 @@
+../../../tests/config/users.d/session_log_test.xml
@@ -143,10 +143,11 @@ public:
 
     std::vector<QuotaUsage> getAllQuotasUsage() const;
 
-    std::shared_ptr<const EnabledSettings> getEnabledSettings(const UUID & user_id,
-        const SettingsProfileElements & settings_from_user,
-        const boost::container::flat_set<UUID> & enabled_roles,
-        const SettingsProfileElements & settings_from_enabled_roles) const;
+    std::shared_ptr<const EnabledSettings> getEnabledSettings(
+        const UUID & user_id,
+        const SettingsProfileElements & settings_from_user,
+        const boost::container::flat_set<UUID> & enabled_roles,
+        const SettingsProfileElements & settings_from_enabled_roles) const;
 
     std::shared_ptr<const SettingsProfilesInfo> getSettingsProfileInfo(const UUID & profile_id);
 
@@ -36,6 +36,16 @@ struct SettingsProfilesInfo
     friend bool operator ==(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs);
     friend bool operator !=(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs) { return !(lhs == rhs); }
 
+    Strings getProfileNames() const
+    {
+        Strings result;
+        result.reserve(profiles.size());
+        for (const auto & profile_id : profiles)
+            result.push_back(names_of_profiles.at(profile_id));
+
+        return result;
+    }
+
 private:
     const AccessControlManager & manager;
 };
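A hedged sketch of how such a helper is typically consumed, e.g. to render the active profile names as one string for a log line. The include path of `SettingsProfilesInfo` and a populated `info` object are assumed:

```cpp
#include <string>

/// "SettingsProfilesInfo" is the struct from the hunk above; its header path is assumed.
std::string formatProfileNames(const SettingsProfilesInfo & info)
{
    std::string result;
    for (const auto & name : info.getProfileNames())
    {
        if (!result.empty())
            result += ", ";
        result += name;
    }
    return result;
}
```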
@@ -27,7 +27,12 @@ private:
     size_t stack_size;
     size_t page_size = 0;
 public:
-    static constexpr size_t default_stack_size = 128 * 1024; /// 64KB was not enough for tests
+    /// NOTE: If you see random segfaults in CI and stack starts from boost::context::...fiber...
+    /// probably it worth to try to increase stack size for coroutines.
+    ///
+    /// Current value is just enough for all tests in our CI. It's not selected in some special
+    /// way. We will have 36 pages with 4KB page size.
+    static constexpr size_t default_stack_size = 144 * 1024; /// 64KB was not enough for tests
 
     explicit FiberStack(size_t stack_size_ = default_stack_size) : stack_size(stack_size_)
     {
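The arithmetic behind the new comment, as a compile-time check: 144 KiB divided into 4 KiB pages is exactly 36 pages (one of which is turned into a guard page further down). The 4096-byte page size is an assumption here; the real code asks the system for it.

```cpp
#include <cstddef>

constexpr size_t default_stack_size = 144 * 1024;
constexpr size_t assumed_page_size = 4096; /// assumption; FiberStack queries the actual page size

static_assert(default_stack_size / assumed_page_size == 36, "144 KiB is 36 pages of 4 KiB");
```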
@@ -43,6 +48,8 @@ public:
         if (MAP_FAILED == vp)
             DB::throwFromErrno(fmt::format("FiberStack: Cannot mmap {}.", ReadableSize(num_bytes)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
 
+        /// TODO: make reports on illegal guard page access more clear.
+        /// Currently we will see segfault and almost random stacktrace.
         if (-1 == ::mprotect(vp, page_size, PROT_NONE))
         {
             ::munmap(vp, num_bytes);
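For background on the guard page the new TODO refers to: the lowest page of the mmap'ed region is made inaccessible, so a coroutine stack that overflows into it faults immediately instead of silently corrupting neighbouring memory. A stripped-down sketch with error handling omitted (not the FiberStack code itself):

```cpp
#include <cstddef>
#include <sys/mman.h>

void * allocateGuardedStack(size_t num_bytes, size_t page_size)
{
    /// Reserve the whole region, then forbid access to its first (lowest) page.
    void * vp = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    ::mprotect(vp, page_size, PROT_NONE);

    /// Growing past the usable area into the protected page raises SIGSEGV --
    /// the "segfault and almost random stacktrace" mentioned in the TODO.
    return vp;
}
```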
@@ -113,7 +113,8 @@ namespace DB
 
     std::string CompressionCodecEncrypted::deriveKey(const std::string_view & master_key)
     {
-        std::string_view salt(""); // No salt: derive keys in a deterministic manner.
+        /// No salt: derive keys in a deterministic manner.
+        std::string_view salt(""); // NOLINT
         std::string_view info("Codec Encrypted('AES-128-GCM-SIV') key generation key");
         std::array<char, 32> result;
 

@@ -900,8 +900,7 @@ public:
         Messaging::MessageTransport & mt,
         const Poco::Net::SocketAddress & address)
     {
-        Authentication::Type user_auth_type = session.getAuthenticationType(user_name);
-
+        const Authentication::Type user_auth_type = session.getAuthenticationTypeOrLogInFailure(user_name);
         if (type_to_method.find(user_auth_type) != type_to_method.end())
         {
             type_to_method[user_auth_type]->authenticate(user_name, session, mt, address);
@@ -70,8 +70,8 @@ class IColumn;
     M(UInt64, idle_connection_timeout, 3600, "Close idle TCP connections after specified number of seconds.", 0) \
     M(UInt64, distributed_connections_pool_size, DBMS_DEFAULT_DISTRIBUTED_CONNECTIONS_POOL_SIZE, "Maximum number of connections with one remote server in the pool.", 0) \
     M(UInt64, connections_with_failover_max_tries, DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, "The maximum number of attempts to connect to replicas.", 0) \
-    M(UInt64, s3_min_upload_part_size, 512*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \
-    M(UInt64, s3_max_single_part_upload_size, 64*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \
+    M(UInt64, s3_min_upload_part_size, 32*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \
+    M(UInt64, s3_max_single_part_upload_size, 32*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \
     M(UInt64, s3_max_single_read_retries, 4, "The maximum number of retries during single S3 read.", 0) \
     M(UInt64, s3_max_redirects, 10, "Max number of S3 redirects hops allowed.", 0) \
     M(UInt64, s3_max_connections, 1024, "The maximum number of connections per server.", 0) \

@@ -247,7 +247,7 @@ void MaterializedMySQLSyncThread::assertMySQLAvailable()
 {
     try
     {
-        checkMySQLVariables(pool.get(), getContext()->getSettingsRef());
+        checkMySQLVariables(pool.get(/* wait_timeout= */ UINT64_MAX), getContext()->getSettingsRef());
     }
     catch (const mysqlxx::ConnectionFailed & e)
     {
@@ -729,7 +729,7 @@ void MaterializedMySQLSyncThread::onEvent(Buffers & buffers, const BinlogEventPt
     {
         /// Some behaviors(such as changing the value of "binlog_checksum") rotate the binlog file.
         /// To ensure that the synchronization continues, we need to handle these events
-        metadata.fetchMasterVariablesValue(pool.get());
+        metadata.fetchMasterVariablesValue(pool.get(/* wait_timeout= */ UINT64_MAX));
         client.setBinlogChecksum(metadata.binlog_checksum);
     }
     else if (receive_event->header.type != HEARTBEAT_EVENT)

@@ -61,10 +61,8 @@ void DatabaseMaterializedPostgreSQL::startSynchronization()
         connection_info,
         getContext(),
         is_attach,
-        settings->materialized_postgresql_max_block_size.value,
-        settings->materialized_postgresql_allow_automatic_update,
-        /* is_materialized_postgresql_database = */ true,
-        settings->materialized_postgresql_tables_list.value);
+        *settings,
+        /* is_materialized_postgresql_database = */ true);
 
     postgres::Connection connection(connection_info);
     NameSet tables_to_replicate;
@@ -41,7 +41,7 @@ public:
         return name;
     }
 
-    size_t getNumberOfArguments() const override { return 4; }
+    size_t getNumberOfArguments() const override { return 3; }
 
     bool useDefaultImplementationForConstants() const override { return true; }
 

@@ -41,7 +41,7 @@ public:
         return name;
     }
 
-    size_t getNumberOfArguments() const override { return 4; }
+    size_t getNumberOfArguments() const override { return 3; }
 
     bool useDefaultImplementationForConstants() const override { return true; }
 
@@ -32,7 +32,7 @@ WriteBufferFromFile::WriteBufferFromFile(
     mode_t mode,
     char * existing_memory,
     size_t alignment)
-    : WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, alignment), file_name(file_name_)
+    : WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, alignment, file_name_)
 {
     ProfileEvents::increment(ProfileEvents::FileOpen);
 

@@ -65,9 +65,7 @@ WriteBufferFromFile::WriteBufferFromFile(
     size_t buf_size,
     char * existing_memory,
     size_t alignment)
-    :
-    WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment),
-    file_name(original_file_name.empty() ? "(fd = " + toString(fd_) + ")" : original_file_name)
+    : WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment, original_file_name)
 {
     fd_ = -1;
 }
@@ -25,7 +25,6 @@ namespace DB
 class WriteBufferFromFile : public WriteBufferFromFileDescriptor
 {
 protected:
-    std::string file_name;
     CurrentMetrics::Increment metric_increment{CurrentMetrics::OpenFileForWrite};
 
 public:
@@ -61,7 +61,12 @@ void WriteBufferFromFileDescriptor::nextImpl()
         if ((-1 == res || 0 == res) && errno != EINTR)
         {
             ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteFailed);
-            throwFromErrnoWithPath("Cannot write to file " + getFileName(), getFileName(),
+
+            /// Don't use getFileName() here because this method can be called from destructor
+            String error_file_name = file_name;
+            if (error_file_name.empty())
+                error_file_name = "(fd = " + toString(fd) + ")";
+
+            throwFromErrnoWithPath("Cannot write to file " + error_file_name, error_file_name,
                 ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR);
         }
 
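Why the rewritten error path avoids `getFileName()`: as the new comment says, `nextImpl()` can run while the object is being destroyed, and a virtual call made during destruction dispatches to the base-class version rather than the derived one. A small self-contained illustration of that pitfall; the class names are invented for the example:

```cpp
#include <iostream>
#include <string>

struct Base
{
    virtual std::string name() const { return "(base)"; }
    virtual ~Base()
    {
        /// By the time ~Base() runs, the derived part is already destroyed,
        /// so this prints "(base)" -- not "derived".
        std::cout << name() << '\n';
    }
};

struct Derived : Base
{
    std::string name() const override { return "derived"; }
};

int main()
{
    Derived d;
    return 0;
}
```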
@@ -73,20 +78,20 @@ void WriteBufferFromFileDescriptor::nextImpl()
     ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteBytes, bytes_written);
 }
 
-/// Name or some description of file.
-std::string WriteBufferFromFileDescriptor::getFileName() const
-{
-    return "(fd = " + toString(fd) + ")";
-}
-
-
+/// NOTE: This class can be used as a very low-level building block, for example
+/// in trace collector. In such places allocations of memory can be dangerous,
+/// so don't allocate anything in this constructor.
 WriteBufferFromFileDescriptor::WriteBufferFromFileDescriptor(
     int fd_,
     size_t buf_size,
     char * existing_memory,
-    size_t alignment)
-    : WriteBufferFromFileBase(buf_size, existing_memory, alignment), fd(fd_) {}
+    size_t alignment,
+    std::string file_name_)
+    : WriteBufferFromFileBase(buf_size, existing_memory, alignment)
+    , fd(fd_)
+    , file_name(std::move(file_name_))
+{
+}
 
 
 WriteBufferFromFileDescriptor::~WriteBufferFromFileDescriptor()
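A sketch of what the widened constructor buys: when a name is passed through, diagnostics and `getFileName()` report it instead of the bare descriptor. The header path, the `DB` namespace and the stdout descriptor are assumptions made for the example:

```cpp
#include <unistd.h>
#include <IO/WriteBufferFromFileDescriptor.h>

void sketch()
{
    /// Without a name, getFileName() falls back to "(fd = 1)".
    DB::WriteBufferFromFileDescriptor anonymous(STDOUT_FILENO);

    /// With a name, error messages mention the path (a hypothetical one here).
    DB::WriteBufferFromFileDescriptor named(
        STDOUT_FILENO, DBMS_DEFAULT_BUFFER_SIZE, nullptr, 0, "/tmp/example.txt");
}
```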
@@ -115,7 +120,7 @@ void WriteBufferFromFileDescriptor::sync()
 }
 
 
-off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence)
+off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) // NOLINT
 {
     off_t res = lseek(fd, offset, whence);
     if (-1 == res)

@@ -125,7 +130,7 @@ off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence)
 }
 
 
-void WriteBufferFromFileDescriptor::truncate(off_t length)
+void WriteBufferFromFileDescriptor::truncate(off_t length) // NOLINT
 {
     int res = ftruncate(fd, length);
     if (-1 == res)

@@ -133,7 +138,7 @@ void WriteBufferFromFileDescriptor::truncate(off_t length)
 }
 
 
-off_t WriteBufferFromFileDescriptor::size()
+off_t WriteBufferFromFileDescriptor::size() const
 {
     struct stat buf;
     int res = fstat(fd, &buf);
@@ -142,4 +147,13 @@ off_t WriteBufferFromFileDescriptor::size()
     return buf.st_size;
 }
 
+std::string WriteBufferFromFileDescriptor::getFileName() const
+{
+    if (file_name.empty())
+        return "(fd = " + toString(fd) + ")";
+
+    return file_name;
+}
+
+
 }
@@ -13,17 +13,17 @@ class WriteBufferFromFileDescriptor : public WriteBufferFromFileBase
 protected:
     int fd;
 
+    /// If file has name contains filename, otherwise contains string "(fd=...)"
+    std::string file_name;
+
     void nextImpl() override;
 
-    /// Name or some description of file.
-    std::string getFileName() const override;
-
 public:
     WriteBufferFromFileDescriptor(
         int fd_ = -1,
         size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
         char * existing_memory = nullptr,
-        size_t alignment = 0);
+        size_t alignment = 0,
+        std::string file_name_ = "");
 
     /** Could be used before initialization if needed 'fd' was not passed to constructor.
       * It's not possible to change 'fd' during work.

@@ -42,10 +42,15 @@ public:
 
     void sync() override;
 
-    off_t seek(off_t offset, int whence);
-    void truncate(off_t length);
+    /// clang-tidy wants these methods to be const, but
+    /// they are not const semantically
+    off_t seek(off_t offset, int whence); // NOLINT
+    void truncate(off_t length); // NOLINT
 
-    off_t size();
+    /// Name or some description of file.
+    std::string getFileName() const override;
+
+    off_t size() const;
 };
 
 }
@@ -57,6 +57,7 @@
 #include <Interpreters/Cluster.h>
 #include <Interpreters/InterserverIOHandler.h>
 #include <Interpreters/SystemLog.h>
+#include <Interpreters/SessionLog.h>
 #include <Interpreters/Context.h>
 #include <Interpreters/DDLWorker.h>
 #include <Interpreters/DDLTask.h>

@@ -641,7 +642,6 @@ ConfigurationPtr Context::getUsersConfig()
     return shared->users_config;
 }
 
-
 void Context::setUser(const UUID & user_id_)
 {
     auto lock = getLock();
@@ -2071,6 +2071,16 @@ std::shared_ptr<OpenTelemetrySpanLog> Context::getOpenTelemetrySpanLog() const
     return shared->system_logs->opentelemetry_span_log;
 }
 
+
+std::shared_ptr<SessionLog> Context::getSessionLog() const
+{
+    auto lock = getLock();
+
+    if (!shared->system_logs)
+        return {};
+
+    return shared->system_logs->session_log;
+}
+
 
 std::shared_ptr<ZooKeeperLog> Context::getZooKeeperLog() const
 {
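Callers are expected to treat the result as optional, since the method returns an empty pointer when system logs are not initialized. A hedged sketch of that pattern with minimal stand-in types (the real `SessionLog` interface is not shown in this diff):

```cpp
#include <memory>

/// Stand-ins so the pattern compiles on its own; the real classes live in Context.h / SessionLog.h.
struct SessionLog {};
struct Context
{
    std::shared_ptr<SessionLog> session_log; /// empty when system logs are not initialized
    std::shared_ptr<SessionLog> getSessionLog() const { return session_log; }
};

void onLoginEvent(const Context & context)
{
    if (auto session_log = context.getSessionLog())
    {
        /// Safe to use the log here; the concrete logging calls are outside this diff.
    }
    /// else: session logging is disabled or not yet initialized -- skip silently.
}
```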
@@ -74,6 +74,7 @@ class MetricLog;
 class AsynchronousMetricLog;
 class OpenTelemetrySpanLog;
 class ZooKeeperLog;
+class SessionLog;
 struct MergeTreeSettings;
 class StorageS3Settings;
 class IDatabase;

@@ -600,6 +601,7 @@ public:
     bool hasSessionContext() const { return !session_context.expired(); }
 
     ContextMutablePtr getGlobalContext() const;
+
     bool hasGlobalContext() const { return !global_context.expired(); }
     bool isGlobalContext() const
     {
||||||
@ -735,6 +737,7 @@ public:
|
|||||||
std::shared_ptr<AsynchronousMetricLog> getAsynchronousMetricLog() const;
|
std::shared_ptr<AsynchronousMetricLog> getAsynchronousMetricLog() const;
|
||||||
std::shared_ptr<OpenTelemetrySpanLog> getOpenTelemetrySpanLog() const;
|
std::shared_ptr<OpenTelemetrySpanLog> getOpenTelemetrySpanLog() const;
|
||||||
std::shared_ptr<ZooKeeperLog> getZooKeeperLog() const;
|
std::shared_ptr<ZooKeeperLog> getZooKeeperLog() const;
|
||||||
|
std::shared_ptr<SessionLog> getSessionLog() const;
|
||||||
|
|
||||||
/// Returns an object used to log operations with parts if it possible.
|
/// Returns an object used to log operations with parts if it possible.
|
||||||
/// Provide table name to make required checks.
|
/// Provide table name to make required checks.
|
||||||
|
@@ -617,12 +617,6 @@ Dependencies DatabaseCatalog::getDependencies(const StorageID & from) const
    return Dependencies(iter->second.begin(), iter->second.end());
}

-ViewDependencies DatabaseCatalog::getViewDependencies() const
-{
-    std::lock_guard lock{databases_mutex};
-    return ViewDependencies(view_dependencies.begin(), view_dependencies.end());
-}

void
DatabaseCatalog::updateDependency(const StorageID & old_from, const StorageID & old_where, const StorageID & new_from,
                                  const StorageID & new_where)
@@ -175,7 +175,6 @@ public:
    void addDependency(const StorageID & from, const StorageID & where);
    void removeDependency(const StorageID & from, const StorageID & where);
    Dependencies getDependencies(const StorageID & from) const;
-    ViewDependencies getViewDependencies() const;

    /// For Materialized and Live View
    void updateDependency(const StorageID & old_from, const StorageID & old_where,const StorageID & new_from, const StorageID & new_where);
@@ -21,6 +21,7 @@
#include <Interpreters/PartLog.h>
#include <Interpreters/QueryThreadLog.h>
#include <Interpreters/QueryViewsLog.h>
+#include <Interpreters/SessionLog.h>
#include <Interpreters/TraceLog.h>
#include <Interpreters/TextLog.h>
#include <Interpreters/MetricLog.h>
@@ -420,7 +421,8 @@ BlockIO InterpreterSystemQuery::execute()
                [&] { if (auto asynchronous_metric_log = getContext()->getAsynchronousMetricLog()) asynchronous_metric_log->flush(true); },
                [&] { if (auto opentelemetry_span_log = getContext()->getOpenTelemetrySpanLog()) opentelemetry_span_log->flush(true); },
                [&] { if (auto query_views_log = getContext()->getQueryViewsLog()) query_views_log->flush(true); },
-                [&] { if (auto zookeeper_log = getContext()->getZooKeeperLog()) zookeeper_log->flush(true); }
+                [&] { if (auto zookeeper_log = getContext()->getZooKeeperLog()) zookeeper_log->flush(true); },
+                [&] { if (auto session_log = getContext()->getSessionLog()) session_log->flush(true); }
            );
            break;
        }
@@ -8,6 +8,7 @@
#include <Common/ThreadPool.h>
#include <Common/setThreadName.h>
#include <Interpreters/Context.h>
+#include <Interpreters/SessionLog.h>

#include <atomic>
#include <condition_variable>
@@ -241,7 +242,8 @@ void Session::shutdownNamedSessions()


Session::Session(const ContextPtr & global_context_, ClientInfo::Interface interface_)
-    : global_context(global_context_)
+    : session_id(UUIDHelpers::generateV4()),
+      global_context(global_context_)
{
    prepared_client_info.emplace();
    prepared_client_info->interface = interface_;
@@ -254,6 +256,14 @@ Session::~Session()
    /// Early release a NamedSessionData.
    if (named_session)
        named_session->release();
+
+    if (notified_session_log_about_login)
+    {
+        // must have been set in makeQueryContext or makeSessionContext
+        assert(user);
+        if (auto session_log = getSessionLog())
+            session_log->addLogOut(session_id, user->getName(), getClientInfo());
+    }
}

Authentication::Type Session::getAuthenticationType(const String & user_name) const
@@ -261,6 +271,21 @@ Authentication::Type Session::getAuthenticationType(const String & user_name) co
    return global_context->getAccessControlManager().read<User>(user_name)->authentication.getType();
}

+Authentication::Type Session::getAuthenticationTypeOrLogInFailure(const String & user_name) const
+{
+    try
+    {
+        return getAuthenticationType(user_name);
+    }
+    catch (const Exception & e)
+    {
+        if (auto session_log = getSessionLog())
+            session_log->addLoginFailure(session_id, getClientInfo(), user_name, e);
+
+        throw;
+    }
+}
+
Authentication::Digest Session::getPasswordDoubleSHA1(const String & user_name) const
{
    return global_context->getAccessControlManager().read<User>(user_name)->authentication.getPasswordDoubleSHA1();
@@ -280,7 +305,16 @@ void Session::authenticate(const Credentials & credentials_, const Poco::Net::So
    if ((address == Poco::Net::SocketAddress{}) && (prepared_client_info->interface == ClientInfo::Interface::LOCAL))
        address = Poco::Net::SocketAddress{"127.0.0.1", 0};

-    user_id = global_context->getAccessControlManager().login(credentials_, address.host());
+    try
+    {
+        user_id = global_context->getAccessControlManager().login(credentials_, address.host());
+    }
+    catch (const Exception & e)
+    {
+        if (auto session_log = getSessionLog())
+            session_log->addLoginFailure(session_id, *prepared_client_info, credentials_.getUserName(), e);
+        throw;
+    }

    prepared_client_info->current_user = credentials_.getUserName();
    prepared_client_info->current_address = address;
@@ -330,7 +364,7 @@ ContextMutablePtr Session::makeSessionContext()
    return session_context;
}

-ContextMutablePtr Session::makeSessionContext(const String & session_id_, std::chrono::steady_clock::duration timeout_, bool session_check_)
+ContextMutablePtr Session::makeSessionContext(const String & session_name_, std::chrono::steady_clock::duration timeout_, bool session_check_)
{
    if (session_context)
        throw Exception("Session context already exists", ErrorCodes::LOGICAL_ERROR);
@@ -342,7 +376,7 @@ ContextMutablePtr Session::makeSessionContext(const String & session_id_, std::c
    std::shared_ptr<NamedSessionData> new_named_session;
    bool new_named_session_created = false;
    std::tie(new_named_session, new_named_session_created)
-        = NamedSessionsStorage::instance().acquireSession(global_context, user_id.value_or(UUID{}), session_id_, timeout_, session_check_);
+        = NamedSessionsStorage::instance().acquireSession(global_context, user_id.value_or(UUID{}), session_name_, timeout_, session_check_);

    auto new_session_context = new_named_session->context;
    new_session_context->makeSessionContext();
@@ -359,8 +393,7 @@ ContextMutablePtr Session::makeSessionContext(const String & session_id_, std::c
        new_session_context->setUser(*user_id);

    /// Session context is ready.
-    session_context = new_session_context;
-    session_id = session_id_;
+    session_context = std::move(new_session_context);
    named_session = new_named_session;
    named_session_created = new_named_session_created;
    user = session_context->getUser();
@@ -378,6 +411,13 @@ ContextMutablePtr Session::makeQueryContext(ClientInfo && query_client_info) con
    return makeQueryContextImpl(nullptr, &query_client_info);
}

+std::shared_ptr<SessionLog> Session::getSessionLog() const
+{
+    // take it from global context, since it outlives the Session and always available.
+    // please note that server may have session_log disabled, hence this may return nullptr.
+    return global_context->getSessionLog();
+}
+
ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_to_copy, ClientInfo * client_info_to_move) const
{
    /// We can create a query context either from a session context or from a global context.
@@ -425,7 +465,21 @@ ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_t
    query_context_created = true;
    user = query_context->getUser();

+    if (!notified_session_log_about_login)
+    {
+        if (auto session_log = getSessionLog())
+        {
+            session_log->addLoginSuccess(
+                session_id,
+                named_session ? std::optional<std::string>(named_session->key.second) : std::nullopt,
+                *query_context);
+
+            notified_session_log_about_login = true;
+        }
+    }
+
    return query_context;
}

}
@@ -19,6 +19,7 @@ struct NamedSessionData;
class NamedSessionsStorage;
struct User;
using UserPtr = std::shared_ptr<const User>;
+class SessionLog;

/** Represents user-session from the server perspective,
  * basically it is just a smaller subset of Context API, simplifies Context management.
@@ -41,6 +42,8 @@ public:
    /// Provides information about the authentication type of a specified user.
    Authentication::Type getAuthenticationType(const String & user_name) const;
    Authentication::Digest getPasswordDoubleSHA1(const String & user_name) const;
+    /// Same as getAuthenticationType, but adds LoginFailure event in case of error.
+    Authentication::Type getAuthenticationTypeOrLogInFailure(const String & user_name) const;

    /// Sets the current user, checks the credentials and that the specified address is allowed to connect from.
    /// The function throws an exception if there is no such user or password is wrong.
@@ -54,7 +57,7 @@ public:
    /// Makes a session context, can be used one or zero times.
    /// The function also assigns an user to this context.
    ContextMutablePtr makeSessionContext();
-    ContextMutablePtr makeSessionContext(const String & session_id_, std::chrono::steady_clock::duration timeout_, bool session_check_);
+    ContextMutablePtr makeSessionContext(const String & session_name_, std::chrono::steady_clock::duration timeout_, bool session_check_);
    ContextMutablePtr sessionContext() { return session_context; }
    ContextPtr sessionContext() const { return session_context; }

@@ -66,8 +69,11 @@ public:
    ContextMutablePtr makeQueryContext(ClientInfo && query_client_info) const;

private:
+    std::shared_ptr<SessionLog> getSessionLog() const;
    ContextMutablePtr makeQueryContextImpl(const ClientInfo * client_info_to_copy, ClientInfo * client_info_to_move) const;

+    mutable bool notified_session_log_about_login = false;
+    const UUID session_id;
    const ContextPtr global_context;

    /// ClientInfo that will be copied to a session context when it's created.
@@ -79,9 +85,9 @@ private:
    ContextMutablePtr session_context;
    mutable bool query_context_created = false;

-    String session_id;
    std::shared_ptr<NamedSessionData> named_session;
    bool named_session_created = false;
};

}
src/Interpreters/SessionLog.cpp (new file, 261 lines)
@@ -0,0 +1,261 @@
#include <Interpreters/SessionLog.h>

#include <Access/ContextAccess.h>
#include <Access/User.h>
#include <Access/EnabledRolesInfo.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeUUID.h>
#include <Common/IPv6ToBinary.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnTuple.h>
#include <Access/SettingsProfilesInfo.h>

#include <cassert>

namespace
{
using namespace DB;

inline DateTime64 time_in_microseconds(std::chrono::time_point<std::chrono::system_clock> timepoint)
{
    return std::chrono::duration_cast<std::chrono::microseconds>(timepoint.time_since_epoch()).count();
}

inline time_t time_in_seconds(std::chrono::time_point<std::chrono::system_clock> timepoint)
{
    return std::chrono::duration_cast<std::chrono::seconds>(timepoint.time_since_epoch()).count();
}

auto eventTime()
{
    const auto finish_time = std::chrono::system_clock::now();

    return std::make_pair(time_in_seconds(finish_time), time_in_microseconds(finish_time));
}

using AuthType = Authentication::Type;
using Interface = ClientInfo::Interface;

void fillColumnArray(const Strings & data, IColumn & column)
{
    auto & array = typeid_cast<ColumnArray &>(column);
    size_t size = 0;
    auto & data_col = array.getData();
    for (const auto & name : data)
    {
        data_col.insertData(name.data(), name.size());
        ++size;
    }
    auto & offsets = array.getOffsets();
    offsets.push_back(offsets.back() + size);
};

}

namespace DB
{

SessionLogElement::SessionLogElement(const UUID & session_id_, Type type_)
    : session_id(session_id_),
      type(type_)
{
    std::tie(event_time, event_time_microseconds) = eventTime();
}

NamesAndTypesList SessionLogElement::getNamesAndTypes()
{
    const auto event_type = std::make_shared<DataTypeEnum8>(
        DataTypeEnum8::Values
        {
            {"LoginFailure", static_cast<Int8>(SESSION_LOGIN_FAILURE)},
            {"LoginSuccess", static_cast<Int8>(SESSION_LOGIN_SUCCESS)},
            {"Logout", static_cast<Int8>(SESSION_LOGOUT)}
        });

#define AUTH_TYPE_NAME_AND_VALUE(v) std::make_pair(Authentication::TypeInfo::get(v).raw_name, static_cast<Int8>(v))
    const auto identified_with_column = std::make_shared<DataTypeEnum8>(
        DataTypeEnum8::Values
        {
            AUTH_TYPE_NAME_AND_VALUE(AuthType::NO_PASSWORD),
            AUTH_TYPE_NAME_AND_VALUE(AuthType::PLAINTEXT_PASSWORD),
            AUTH_TYPE_NAME_AND_VALUE(AuthType::SHA256_PASSWORD),
            AUTH_TYPE_NAME_AND_VALUE(AuthType::DOUBLE_SHA1_PASSWORD),
            AUTH_TYPE_NAME_AND_VALUE(AuthType::LDAP),
            AUTH_TYPE_NAME_AND_VALUE(AuthType::KERBEROS)
        });
#undef AUTH_TYPE_NAME_AND_VALUE

    const auto interface_type_column = std::make_shared<DataTypeEnum8>(
        DataTypeEnum8::Values
        {
            {"TCP", static_cast<Int8>(Interface::TCP)},
            {"HTTP", static_cast<Int8>(Interface::HTTP)},
            {"gRPC", static_cast<Int8>(Interface::GRPC)},
            {"MySQL", static_cast<Int8>(Interface::MYSQL)},
            {"PostgreSQL", static_cast<Int8>(Interface::POSTGRESQL)}
        });

    const auto lc_string_datatype = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());

    const auto changed_settings_type_column = std::make_shared<DataTypeArray>(
        std::make_shared<DataTypeTuple>(
            DataTypes({
                // setting name
                lc_string_datatype,
                // value
                std::make_shared<DataTypeString>()
            })));

    return
    {
        {"type", std::move(event_type)},
        {"session_id", std::make_shared<DataTypeUUID>()},
        {"session_name", std::make_shared<DataTypeString>()},
        {"event_date", std::make_shared<DataTypeDate>()},
        {"event_time", std::make_shared<DataTypeDateTime>()},
        {"event_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},

        {"user", std::make_shared<DataTypeString>()},
        {"auth_type", std::move(identified_with_column)},

        {"profiles", std::make_shared<DataTypeArray>(lc_string_datatype)},
        {"roles", std::make_shared<DataTypeArray>(lc_string_datatype)},
        {"changed_settings", std::move(changed_settings_type_column)},

        {"client_address", DataTypeFactory::instance().get("IPv6")},
        {"client_port", std::make_shared<DataTypeUInt16>()},
        {"interface", std::move(interface_type_column)},

        {"client_hostname", std::make_shared<DataTypeString>()},
        {"client_name", std::make_shared<DataTypeString>()},
        {"client_revision", std::make_shared<DataTypeUInt32>()},
        {"client_version_major", std::make_shared<DataTypeUInt32>()},
        {"client_version_minor", std::make_shared<DataTypeUInt32>()},
        {"client_version_patch", std::make_shared<DataTypeUInt32>()},

        {"failure_reason", std::make_shared<DataTypeString>()},
    };
}

void SessionLogElement::appendToBlock(MutableColumns & columns) const
{
    assert(type >= SESSION_LOGIN_FAILURE && type <= SESSION_LOGOUT);
    assert(user_identified_with >= Authentication::Type::NO_PASSWORD && user_identified_with <= Authentication::Type::MAX_TYPE);

    size_t i = 0;

    columns[i++]->insert(type);
    columns[i++]->insert(session_id);
    columns[i++]->insert(session_name);
    columns[i++]->insert(static_cast<DayNum>(DateLUT::instance().toDayNum(event_time).toUnderType()));
    columns[i++]->insert(event_time);
    columns[i++]->insert(event_time_microseconds);

    columns[i++]->insert(user);
    columns[i++]->insert(user_identified_with);

    fillColumnArray(profiles, *columns[i++]);
    fillColumnArray(roles, *columns[i++]);

    {
        auto & changed_settings_array_col = assert_cast<ColumnArray &>(*columns[i++]);
        auto & changed_settings_tuple_col = assert_cast<ColumnTuple &>(changed_settings_array_col.getData());
        auto & names_col = *changed_settings_tuple_col.getColumnPtr(0)->assumeMutable();
        auto & values_col = assert_cast<ColumnString &>(*changed_settings_tuple_col.getColumnPtr(1)->assumeMutable());

        size_t items_added = 0;
        for (const auto & kv : changed_settings)
        {
            names_col.insert(kv.first);
            values_col.insert(kv.second);
            ++items_added;
        }

        auto & offsets = changed_settings_array_col.getOffsets();
        offsets.push_back(changed_settings_tuple_col.size());
    }

    columns[i++]->insertData(IPv6ToBinary(client_info.current_address.host()).data(), 16);
    columns[i++]->insert(client_info.current_address.port());

    columns[i++]->insert(client_info.interface);

    columns[i++]->insertData(client_info.client_hostname.data(), client_info.client_hostname.length());
    columns[i++]->insertData(client_info.client_name.data(), client_info.client_name.length());
    columns[i++]->insert(client_info.client_tcp_protocol_version);
    columns[i++]->insert(client_info.client_version_major);
    columns[i++]->insert(client_info.client_version_minor);
    columns[i++]->insert(client_info.client_version_patch);

    columns[i++]->insertData(auth_failure_reason.data(), auth_failure_reason.length());
}

void SessionLog::addLoginSuccess(const UUID & session_id, std::optional<String> session_name, const Context & login_context)
{
    const auto access = login_context.getAccess();
    const auto & settings = login_context.getSettingsRef();
    const auto & client_info = login_context.getClientInfo();

    DB::SessionLogElement log_entry(session_id, SESSION_LOGIN_SUCCESS);
    log_entry.client_info = client_info;

    {
        const auto user = access->getUser();
        log_entry.user = user->getName();
        log_entry.user_identified_with = user->authentication.getType();
        log_entry.external_auth_server = user->authentication.getLDAPServerName();
    }

    if (session_name)
        log_entry.session_name = *session_name;

    if (const auto roles_info = access->getRolesInfo())
        log_entry.roles = roles_info->getCurrentRolesNames();

    if (const auto profile_info = access->getDefaultProfileInfo())
        log_entry.profiles = profile_info->getProfileNames();

    for (const auto & s : settings.allChanged())
        log_entry.changed_settings.emplace_back(s.getName(), s.getValueString());

    add(log_entry);
}

void SessionLog::addLoginFailure(
    const UUID & session_id,
    const ClientInfo & info,
    const String & user,
    const Exception & reason)
{
    SessionLogElement log_entry(session_id, SESSION_LOGIN_FAILURE);

    log_entry.user = user;
    log_entry.auth_failure_reason = reason.message();
    log_entry.client_info = info;
    log_entry.user_identified_with = Authentication::Type::NO_PASSWORD;

    add(log_entry);
}

void SessionLog::addLogOut(const UUID & session_id, const String & user, const ClientInfo & client_info)
{
    auto log_entry = SessionLogElement(session_id, SESSION_LOGOUT);
    log_entry.user = user;
    log_entry.client_info = client_info;

    add(log_entry);
}

}
src/Interpreters/SessionLog.h (new file, 74 lines)
@@ -0,0 +1,74 @@
#pragma once

#include <Interpreters/SystemLog.h>
#include <Interpreters/ClientInfo.h>
#include <Access/Authentication.h>

namespace DB
{

enum SessionLogElementType : int8_t
{
    SESSION_LOGIN_FAILURE = 0,
    SESSION_LOGIN_SUCCESS = 1,
    SESSION_LOGOUT = 2,
};

class ContextAccess;

/** A struct which will be inserted as row into session_log table.
  *
  * Allows to log information about user sessions:
  * - auth attempts, auth result, auth method, etc.
  * - log out events
  */
struct SessionLogElement
{
    using Type = SessionLogElementType;

    SessionLogElement() = default;
    SessionLogElement(const UUID & session_id_, Type type_);
    SessionLogElement(const SessionLogElement &) = default;
    SessionLogElement & operator=(const SessionLogElement &) = default;
    SessionLogElement(SessionLogElement &&) = default;
    SessionLogElement & operator=(SessionLogElement &&) = default;

    UUID session_id;

    Type type = SESSION_LOGIN_FAILURE;

    String session_name;
    time_t event_time{};
    Decimal64 event_time_microseconds{};

    String user;
    Authentication::Type user_identified_with = Authentication::Type::NO_PASSWORD;
    String external_auth_server;
    Strings roles;
    Strings profiles;
    std::vector<std::pair<String, String>> changed_settings;

    ClientInfo client_info;
    String auth_failure_reason;

    static std::string name() { return "SessionLog"; }

    static NamesAndTypesList getNamesAndTypes();
    static NamesAndAliases getNamesAndAliases() { return {}; }

    void appendToBlock(MutableColumns & columns) const;
};


/// Instead of typedef - to allow forward declaration.
class SessionLog : public SystemLog<SessionLogElement>
{
    using SystemLog<SessionLogElement>::SystemLog;

public:
    void addLoginSuccess(const UUID & session_id, std::optional<String> session_name, const Context & login_context);
    void addLoginFailure(const UUID & session_id, const ClientInfo & info, const String & user, const Exception & reason);
    void addLogOut(const UUID & session_id, const String & user, const ClientInfo & client_info);
};

}
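The session_log table is only created when the server configuration contains a `session_log` section (see the createSystemLog change later in this diff), in which case it lands in the `system` database by default. As a rough illustration, not part of this commit, a query like the following could be used to inspect recent events once the table exists; the column names follow SessionLogElement::getNamesAndTypes() above:

-- Hypothetical inspection query (assumes session_log is enabled in config.xml).
SYSTEM FLUSH LOGS;
SELECT type, session_id, user, auth_type, client_address, failure_reason
FROM system.session_log
WHERE event_date = today()
ORDER BY event_time_microseconds DESC
LIMIT 10;

Each successful login and the matching logout share the same session_id (the UUID generated in the Session constructor), so the two rows can be joined to measure session duration.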
@@ -402,8 +402,8 @@ void Set::checkTypesEqual(size_t set_type_idx, const DataTypePtr & other_type) c
        + data_types[set_type_idx]->getName() + " on the right", ErrorCodes::TYPE_MISMATCH);
}

-MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector<KeyTuplePositionMapping> && index_mapping_)
-    : indexes_mapping(std::move(index_mapping_))
+MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector<KeyTuplePositionMapping> && indexes_mapping_)
+    : has_all_keys(set_elements.size() == indexes_mapping_.size()), indexes_mapping(std::move(indexes_mapping_))
{
    std::sort(indexes_mapping.begin(), indexes_mapping.end(),
        [](const KeyTuplePositionMapping & l, const KeyTuplePositionMapping & r)
@@ -548,11 +548,11 @@ BoolMask MergeTreeSetIndex::checkInRange(const std::vector<Range> & key_ranges,
                break;
            }
        }
-        if (one_element_range)
+        if (one_element_range && has_all_keys)
        {
            /// Here we know that there is one element in range.
            /// The main difference with the normal case is that we can definitely say that
-            /// condition in this range always TRUE (can_be_false = 0) xor always FALSE (can_be_true = 0).
+            /// condition in this range is always TRUE (can_be_false = 0) or always FALSE (can_be_true = 0).

            /// Check if it's an empty range
            if (!left_included || !right_included)
@@ -208,7 +208,7 @@ public:
        std::vector<FunctionBasePtr> functions;
    };

-    MergeTreeSetIndex(const Columns & set_elements, std::vector<KeyTuplePositionMapping> && index_mapping_);
+    MergeTreeSetIndex(const Columns & set_elements, std::vector<KeyTuplePositionMapping> && indexes_mapping_);

    size_t size() const { return ordered_set.at(0)->size(); }

@@ -217,6 +217,8 @@ public:
    BoolMask checkInRange(const std::vector<Range> & key_ranges, const DataTypes & data_types) const;

private:
+    // If all arguments in tuple are key columns, we can optimize NOT IN when there is only one element.
+    bool has_all_keys;
    Columns ordered_set;
    std::vector<KeyTuplePositionMapping> indexes_mapping;
@@ -6,7 +6,7 @@
#include <Interpreters/QueryLog.h>
#include <Interpreters/QueryThreadLog.h>
#include <Interpreters/QueryViewsLog.h>
-#include <Interpreters/SystemLog.h>
+#include <Interpreters/SessionLog.h>
#include <Interpreters/TextLog.h>
#include <Interpreters/TraceLog.h>
#include <Interpreters/ZooKeeperLog.h>
@@ -39,7 +39,13 @@ std::shared_ptr<TSystemLog> createSystemLog(
    const String & config_prefix)
{
    if (!config.has(config_prefix))
+    {
+        LOG_DEBUG(&Poco::Logger::get("SystemLog"),
+            "Not creating {}.{} since corresponding section '{}' is missing from config",
+            default_database_name, default_table_name, config_prefix);
+
        return {};
+    }

    String database = config.getString(config_prefix + ".database", default_database_name);
    String table = config.getString(config_prefix + ".table", default_table_name);
@@ -107,6 +113,7 @@ SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConf
        "opentelemetry_span_log");
    query_views_log = createSystemLog<QueryViewsLog>(global_context, "system", "query_views_log", config, "query_views_log");
    zookeeper_log = createSystemLog<ZooKeeperLog>(global_context, "system", "zookeeper_log", config, "zookeeper_log");
+    session_log = createSystemLog<SessionLog>(global_context, "system", "session_log", config, "session_log");

    if (query_log)
        logs.emplace_back(query_log.get());
@@ -130,6 +137,8 @@ SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConf
        logs.emplace_back(query_views_log.get());
    if (zookeeper_log)
        logs.emplace_back(zookeeper_log.get());
+    if (session_log)
+        logs.emplace_back(session_log.get());

    try
    {
@@ -75,6 +75,7 @@ class AsynchronousMetricLog;
class OpenTelemetrySpanLog;
class QueryViewsLog;
class ZooKeeperLog;
+class SessionLog;


class ISystemLog
@@ -115,6 +116,8 @@ struct SystemLogs
    std::shared_ptr<QueryViewsLog> query_views_log;
    /// Used to log all actions of ZooKeeper client
    std::shared_ptr<ZooKeeperLog> zookeeper_log;
+    /// Login, LogOut and Login failure events
+    std::shared_ptr<SessionLog> session_log;

    std::vector<ISystemLog *> logs;
};
@@ -26,7 +26,8 @@ NamesAndTypesList TextLogElement::getNamesAndTypes()
            {"Notice", static_cast<Int8>(Message::PRIO_NOTICE)},
            {"Information", static_cast<Int8>(Message::PRIO_INFORMATION)},
            {"Debug", static_cast<Int8>(Message::PRIO_DEBUG)},
-            {"Trace", static_cast<Int8>(Message::PRIO_TRACE)}
+            {"Trace", static_cast<Int8>(Message::PRIO_TRACE)},
+            {"Test", static_cast<Int8>(Message::PRIO_TEST)},
        });

    return
@@ -148,6 +148,7 @@ SRCS(
    RowRefs.cpp
    SelectIntersectExceptQueryVisitor.cpp
    Session.cpp
+    SessionLog.cpp
    Set.cpp
    SetVariants.cpp
    SortedBlocksWriter.cpp
@@ -246,7 +246,7 @@ void MySQLHandler::authenticate(const String & user_name, const String & auth_pl
    try
    {
        // For compatibility with JavaScript MySQL client, Native41 authentication plugin is used when possible (if password is specified using double SHA1). Otherwise SHA256 plugin is used.
-        if (session->getAuthenticationType(user_name) == DB::Authentication::SHA256_PASSWORD)
+        if (session->getAuthenticationTypeOrLogInFailure(user_name) == DB::Authentication::SHA256_PASSWORD)
        {
            authPluginSSL();
        }
@@ -17,6 +17,7 @@ class ASTStorage;
#define LIST_OF_MYSQL_SETTINGS(M) \
    M(UInt64, connection_pool_size, 16, "Size of connection pool (if all connections are in use, the query will wait until some connection will be freed).", 0) \
    M(UInt64, connection_max_tries, 3, "Number of retries for pool with failover", 0) \
+    M(UInt64, connection_wait_timeout, 5, "Timeout (in seconds) for waiting for free connection (in case of there is already connection_pool_size active connections), 0 - do not wait.", 0) \
    M(Bool, connection_auto_close, true, "Auto-close connection after query execution, i.e. disable connection reuse.", 0) \

DECLARE_SETTINGS_TRAITS(MySQLSettingsTraits, LIST_OF_MYSQL_SETTINGS)
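For illustration only (not part of the commit), the new setting could be applied per table through the MySQL engine's SETTINGS clause; the host, database, table, and credential values below are placeholders:

-- Hypothetical example: cap the pool at 4 connections and wait at most 10 seconds for a free one.
CREATE TABLE mysql_orders
(
    id UInt64,
    amount Float64
)
ENGINE = MySQL('mysql-host:3306', 'shop', 'orders', 'user', 'password')
SETTINGS connection_pool_size = 4, connection_wait_timeout = 10;

With connection_wait_timeout = 0 a full pool fails immediately instead of sleeping and retrying.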
@@ -625,9 +625,8 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot()
        tryLogCurrentException(__PRETTY_FUNCTION__);
        return false;
    }
-    catch (const pqxx::broken_connection & e)
+    catch (const pqxx::broken_connection &)
    {
-        LOG_ERROR(log, "Connection error: {}", e.what());
        connection->tryUpdateConnection();
        return false;
    }
@@ -641,6 +640,7 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot()
        if (error_message.find("out of relcache_callback_list slots") == std::string::npos)
            tryLogCurrentException(__PRETTY_FUNCTION__);

+        connection->tryUpdateConnection();
        return false;
    }
    catch (const pqxx::conversion_error & e)
@@ -17,6 +17,8 @@ namespace DB
    M(UInt64, materialized_postgresql_max_block_size, 65536, "Number of row collected before flushing data into table.", 0) \
    M(String, materialized_postgresql_tables_list, "", "List of tables for MaterializedPostgreSQL database engine", 0) \
    M(Bool, materialized_postgresql_allow_automatic_update, false, "Allow to reload table in the background, when schema changes are detected", 0) \
+    M(String, materialized_postgresql_replication_slot, "", "A user-created replication slot", 0) \
+    M(String, materialized_postgresql_snapshot, "", "User provided snapshot in case he manages replication slots himself", 0) \

DECLARE_SETTINGS_TRAITS(MaterializedPostgreSQLSettingsTraits, LIST_OF_MATERIALIZED_POSTGRESQL_SETTINGS)
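Illustrative sketch only (not part of the commit): with these two settings a user-managed replication slot and the snapshot exported when that slot was created can be passed at database creation time; the connection details, slot name, and snapshot identifier below are placeholders:

-- Hypothetical example: attach to a slot created manually on the PostgreSQL side.
CREATE DATABASE pg_mirror
ENGINE = MaterializedPostgreSQL('postgres-host:5432', 'pgdb', 'user', 'password')
SETTINGS materialized_postgresql_replication_slot = 'my_slot',
         materialized_postgresql_snapshot = '00000003-0000001B8-1';

If the slot setting is left empty, the handler keeps creating and dropping its own `<identifier>_ch_replication_slot` as before.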
@@ -32,24 +32,28 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler(
    const postgres::ConnectionInfo & connection_info_,
    ContextPtr context_,
    bool is_attach_,
-    const size_t max_block_size_,
-    bool allow_automatic_update_,
-    bool is_materialized_postgresql_database_,
-    const String tables_list_)
+    const MaterializedPostgreSQLSettings & replication_settings,
+    bool is_materialized_postgresql_database_)
    : log(&Poco::Logger::get("PostgreSQLReplicationHandler"))
    , context(context_)
    , is_attach(is_attach_)
    , remote_database_name(remote_database_name_)
    , current_database_name(current_database_name_)
    , connection_info(connection_info_)
-    , max_block_size(max_block_size_)
-    , allow_automatic_update(allow_automatic_update_)
+    , max_block_size(replication_settings.materialized_postgresql_max_block_size)
+    , allow_automatic_update(replication_settings.materialized_postgresql_allow_automatic_update)
    , is_materialized_postgresql_database(is_materialized_postgresql_database_)
-    , tables_list(tables_list_)
+    , tables_list(replication_settings.materialized_postgresql_tables_list)
+    , user_provided_snapshot(replication_settings.materialized_postgresql_snapshot)
    , connection(std::make_shared<postgres::Connection>(connection_info_))
    , milliseconds_to_wait(RESCHEDULE_MS)
{
-    replication_slot = fmt::format("{}_ch_replication_slot", replication_identifier);
+    replication_slot = replication_settings.materialized_postgresql_replication_slot;
+    if (replication_slot.empty())
+    {
+        user_managed_slot = false;
+        replication_slot = fmt::format("{}_ch_replication_slot", replication_identifier);
+    }
    publication_name = fmt::format("{}_ch_publication", replication_identifier);

    startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); });
@@ -121,7 +125,20 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error)

    auto initial_sync = [&]()
    {
-        createReplicationSlot(tx, start_lsn, snapshot_name);
+        LOG_TRACE(log, "Starting tables sync load");
+
+        if (user_managed_slot)
+        {
+            if (user_provided_snapshot.empty())
+                throw Exception(ErrorCodes::BAD_ARGUMENTS,
+                                "Using a user-defined replication slot must be provided with a snapshot from EXPORT SNAPSHOT when the slot is created."
+                                "Pass it to `materialized_postgresql_snapshot` setting");
+            snapshot_name = user_provided_snapshot;
+        }
+        else
+        {
+            createReplicationSlot(tx, start_lsn, snapshot_name);
+        }

        for (const auto & [table_name, storage] : materialized_storages)
        {
@@ -147,12 +164,17 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error)
    /// Recreation of a replication slot imposes reloading of all tables.
    if (!isReplicationSlotExist(tx, start_lsn, /* temporary */false))
    {
+        if (user_managed_slot)
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Having replication slot `{}` from settings, but it does not exist", replication_slot);
+
        initial_sync();
    }
    /// Always drop replication slot if it is CREATE query and not ATTACH.
    else if (!is_attach || new_publication)
    {
-        dropReplicationSlot(tx);
+        if (!user_managed_slot)
+            dropReplicationSlot(tx);

        initial_sync();
    }
    /// Synchronization and initial load already took place - do not create any new tables, just fetch StoragePtr's
@@ -376,6 +398,8 @@ bool PostgreSQLReplicationHandler::isReplicationSlotExist(pqxx::nontransaction &
void PostgreSQLReplicationHandler::createReplicationSlot(
    pqxx::nontransaction & tx, String & start_lsn, String & snapshot_name, bool temporary)
{
+    assert(temporary || !user_managed_slot);
+
    String query_str, slot_name;
    if (temporary)
        slot_name = replication_slot + "_tmp";
@@ -401,6 +425,8 @@ void PostgreSQLReplicationHandler::createReplicationSlot(

void PostgreSQLReplicationHandler::dropReplicationSlot(pqxx::nontransaction & tx, bool temporary)
{
+    assert(temporary || !user_managed_slot);
+
    std::string slot_name;
    if (temporary)
        slot_name = replication_slot + "_tmp";
@@ -433,14 +459,17 @@ void PostgreSQLReplicationHandler::shutdownFinal()

    connection->execWithRetry([&](pqxx::nontransaction & tx)
    {
-        if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false))
-            dropReplicationSlot(tx, /* temporary */false);
+        if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */true))
+            dropReplicationSlot(tx, /* temporary */true);
    });

+    if (user_managed_slot)
+        return;
+
    connection->execWithRetry([&](pqxx::nontransaction & tx)
    {
-        if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */true))
-            dropReplicationSlot(tx, /* temporary */true);
+        if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false))
+            dropReplicationSlot(tx, /* temporary */false);
    });
}
catch (Exception & e)
@@ -1,6 +1,7 @@
#pragma once

#include "MaterializedPostgreSQLConsumer.h"
+#include "MaterializedPostgreSQLSettings.h"
#include <Databases/PostgreSQL/fetchPostgreSQLTableStructure.h>
#include <Core/PostgreSQL/Utils.h>

@@ -25,10 +26,8 @@ public:
        const postgres::ConnectionInfo & connection_info_,
        ContextPtr context_,
        bool is_attach_,
-        const size_t max_block_size_,
-        bool allow_automatic_update_,
-        bool is_materialized_postgresql_database_,
-        const String tables_list = "");
+        const MaterializedPostgreSQLSettings & replication_settings,
+        bool is_materialized_postgresql_database_);

    /// Activate task to be run from a separate thread: wait until connection is available and call startReplication().
    void startup();
@@ -108,6 +107,9 @@ private:
    /// A coma-separated list of tables, which are going to be replicated for database engine. By default, a whole database is replicated.
    String tables_list;

+    bool user_managed_slot = true;
+    String user_provided_snapshot;
+
    String replication_slot, publication_name;

    /// Shared between replication_consumer and replication_handler, but never accessed concurrently.
@@ -64,6 +64,8 @@ StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL(
    setInMemoryMetadata(storage_metadata);

    String replication_identifier = remote_database_name + "_" + remote_table_name_;
+    replication_settings->materialized_postgresql_tables_list = remote_table_name_;

    replication_handler = std::make_unique<PostgreSQLReplicationHandler>(
        replication_identifier,
        remote_database_name,
@@ -71,8 +73,8 @@ StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL(
        connection_info,
        getContext(),
        is_attach,
-        replication_settings->materialized_postgresql_max_block_size.value,
-        /* allow_automatic_update */ false, /* is_materialized_postgresql_database */false);
+        *replication_settings,
+        /* is_materialized_postgresql_database */false);

    if (!is_attach)
    {
@@ -1332,7 +1332,12 @@ void registerStorageDistributed(StorageFactory & factory)
        String remote_table = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();

        const auto & sharding_key = engine_args.size() >= 4 ? engine_args[3] : nullptr;
-        const auto & storage_policy = engine_args.size() >= 5 ? engine_args[4]->as<ASTLiteral &>().value.safeGet<String>() : "default";
+        String storage_policy = "default";
+        if (engine_args.size() >= 5)
+        {
+            engine_args[4] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[4], local_context);
+            storage_policy = engine_args[4]->as<ASTLiteral &>().value.safeGet<String>();
+        }

        /// Check that sharding_key exists in the table and has numeric type.
        if (sharding_key)
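Illustrative sketch only (not part of the commit): after this change the fifth Distributed engine argument may be written as a bare identifier or constant expression, since it now goes through evaluateConstantExpressionOrIdentifierAsLiteral; the cluster, table, and policy names below are placeholders:

-- Hypothetical example: the storage policy given as an identifier rather than a string literal.
CREATE TABLE dist_hits AS hits
ENGINE = Distributed(my_cluster, default, hits, rand(), default_policy);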
@@ -267,11 +267,15 @@ void registerStorageMySQL(StorageFactory & factory)
             throw Exception("connection_pool_size cannot be zero.", ErrorCodes::BAD_ARGUMENTS);

         auto addresses = parseRemoteDescriptionForExternalDatabase(host_port, max_addresses, 3306);
-        mysqlxx::PoolWithFailover pool(remote_database, addresses,
-            username, password,
+        mysqlxx::PoolWithFailover pool(
+            remote_database,
+            addresses,
+            username,
+            password,
             MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
             mysql_settings.connection_pool_size,
-            mysql_settings.connection_max_tries);
+            mysql_settings.connection_max_tries,
+            mysql_settings.connection_wait_timeout);

         bool replace_query = false;
         std::string on_duplicate_clause;
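The extra constructor argument wires the storage-level `connection_wait_timeout` setting into the connection pool. A short sketch of how the setting is applied on a MySQL-backed table, mirroring the integration test added later in this commit (the `node` fixture and the `mysql57:3306` address come from that test; the table and database names here are placeholders):

```python
def create_mysql_backed_table(node):
    # One pooled connection and a 2 second wait: a second concurrent query has to
    # wait for the pool and, once connection_wait_timeout expires, fails with
    # "mysqlxx::Pool is full (connection_wait_timeout is exceeded)".
    node.query("""
        CREATE TABLE mysql_one_connection (id UInt32, name String)
        ENGINE = MySQL('mysql57:3306', 'clickhouse', 'some_table', 'root', 'clickhouse')
        SETTINGS connection_pool_size = 1, connection_wait_timeout = 2
    """)
```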
@@ -73,7 +73,9 @@ NamesAndAliases StorageSystemProcesses::getNamesAndAliases()
     return
     {
         {"ProfileEvents.Names", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapKeys(ProfileEvents)"},
-        {"ProfileEvents.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())}, "mapValues(ProfileEvents)"}
+        {"ProfileEvents.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())}, "mapValues(ProfileEvents)"},
+        {"Settings.Names", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapKeys(Settings)" },
+        {"Settings.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapValues(Settings)"}
     };
 }
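The two new aliases expose the `Settings` map of a running query as parallel name/value arrays, in the same way `ProfileEvents.Names`/`ProfileEvents.Values` already do. A hedged usage sketch (assuming a `client` object with a `query()` method; the alias names are the ones registered above):

```python
rows = client.query("""
    SELECT query_id, `Settings.Names`, `Settings.Values`
    FROM system.processes
""")
# The aliases are computed as mapKeys(Settings) and mapValues(Settings), so they
# always mirror the underlying Settings map column of system.processes.
```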
@@ -1,68 +0,0 @@
#include <Storages/System/StorageSystemViews.h>
#include <DataTypes/DataTypeString.h>
#include <Access/ContextAccess.h>
#include <Interpreters/Context.h>
#include <Interpreters/QueryViewsLog.h>
#include <DataTypes/DataTypeEnum.h>
#include <Storages/StorageMaterializedView.h>
#include <Storages/LiveView/StorageLiveView.h>

namespace DB
{

class Context;

NamesAndTypesList StorageSystemViews::getNamesAndTypes()
{
    auto view_type_datatype = std::make_shared<DataTypeEnum8>(DataTypeEnum8::Values{
        {"Default", static_cast<Int8>(QueryViewsLogElement::ViewType::DEFAULT)},
        {"Materialized", static_cast<Int8>(QueryViewsLogElement::ViewType::MATERIALIZED)},
        {"Live", static_cast<Int8>(QueryViewsLogElement::ViewType::LIVE)}});

    return {
        {"database", std::make_shared<DataTypeString>()},
        {"name", std::make_shared<DataTypeString>()},
        {"main_dependency_database", std::make_shared<DataTypeString>()},
        {"main_dependency_table", std::make_shared<DataTypeString>()},
        {"view_type", std::move(view_type_datatype)},
    };
}

void StorageSystemViews::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const
{
    const auto access = context->getAccess();
    const bool check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES);

    for (const auto & [table_id, view_ids] : DatabaseCatalog::instance().getViewDependencies())
    {
        const bool check_access_for_tables = check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, table_id.database_name);

        if (check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, table_id.database_name, table_id.table_name))
            continue;

        size_t col_num;
        for (const auto & view_id : view_ids)
        {
            auto view_ptr = DatabaseCatalog::instance().getTable(view_id, context);
            QueryViewsLogElement::ViewType type = QueryViewsLogElement::ViewType::DEFAULT;

            if (typeid_cast<const StorageMaterializedView *>(view_ptr.get()))
            {
                type = QueryViewsLogElement::ViewType::MATERIALIZED;
            }
            else if (typeid_cast<const StorageLiveView *>(view_ptr.get()))
            {
                type = QueryViewsLogElement::ViewType::LIVE;
            }

            col_num = 0;
            res_columns[col_num++]->insert(view_id.database_name);
            res_columns[col_num++]->insert(view_id.table_name);
            res_columns[col_num++]->insert(table_id.database_name);
            res_columns[col_num++]->insert(table_id.table_name);
            res_columns[col_num++]->insert(type);
        }
    }
}

}
@@ -1,24 +0,0 @@
#pragma once

#include <common/shared_ptr_helper.h>
#include <Storages/System/IStorageSystemOneBlock.h>

namespace DB
{

class StorageSystemViews final : public shared_ptr_helper<StorageSystemViews>, public IStorageSystemOneBlock<StorageSystemViews>
{
    friend struct shared_ptr_helper<StorageSystemViews>;
protected:
    using IStorageSystemOneBlock::IStorageSystemOneBlock;

    void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const override;

public:
    std::string getName() const override { return "SystemViews"; }

    static NamesAndTypesList getNamesAndTypes();

};

}
@@ -44,7 +44,6 @@
 #include <Storages/System/StorageSystemTableEngines.h>
 #include <Storages/System/StorageSystemTableFunctions.h>
 #include <Storages/System/StorageSystemTables.h>
-#include <Storages/System/StorageSystemViews.h>
 #include <Storages/System/StorageSystemZooKeeper.h>
 #include <Storages/System/StorageSystemContributors.h>
 #include <Storages/System/StorageSystemErrors.h>
@@ -96,7 +95,6 @@ void attachSystemTablesLocal(IDatabase & system_database)
     attach<StorageSystemZeros>(system_database, "zeros_mt", true);
     attach<StorageSystemDatabases>(system_database, "databases");
     attach<StorageSystemTables>(system_database, "tables");
-    attach<StorageSystemViews>(system_database, "views");
     attach<StorageSystemColumns>(system_database, "columns");
     attach<StorageSystemFunctions>(system_database, "functions");
     attach<StorageSystemEvents>(system_database, "events");
@@ -214,7 +214,6 @@ SRCS(
     System/StorageSystemTables.cpp
     System/StorageSystemUserDirectories.cpp
     System/StorageSystemUsers.cpp
-    System/StorageSystemViews.cpp
     System/StorageSystemWarnings.cpp
     System/StorageSystemZeros.cpp
     System/StorageSystemZooKeeper.cpp
@@ -1,7 +1,7 @@
 {
     "build_config": [
         {
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -12,7 +12,7 @@
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -32,7 +32,7 @@
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -42,7 +42,7 @@
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -52,7 +52,7 @@
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -62,7 +62,7 @@
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -72,7 +72,7 @@
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -92,7 +92,7 @@
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -104,7 +104,7 @@
     "special_build_config": [
         {
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -114,7 +114,7 @@
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -124,7 +124,7 @@
-            "compiler": "clang-11-darwin",
+            "compiler": "clang-12-darwin",
@@ -134,7 +134,7 @@
-            "compiler": "clang-11-aarch64",
+            "compiler": "clang-12-aarch64",
@@ -144,7 +144,7 @@
-            "compiler": "clang-11-freebsd",
+            "compiler": "clang-12-freebsd",
@@ -154,7 +154,7 @@
-            "compiler": "clang-11-darwin-aarch64",
+            "compiler": "clang-12-darwin-aarch64",
@@ -167,7 +167,7 @@ "tests_config": "Functional stateful tests (address)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -179,7 +179,7 @@ "Functional stateful tests (thread)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -191,7 +191,7 @@ "Functional stateful tests (memory)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -203,7 +203,7 @@ "Functional stateful tests (ubsan)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -215,7 +215,7 @@ "Functional stateful tests (debug)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -227,7 +227,7 @@ "Functional stateful tests (release)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -239,7 +239,7 @@ "Functional stateful tests (release, DatabaseOrdinary)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -251,7 +251,7 @@ "Functional stateful tests (release, DatabaseReplicated)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -263,7 +263,7 @@ "Functional stateless tests (address)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -275,7 +275,7 @@ "Functional stateless tests (thread)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -287,7 +287,7 @@ "Functional stateless tests (memory)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -299,7 +299,7 @@ "Functional stateless tests (ubsan)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -311,7 +311,7 @@ "Functional stateless tests (debug)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -323,7 +323,7 @@ "Functional stateless tests (release)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -335,7 +335,7 @@ "Functional stateless tests (pytest)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -359,7 +359,7 @@ "Functional stateless tests (release, wide parts enabled)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -371,7 +371,7 @@ "Functional stateless tests (release, DatabaseOrdinary)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -383,7 +383,7 @@ "Functional stateless tests (release, DatabaseReplicated)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -395,7 +395,7 @@ "Stress test (address)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -407,7 +407,7 @@ "Stress test (thread)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -419,7 +419,7 @@ "Stress test (undefined)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -431,7 +431,7 @@ "Stress test (memory)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -443,7 +443,7 @@ "Stress test (debug)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -455,7 +455,7 @@ "Integration tests (asan)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -467,7 +467,7 @@ "Integration tests (thread)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -479,7 +479,7 @@ "Integration tests (release)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -491,7 +491,7 @@ "Integration tests (memory)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -503,7 +503,7 @@ "Integration tests flaky check (asan)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -515,7 +515,7 @@ "Compatibility check"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -527,7 +527,7 @@ "Split build smoke test"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -539,7 +539,7 @@ "Testflows check"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -563,7 +563,7 @@ "Unit tests release clang"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -575,7 +575,7 @@ "Unit tests ASAN"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -587,7 +587,7 @@ "Unit tests MSAN"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -599,7 +599,7 @@ "Unit tests TSAN"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -611,7 +611,7 @@ "Unit tests UBSAN"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -623,7 +623,7 @@ "AST fuzzer (debug)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -635,7 +635,7 @@ "AST fuzzer (ASan)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -647,7 +647,7 @@ "AST fuzzer (MSan)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -659,7 +659,7 @@ "AST fuzzer (TSan)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -671,7 +671,7 @@ "AST fuzzer (UBSan)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -683,7 +683,7 @@ "Release"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -695,7 +695,7 @@ "Functional stateless tests flaky check (address)"
-            "compiler": "clang-11",
+            "compiler": "clang-12",
@@ -45,6 +45,7 @@ ln -sf $SRC_PATH/users.d/access_management.xml $DEST_SERVER_PATH/users.d/
 ln -sf $SRC_PATH/users.d/database_atomic_drop_detach_sync.xml $DEST_SERVER_PATH/users.d/
 ln -sf $SRC_PATH/users.d/opentelemetry.xml $DEST_SERVER_PATH/users.d/
 ln -sf $SRC_PATH/users.d/remote_queries.xml $DEST_SERVER_PATH/users.d/
+ln -sf $SRC_PATH/users.d/session_log_test.xml $DEST_SERVER_PATH/users.d/

 # FIXME DataPartsExchange may hang for http_send_timeout seconds
 # when nobody is going to read from the other side of socket (due to "Fetching of part was cancelled"),
30 tests/config/users.d/session_log_test.xml Normal file
@@ -0,0 +1,30 @@
<?xml version="1.0"?>
<!-- User and profile to be used in session_log tests, to make sure that the list of user's profiles is logged correctly -->
<yandex>
    <profiles>
        <session_log_test_xml_profile>
            <!--
                can't be readonly since we need to modify some
                user settings in queries initiated by test.
                <readonly>1</readonly>
            -->
            <send_logs_level>none</send_logs_level>
        </session_log_test_xml_profile>
    </profiles>
    <roles>
        <session_log_test_xml_role>
        </session_log_test_xml_role>
    </roles>

    <users>
        <session_log_test_xml_user>
            <password></password>
            <networks incl="networks" replace="replace">
                <ip>::1</ip>
                <ip>127.0.0.1</ip>
            </networks>
            <profile>session_log_test_xml_profile</profile>
            <quota>default</quota>
        </session_log_test_xml_user>
    </users>
</yandex>
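The fixture user and profile above give the session_log tests a login whose profile list is known in advance. A sketch of how such a login could be checked afterwards (the `client` handle and the `user`, `interface`, `type` and `event_time` column names are assumptions about the `system.session_log` schema, not something this file defines):

```python
rows = client.query("""
    SELECT type, interface
    FROM system.session_log
    WHERE user = 'session_log_test_xml_user'
    ORDER BY event_time
""")
# A successful TCP/HTTP/MySQL login by this user is expected to show up as
# LoginSuccess/Logout pairs, as listed in the test reference file further below.
```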
@@ -31,18 +31,33 @@ postgres_table_template_3 = """
     key1 Integer NOT NULL, value1 Integer, key2 Integer NOT NULL, value2 Integer NOT NULL)
     """

-def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database'):
+def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database', replication=False):
     if database == True:
         conn_string = "host={} port={} dbname='{}' user='postgres' password='mysecretpassword'".format(ip, port, database_name)
     else:
         conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(ip, port)

+    if replication:
+        conn_string += " replication='database'"
+
     conn = psycopg2.connect(conn_string)
     if auto_commit:
         conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
         conn.autocommit = True
     return conn

+def create_replication_slot(conn, slot_name='user_slot'):
+    cursor = conn.cursor()
+    cursor.execute('CREATE_REPLICATION_SLOT {} LOGICAL pgoutput EXPORT_SNAPSHOT'.format(slot_name))
+    result = cursor.fetchall()
+    print(result[0][0]) # slot name
+    print(result[0][1]) # start lsn
+    print(result[0][2]) # snapshot
+    return result[0][2]
+
+def drop_replication_slot(conn, slot_name='user_slot'):
+    cursor = conn.cursor()
+    cursor.execute("select pg_drop_replication_slot('{}')".format(slot_name))
+
 def create_postgres_db(cursor, name='postgres_database'):
     cursor.execute("CREATE DATABASE {}".format(name))
@@ -941,6 +956,34 @@ def test_quoting(started_cluster):
     drop_materialized_db()


+def test_user_managed_slots(started_cluster):
+    conn = get_postgres_conn(ip=started_cluster.postgres_ip,
+                             port=started_cluster.postgres_port,
+                             database=True)
+    cursor = conn.cursor()
+    table_name = 'test_table'
+    create_postgres_table(cursor, table_name);
+    instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name))
+
+    slot_name = 'user_slot'
+    replication_connection = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port,
+                                               database=True, replication=True, auto_commit=True)
+    snapshot = create_replication_slot(replication_connection, slot_name=slot_name)
+    create_materialized_db(ip=started_cluster.postgres_ip,
+                           port=started_cluster.postgres_port,
+                           settings=["materialized_postgresql_replication_slot = '{}'".format(slot_name),
+                                     "materialized_postgresql_snapshot = '{}'".format(snapshot)])
+    check_tables_are_synchronized(table_name);
+    instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000, 10000)".format(table_name))
+    check_tables_are_synchronized(table_name);
+    instance.restart_clickhouse()
+    instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(20000, 10000)".format(table_name))
+    check_tables_are_synchronized(table_name);
+    drop_postgres_table(cursor, table_name)
+    drop_materialized_db()
+    drop_replication_slot(replication_connection, slot_name)
+
+
 if __name__ == '__main__':
     cluster.start()
     input("Cluster created, press any key to destroy...")
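`create_materialized_db()` is a helper defined elsewhere in this test file; when the settings list above is passed, the DDL it issues is roughly the following (a sketch under the assumption that the helper builds a standard `MaterializedPostgreSQL` database engine statement; the host, database name and credentials are placeholders and the exact statement may differ):

```python
def create_materialized_db_with_user_slot(instance, slot_name, snapshot):
    # The user-created slot and the snapshot exported by CREATE_REPLICATION_SLOT are
    # handed to the engine instead of letting it create its own slot.
    instance.query("""
        CREATE DATABASE test_database
        ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')
        SETTINGS materialized_postgresql_replication_slot = '{}',
                 materialized_postgresql_snapshot = '{}'
    """.format(slot_name, snapshot))
```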
@@ -3,7 +3,10 @@ from contextlib import contextmanager
 ## sudo -H pip install PyMySQL
 import pymysql.cursors
 import pytest
+import time
+import threading
 from helpers.cluster import ClickHouseCluster
+from helpers.client import QueryRuntimeException

 cluster = ClickHouseCluster(__file__)

@@ -319,6 +322,51 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL
     conn.close()


+# Check that limited connection_wait_timeout (via connection_pool_size=1) will throw.
+def test_settings_connection_wait_timeout(started_cluster):
+    table_name = 'test_settings_connection_wait_timeout'
+    node1.query(f'DROP TABLE IF EXISTS {table_name}')
+    wait_timeout = 2
+
+    conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
+    drop_mysql_table(conn, table_name)
+    create_mysql_table(conn, table_name)
+
+    node1.query('''
+        CREATE TABLE {}
+        (
+            id UInt32,
+            name String,
+            age UInt32,
+            money UInt32
+        )
+        ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')
+        SETTINGS connection_wait_timeout={}, connection_pool_size=1
+        '''.format(table_name, table_name, wait_timeout)
+    )
+
+    node1.query("INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format(table_name))
+
+    def worker():
+        node1.query("SELECT sleepEachRow(1) FROM {}".format(table_name))
+
+    worker_thread = threading.Thread(target=worker)
+    worker_thread.start()
+
+    # ensure that first query started in worker_thread
+    time.sleep(1)
+
+    started = time.time()
+    with pytest.raises(QueryRuntimeException, match=r"Exception: mysqlxx::Pool is full \(connection_wait_timeout is exceeded\)"):
+        node1.query("SELECT sleepEachRow(1) FROM {}".format(table_name))
+    ended = time.time()
+    assert (ended - started) >= wait_timeout
+
+    worker_thread.join()
+
+    drop_mysql_table(conn, table_name)
+    conn.close()
+
+
 if __name__ == '__main__':
     with contextmanager(started_cluster)() as cluster:
         for name, instance in list(cluster.instances.items()):
@@ -1 +1 @@
-CREATE QUOTA default KEYED BY user_name FOR INTERVAL 1 hour TRACKING ONLY TO default, readonly
+CREATE QUOTA default KEYED BY user_name FOR INTERVAL 1 hour TRACKING ONLY TO default, readonly, session_log_test_xml_user
@@ -0,0 +1,218 @@

# no_password - User with profile from XML
TCP endpoint
TCP 'wrong password' case is skipped for no_password.
HTTP endpoint
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint
MySQL 'wrong password' case is skipped for no_password.

# no_password - No profiles no roles
TCP endpoint
TCP 'wrong password' case is skipped for no_password.
HTTP endpoint
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint
MySQL 'wrong password' case is skipped for no_password.

# no_password - Two profiles, no roles
TCP endpoint
TCP 'wrong password' case is skipped for no_password.
HTTP endpoint
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint
MySQL 'wrong password' case is skipped for no_password.

# no_password - Two profiles and two simple roles
TCP endpoint
TCP 'wrong password' case is skipped for no_password.
HTTP endpoint
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint
MySQL 'wrong password' case is skipped for no_password.

# plaintext_password - No profiles no roles
TCP endpoint
HTTP endpoint
MySQL endpoint

# plaintext_password - Two profiles, no roles
TCP endpoint
HTTP endpoint
MySQL endpoint

# plaintext_password - Two profiles and two simple roles
TCP endpoint
HTTP endpoint
MySQL endpoint

# sha256_password - No profiles no roles
TCP endpoint
HTTP endpoint
MySQL endpoint
MySQL 'successful login' case is skipped for sha256_password.

# sha256_password - Two profiles, no roles
TCP endpoint
HTTP endpoint
MySQL endpoint
MySQL 'successful login' case is skipped for sha256_password.

# sha256_password - Two profiles and two simple roles
TCP endpoint
HTTP endpoint
MySQL endpoint
MySQL 'successful login' case is skipped for sha256_password.

# double_sha1_password - No profiles no roles
TCP endpoint
HTTP endpoint
MySQL endpoint

# double_sha1_password - Two profiles, no roles
TCP endpoint
HTTP endpoint
MySQL endpoint

# double_sha1_password - Two profiles and two simple roles
TCP endpoint
HTTP endpoint
MySQL endpoint
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	TCP	LoginFailure	1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	TCP	Logout	1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	HTTP	LoginFailure	1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	HTTP	Logout	1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	MySQL	LoginFailure	many
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	MySQL	LoginSuccess	1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	MySQL	Logout	1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	TCP	LoginFailure	1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	TCP	Logout	1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	HTTP	LoginFailure	1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	HTTP	Logout	1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	MySQL	LoginFailure	many
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	MySQL	LoginSuccess	1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	MySQL	Logout	1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	TCP	LoginFailure	1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	TCP	Logout	1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	HTTP	LoginFailure	1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	HTTP	Logout	1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	MySQL	LoginFailure	many
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	MySQL	LoginSuccess	1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	MySQL	Logout	1
${BASE_USERNAME}_no_password_no_profiles_no_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_no_password_no_profiles_no_roles	TCP	Logout	1
${BASE_USERNAME}_no_password_no_profiles_no_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_no_password_no_profiles_no_roles	HTTP	Logout	1
${BASE_USERNAME}_no_password_no_profiles_no_roles	MySQL	LoginSuccess	1
${BASE_USERNAME}_no_password_no_profiles_no_roles	MySQL	Logout	1
${BASE_USERNAME}_no_password_two_profiles_no_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_no_password_two_profiles_no_roles	TCP	Logout	1
${BASE_USERNAME}_no_password_two_profiles_no_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_no_password_two_profiles_no_roles	HTTP	Logout	1
${BASE_USERNAME}_no_password_two_profiles_no_roles	MySQL	LoginSuccess	1
${BASE_USERNAME}_no_password_two_profiles_no_roles	MySQL	Logout	1
${BASE_USERNAME}_no_password_two_profiles_two_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_no_password_two_profiles_two_roles	TCP	Logout	1
${BASE_USERNAME}_no_password_two_profiles_two_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_no_password_two_profiles_two_roles	HTTP	Logout	1
${BASE_USERNAME}_no_password_two_profiles_two_roles	MySQL	LoginSuccess	1
${BASE_USERNAME}_no_password_two_profiles_two_roles	MySQL	Logout	1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	TCP	LoginFailure	1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	TCP	Logout	1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	HTTP	LoginFailure	1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	HTTP	Logout	1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	MySQL	LoginFailure	many
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	MySQL	LoginSuccess	1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	MySQL	Logout	1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	TCP	LoginFailure	1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	TCP	Logout	1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	HTTP	LoginFailure	1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	HTTP	Logout	1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	MySQL	LoginFailure	many
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	MySQL	LoginSuccess	1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	MySQL	Logout	1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	TCP	LoginFailure	1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	TCP	Logout	1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	HTTP	LoginFailure	1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	HTTP	Logout	1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	MySQL	LoginFailure	many
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	MySQL	LoginSuccess	1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	MySQL	Logout	1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles	TCP	LoginFailure	1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles	TCP	Logout	1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles	HTTP	LoginFailure	1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles	HTTP	Logout	1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles	MySQL	LoginFailure	many
${BASE_USERNAME}_sha256_password_two_profiles_no_roles	TCP	LoginFailure	1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles	TCP	Logout	1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles	HTTP	LoginFailure	1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles	HTTP	Logout	1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles	MySQL	LoginFailure	many
${BASE_USERNAME}_sha256_password_two_profiles_two_roles	TCP	LoginFailure	1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles	TCP	LoginSuccess	1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles	TCP	Logout	1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles	HTTP	LoginFailure	1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles	HTTP	LoginSuccess	1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles	HTTP	Logout	1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles	MySQL	LoginFailure	many
invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles	TCP	LoginFailure	1
invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles	HTTP	LoginFailure	1
invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles	MySQL	LoginFailure	many
invalid_session_log_test_xml_user	TCP	LoginFailure	1
invalid_session_log_test_xml_user	HTTP	LoginFailure	1
invalid_session_log_test_xml_user	MySQL	LoginFailure	many
session_log_test_xml_user	TCP	LoginSuccess	1
session_log_test_xml_user	TCP	Logout	1
session_log_test_xml_user	HTTP	LoginSuccess	1
session_log_test_xml_user	HTTP	Logout	1
session_log_test_xml_user	MySQL	LoginSuccess	1
session_log_test_xml_user	MySQL	Logout	1
370
tests/queries/0_stateless/01747_system_session_log_long.sh
Executable file
370
tests/queries/0_stateless/01747_system_session_log_long.sh
Executable file
@ -0,0 +1,370 @@
#!/usr/bin/env bash

##################################################################################################
# Verify that login, logout, and login failure events are properly stored in system.session_log
# when different `IDENTIFIED BY` clauses are used for a user.
#
# Make sure that system.session_log entries are non-empty and provide enough info on each event.
#
# Using multiple protocols:
# * native TCP protocol with CH client
# * HTTP with CURL
# * MySQL - CH server accesses itself via the mysql table function; the query typically fails (unrelated),
#   but auth should be performed properly.
# * PostgreSQL - CH server accesses itself via the postgresql table function (currently out of order).
# * gRPC - not done yet
#
# There is no way to control how many times a query (e.g. via the mysql table function) is retried,
# and hence the number of records in session_log varies. To mitigate this and simplify the final query,
# each auth_type is tested with a separate user. That way SELECT DISTINCT doesn't exclude log entries
# from different cases.
#
# All created users are added to ALL_USERNAMES and cleaned up later.
##################################################################################################
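
# Illustrative sketch only (not part of the original test flow; the user name here is hypothetical):
# a single login failure and the corresponding session_log check could be reproduced manually like
#
#   clickhouse-client --user some_missing_user --password wrong -q 'SELECT 1' || true
#   clickhouse-client -q "SELECT user, interface, type FROM system.session_log WHERE user = 'some_missing_user' ORDER BY event_time DESC LIMIT 5"
#
# The functions below automate the same idea for every auth_type and protocol.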

# To minimize the amount of error context sent on failed queries when talking to CH via the MySQL protocol.
export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

set -eu

# Since there is no way to clean up the system.session_log table,
# make sure that we can identify log entries from this test by a random user name.
readonly BASE_USERNAME="session_log_test_user_$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 32)"
readonly TMP_QUERY_FILE=$(mktemp /tmp/tmp_query.log.XXXXXX)
declare -a ALL_USERNAMES
ALL_USERNAMES+=("${BASE_USERNAME}")

function reportError()
{
    if [ -s "${TMP_QUERY_FILE}" ] ;
    then
        echo "!!!!!! ERROR ${CLICKHOUSE_CLIENT} ${*} --queries-file ${TMP_QUERY_FILE}" >&2
        echo "query:" >&2
        cat "${TMP_QUERY_FILE}" >&2
        rm -f "${TMP_QUERY_FILE}"
    fi
}

function executeQuery()
{
    ## Execute query (provided via heredoc or herestring) and print query in case of error.
    trap 'rm -f ${TMP_QUERY_FILE}; trap - ERR RETURN' RETURN
    # Since we want to report with current values supplied to this function call
    # shellcheck disable=SC2064
    trap "reportError $*" ERR

    cat - > "${TMP_QUERY_FILE}"
    ${CLICKHOUSE_CLIENT} "${@}" --queries-file "${TMP_QUERY_FILE}"
}

function cleanup()
{
    local usernames_to_cleanup
    usernames_to_cleanup="$(IFS=, ; echo "${ALL_USERNAMES[*]}")"
    executeQuery <<EOF
DROP USER IF EXISTS ${usernames_to_cleanup};
DROP SETTINGS PROFILE IF EXISTS session_log_test_profile;
DROP SETTINGS PROFILE IF EXISTS session_log_test_profile2;
DROP ROLE IF EXISTS session_log_test_role;
DROP ROLE IF EXISTS session_log_test_role2;
EOF
}

cleanup
trap "cleanup" EXIT

function executeQueryExpectError()
{
    cat - > "${TMP_QUERY_FILE}"
    ! ${CLICKHOUSE_CLIENT} "${@}" --multiquery --queries-file "${TMP_QUERY_FILE}" 2>&1 | tee -a ${TMP_QUERY_FILE}
}

function createUser()
{
    local auth_type="${1}"
    local username="${2}"
    local password="${3}"

    if [[ "${auth_type}" == "no_password" ]]
    then
        password=""

    elif [[ "${auth_type}" == "plaintext_password" ]]
    then
        password="${password}"

    elif [[ "${auth_type}" == "sha256_password" ]]
    then
        password="$(executeQuery <<< "SELECT hex(SHA256('${password}'))")"

    elif [[ "${auth_type}" == "double_sha1_password" ]]
    then
        password="$(executeQuery <<< "SELECT hex(SHA1(SHA1('${password}')))")"

    else
        echo "Invalid auth_type: ${auth_type}" >&2
        exit 1
    fi

    export RESULTING_PASS="${password}"
    if [ -n "${password}" ]
    then
        password="BY '${password}'"
    fi

    executeQuery <<EOF
DROP USER IF EXISTS '${username}';
CREATE USER '${username}' IDENTIFIED WITH ${auth_type} ${password};
EOF
    ALL_USERNAMES+=("${username}")
}
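
# For illustration only (hypothetical user names; mirrors what createUser above effectively runs):
#
#   CREATE USER 'u_nopass' IDENTIFIED WITH no_password;
#   CREATE USER 'u_plain' IDENTIFIED WITH plaintext_password BY 'password';
#
# For sha256_password and double_sha1_password the hex digest computed above
# (hex(SHA256(...)) / hex(SHA1(SHA1(...)))) is substituted as the BY '...' argument.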

function testTCP()
{
    echo "TCP endpoint"

    local auth_type="${1}"
    local username="${2}"
    local password="${3}"

    # Login\Logout
    if [[ -n "${password}" ]]
    then
        executeQuery -u "${username}" --password "${password}" <<< "SELECT 1 FORMAT Null;"
    else
        executeQuery -u "${username}" <<< "SELECT 1 FORMAT Null;"
    fi

    # Wrong username
    executeQueryExpectError -u "invalid_${username}" \
        <<< "SELECT 1 Format Null" \
        | grep -Eq "Code: 516. .+ invalid_${username}: Authentication failed: password is incorrect or there is no user with such name"

    # Wrong password
    if [[ "${auth_type}" == "no_password" ]]
    then
        echo "TCP 'wrong password' case is skipped for ${auth_type}."
    else
        # A user with `no_password` can log in with any password, so it makes sense to skip the wrong-password case for it.
        executeQueryExpectError -u "${username}" --password "invalid_${password}" \
            <<< "SELECT 1 Format Null" \
            | grep -Eq "Code: 516. .+ ${username}: Authentication failed: password is incorrect or there is no user with such name"
    fi
}

function testHTTPWithURL()
{
    local auth_type="${1}"
    local username="${2}"
    local password="${3}"
    local clickhouse_url="${4}"

    # Login\Logout
    ${CLICKHOUSE_CURL} -sS "${clickhouse_url}" \
        -H "X-ClickHouse-User: ${username}" -H "X-ClickHouse-Key: ${password}" \
        -d 'SELECT 1 Format Null'

    # Wrong username
    ${CLICKHOUSE_CURL} -sS "${clickhouse_url}" \
        -H "X-ClickHouse-User: invalid_${username}" -H "X-ClickHouse-Key: ${password}" \
        -d 'SELECT 1 Format Null' \
        | grep -Eq "Code: 516. .+ invalid_${username}: Authentication failed: password is incorrect or there is no user with such name"

    # Wrong password
    if [[ "${auth_type}" == "no_password" ]]
    then
        echo "HTTP 'wrong password' case is skipped for ${auth_type}."
    else
        # A user with `no_password` can log in with any password, so it makes sense to skip the wrong-password case for it.
        ${CLICKHOUSE_CURL} -sS "${clickhouse_url}" \
            -H "X-ClickHouse-User: ${username}" -H "X-ClickHouse-Key: invalid_${password}" \
            -d 'SELECT 1 Format Null' \
            | grep -Eq "Code: 516. .+ ${username}: Authentication failed: password is incorrect or there is no user with such name"
    fi
}

function testHTTP()
{
    echo "HTTP endpoint"
    testHTTPWithURL "${1}" "${2}" "${3}" "${CLICKHOUSE_URL}"
}

function testHTTPNamedSession()
{
    # echo "HTTP endpoint with named session"
    local HTTP_SESSION_ID
    HTTP_SESSION_ID="session_id_$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 32)"
    if [ -v CLICKHOUSE_URL_PARAMS ]
    then
        CLICKHOUSE_URL_WITH_SESSION_ID="${CLICKHOUSE_URL}&session_id=${HTTP_SESSION_ID}"
    else
        CLICKHOUSE_URL_WITH_SESSION_ID="${CLICKHOUSE_URL}?session_id=${HTTP_SESSION_ID}"
    fi

    testHTTPWithURL "${1}" "${2}" "${3}" "${CLICKHOUSE_URL_WITH_SESSION_ID}"
}

function testMySQL()
{
    echo "MySQL endpoint"
    local auth_type="${1}"
    local username="${2}"
    local password="${3}"

    trap "reportError" ERR

    # echo 'Login\Logout'
    # sha256 auth is done differently for MySQL, so skip it for now.
    if [[ "${auth_type}" == "sha256_password" ]]
    then
        echo "MySQL 'successful login' case is skipped for ${auth_type}."
    else
        # CH is able to log into itself via the MySQL protocol, but the query fails.
        executeQueryExpectError \
            <<< "SELECT 1 FROM mysql('127.0.0.1:9004', 'system', 'numbers', '${username}', '${password}') LIMIT 1 \
            FORMAT Null" \
            | grep -Eq "Code: 1000\. DB::Exception: .*"
    fi

    # echo 'Wrong username'
    executeQueryExpectError \
        <<< "SELECT 1 FROM mysql('127.0.0.1:9004', 'system', 'numbers', 'invalid_${username}', '${password}') LIMIT 1 \
        FORMAT Null" \
        | grep -Eq "Code: 1000\. DB::Exception: .* invalid_${username}"

    # echo 'Wrong password'
    if [[ "${auth_type}" == "no_password" ]]
    then
        echo "MySQL 'wrong password' case is skipped for ${auth_type}."
    else
        # A user with `no_password` can log in with any password, so it makes sense to skip the wrong-password case for it.
        executeQueryExpectError \
            <<< "SELECT 1 FROM mysql('127.0.0.1:9004', 'system', 'numbers', '${username}', 'invalid_${password}') LIMIT 1 \
            FORMAT Null" \
            | grep -Eq "Code: 1000\. DB::Exception: .* ${username}"
    fi
}
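# Note (explanatory, not part of the original file): 127.0.0.1:9004 in the function above is
# ClickHouse's MySQL-compatibility port (the default mysql_port), so the server effectively
# logs into itself over the MySQL protocol.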

# function testPostgreSQL()
# {
#     local auth_type="${1}"
#
#     # Right now it is impossible to log into CH via the PostgreSQL protocol without a password.
#     if [[ "${auth_type}" == "no_password" ]]
#     then
#         return 0
#     fi
#
#     # Login\Logout
#     # CH is able to log into itself via the PostgreSQL protocol, but the query fails.
#     executeQueryExpectError \
#         <<< "SELECT 1 FROM postgresql('localhost:9005', 'system', 'numbers', '${username}', '${password}') LIMIT 1 FORMAT Null" \
#         | grep -Eq "Code: 1001. DB::Exception: .* pqxx::broken_connection: .*"
#
#     # Wrong username
#     executeQueryExpectError \
#         <<< "SELECT 1 FROM postgresql('localhost:9005', 'system', 'numbers', 'invalid_${username}', '${password}') LIMIT 1 FORMAT Null" \
#         | grep -Eq "Code: 1001. DB::Exception: .* pqxx::broken_connection: .*"
#
#     # Wrong password
#     executeQueryExpectError \
#         <<< "SELECT 1 FROM postgresql('localhost:9005', 'system', 'numbers', '${username}', 'invalid_${password}') LIMIT 1 FORMAT Null" \
#         | grep -Eq "Code: 1001. DB::Exception: .* pqxx::broken_connection: .*"
# }

function runEndpointTests()
{
    local case_name="${1}"
    shift 1

    local auth_type="${1}"
    local username="${2}"
    local password="${3}"
    local setup_queries="${4:-}"

    echo
    echo "# ${auth_type} - ${case_name} "

    ${CLICKHOUSE_CLIENT} -q "SET log_comment='${username} ${auth_type} - ${case_name}';"
    if [[ -n "${setup_queries}" ]]
    then
        # echo "Executing setup queries: ${setup_queries}"
        echo "${setup_queries}" | executeQuery --multiquery
    fi

    testTCP "${auth_type}" "${username}" "${password}"
    testHTTP "${auth_type}" "${username}" "${password}"

    # testHTTPNamedSession "${auth_type}" "${username}" "${password}"
    testMySQL "${auth_type}" "${username}" "${password}"
    # testPostgreSQL "${auth_type}" "${username}" "${password}"
}

function testAsUserIdentifiedBy()
{
    local auth_type="${1}"
    local password="password"

    cleanup

    local username="${BASE_USERNAME}_${auth_type}_no_profiles_no_roles"
    createUser "${auth_type}" "${username}" "${password}"
    runEndpointTests "No profiles no roles" "${auth_type}" "${username}" "${RESULTING_PASS}"

    username="${BASE_USERNAME}_${auth_type}_two_profiles_no_roles"
    createUser "${auth_type}" "${username}" "${password}"
    runEndpointTests "Two profiles, no roles" "${auth_type}" "${username}" "${RESULTING_PASS}" "\
DROP SETTINGS PROFILE IF EXISTS session_log_test_profile;
DROP SETTINGS PROFILE IF EXISTS session_log_test_profile2;
CREATE PROFILE session_log_test_profile SETTINGS max_memory_usage=10000000 TO ${username};
CREATE PROFILE session_log_test_profile2 SETTINGS max_rows_to_transfer=1000 TO ${username};
"

    username="${BASE_USERNAME}_${auth_type}_two_profiles_two_roles"
    createUser "${auth_type}" "${username}" "${password}"
    runEndpointTests "Two profiles and two simple roles" "${auth_type}" "${username}" "${RESULTING_PASS}" "\
CREATE ROLE session_log_test_role;
GRANT session_log_test_role TO ${username};
CREATE ROLE session_log_test_role2 SETTINGS max_columns_to_read=100;
GRANT session_log_test_role2 TO ${username};
SET DEFAULT ROLE session_log_test_role, session_log_test_role2 TO ${username};
"
}

# To cut off previous runs.
readonly start_time="$(executeQuery <<< 'SELECT now64(6);')"

# Special case: user and profile are both defined in XML
runEndpointTests "User with profile from XML" "no_password" "session_log_test_xml_user" ''

testAsUserIdentifiedBy "no_password"
testAsUserIdentifiedBy "plaintext_password"
testAsUserIdentifiedBy "sha256_password"
testAsUserIdentifiedBy "double_sha1_password"

executeQuery --multiquery <<EOF
SYSTEM FLUSH LOGS;

WITH
    now64(6) as n,
    toDateTime64('${start_time}', 3) as test_start_time
SELECT
    replaceAll(user, '${BASE_USERNAME}', '\${BASE_USERNAME}') as user_name,
    interface,
    type,
    if(count(*) > 1, 'many', toString(count(*))) -- do not rely on the count value since MySQL does an arbitrary number of retries
FROM
    system.session_log
WHERE
    (user LIKE '%session_log_test_xml_user%' OR user LIKE '%${BASE_USERNAME}%')
    AND
    event_time_microseconds >= test_start_time
GROUP BY
    user_name, interface, type
ORDER BY
    user_name, interface, type;
EOF
@ -4,3 +4,5 @@
7 107
8 108
9 109
1970-01-01 1 one
1970-01-01 3 three
@ -8,3 +8,18 @@ set max_rows_to_read = 5;
select * from test1 where i not in (1,2,3,4,5) order by i;

drop table test1;

drop table if exists t1;
drop table if exists t2;

create table t1 (date Date, a Float64, b String) Engine=MergeTree ORDER BY date;
create table t2 (date Date, a Float64, b String) Engine=MergeTree ORDER BY date;

insert into t1(a, b) values (1, 'one'), (2, 'two');
insert into t2(a, b) values (2, 'two'), (3, 'three');

select date, a, b from t1 where (date, a, b) NOT IN (select date,a,b from t2);
select date, a, b from t2 where (date, a, b) NOT IN (select date,a,b from t1);

drop table t1;
drop table t2;
@ -0,0 +1 @@
1
47
tests/queries/0_stateless/02015_shard_crash_clang_12_build.sh
Executable file
@ -0,0 +1,47 @@
#!/usr/bin/env bash

# This test reproduces a crash in case of insufficient coroutine stack size

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh


$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS local"
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS distributed"

$CLICKHOUSE_CLIENT --query "CREATE TABLE local (x UInt8) ENGINE = Memory;"
$CLICKHOUSE_CLIENT --query "CREATE TABLE distributed AS local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), local, x);"

$CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "INSERT INTO distributed SELECT number FROM numbers(256);"
$CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "SYSTEM FLUSH DISTRIBUTED distributed;"

function select_thread()
{
    while true; do
        $CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "SELECT count() FROM local" >/dev/null
        $CLICKHOUSE_CLIENT --insert_distributed_sync=0 --network_compression_method='zstd' --query "SELECT count() FROM distributed" >/dev/null
    done
}

export -f select_thread;
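# Note (explanatory): `export -f` makes select_thread visible to the child bash processes
# started by `timeout ... bash -c select_thread` below; without it the workers could not call the function.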

TIMEOUT=30

timeout $TIMEOUT bash -c select_thread 2> /dev/null &
timeout $TIMEOUT bash -c select_thread 2> /dev/null &
timeout $TIMEOUT bash -c select_thread 2> /dev/null &
timeout $TIMEOUT bash -c select_thread 2> /dev/null &
timeout $TIMEOUT bash -c select_thread 2> /dev/null &
timeout $TIMEOUT bash -c select_thread 2> /dev/null &
timeout $TIMEOUT bash -c select_thread 2> /dev/null &
timeout $TIMEOUT bash -c select_thread 2> /dev/null &
timeout $TIMEOUT bash -c select_thread 2> /dev/null &
timeout $TIMEOUT bash -c select_thread 2> /dev/null &

wait

$CLICKHOUSE_CLIENT --query "SELECT 1"

$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS local"
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS distributed"
@ -1 +0,0 @@
02015_db materialized_view 02015_db view_source_tb Materialized
@ -1,14 +0,0 @@
DROP DATABASE IF EXISTS 02015_db;
CREATE DATABASE IF NOT EXISTS 02015_db;

DROP TABLE IF EXISTS 02015_db.view_source_tb;
CREATE TABLE IF NOT EXISTS 02015_db.view_source_tb (a UInt8, s String) ENGINE = MergeTree() ORDER BY a;

DROP TABLE IF EXISTS 02015_db.materialized_view;
CREATE MATERIALIZED VIEW IF NOT EXISTS 02015_db.materialized_view ENGINE = ReplacingMergeTree() ORDER BY a AS SELECT * FROM 02015_db.view_source_tb;

SELECT * FROM system.views WHERE database='02015_db' and name = 'materialized_view';

DROP TABLE IF EXISTS 02015_db.materialized_view;
DROP TABLE IF EXISTS 02015_db.view_source_tb;
DROP DATABASE IF EXISTS 02015_db;
@ -0,0 +1,12 @@
drop table if exists t;
drop table if exists td1;
drop table if exists td2;
drop table if exists td3;
create table t (val UInt32) engine = MergeTree order by val;
create table td1 engine = Distributed(test_shard_localhost, currentDatabase(), 't') as t;
create table td2 engine = Distributed(test_shard_localhost, currentDatabase(), 't', xxHash32(val), default) as t;
create table td3 engine = Distributed(test_shard_localhost, currentDatabase(), 't', xxHash32(val), 'default') as t;
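-- Note (explanatory): td2 and td3 differ only in how the policy-name argument of Distributed
-- is written (bare identifier `default` vs string literal 'default'); both forms are expected to be accepted.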
drop table if exists t;
drop table if exists td1;
drop table if exists td2;
drop table if exists td3;
@ -466,7 +466,7 @@
"polygon_dicts", // they use an explicitly specified database
"01658_read_file_to_stringcolumn",
"01721_engine_file_truncate_on_insert", // It's ok to execute in parallel but not several instances of the same test.
"01702_system_query_log", // It's ok to execute in parallel with oter tests but not several instances of the same test.
"01702_system_query_log", // It's ok to execute in parallel but not several instances of the same test.
"01748_dictionary_table_dot", // creates database
"00950_dict_get",
"01615_random_one_shard_insertion",
@ -513,6 +513,6 @@
"01530_drop_database_atomic_sync", /// creates database
"02001_add_default_database_to_system_users", ///create user
"02002_row_level_filter_bug", ///create user
"02015_system_views"
"01747_system_session_log_long" // Reads from system.session_log and can't be run in parallel with any other test (since almost any other test writes to session_log)
]
}