commit 797f858bc6
Merge branch 'master' of github.com:ClickHouse/ClickHouse into materialized-postgresql
@@ -203,3 +203,5 @@ CheckOptions:
     value: CamelCase
   - key: readability-identifier-naming.UsingCase
     value: CamelCase
+  - key: modernize-loop-convert.UseCxx20ReverseRanges
+    value: false
@@ -165,6 +165,13 @@ if (COMPILER_CLANG)
     if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
         set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges")
     endif ()
+
+    if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 12.0.0)
+        if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
+            set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing")
+            set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
+        endif()
+    endif()
 endif ()

 # If turned `ON`, assumes the user has either the system GTest library or the bundled one.
@@ -42,6 +42,7 @@ namespace
     } while (false)


+#define LOG_TEST(logger, ...) LOG_IMPL(logger, DB::LogsLevel::test, Poco::Message::PRIO_TEST, __VA_ARGS__)
 #define LOG_TRACE(logger, ...) LOG_IMPL(logger, DB::LogsLevel::trace, Poco::Message::PRIO_TRACE, __VA_ARGS__)
 #define LOG_DEBUG(logger, ...) LOG_IMPL(logger, DB::LogsLevel::debug, Poco::Message::PRIO_DEBUG, __VA_ARGS__)
 #define LOG_INFO(logger, ...) LOG_IMPL(logger, DB::LogsLevel::information, Poco::Message::PRIO_INFORMATION, __VA_ARGS__)
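The added `LOG_TEST` macro goes through the same `LOG_IMPL` dispatch as the existing macros, but at the new `PRIO_TEST` priority, which is more verbose than `trace`; it stays silent unless a logger is explicitly set to the `test` level. A minimal usage sketch (logger name and message are illustrative):

```cpp
#include <common/logger_useful.h>

void traceInternalState()
{
    auto * log = &Poco::Logger::get("MyComponent"); /// illustrative name

    /// Compiles into a level check plus an fmt-style call;
    /// emitted only when this logger's level is set to "test".
    LOG_TEST(log, "internal state: {}", 42);
}
```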
@@ -7,10 +7,22 @@
 #endif

 #include <mysqlxx/Pool.h>

 #include <common/sleep.h>

 #include <Poco/Util/LayeredConfiguration.h>
 #include <ctime>

+namespace
+{
+
+inline uint64_t clock_gettime_ns(clockid_t clock_type = CLOCK_MONOTONIC)
+{
+    struct timespec ts;
+    clock_gettime(clock_type, &ts);
+    return uint64_t(ts.tv_sec * 1000000000LL + ts.tv_nsec);
+}
+
+}
+

 namespace mysqlxx
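`clock_gettime_ns` reads the monotonic clock, so the deadline computed from it in the new `Pool::get` below is immune to wall-clock adjustments. A hedged sketch of the conversion from the seconds-based timeout to an absolute deadline (the function name is illustrative; the sentinel handling mirrors the diff):

```cpp
#include <cstdint>

/// Sketch: turn a relative timeout in seconds into an absolute monotonic
/// deadline in nanoseconds. 0 and UINT64_MAX are the sentinels used by
/// Pool::get ("fail immediately" / "wait forever"), so no deadline is kept.
uint64_t make_deadline_ns(uint64_t now_ns, uint64_t wait_timeout_seconds)
{
    if (wait_timeout_seconds == 0 || wait_timeout_seconds == UINT64_MAX)
        return 0;
    return now_ns + wait_timeout_seconds * 1'000'000'000ULL;
}
```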
@@ -124,10 +136,15 @@ Pool::~Pool()
 }


-Pool::Entry Pool::get()
+Pool::Entry Pool::get(uint64_t wait_timeout)
 {
     std::unique_lock<std::mutex> lock(mutex);

+    uint64_t deadline = 0;
+    /// UINT64_MAX -- wait indefinitely
+    if (wait_timeout && wait_timeout != UINT64_MAX)
+        deadline = clock_gettime_ns() + wait_timeout * 1'000'000'000;
+
     initialize();
     for (;;)
     {
@@ -153,6 +170,12 @@ Pool::Entry Pool::get()
             logger.trace("(%s): Unable to create a new connection: Max number of connections has been reached.", getDescription());
         }

+        if (!wait_timeout)
+            throw Poco::Exception("mysqlxx::Pool is full (wait is disabled, see connection_wait_timeout setting)");
+
+        if (deadline && clock_gettime_ns() >= deadline)
+            throw Poco::Exception("mysqlxx::Pool is full (connection_wait_timeout is exceeded)");
+
         lock.unlock();
         logger.trace("(%s): Sleeping for %d seconds.", getDescription(), MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
         sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
@@ -189,7 +189,7 @@ public:
     ~Pool();

     /// Allocates connection.
-    Entry get();
+    Entry get(uint64_t wait_timeout);

     /// Allocates connection.
     /// If database is not accessible, returns empty Entry object.
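Callers now pick the blocking behaviour per call: `0` fails immediately when the pool is exhausted, `UINT64_MAX` waits without bound, and anything else is a deadline in seconds. A hedged usage sketch (connection parameters are placeholders):

```cpp
#include <mysqlxx/Pool.h>
#include <Poco/Exception.h>
#include <iostream>

int main()
{
    mysqlxx::Pool pool("db", "127.0.0.1", "user", "password"); /// placeholders

    try
    {
        /// Wait at most 5 seconds for a free connection.
        mysqlxx::Pool::Entry entry = pool.get(/* wait_timeout = */ 5);
        /// ... run queries through `entry` ...
    }
    catch (const Poco::Exception & e)
    {
        std::cerr << e.displayText() << '\n'; /// "mysqlxx::Pool is full (...)"
    }
}
```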
@@ -21,8 +21,9 @@ PoolWithFailover::PoolWithFailover(
     const unsigned max_connections_,
     const size_t max_tries_)
     : max_tries(max_tries_)
+    , shareable(config_.getBool(config_name_ + ".share_connection", false))
+    , wait_timeout(UINT64_MAX)
 {
-    shareable = config_.getBool(config_name_ + ".share_connection", false);
     if (config_.has(config_name_ + ".replica"))
     {
         Poco::Util::AbstractConfiguration::Keys replica_keys;
@@ -80,9 +81,11 @@ PoolWithFailover::PoolWithFailover(
     const std::string & password,
     unsigned default_connections_,
     unsigned max_connections_,
-    size_t max_tries_)
+    size_t max_tries_,
+    uint64_t wait_timeout_)
     : max_tries(max_tries_)
     , shareable(false)
+    , wait_timeout(wait_timeout_)
 {
     /// Replicas have the same priority, but traversed replicas are moved to the end of the queue.
     for (const auto & [host, port] : addresses)
@@ -101,6 +104,7 @@ PoolWithFailover::PoolWithFailover(
 PoolWithFailover::PoolWithFailover(const PoolWithFailover & other)
     : max_tries{other.max_tries}
     , shareable{other.shareable}
+    , wait_timeout(other.wait_timeout)
 {
     if (shareable)
     {
@@ -140,7 +144,7 @@ PoolWithFailover::Entry PoolWithFailover::get()

             try
             {
-                Entry entry = shareable ? pool->get() : pool->tryGet();
+                Entry entry = shareable ? pool->get(wait_timeout) : pool->tryGet();

                 if (!entry.isNull())
                 {
@@ -172,7 +176,7 @@ PoolWithFailover::Entry PoolWithFailover::get()
     if (full_pool)
     {
         app.logger().error("All connections failed, trying to wait on a full pool " + (*full_pool)->getDescription());
-        return (*full_pool)->get();
+        return (*full_pool)->get(wait_timeout);
     }

     std::stringstream message;
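The failover pool threads the timeout through to every replica's `Pool::get`, while still probing replicas non-blockingly (`tryGet`) first and only waiting on a full pool as a last resort. A hedged construction sketch (addresses are placeholders; `RemoteDescription` is assumed to be the vector-of-host/port alias taken by this constructor):

```cpp
#include <mysqlxx/PoolWithFailover.h>

int main()
{
    mysqlxx::PoolWithFailover::RemoteDescription addresses = {
        {"mysql-1.example.com", 3306}, /// placeholder replicas,
        {"mysql-2.example.com", 3306}, /// same priority, rotated on failure
    };

    mysqlxx::PoolWithFailover pool(
        "db", addresses, "user", "password",
        /* default_connections_ = */ 1,
        /* max_connections_ = */ 16,
        /* max_tries_ = */ 3,
        /* wait_timeout_ = */ 5); /// seconds; UINT64_MAX waits forever

    auto entry = pool.get(); /// fails over between replicas internally
}
```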
@@ -80,6 +80,8 @@ namespace mysqlxx
         std::mutex mutex;
         /// Can the Pool be shared
         bool shareable;
+        /// Timeout for waiting free connection.
+        uint64_t wait_timeout = 0;

     public:
         using Entry = Pool::Entry;
@@ -96,6 +98,7 @@ namespace mysqlxx
          * default_connections   Number of connection in pool to each replica at start.
          * max_connections       Maximum number of connections in pool to each replica.
          * max_tries_            Max number of connection tries.
+         * wait_timeout_         Timeout for waiting free connection.
          */
         PoolWithFailover(
             const std::string & config_name_,
@@ -117,7 +120,8 @@ namespace mysqlxx
             const std::string & password,
             unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
             unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
-            size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
+            size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES,
+            uint64_t wait_timeout_ = UINT64_MAX);

         PoolWithFailover(const PoolWithFailover & other);
@@ -6,7 +6,7 @@ if (ENABLE_CLANG_TIDY)
         message(FATAL_ERROR "clang-tidy requires CMake version at least 3.6.")
     endif()

-    find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8")
+    find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-12" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8")

     if (CLANG_TIDY_PATH)
         message(STATUS
@@ -10,7 +10,7 @@ set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it
 set (CMAKE_AR "/usr/bin/ar" CACHE FILEPATH "" FORCE)
 set (CMAKE_RANLIB "/usr/bin/ranlib" CACHE FILEPATH "" FORCE)

-set (LINKER_NAME "lld" CACHE STRING "" FORCE)
+set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)

 set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
 set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
@@ -13,7 +13,7 @@ set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_D
 set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
 set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")

-set (LINKER_NAME "lld" CACHE STRING "" FORCE)
+set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)

 set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
 set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
@@ -79,8 +79,9 @@ endif ()

 if (LINKER_NAME)
     if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0))
-        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LINKER_NAME}")
-        set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LINKER_NAME}")
+        find_program (LLD_PATH NAMES ${LINKER_NAME})
+        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LLD_PATH}")
+        set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LLD_PATH}")
     else ()
         set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
         set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
contrib/poco (vendored submodule)
@@ -1 +1 @@
-Subproject commit b687c17bc2be36b6333a1d7cfffbf9eab65509a9
+Subproject commit 46c80daf1b015aa10474ce82e3d24b578c6ae422
@@ -1,6 +1,6 @@
 FROM ubuntu:20.04

-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
@@ -4,7 +4,7 @@ set -e
 #ccache -s # uncomment to display CCache statistics
 mkdir -p /server/build_docker
 cd /server/build_docker
-cmake -G Ninja /server "-DCMAKE_C_COMPILER=$(command -v clang-11)" "-DCMAKE_CXX_COMPILER=$(command -v clang++-11)"
+cmake -G Ninja /server "-DCMAKE_C_COMPILER=$(command -v clang-12)" "-DCMAKE_CXX_COMPILER=$(command -v clang++-12)"

 # Set the number of build jobs to the half of number of virtual CPU cores (rounded up).
 # By default, ninja use all virtual CPU cores, that leads to very high memory consumption without much improvement in build time.
@@ -1,7 +1,7 @@
 # docker build -t yandex/clickhouse-binary-builder .
 FROM ubuntu:20.04

-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
@@ -39,8 +39,6 @@ RUN apt-get update \
         bash \
         build-essential \
         ccache \
-        clang-11 \
-        clang-tidy-11 \
         cmake \
         curl \
         g++-10 \
@@ -50,9 +48,13 @@ RUN apt-get update \
         gperf \
-        libicu-dev \
-        libreadline-dev \
-        lld-11 \
-        llvm-11 \
-        llvm-11-dev \
+        clang-12 \
+        clang-tidy-12 \
+        lld-12 \
+        llvm-12 \
+        llvm-12-dev \
+        libicu-dev \
+        libreadline-dev \
         moreutils \
         ninja-build \
         pigz \
@@ -4,7 +4,6 @@ set -x -e

 mkdir -p build/cmake/toolchain/darwin-x86_64
 tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
-
 ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64

 mkdir -p build/cmake/toolchain/linux-aarch64
@@ -23,6 +22,7 @@ cd build/build_docker
 rm -f CMakeCache.txt
 # Read cmake arguments into array (possibly empty)
 read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
+env
 cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..

 ccache --show-config ||:
@@ -1,7 +1,7 @@
 # docker build -t yandex/clickhouse-deb-builder .
 FROM ubuntu:20.04

-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
@@ -37,17 +37,17 @@ RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
 RUN apt-get update \
     && apt-get install \
         alien \
-        clang-11 \
-        clang-tidy-11 \
+        clang-12 \
+        clang-tidy-12 \
         cmake \
         debhelper \
         devscripts \
         gdb \
         git \
         gperf \
-        lld-11 \
-        llvm-11 \
-        llvm-11-dev \
+        lld-12 \
+        llvm-12 \
+        llvm-12-dev \
         moreutils \
         ninja-build \
         perl \
@@ -75,7 +75,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
     # Explicitly use LLD with Clang by default.
     # Don't force linker for cross-compilation.
     if is_clang and not is_cross_compile:
-        cmake_flags.append("-DLINKER_NAME=lld")
+        cmake_flags.append("-DLINKER_NAME=ld.lld")

     if is_cross_darwin:
         cc = compiler[:-len(DARWIN_SUFFIX)]
@@ -204,7 +204,8 @@ if __name__ == "__main__":
     parser.add_argument("--output-dir", required=True)
     parser.add_argument("--build-type", choices=("debug", ""), default="")
     parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64",
-                                               "clang-11-freebsd", "gcc-10"), default="clang-11")
+                                               "clang-12", "clang-12-darwin", "clang-12-darwin-aarch64", "clang-12-aarch64",
+                                               "clang-11-freebsd", "clang-12-freebsd", "gcc-10"), default="clang-12")
     parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="")
     parser.add_argument("--unbundled", action="store_true")
    parser.add_argument("--split-binary", action="store_true")
@@ -1,7 +1,7 @@
 # docker build -t yandex/clickhouse-test-base .
 FROM ubuntu:20.04

-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
@@ -11,7 +11,7 @@ RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-9 libl
 # https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b
 RUN git clone https://github.com/ClickHouse-Extras/woboq_codebrowser

-RUN cd woboq_codebrowser && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-9 -DCMAKE_C_COMPILER=clang-9 && make -j
+RUN cd woboq_codebrowser && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-12 -DCMAKE_C_COMPILER=clang-12 && make -j

 ENV CODEGEN=/woboq_codebrowser/generator/codebrowser_generator
 ENV CODEINDEX=/woboq_codebrowser/indexgenerator/codebrowser_indexgenerator
@@ -24,7 +24,7 @@ ENV SHA=nosha
 ENV DATA="data"

 CMD mkdir -p $BUILD_DIRECTORY && cd $BUILD_DIRECTORY && \
-    cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-11 -DCMAKE_C_COMPILER=/usr/bin/clang-11 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \
+    cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-12 -DCMAKE_C_COMPILER=/usr/bin/clang-12 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \
     mkdir -p $HTML_RESULT_DIRECTORY && \
     $CODEGEN -b $BUILD_DIRECTORY -a -o $HTML_RESULT_DIRECTORY -p ClickHouse:$SOURCE_DIRECTORY:$SHA -d $DATA | ts '%Y-%m-%d %H:%M:%S' && \
     cp -r $STATIC_DATA $HTML_RESULT_DIRECTORY/ &&\
@@ -1,7 +1,7 @@
 # docker build -t yandex/clickhouse-fasttest .
 FROM ubuntu:20.04

-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
@@ -9,7 +9,7 @@ trap 'kill $(jobs -pr) ||:' EXIT
 stage=${stage:-}

 # Compiler version, normally set by Dockerfile
-export LLVM_VERSION=${LLVM_VERSION:-11}
+export LLVM_VERSION=${LLVM_VERSION:-12}

 # A variable to pass additional flags to CMake.
 # Here we explicitly default it to nothing so that bash doesn't complain about
@@ -401,6 +401,9 @@ function run_tests

         # depends on Go
         02013_zlib_read_after_eof
+
+        # Accesses CH via mysql table function (which is unavailable)
+        01747_system_session_log_long
     )

     time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \
@@ -12,7 +12,7 @@ stage=${stage:-}
 script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 echo "$script_dir"
 repo_dir=ch
-BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-11_debug_none_bundled_unsplitted_disable_False_binary"}
+BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-12_debug_none_bundled_unsplitted_disable_False_binary"}

 function clone
 {
@@ -2,7 +2,7 @@
 set -euo pipefail


-CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"}
+CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-12_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"}
 CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}
@@ -28,7 +28,7 @@ RUN apt-get update --yes \
 ENV PKG_VERSION="pvs-studio-latest"

 RUN set -x \
-    && export PUBKEY_HASHSUM="686e5eb8b3c543a5c54442c39ec876b6c2d912fe8a729099e600017ae53c877dda3368fe38ed7a66024fe26df6b5892a" \
+    && export PUBKEY_HASHSUM="ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516" \
     && wget -nv https://files.viva64.com/etc/pubkey.txt -O /tmp/pubkey.txt \
     && echo "${PUBKEY_HASHSUM} /tmp/pubkey.txt" | sha384sum -c \
     && apt-key add /tmp/pubkey.txt \
@@ -38,7 +38,7 @@ RUN set -x \
     && dpkg -i "${PKG_VERSION}.deb"

 CMD echo "Running PVS version $PKG_VERSION" && cd /repo_folder && pvs-studio-analyzer credentials $LICENCE_NAME $LICENCE_KEY -o ./licence.lic \
-    && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF \
+    && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF -DCMAKE_C_COMPILER=clang-12 -DCMAKE_CXX_COMPILER=clang\+\+-12 \
     && ninja re2_st clickhouse_grpc_protos \
     && pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j 4 -l ./licence.lic; \
     cp /repo_folder/pvs-studio.log /test_output; \
@@ -23,7 +23,7 @@ $ sudo apt-get install git cmake python ninja-build

 Or cmake3 instead of cmake on older systems.

-### Install clang-11 (recommended) {#install-clang-11}
+### Install clang-12 (recommended) {#install-clang-12}

 On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)):

@@ -33,11 +33,11 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"

 For other Linux distributions, check the availability of the [prebuilt packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html).

-#### Use clang-11 for Builds
+#### Use clang-12 for Builds

 ``` bash
-$ export CC=clang-11
-$ export CXX=clang++-11
+$ export CC=clang-12
+$ export CXX=clang++-12
 ```

 Gcc can also be used, though it is discouraged.
@@ -45,6 +45,10 @@ DETACH TABLE postgres_database.table_to_remove;

 - [materialized_postgresql_allow_automatic_update](../../operations/settings/settings.md#materialized-postgresql-allow-automatic-update)

+- [materialized_postgresql_replication_slot](../../operations/settings/settings.md#materialized-postgresql-replication-slot)
+
+- [materialized_postgresql_snapshot](../../operations/settings/settings.md#materialized-postgresql-snapshot)
+
 ``` sql
 CREATE DATABASE database1
 ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password')
@@ -102,3 +106,11 @@ ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres

 SELECT * FROM postgresql_db.postgres_table;
 ```
+
+## Notes {#notes}
+
+- Failover of the logical replication slot.
+
+  Logical replication slots that exist on the primary are not available on standby replicas.
+  So if there is a failover, the new primary (the old physical standby) will not be aware of any slots that existed on the old primary, which breaks replication from PostgreSQL.
+  A solution is to manage replication slots yourself and define a permanent replication slot (some information can be found [here](https://patroni.readthedocs.io/en/latest/SETTINGS.html)). Pass the slot name via the `materialized_postgresql_replication_slot` setting, and have it exported with the `EXPORT SNAPSHOT` option. The snapshot identifier then needs to be passed via the `materialized_postgresql_snapshot` setting.
@@ -19,6 +19,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 SETTINGS
     [connection_pool_size=16, ]
     [connection_max_tries=3, ]
+    [connection_wait_timeout=5, ] /* 0 -- do not wait */
     [connection_auto_close=true ]
 ;
 ```
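The `connection_wait_timeout` setting is the user-facing knob for the `wait_timeout` parameter added to `mysqlxx::Pool::get` earlier in this commit. A hedged sketch of the three waiting regimes it selects (names are illustrative, not the storage engine's actual code):

```cpp
#include <cstdint>

enum class WaitRegime { FailFast, Bounded, Unbounded };

/// Illustrative mapping of the seconds-based setting onto the
/// behaviour of mysqlxx::Pool::get(wait_timeout).
WaitRegime classify(uint64_t connection_wait_timeout)
{
    if (connection_wait_timeout == 0)
        return WaitRegime::FailFast;   /// throws "wait is disabled" when full
    if (connection_wait_timeout == UINT64_MAX)
        return WaitRegime::Unbounded;  /// retries forever
    return WaitRegime::Bounded;        /// retries until the deadline passes
}
```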
@@ -38,6 +38,10 @@ You can also download and install packages manually from [here](https://repo.cli
 - `clickhouse-client` — Creates a symbolic link for `clickhouse-client` and other client-related tools and installs client configuration files.
 - `clickhouse-common-static-dbg` — Installs ClickHouse compiled binary files with debug info.

+!!! attention "Attention"
+    If you need to install a specific version of ClickHouse, you have to install all packages with the same version:
+    `sudo apt-get install clickhouse-server=21.8.5.7 clickhouse-client=21.8.5.7 clickhouse-common-static=21.8.5.7`
+
 ### From RPM Packages {#from-rpm-packages}

 It is recommended to use official pre-compiled `rpm` packages for CentOS, RedHat, and all other rpm-based Linux distributions.
@@ -1270,6 +1270,8 @@ You can insert Parquet data from a file into ClickHouse table by the following c
 $ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet"
 ```

+To insert data into [Nested](../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs, you must switch on the [input_format_parquet_import_nested](../operations/settings/settings.md#input_format_parquet_import_nested) setting.
+
 You can select data from a ClickHouse table and save it into some file in the Parquet format by the following command:

 ``` bash
@@ -1328,6 +1330,8 @@ You can insert Arrow data from a file into ClickHouse table by the following com
 $ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT Arrow"
 ```

+To insert data into [Nested](../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs, you must switch on the [input_format_arrow_import_nested](../operations/settings/settings.md#input_format_arrow_import_nested) setting.
+
 ### Selecting Data {#selecting-data-arrow}

 You can select data from a ClickHouse table and save it into some file in the Arrow format by the following command:
@@ -1384,6 +1388,8 @@ You can insert ORC data from a file into ClickHouse table by the following comma
 $ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC"
 ```

+To insert data into [Nested](../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs, you must switch on the [input_format_orc_import_nested](../operations/settings/settings.md#input_format_orc_import_nested) setting.
+
 ### Selecting Data {#selecting-data-2}

 You can select data from a ClickHouse table and save it into some file in the ORC format by the following command:
@@ -1253,7 +1253,7 @@ If this section is specified, the path from [users_config](../../operations/serv

 The `user_directories` section can contain any number of items; the order of the items means their precedence (the higher the item, the higher the precedence).

-**Example**
+**Examples**

 ``` xml
 <user_directories>
@@ -1263,13 +1263,23 @@ The `user_directories` section can contain any number of items, the order of the
     <local_directory>
         <path>/var/lib/clickhouse/access/</path>
     </local_directory>
 </user_directories>
 ```

-You can also specify settings `memory` — means storing information only in memory, without writing to disk, and `ldap` — means storing information on an LDAP server.
+Users, roles, row policies, quotas, and profiles can also be stored in ZooKeeper:
+
+``` xml
+<user_directories>
+    <users_xml>
+        <path>/etc/clickhouse-server/users.xml</path>
+    </users_xml>
+    <replicated>
+        <zookeeper_path>/clickhouse/access/</zookeeper_path>
+    </replicated>
+</user_directories>
+```
+
+You can also define sections `memory` — means storing information only in memory, without writing to disk, and `ldap` — means storing information on an LDAP server.

 To add an LDAP server as a remote user directory of users that are not defined locally, define a single `ldap` section with the following parameters:
 - `server` — one of the LDAP server names defined in the `ldap_servers` config section. This parameter is mandatory and cannot be empty.
@@ -260,6 +260,39 @@ If an error occurred while reading rows but the error counter is still less than

 If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception.

+## input_format_parquet_import_nested {#input_format_parquet_import_nested}
+
+Enables or disables the ability to insert data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in the [Parquet](../../interfaces/formats.md#data-format-parquet) input format.
+
+Possible values:
+
+- 0 — Data cannot be inserted into `Nested` columns as an array of structs.
+- 1 — Data can be inserted into `Nested` columns as an array of structs.
+
+Default value: `0`.
+
+## input_format_arrow_import_nested {#input_format_arrow_import_nested}
+
+Enables or disables the ability to insert data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in the [Arrow](../../interfaces/formats.md#data_types-matching-arrow) input format.
+
+Possible values:
+
+- 0 — Data cannot be inserted into `Nested` columns as an array of structs.
+- 1 — Data can be inserted into `Nested` columns as an array of structs.
+
+Default value: `0`.
+
+## input_format_orc_import_nested {#input_format_orc_import_nested}
+
+Enables or disables the ability to insert data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in the [ORC](../../interfaces/formats.md#data-format-orc) input format.
+
+Possible values:
+
+- 0 — Data cannot be inserted into `Nested` columns as an array of structs.
+- 1 — Data can be inserted into `Nested` columns as an array of structs.
+
+Default value: `0`.
+
 ## input_format_values_interpret_expressions {#settings-input_format_values_interpret_expressions}

 Enables or disables the full SQL parser if the fast stream parser can’t parse the data. This setting is used only for the [Values](../../interfaces/formats.md#data-format-values) format at the data insertion. For more information about syntax parsing, see the [Syntax](../../sql-reference/syntax.md) section.
@@ -3436,6 +3469,14 @@ Possible values:

 Default value: `0`.

+## materialized_postgresql_replication_slot {#materialized-postgresql-replication-slot}
+
+Allows having user-managed replication slots. Must be used together with `materialized_postgresql_snapshot`.
+
+## materialized_postgresql_snapshot {#materialized-postgresql-snapshot}
+
+A text string identifying a snapshot from which the initial dump of tables will be performed. Must be used together with `materialized_postgresql_replication_slot`.
+
 ## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}

 Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md#projections) optimization when processing `SELECT` queries.
@@ -3457,3 +3498,13 @@ Possible values:
 - 1 — Projection optimization is obligatory.

 Default value: `0`.
+
+## regexp_max_matches_per_row {#regexp-max-matches-per-row}
+
+Sets the maximum number of matches for a single regular expression per row. Use it to protect against memory overload when using a greedy regular expression in the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md#extractallgroups-horizontal) function.
+
+Possible values:
+
+- Positive integer.
+
+Default value: `1000`.
@@ -1,44 +0,0 @@
-# system.views {#system-views}
-
-Contains the dependencies of all views and the type to which the view belongs. The metadata of the view comes from [system.tables](tables.md).
-
-Columns:
-
-- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the view is in.
-
-- `name` ([String](../../sql-reference/data-types/string.md)) — Name of the view.
-
-- `main_dependency_database` ([String](../../sql-reference/data-types/string.md)) — The name of the database on which the view depends.
-
-- `main_dependency_table` ([String](../../sql-reference/data-types/string.md)) — The name of the table on which the view depends.
-
-- `view_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of the view. Values:
-    - `'Default' = 1` — [Default views](../../sql-reference/statements/create/view.md#normal). Should not appear in this log.
-    - `'Materialized' = 2` — [Materialized views](../../sql-reference/statements/create/view.md#materialized).
-    - `'Live' = 3` — [Live views](../../sql-reference/statements/create/view.md#live-view).
-
-**Example**
-
-```sql
-SELECT * FROM system.views LIMIT 2 FORMAT Vertical;
-```
-
-```text
-Row 1:
-──────
-database:                 default
-name:                     live_view
-main_dependency_database: default
-main_dependency_table:    view_source_tb
-view_type:                Live
-
-Row 2:
-──────
-database:                 default
-name:                     materialized_view
-main_dependency_database: default
-main_dependency_table:    view_source_tb
-view_type:                Materialized
-```
-
-[Original article](https://clickhouse.tech/docs/en/operations/system-tables/views) <!--hide-->
@@ -3,7 +3,9 @@ toc_priority: 57
 toc_title: Nested(Name1 Type1, Name2 Type2, ...)
 ---

-# Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2}
+# Nested {#nested}
+
+## Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2}

 A nested data structure is like a table inside a cell. The parameters of a nested data structure – the column names and types – are specified the same way as in a [CREATE TABLE](../../../sql-reference/statements/create/table.md) query. Each table row can correspond to any number of rows in a nested data structure.
@@ -1438,9 +1438,9 @@ Result:
 └───────────────────────────────────────────┘
 ```

-## snowflakeToDateTime {#snowflakeToDateTime}
+## snowflakeToDateTime {#snowflaketodatetime}

-Extract time from snowflake id as DateTime format.
+Extracts time from a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a [DateTime](../data-types/datetime.md) value.

 **Syntax**

@@ -1450,12 +1450,12 @@ snowflakeToDateTime(value [, time_zone])

 **Parameters**

-- `value` — `snowflake id`, Int64 value.
+- `value` — Snowflake ID. [Int64](../data-types/int-uint.md).
 - `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `value` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).

 **Returned value**

-- value converted to the `DateTime` data type.
+- Input value converted to the [DateTime](../data-types/datetime.md) data type.

 **Example**

@@ -1474,9 +1474,9 @@ Result:
 └──────────────────────────────────────────────────────────────────┘
 ```

-## snowflakeToDateTime64 {#snowflakeToDateTime64}
+## snowflakeToDateTime64 {#snowflaketodatetime64}

-Extract time from snowflake id as DateTime64 format.
+Extracts time from a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a [DateTime64](../data-types/datetime64.md) value.

 **Syntax**

@@ -1486,12 +1486,12 @@ snowflakeToDateTime64(value [, time_zone])

 **Parameters**

-- `value` — `snowflake id`, Int64 value.
+- `value` — Snowflake ID. [Int64](../data-types/int-uint.md).
 - `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `value` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).

 **Returned value**

-- value converted to the `DateTime64` data type.
+- Input value converted to the [DateTime64](../data-types/datetime64.md) data type.

 **Example**

@@ -1510,9 +1510,9 @@ Result:
 └────────────────────────────────────────────────────────────────────┘
 ```

-## dateTimeToSnowflake {#dateTimeToSnowflake}
+## dateTimeToSnowflake {#datetimetosnowflake}

-Convert DateTime to the first snowflake id at the giving time.
+Converts a [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.

 **Syntax**

@@ -1524,33 +1524,29 @@ dateTimeToSnowflake(value)

 - `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md).

-
 **Returned value**

-- `value` converted to the `Int64` data type as the first snowflake id at that time.
+- Input value converted to the [Int64](../data-types/int-uint.md) data type as the first Snowflake ID at that time.

 **Example**

 Query:

 ``` sql
-WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt
-SELECT dateTimeToSnowflake(dt);
+WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt SELECT dateTimeToSnowflake(dt);
 ```

 Result:

 ``` text
-
 ┌─dateTimeToSnowflake(dt)─┐
 │     1426860702823350272 │
 └─────────────────────────┘
 ```

-## dateTime64ToSnowflake {#dateTime64ToSnowflake}
+## dateTime64ToSnowflake {#datetime64tosnowflake}

-Convert DateTime64 to the first snowflake id at the giving time.
+Converts a [DateTime64](../data-types/datetime64.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.

 **Syntax**

@@ -1562,18 +1558,16 @@ dateTime64ToSnowflake(value)

 - `value` — Date and time. [DateTime64](../../sql-reference/data-types/datetime64.md).

-
 **Returned value**

-- `value` converted to the `Int64` data type as the first snowflake id at that time.
+- Input value converted to the [Int64](../data-types/int-uint.md) data type as the first Snowflake ID at that time.

 **Example**

 Query:

 ``` sql
-WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64
-SELECT dateTime64ToSnowflake(dt64);
+WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64 SELECT dateTime64ToSnowflake(dt64);
 ```

 Result:

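Under the hood these functions are simple bit arithmetic: the top 41 bits of a Snowflake ID (after the sign bit) hold milliseconds since the Snowflake epoch, followed by 10 machine bits and 12 sequence bits. A hedged C++ sketch of the conversion; the epoch constant 1288834974657 matches the common (Twitter) Snowflake definition and is an assumption about ClickHouse's exact implementation:

```cpp
#include <cstdint>

/// Assumed Snowflake epoch (2010-11-04 01:42:54.657 UTC), in milliseconds.
constexpr int64_t snowflake_epoch_ms = 1288834974657;

/// snowflakeToDateTime: drop the 22 low bits (10 machine + 12 sequence),
/// leaving milliseconds since the Snowflake epoch.
int64_t snowflake_to_unix_seconds(int64_t id)
{
    return ((id >> 22) + snowflake_epoch_ms) / 1000;
}

/// dateTimeToSnowflake: the *first* ID of a given second has
/// machine and sequence bits all zero.
int64_t unix_seconds_to_first_snowflake(int64_t unix_seconds)
{
    return (unix_seconds * 1000 - snowflake_epoch_ms) << 22;
}
```

Sanity check against the example above: `1426860702823350272 >> 22` plus the epoch gives roughly `1629025076` seconds, i.e. 2021-08-15 10:57:56 UTC, matching the documented result.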
@@ -31,6 +31,19 @@ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not su

 If you want to use the most recent version, replace `stable` with `testing` (recommended for test environments).

 You can also download and install packages manually from the [repository](https://repo.clickhouse.tech/deb/stable/main/).

+#### Packages {#packages}
+
+- `clickhouse-common-static` — Installs the ClickHouse executables.
+- `clickhouse-server` — Creates symbolic links for `clickhouse-server` and installs the configuration files.
+- `clickhouse-client` — Creates symbolic links for `clickhouse-client` and other client tools and installs the `clickhouse-client` configuration files.
+- `clickhouse-common-static-dbg` — Installs the ClickHouse executables built with debug info.
+
+!!! attention "Attention"
+    If you need to install a specific version of ClickHouse, you have to install all packages with the same version:
+    `sudo apt-get install clickhouse-server=21.8.5.7 clickhouse-client=21.8.5.7 clickhouse-common-static=21.8.5.7`
+
 ### From RPM Packages {#from-rpm-packages}

 The ClickHouse team at Yandex recommends using the official pre-compiled `rpm` packages for CentOS, RedHat, and all other rpm-based Linux distributions.
@@ -1180,7 +1180,7 @@ ClickHouse supports configurable precision

 The data types of ClickHouse table columns can differ from the corresponding fields of the Parquet file. When inserting data, ClickHouse interprets the data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) the data to the type set for the table column.

-### Inserting and Selecting Data {#vstavka-i-vyborka-dannykh}
+### Inserting and Selecting Data {#inserting-and-selecting-data}

 To insert Parquet data from a file into ClickHouse, run a command like the following:

@@ -1188,6 +1188,8 @@ ClickHouse supports configurable precision
 $ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet"
 ```

+To insert data into [Nested](../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs, enable the [input_format_parquet_import_nested](../operations/settings/settings.md#input_format_parquet_import_nested) setting.
+
 To select data from a ClickHouse table and save it to a file in the Parquet format, use a command like the following:

 ``` bash
@@ -1246,6 +1248,8 @@ ClickHouse supports configurable precision
 $ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT Arrow"
 ```

+To insert data into [Nested](../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs, enable the [input_format_arrow_import_nested](../operations/settings/settings.md#input_format_arrow_import_nested) setting.
+
 ### Selecting Data {#selecting-data-arrow}

 To select data from a ClickHouse table and save it to a file in the Arrow format, use a command like the following:
@@ -1294,7 +1298,7 @@ ClickHouse supports configurable precision

 The data types of ClickHouse table columns can differ from the data types of the corresponding ORC fields. When inserting data, ClickHouse interprets the ORC data types according to the mapping table and then [casts](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) the data to the type set for the ClickHouse table column.

-### Inserting Data {#vstavka-dannykh-1}
+### Inserting Data {#inserting-data-2}

 To insert ORC data from a file into ClickHouse, use a command like the following:

@@ -1302,7 +1306,9 @@ ClickHouse supports configurable precision
 $ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC"
 ```

-### Selecting Data {#vyvod-dannykh-1}
+To insert data into [Nested](../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs, enable the [input_format_orc_import_nested](../operations/settings/settings.md#input_format_orc_import_nested) setting.
+
+### Selecting Data {#selecting-data-2}

 To select data from a ClickHouse table and save it to a file in the ORC format, use a command like the following:
@@ -1200,12 +1200,13 @@ ClickHouse uses ZooKeeper to store metadata
 The section of the configuration file that contains the following settings:
 - The path to the configuration file with predefined users.
 - The path to the file with users created by SQL commands.
+- The path to the ZooKeeper node where users created by SQL commands are stored and replicated (experimental functionality).

 If this section is defined, the paths from [users_config](../../operations/server-configuration-parameters/settings.md#users-config) and [access_control_path](../../operations/server-configuration-parameters/settings.md#access_control_path) are not used.

 The `user_directories` section can contain any number of items; the order of the items means their precedence (the higher the item, the higher the precedence).

-**Example**
+**Examples**

 ``` xml
 <user_directories>
@@ -1218,7 +1219,20 @@ ClickHouse uses ZooKeeper to store metadata
 </user_directories>
 ```

-You can also specify the setting `memory` — meaning information is stored only in memory, without writing to disk, and `ldap` — meaning information is stored on an [LDAP server](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol).
+Users, roles, row policies, quotas, and profiles can also be stored in ZooKeeper:
+
+``` xml
+<user_directories>
+    <users_xml>
+        <path>/etc/clickhouse-server/users.xml</path>
+    </users_xml>
+    <replicated>
+        <zookeeper_path>/clickhouse/access/</zookeeper_path>
+    </replicated>
+</user_directories>
+```
+
+You can also add sections `memory` — meaning information is stored only in memory, without writing to disk, and `ldap` — meaning information is stored on an [LDAP server](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol).

 To add an LDAP server as a remote directory for users that are not defined locally, define a single `ldap` section with the following parameters:
 - `server` — the name of one of the LDAP servers defined in the `ldap_servers` section of the configuration file. This parameter is mandatory and cannot be empty.
@@ -237,6 +237,39 @@ ClickHouse applies the setting when

 If `input_format_allow_errors_ratio` is exceeded, ClickHouse throws an exception.

+## input_format_parquet_import_nested {#input_format_parquet_import_nested}
+
+Enables or disables the ability to insert data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in the [Parquet](../../interfaces/formats.md#data-format-parquet) input format.
+
+Possible values:
+
+- 0 — Data cannot be inserted into `Nested` columns as an array of structs.
+- 1 — Data can be inserted into `Nested` columns as an array of structs.
+
+Default value: `0`.
+
+## input_format_arrow_import_nested {#input_format_arrow_import_nested}
+
+Enables or disables the ability to insert data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in the [Arrow](../../interfaces/formats.md#data_types-matching-arrow) input format.
+
+Possible values:
+
+- 0 — Data cannot be inserted into `Nested` columns as an array of structs.
+- 1 — Data can be inserted into `Nested` columns as an array of structs.
+
+Default value: `0`.
+
+## input_format_orc_import_nested {#input_format_orc_import_nested}
+
+Enables or disables the ability to insert data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in the [ORC](../../interfaces/formats.md#data-format-orc) input format.
+
+Possible values:
+
+- 0 — Data cannot be inserted into `Nested` columns as an array of structs.
+- 1 — Data can be inserted into `Nested` columns as an array of structs.
+
+Default value: `0`.
+
 ## input_format_values_interpret_expressions {#settings-input_format_values_interpret_expressions}

 Enables or disables the full SQL parser if the fast stream parser cannot parse the data. This setting is used only for the [Values](../../interfaces/formats.md#data-format-values) format when inserting data. For more information about parsing, see the [Syntax](../../sql-reference/syntax.md) section.
@@ -3274,3 +3307,13 @@ SETTINGS index_granularity = 8192 │
 - 1 — Projections are forcibly used.

 Default value: `0`.
+
+## regexp_max_matches_per_row {#regexp-max-matches-per-row}
+
+Sets the maximum number of matches for a single regular expression per row. Use it to protect against memory overload when using greedy quantifiers in a regular expression for the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md#extractallgroups-horizontal) function.
+
+Possible values:
+
+- A positive integer.
+
+Default value: `1000`.
@@ -1,4 +1,6 @@
-# Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2}
+# Nested {#nested}
+
+## Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2}

 A nested data structure is like a table inside a cell. The parameters of a nested data structure — the column names and types — are specified the same way as in a CREATE query. Each table row can correspond to any number of rows in a nested data structure.

@@ -95,4 +97,3 @@ LIMIT 10
 With a DESCRIBE query, the columns of a nested data structure are likewise listed separately.

 The ALTER query for elements of nested data structures has severe limitations.
-
@@ -1436,3 +1436,144 @@ FROM numbers(3);
 │ 2,"good" │
 └───────────────────────────────────────────┘
 ```
+
+## snowflakeToDateTime {#snowflaketodatetime}
+
+Extracts time from a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a [DateTime](../data-types/datetime.md) value.
+
+**Syntax**
+
+``` sql
+snowflakeToDateTime(value [, time_zone])
+```
+
+**Arguments**
+
+- `value` — Snowflake ID. [Int64](../data-types/int-uint.md).
+- `time_zone` — [Server timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `value` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- The value converted to the [DateTime](../data-types/datetime.md) data type.
+
+**Example**
+
+Query:
+
+``` sql
+SELECT snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC');
+```
+
+Result:
+
+``` text
+
+┌─snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC')─┐
+│                                              2021-08-15 10:57:56 │
+└──────────────────────────────────────────────────────────────────┘
+```
+
+## snowflakeToDateTime64 {#snowflaketodatetime64}
+
+Extracts time from a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a [DateTime64](../data-types/datetime64.md) value.
+
+**Syntax**
+
+``` sql
+snowflakeToDateTime64(value [, time_zone])
+```
+
+**Arguments**
+
+- `value` — Snowflake ID. [Int64](../data-types/int-uint.md).
+- `time_zone` — [Server timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `value` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- The value converted to the [DateTime64](../data-types/datetime64.md) data type.
+
+**Example**
+
+Query:
+
+``` sql
+SELECT snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC');
+```
+
+Result:
+
+``` text
+
+┌─snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC')─┐
+│                                            2021-08-15 10:58:19.841 │
+└────────────────────────────────────────────────────────────────────┘
+```
+
+## dateTimeToSnowflake {#datetimetosnowflake}
+
+Converts a [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.
+
+**Syntax**
+
+``` sql
+dateTimeToSnowflake(value)
+```
+
+**Arguments**
+
+- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md).
+
+**Returned value**
+
+- The value converted to [Int64](../data-types/int-uint.md) as the first Snowflake ID at that time.
+
+**Example**
+
+Query:
+
+``` sql
+WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt SELECT dateTimeToSnowflake(dt);
+```
+
+Result:
+
+``` text
+┌─dateTimeToSnowflake(dt)─┐
+│     1426860702823350272 │
+└─────────────────────────┘
+```
+
+## dateTime64ToSnowflake {#datetime64tosnowflake}
+
+Converts a [DateTime64](../data-types/datetime64.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.
+
+**Syntax**
+
+``` sql
+dateTime64ToSnowflake(value)
+```
+
+**Arguments**
+
+- `value` — Date and time. [DateTime64](../data-types/datetime64.md).
+
+**Returned value**
+
+- The value converted to [Int64](../data-types/int-uint.md) as the first Snowflake ID at that time.
+
+**Example**
+
+Query:
+
+``` sql
+WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64 SELECT dateTime64ToSnowflake(dt64);
+```
+
+Result:
+
+``` text
+┌─dateTime64ToSnowflake(dt64)─┐
+│         1426860704886947840 │
+└─────────────────────────────┘
+```
@@ -247,6 +247,7 @@ CREATE TABLE codec_example
 )
 ENGINE = MergeTree()
 ```
+
 ## Temporary Tables {#temporary-tables}

 ClickHouse supports temporary tables with the following characteristics:
@@ -393,6 +393,7 @@ void LocalServer::processQueries()
     auto context = session.makeQueryContext();
+    context->makeSessionContext(); /// initial_create_query requires a session context to be set.
     context->setCurrentQueryId("");

     applyCmdSettings(context);

     /// Use the same query_id (and thread group) for all queries
@@ -45,16 +45,14 @@
 #include <IO/UseSSL.h>
 #include <Interpreters/AsynchronousMetrics.h>
 #include <Interpreters/DDLWorker.h>
-#include <Interpreters/DNSCacheUpdater.h>
-#include <Interpreters/DatabaseCatalog.h>
 #include <Interpreters/ExternalDictionariesLoader.h>
 #include <Interpreters/ExternalModelsLoader.h>
 #include <Interpreters/ProcessList.h>
 #include <Interpreters/loadMetadata.h>
+#include <Interpreters/DatabaseCatalog.h>
+#include <Interpreters/DNSCacheUpdater.h>
 #include <Interpreters/ExternalLoaderXMLConfigRepository.h>
 #include <Interpreters/InterserverCredentials.h>
-#include <Interpreters/UserDefinedObjectsLoader.h>
 #include <Interpreters/JIT/CompiledExpressionCache.h>
+#include <Interpreters/UserDefinedObjectsLoader.h>
 #include <Access/AccessControlManager.h>
 #include <Storages/StorageReplicatedMergeTree.h>
 #include <Storages/System/attachSystemTables.h>
@@ -1131,6 +1129,10 @@ if (ThreadFuzzer::instance().isEffective())
     global_context->setSystemZooKeeperLogAfterInitializationIfNeeded();
     /// After the system database is created, attach virtual system tables (in addition to query_log and part_log)
     attachSystemTablesServer(*database_catalog.getSystemDatabase(), has_zookeeper);
+    /// Firstly remove partially dropped databases, to avoid race with MaterializedMySQLSyncThread,
+    /// that may execute DROP before loadMarkedAsDroppedTables() in background,
+    /// and so loadMarkedAsDroppedTables() will find it and try to add, and UUID will overlap.
+    database_catalog.loadMarkedAsDroppedTables();
+    /// Then, load remaining databases
     loadMetadata(global_context, default_database);
     database_catalog.loadDatabases();
@@ -18,6 +18,7 @@
           - information
           - debug
           - trace
+          - test (not for production usage)

     [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
   -->
@@ -964,6 +965,14 @@
         <flush_interval_milliseconds>1000</flush_interval_milliseconds>
     </crash_log>

+    <!-- Session log. Stores user log in (successful or not) and log out events. -->
+    <session_log>
+        <database>system</database>
+        <table>session_log</table>
+
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </session_log>
+
     <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
          See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
programs/server/users.d/session_log_test.xml (new symbolic link)
@@ -0,0 +1 @@
+../../../tests/config/users.d/session_log_test.xml
@@ -143,10 +143,11 @@ public:

     std::vector<QuotaUsage> getAllQuotasUsage() const;

-    std::shared_ptr<const EnabledSettings> getEnabledSettings(const UUID & user_id,
-                                                              const SettingsProfileElements & settings_from_user,
-                                                              const boost::container::flat_set<UUID> & enabled_roles,
-                                                              const SettingsProfileElements & settings_from_enabled_roles) const;
+    std::shared_ptr<const EnabledSettings> getEnabledSettings(
+        const UUID & user_id,
+        const SettingsProfileElements & settings_from_user,
+        const boost::container::flat_set<UUID> & enabled_roles,
+        const SettingsProfileElements & settings_from_enabled_roles) const;

     std::shared_ptr<const SettingsProfilesInfo> getSettingsProfileInfo(const UUID & profile_id);
@ -36,6 +36,16 @@ struct SettingsProfilesInfo

friend bool operator ==(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs);
friend bool operator !=(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs) { return !(lhs == rhs); }

Strings getProfileNames() const
{
    Strings result;
    result.reserve(profiles.size());
    for (const auto & profile_id : profiles)
        result.push_back(names_of_profiles.at(profile_id));

    return result;
}

private:
    const AccessControlManager & manager;
};
@ -27,7 +27,12 @@ private:

size_t stack_size;
size_t page_size = 0;
public:
static constexpr size_t default_stack_size = 128 * 1024; /// 64KB was not enough for tests
/// NOTE: If you see random segfaults in CI and the stack starts from boost::context::...fiber...,
/// it is probably worth trying to increase the stack size for coroutines.
///
/// The current value is just enough for all tests in our CI. It was not selected in any special
/// way. We will have 36 pages with a 4KB page size.
static constexpr size_t default_stack_size = 144 * 1024; /// 64KB was not enough for tests

explicit FiberStack(size_t stack_size_ = default_stack_size) : stack_size(stack_size_)
{

@ -43,6 +48,8 @@ public:

if (MAP_FAILED == vp)
    DB::throwFromErrno(fmt::format("FiberStack: Cannot mmap {}.", ReadableSize(num_bytes)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);

/// TODO: make reports on illegal guard page access more clear.
/// Currently we will see a segfault and an almost random stack trace.
if (-1 == ::mprotect(vp, page_size, PROT_NONE))
{
    ::munmap(vp, num_bytes);
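The guard-page idea above is easy to reproduce in isolation. A minimal, self-contained sketch (plain POSIX; the function name is illustrative): reserve one extra page below the usable stack and revoke all access to it, so an overflow faults immediately instead of silently corrupting neighbouring memory.

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <stdexcept>

void * allocateGuardedStack(size_t stack_bytes, size_t & total_bytes)
{
    const size_t page_size = static_cast<size_t>(::sysconf(_SC_PAGESIZE));
    total_bytes = stack_bytes + page_size; /// one extra page for the guard
    void * vp = ::mmap(nullptr, total_bytes, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (MAP_FAILED == vp)
        throw std::runtime_error("mmap failed");
    /// The lowest page becomes inaccessible: any touch raises SIGSEGV.
    if (0 != ::mprotect(vp, page_size, PROT_NONE))
    {
        ::munmap(vp, total_bytes);
        throw std::runtime_error("mprotect failed");
    }
    return static_cast<char *>(vp) + page_size; /// usable stack starts above the guard
}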
@ -6,6 +6,8 @@

#include <Poco/Logger.h>
#include <Poco/AutoPtr.h>
#include <Poco/NullChannel.h>
#include <Poco/StreamChannel.h>
#include <sstream>


TEST(Logger, Log)

@ -17,3 +19,34 @@ TEST(Logger, Log)

/// This test checks that we don't pass this string to fmtlib, because it is the only argument.
EXPECT_NO_THROW(LOG_INFO(log, "Hello {} World"));
}

TEST(Logger, TestLog)
{
    { /// Test logs visible for test level
        std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
        auto my_channel = Poco::AutoPtr<Poco::StreamChannel>(new Poco::StreamChannel(oss));
        auto * log = &Poco::Logger::create("TestLogger", my_channel.get());
        log->setLevel("test");
        LOG_TEST(log, "Hello World");

        EXPECT_EQ(oss.str(), "Hello World\n");
        Poco::Logger::destroy("TestLogger");
    }

    { /// Test logs invisible for other levels
        for (const auto & level : {"trace", "debug", "information", "warning", "error", "fatal"})
        {
            std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
            auto my_channel = Poco::AutoPtr<Poco::StreamChannel>(new Poco::StreamChannel(oss));
            auto * log = &Poco::Logger::create(std::string{level} + "_Logger", my_channel.get());
            log->setLevel(level);
            LOG_TEST(log, "Hello World");

            EXPECT_EQ(oss.str(), "");

            Poco::Logger::destroy(std::string{level} + "_Logger");
        }
    }
}
@ -113,7 +113,8 @@ namespace DB

std::string CompressionCodecEncrypted::deriveKey(const std::string_view & master_key)
{
    std::string_view salt(""); // No salt: derive keys in a deterministic manner.
    /// No salt: derive keys in a deterministic manner.
    std::string_view salt(""); // NOLINT
    std::string_view info("Codec Encrypted('AES-128-GCM-SIV') key generation key");
    std::array<char, 32> result;
@ -122,6 +122,10 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n

}
else
{
    LOG_TEST(log, "Commit request for session {} with type {}, log id {}{}",
        request_for_session.session_id, toString(request_for_session.request->getOpNum()), log_idx,
        request_for_session.request->getPath().empty() ? "" : ", path " + request_for_session.request->getPath());

    std::lock_guard lock(storage_and_responses_lock);
    KeeperStorage::ResponsesForSessions responses_for_sessions = storage->processRequest(request_for_session.request, request_for_session.session_id, log_idx);
    for (auto & response_for_session : responses_for_sessions)
@ -900,8 +900,7 @@ public:

Messaging::MessageTransport & mt,
const Poco::Net::SocketAddress & address)
{
    Authentication::Type user_auth_type = session.getAuthenticationType(user_name);

    const Authentication::Type user_auth_type = session.getAuthenticationTypeOrLogInFailure(user_name);
    if (type_to_method.find(user_auth_type) != type_to_method.end())
    {
        type_to_method[user_auth_type]->authenticate(user_name, session, mt, address);
@ -70,8 +70,8 @@ class IColumn;

M(UInt64, idle_connection_timeout, 3600, "Close idle TCP connections after specified number of seconds.", 0) \
M(UInt64, distributed_connections_pool_size, DBMS_DEFAULT_DISTRIBUTED_CONNECTIONS_POOL_SIZE, "Maximum number of connections with one remote server in the pool.", 0) \
M(UInt64, connections_with_failover_max_tries, DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, "The maximum number of attempts to connect to replicas.", 0) \
M(UInt64, s3_min_upload_part_size, 512*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \
M(UInt64, s3_max_single_part_upload_size, 64*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \
M(UInt64, s3_min_upload_part_size, 32*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \
M(UInt64, s3_max_single_part_upload_size, 32*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \
M(UInt64, s3_max_single_read_retries, 4, "The maximum number of retries during single S3 read.", 0) \
M(UInt64, s3_max_redirects, 10, "Max number of S3 redirects hops allowed.", 0) \
M(UInt64, s3_max_connections, 1024, "The maximum number of connections per server.", 0) \
@ -79,8 +79,8 @@ IMPLEMENT_SETTING_ENUM(LogsLevel, ErrorCodes::BAD_ARGUMENTS,

    {"warning", LogsLevel::warning},
    {"information", LogsLevel::information},
    {"debug", LogsLevel::debug},
    {"trace", LogsLevel::trace}})
    {"trace", LogsLevel::trace},
    {"test", LogsLevel::test}})

IMPLEMENT_SETTING_ENUM_WITH_RENAME(LogQueriesType, ErrorCodes::BAD_ARGUMENTS,
    {{"QUERY_START", QUERY_START},
@ -94,6 +94,7 @@ enum class LogsLevel

    information,
    debug,
    trace,
    test,
};

DECLARE_SETTING_ENUM(LogsLevel)
@ -247,7 +247,7 @@ void MaterializedMySQLSyncThread::assertMySQLAvailable()

{
    try
    {
        checkMySQLVariables(pool.get(), getContext()->getSettingsRef());
        checkMySQLVariables(pool.get(/* wait_timeout= */ UINT64_MAX), getContext()->getSettingsRef());
    }
    catch (const mysqlxx::ConnectionFailed & e)
    {

@ -729,7 +729,7 @@ void MaterializedMySQLSyncThread::onEvent(Buffers & buffers, const BinlogEventPt

{
    /// Some behaviors (such as changing the value of "binlog_checksum") rotate the binlog file.
    /// To ensure that synchronization continues, we need to handle these events.
    metadata.fetchMasterVariablesValue(pool.get());
    metadata.fetchMasterVariablesValue(pool.get(/* wait_timeout= */ UINT64_MAX));
    client.setBinlogChecksum(metadata.binlog_checksum);
}
else if (receive_event->header.type != HEARTBEAT_EVENT)
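Both call sites above pass UINT64_MAX, i.e. they keep the old block-forever behaviour. A hedged usage sketch of the new parameter with a finite timeout (the pool object and timeout value are illustrative):

#include <mysqlxx/Pool.h>
#include <Poco/Exception.h>
#include <iostream>

void runWithPooledConnection(mysqlxx::Pool & pool)
{
    try
    {
        /// Wait at most 5 seconds for a free connection;
        /// 0 would throw immediately, UINT64_MAX would wait indefinitely.
        mysqlxx::Pool::Entry entry = pool.get(/* wait_timeout= */ 5);
        /// ... run queries through `entry` ...
    }
    catch (const Poco::Exception & e)
    {
        /// Thrown when the pool stays full past the deadline (or waiting is disabled).
        std::cerr << e.displayText() << std::endl;
    }
}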
@ -65,10 +65,8 @@ void DatabaseMaterializedPostgreSQL::startSynchronization()

connection_info,
getContext(),
is_attach,
settings->materialized_postgresql_max_block_size.value,
settings->materialized_postgresql_allow_automatic_update,
/* is_materialized_postgresql_database = */ true,
settings->materialized_postgresql_tables_list.value);
*settings,
/* is_materialized_postgresql_database = */ true);

NameSet tables_to_replicate;
try
@ -104,9 +104,11 @@ public:

const auto * attr_name_col = checkAndGetColumnConst<ColumnString>(sample_columns.getByPosition(1).column.get());
if (!attr_name_col)
    throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Second argument of function dictGet... must be a constant string");
    throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Second argument of function dictGet must be a constant string");

return getDictionary(dict_name_col->getValue<String>())->isInjective(attr_name_col->getValue<String>());
const auto dictionary_name = dict_name_col->getValue<String>();
const auto attribute_name = attr_name_col->getValue<String>();
return getDictionary(dictionary_name)->isInjective(attribute_name);
}

DictionaryStructure getDictionaryStructure(const String & dictionary_name) const
@ -41,7 +41,7 @@ public:

    return name;
}

size_t getNumberOfArguments() const override { return 4; }
size_t getNumberOfArguments() const override { return 3; }

bool useDefaultImplementationForConstants() const override { return true; }

@ -41,7 +41,7 @@ public:

    return name;
}

size_t getNumberOfArguments() const override { return 4; }
size_t getNumberOfArguments() const override { return 3; }

bool useDefaultImplementationForConstants() const override { return true; }
@ -32,7 +32,7 @@ WriteBufferFromFile::WriteBufferFromFile(

mode_t mode,
char * existing_memory,
size_t alignment)
: WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, alignment), file_name(file_name_)
: WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, alignment, file_name_)
{
    ProfileEvents::increment(ProfileEvents::FileOpen);

@ -65,9 +65,7 @@ WriteBufferFromFile::WriteBufferFromFile(

size_t buf_size,
char * existing_memory,
size_t alignment)
:
WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment),
file_name(original_file_name.empty() ? "(fd = " + toString(fd_) + ")" : original_file_name)
: WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment, original_file_name)
{
    fd_ = -1;
}

@ -25,7 +25,6 @@ namespace DB

class WriteBufferFromFile : public WriteBufferFromFileDescriptor
{
protected:
    std::string file_name;
    CurrentMetrics::Increment metric_increment{CurrentMetrics::OpenFileForWrite};

public:
@ -61,7 +61,12 @@ void WriteBufferFromFileDescriptor::nextImpl()

if ((-1 == res || 0 == res) && errno != EINTR)
{
    ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteFailed);
    throwFromErrnoWithPath("Cannot write to file " + getFileName(), getFileName(),

    /// Don't use getFileName() here because this method can be called from the destructor.
    String error_file_name = file_name;
    if (error_file_name.empty())
        error_file_name = "(fd = " + toString(fd) + ")";
    throwFromErrnoWithPath("Cannot write to file " + error_file_name, error_file_name,
        ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR);
}

@ -73,20 +78,20 @@ void WriteBufferFromFileDescriptor::nextImpl()

    ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteBytes, bytes_written);
}


/// Name or some description of the file.
std::string WriteBufferFromFileDescriptor::getFileName() const
{
    return "(fd = " + toString(fd) + ")";
}


/// NOTE: This class can be used as a very low-level building block, for example
/// in a trace collector. In such places memory allocation can be dangerous,
/// so don't allocate anything in this constructor.
WriteBufferFromFileDescriptor::WriteBufferFromFileDescriptor(
    int fd_,
    size_t buf_size,
    char * existing_memory,
    size_t alignment)
    : WriteBufferFromFileBase(buf_size, existing_memory, alignment), fd(fd_) {}
    size_t alignment,
    std::string file_name_)
    : WriteBufferFromFileBase(buf_size, existing_memory, alignment)
    , fd(fd_)
    , file_name(std::move(file_name_))
{
}


WriteBufferFromFileDescriptor::~WriteBufferFromFileDescriptor()

@ -115,7 +120,7 @@ void WriteBufferFromFileDescriptor::sync()

}


off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence)
off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) // NOLINT
{
    off_t res = lseek(fd, offset, whence);
    if (-1 == res)

@ -125,7 +130,7 @@ off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence)

}


void WriteBufferFromFileDescriptor::truncate(off_t length)
void WriteBufferFromFileDescriptor::truncate(off_t length) // NOLINT
{
    int res = ftruncate(fd, length);
    if (-1 == res)

@ -133,7 +138,7 @@ void WriteBufferFromFileDescriptor::truncate(off_t length)

}


off_t WriteBufferFromFileDescriptor::size()
off_t WriteBufferFromFileDescriptor::size() const
{
    struct stat buf;
    int res = fstat(fd, &buf);

@ -142,4 +147,13 @@ off_t WriteBufferFromFileDescriptor::size()

    return buf.st_size;
}

std::string WriteBufferFromFileDescriptor::getFileName() const
{
    if (file_name.empty())
        return "(fd = " + toString(fd) + ")";

    return file_name;
}


}
@ -13,17 +13,17 @@ class WriteBufferFromFileDescriptor : public WriteBufferFromFileBase

protected:
    int fd;

    /// If the file has a name, contains the file name; otherwise contains the string "(fd = ...)".
    std::string file_name;

    void nextImpl() override;

    /// Name or some description of the file.
    std::string getFileName() const override;

public:
    WriteBufferFromFileDescriptor(
        int fd_ = -1,
        size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
        char * existing_memory = nullptr,
        size_t alignment = 0);
        size_t alignment = 0,
        std::string file_name_ = "");

    /** Could be used before initialization if needed 'fd' was not passed to constructor.
      * It's not possible to change 'fd' during work.

@ -42,10 +42,15 @@ public:

    void sync() override;

    off_t seek(off_t offset, int whence);
    void truncate(off_t length);
    /// clang-tidy wants these methods to be const, but
    /// they are not const semantically.
    off_t seek(off_t offset, int whence); // NOLINT
    void truncate(off_t length); // NOLINT

    off_t size();
    /// Name or some description of the file.
    std::string getFileName() const override;

    off_t size() const;
};

}
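With the extra constructor argument, even a buffer built directly on a raw descriptor can report a meaningful name. A small hedged sketch (the path is illustrative, and the descriptor is assumed to be open for writing):

#include <IO/WriteBufferFromFileDescriptor.h>
#include <IO/WriteHelpers.h>

void writeGreeting(int fd)
{
    /// Without the last argument, errors would mention "(fd = N)";
    /// with it, they mention the real path, even from the destructor.
    DB::WriteBufferFromFileDescriptor out(fd, DBMS_DEFAULT_BUFFER_SIZE, nullptr, 0, "/tmp/example.txt");
    DB::writeString("hello\n", out);
    out.next(); /// flush the buffer to the descriptor
}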
@ -28,41 +28,49 @@ ZstdInflatingReadBuffer::~ZstdInflatingReadBuffer()

bool ZstdInflatingReadBuffer::nextImpl()
{
    if (eof)
        return false;

    if (input.pos >= input.size)
    do
    {
        in->nextIfAtEnd();
        input.src = reinterpret_cast<unsigned char *>(in->position());
        input.pos = 0;
        input.size = in->buffer().end() - in->position();
    }
        /// If it is known that the end of file was reached, return false.
        if (eof)
            return false;

    output.dst = reinterpret_cast<unsigned char *>(internal_buffer.begin());
    output.size = internal_buffer.size();
    output.pos = 0;
        /// If the end of the input was reached, get the next part.
        if (input.pos >= input.size)
        {
            in->nextIfAtEnd();
            input.src = reinterpret_cast<unsigned char *>(in->position());
            input.pos = 0;
            input.size = in->buffer().end() - in->position();
        }

    size_t ret = ZSTD_decompressStream(dctx, &output, &input);
    if (ZSTD_isError(ret))
        throw Exception(
            ErrorCodes::ZSTD_DECODER_FAILED, "Zstd stream decoding failed: error code: {}; zstd version: {}", ret, ZSTD_VERSION_STRING);
        /// Fill the output buffer.
        output.dst = reinterpret_cast<unsigned char *>(internal_buffer.begin());
        output.size = internal_buffer.size();
        output.pos = 0;

    in->position() = in->buffer().begin() + input.pos;
    working_buffer.resize(output.pos);
        /// Decompress the data and check for errors.
        size_t ret = ZSTD_decompressStream(dctx, &output, &input);
        if (ZSTD_isError(ret))
            throw Exception(
                ErrorCodes::ZSTD_DECODER_FAILED, "Zstd stream decoding failed: error code: {}; zstd version: {}", ret, ZSTD_VERSION_STRING);

    if (in->eof())
    {
        eof = true;
        return !working_buffer.empty();
    }
    else if (output.pos == 0)
    {
        /// Check that something has changed after decompression (the input or the output position).
        assert(output.pos > 0 || in->position() < in->buffer().begin() + input.pos);

        /// Move the position to the end of the read data.
        in->position() = in->buffer().begin() + input.pos;
        working_buffer.resize(output.pos);

        /// If the end of file is reached, set the eof flag and return true if there is some data in the buffer, otherwise return false.
        if (in->eof())
        {
            eof = true;
            return !working_buffer.empty();
        }
        /// It is possible that the input buffer is not at eof yet and nothing was decompressed in the current iteration.
        /// Such behaviour is not always allowed: if the input buffer is not at eof, it has to be
        /// guaranteed that working_buffer is not empty. So if it is empty, continue.
        return nextImpl();
    }
    } while (output.pos == 0);

    return true;
}
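The same streaming pattern can be shown standalone against the plain zstd C API. A hedged sketch (file handles are illustrative): keep refilling the input, call ZSTD_decompressStream until the input is consumed, and stop at end of input.

#include <zstd.h>
#include <cstdio>
#include <vector>
#include <stdexcept>

void decompressFile(FILE * in_file, FILE * out_file)
{
    ZSTD_DCtx * dctx = ZSTD_createDCtx();
    std::vector<char> in_buf(ZSTD_DStreamInSize());
    std::vector<char> out_buf(ZSTD_DStreamOutSize());

    size_t read_bytes;
    while ((read_bytes = fread(in_buf.data(), 1, in_buf.size(), in_file)) > 0)
    {
        ZSTD_inBuffer input{in_buf.data(), read_bytes, 0};
        while (input.pos < input.size)
        {
            /// Each call may produce output, consume input, or both.
            ZSTD_outBuffer output{out_buf.data(), out_buf.size(), 0};
            size_t ret = ZSTD_decompressStream(dctx, &output, &input);
            if (ZSTD_isError(ret))
            {
                ZSTD_freeDCtx(dctx);
                throw std::runtime_error(ZSTD_getErrorName(ret));
            }
            fwrite(out_buf.data(), 1, output.pos, out_file);
        }
    }
    ZSTD_freeDCtx(dctx);
}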
@ -5,6 +5,7 @@ LIBRARY()

ADDINCL(
    contrib/libs/zstd/include
    contrib/libs/lz4
    contrib/restricted/fast_float/include
)

@ -14,10 +15,10 @@ PEERDIR(

    contrib/libs/brotli/enc
    contrib/libs/poco/NetSSL_OpenSSL
    contrib/libs/zstd
    contrib/libs/lz4
    contrib/restricted/fast_float
)


SRCS(
    AIO.cpp
    AsynchronousReadBufferFromFile.cpp
@ -4,6 +4,7 @@ LIBRARY()

ADDINCL(
    contrib/libs/zstd/include
    contrib/libs/lz4
    contrib/restricted/fast_float/include
)

@ -13,10 +14,10 @@ PEERDIR(

    contrib/libs/brotli/enc
    contrib/libs/poco/NetSSL_OpenSSL
    contrib/libs/zstd
    contrib/libs/lz4
    contrib/restricted/fast_float
)


SRCS(
    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | grep -v -F fuzzers | grep -v -P 'S3|HDFS' | sed 's/^\.\// /' | sort ?>
)
@ -57,6 +57,7 @@

#include <Interpreters/Cluster.h>
#include <Interpreters/InterserverIOHandler.h>
#include <Interpreters/SystemLog.h>
#include <Interpreters/SessionLog.h>
#include <Interpreters/Context.h>
#include <Interpreters/DDLWorker.h>
#include <Interpreters/DDLTask.h>
@ -641,7 +642,6 @@ ConfigurationPtr Context::getUsersConfig()

    return shared->users_config;
}


void Context::setUser(const UUID & user_id_)
{
    auto lock = getLock();

@ -2071,6 +2071,16 @@ std::shared_ptr<OpenTelemetrySpanLog> Context::getOpenTelemetrySpanLog() const

    return shared->system_logs->opentelemetry_span_log;
}

std::shared_ptr<SessionLog> Context::getSessionLog() const
{
    auto lock = getLock();

    if (!shared->system_logs)
        return {};

    return shared->system_logs->session_log;
}


std::shared_ptr<ZooKeeperLog> Context::getZooKeeperLog() const
{
@ -74,6 +74,7 @@ class MetricLog;

class AsynchronousMetricLog;
class OpenTelemetrySpanLog;
class ZooKeeperLog;
class SessionLog;
struct MergeTreeSettings;
class StorageS3Settings;
class IDatabase;

@ -600,6 +601,7 @@ public:

    bool hasSessionContext() const { return !session_context.expired(); }

    ContextMutablePtr getGlobalContext() const;

    bool hasGlobalContext() const { return !global_context.expired(); }
    bool isGlobalContext() const
    {

@ -735,6 +737,7 @@ public:

    std::shared_ptr<AsynchronousMetricLog> getAsynchronousMetricLog() const;
    std::shared_ptr<OpenTelemetrySpanLog> getOpenTelemetrySpanLog() const;
    std::shared_ptr<ZooKeeperLog> getZooKeeperLog() const;
    std::shared_ptr<SessionLog> getSessionLog() const;

    /// Returns an object used to log operations with parts if it is possible.
    /// Provide the table name to make the required checks.
@ -147,7 +147,6 @@ void DatabaseCatalog::initializeAndLoadTemporaryDatabase()

void DatabaseCatalog::loadDatabases()
{
    loadMarkedAsDroppedTables();
    auto task_holder = getContext()->getSchedulePool().createTask("DatabaseCatalog", [this](){ this->dropTableDataTask(); });
    drop_task = std::make_unique<BackgroundSchedulePoolTaskHolder>(std::move(task_holder));
    (*drop_task)->activate();

@ -621,12 +620,6 @@ Dependencies DatabaseCatalog::getDependencies(const StorageID & from) const

    return Dependencies(iter->second.begin(), iter->second.end());
}

ViewDependencies DatabaseCatalog::getViewDependencies() const
{
    std::lock_guard lock{databases_mutex};
    return ViewDependencies(view_dependencies.begin(), view_dependencies.end());
}

void
DatabaseCatalog::updateDependency(const StorageID & old_from, const StorageID & old_where, const StorageID & new_from,
    const StorageID & new_where)

@ -130,6 +130,7 @@ public:

    void initializeAndLoadTemporaryDatabase();
    void loadDatabases();
    void loadMarkedAsDroppedTables();

    /// Get an object that protects the table from concurrently executing multiple DDL operations.
    DDLGuardPtr getDDLGuard(const String & database, const String & table);

@ -174,7 +175,6 @@ public:

    void addDependency(const StorageID & from, const StorageID & where);
    void removeDependency(const StorageID & from, const StorageID & where);
    Dependencies getDependencies(const StorageID & from) const;
    ViewDependencies getViewDependencies() const;

    /// For Materialized and Live View
    void updateDependency(const StorageID & old_from, const StorageID & old_where, const StorageID & new_from, const StorageID & new_where);

@ -241,7 +241,6 @@ private:

    };
    using TablesMarkedAsDropped = std::list<TableMarkedAsDropped>;

    void loadMarkedAsDroppedTables();
    void dropTableDataTask();
    void dropTableFinally(const TableMarkedAsDropped & table);
@ -21,6 +21,7 @@

#include <Interpreters/PartLog.h>
#include <Interpreters/QueryThreadLog.h>
#include <Interpreters/QueryViewsLog.h>
#include <Interpreters/SessionLog.h>
#include <Interpreters/TraceLog.h>
#include <Interpreters/TextLog.h>
#include <Interpreters/MetricLog.h>

@ -420,7 +421,8 @@ BlockIO InterpreterSystemQuery::execute()

[&] { if (auto asynchronous_metric_log = getContext()->getAsynchronousMetricLog()) asynchronous_metric_log->flush(true); },
[&] { if (auto opentelemetry_span_log = getContext()->getOpenTelemetrySpanLog()) opentelemetry_span_log->flush(true); },
[&] { if (auto query_views_log = getContext()->getQueryViewsLog()) query_views_log->flush(true); },
[&] { if (auto zookeeper_log = getContext()->getZooKeeperLog()) zookeeper_log->flush(true); }
[&] { if (auto zookeeper_log = getContext()->getZooKeeperLog()) zookeeper_log->flush(true); },
[&] { if (auto session_log = getContext()->getSessionLog()) session_log->flush(true); }
);
break;
}
@ -225,22 +225,19 @@ void LogicalExpressionsOptimizer::addInExpression(const DisjunctiveEqualityChain

/// 1. Create a new IN expression based on information from the OR-chain.

/// Construct a list of literals `x1, ..., xN` from the string `expr = x1 OR ... OR expr = xN`
ASTPtr value_list = std::make_shared<ASTExpressionList>();
/// Construct a tuple of literals `x1, ..., xN` from the string `expr = x1 OR ... OR expr = xN`

Tuple tuple;
tuple.reserve(equality_functions.size());

for (const auto * function : equality_functions)
{
    const auto & operands = getFunctionOperands(function);
    value_list->children.push_back(operands[1]);
    tuple.push_back(operands[1]->as<ASTLiteral>()->value);
}

/// Sort the literals so that they are specified in the same order in the IN expression.
/// Otherwise, they would be specified in the order of the ASTLiteral addresses, which is nondeterministic.
std::sort(value_list->children.begin(), value_list->children.end(), [](const DB::ASTPtr & lhs, const DB::ASTPtr & rhs)
{
    const auto * val_lhs = lhs->as<ASTLiteral>();
    const auto * val_rhs = rhs->as<ASTLiteral>();
    return val_lhs->value < val_rhs->value;
});
std::sort(tuple.begin(), tuple.end());

/// Get the expression `expr` from the chain `expr = x1 OR ... OR expr = xN`
ASTPtr equals_expr_lhs;

@ -250,14 +247,11 @@ void LogicalExpressionsOptimizer::addInExpression(const DisjunctiveEqualityChain

    equals_expr_lhs = operands[0];
}

auto tuple_function = std::make_shared<ASTFunction>();
tuple_function->name = "tuple";
tuple_function->arguments = value_list;
tuple_function->children.push_back(tuple_function->arguments);
auto tuple_literal = std::make_shared<ASTLiteral>(std::move(tuple));

ASTPtr expression_list = std::make_shared<ASTExpressionList>();
expression_list->children.push_back(equals_expr_lhs);
expression_list->children.push_back(tuple_function);
expression_list->children.push_back(tuple_literal);

/// Construct the expression `expr IN (x1, ..., xN)`
auto in_function = std::make_shared<ASTFunction>();
@ -8,6 +8,7 @@

#include <Common/ThreadPool.h>
#include <Common/setThreadName.h>
#include <Interpreters/Context.h>
#include <Interpreters/SessionLog.h>

#include <atomic>
#include <condition_variable>

@ -241,7 +242,8 @@ void Session::shutdownNamedSessions()


Session::Session(const ContextPtr & global_context_, ClientInfo::Interface interface_)
    : global_context(global_context_)
    : session_id(UUIDHelpers::generateV4()),
      global_context(global_context_)
{
    prepared_client_info.emplace();
    prepared_client_info->interface = interface_;

@ -254,6 +256,14 @@ Session::~Session()

    /// Early release a NamedSessionData.
    if (named_session)
        named_session->release();

    if (notified_session_log_about_login)
    {
        // must have been set in makeQueryContext or makeSessionContext
        assert(user);
        if (auto session_log = getSessionLog())
            session_log->addLogOut(session_id, user->getName(), getClientInfo());
    }
}

Authentication::Type Session::getAuthenticationType(const String & user_name) const

@ -261,6 +271,21 @@ Authentication::Type Session::getAuthenticationType(const String & user_name) co

    return global_context->getAccessControlManager().read<User>(user_name)->authentication.getType();
}

Authentication::Type Session::getAuthenticationTypeOrLogInFailure(const String & user_name) const
{
    try
    {
        return getAuthenticationType(user_name);
    }
    catch (const Exception & e)
    {
        if (auto session_log = getSessionLog())
            session_log->addLoginFailure(session_id, getClientInfo(), user_name, e);

        throw;
    }
}

Authentication::Digest Session::getPasswordDoubleSHA1(const String & user_name) const
{
    return global_context->getAccessControlManager().read<User>(user_name)->authentication.getPasswordDoubleSHA1();

@ -280,7 +305,16 @@ void Session::authenticate(const Credentials & credentials_, const Poco::Net::So

    if ((address == Poco::Net::SocketAddress{}) && (prepared_client_info->interface == ClientInfo::Interface::LOCAL))
        address = Poco::Net::SocketAddress{"127.0.0.1", 0};

    user_id = global_context->getAccessControlManager().login(credentials_, address.host());
    try
    {
        user_id = global_context->getAccessControlManager().login(credentials_, address.host());
    }
    catch (const Exception & e)
    {
        if (auto session_log = getSessionLog())
            session_log->addLoginFailure(session_id, *prepared_client_info, credentials_.getUserName(), e);
        throw;
    }

    prepared_client_info->current_user = credentials_.getUserName();
    prepared_client_info->current_address = address;

@ -330,7 +364,7 @@ ContextMutablePtr Session::makeSessionContext()

    return session_context;
}

ContextMutablePtr Session::makeSessionContext(const String & session_id_, std::chrono::steady_clock::duration timeout_, bool session_check_)
ContextMutablePtr Session::makeSessionContext(const String & session_name_, std::chrono::steady_clock::duration timeout_, bool session_check_)
{
    if (session_context)
        throw Exception("Session context already exists", ErrorCodes::LOGICAL_ERROR);

@ -342,7 +376,7 @@ ContextMutablePtr Session::makeSessionContext(const String & session_id_, std::c

    std::shared_ptr<NamedSessionData> new_named_session;
    bool new_named_session_created = false;
    std::tie(new_named_session, new_named_session_created)
        = NamedSessionsStorage::instance().acquireSession(global_context, user_id.value_or(UUID{}), session_id_, timeout_, session_check_);
        = NamedSessionsStorage::instance().acquireSession(global_context, user_id.value_or(UUID{}), session_name_, timeout_, session_check_);

    auto new_session_context = new_named_session->context;
    new_session_context->makeSessionContext();

@ -359,8 +393,7 @@ ContextMutablePtr Session::makeSessionContext(const String & session_id_, std::c

    new_session_context->setUser(*user_id);

    /// Session context is ready.
    session_context = new_session_context;
    session_id = session_id_;
    session_context = std::move(new_session_context);
    named_session = new_named_session;
    named_session_created = new_named_session_created;
    user = session_context->getUser();

@ -378,6 +411,13 @@ ContextMutablePtr Session::makeQueryContext(ClientInfo && query_client_info) con

    return makeQueryContextImpl(nullptr, &query_client_info);
}

std::shared_ptr<SessionLog> Session::getSessionLog() const
{
    // Take it from the global context, since it outlives the Session and is always available.
    // Note that the server may have session_log disabled, hence this may return nullptr.
    return global_context->getSessionLog();
}

ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_to_copy, ClientInfo * client_info_to_move) const
{
    /// We can create a query context either from a session context or from a global context.

@ -425,7 +465,21 @@ ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_t

    query_context_created = true;
    user = query_context->getUser();

    if (!notified_session_log_about_login)
    {
        if (auto session_log = getSessionLog())
        {
            session_log->addLoginSuccess(
                session_id,
                named_session ? std::optional<std::string>(named_session->key.second) : std::nullopt,
                *query_context);

            notified_session_log_about_login = true;
        }
    }

    return query_context;
}

}
@ -19,6 +19,7 @@ struct NamedSessionData;

class NamedSessionsStorage;
struct User;
using UserPtr = std::shared_ptr<const User>;
class SessionLog;

/** Represents a user session from the server perspective;
  * basically it is just a smaller subset of the Context API that simplifies Context management.

@ -41,6 +42,8 @@ public:

    /// Provides information about the authentication type of a specified user.
    Authentication::Type getAuthenticationType(const String & user_name) const;
    Authentication::Digest getPasswordDoubleSHA1(const String & user_name) const;
    /// Same as getAuthenticationType, but adds a LoginFailure event in case of error.
    Authentication::Type getAuthenticationTypeOrLogInFailure(const String & user_name) const;

    /// Sets the current user, checks the credentials and that the specified address is allowed to connect from.
    /// The function throws an exception if there is no such user or the password is wrong.

@ -54,7 +57,7 @@ public:

    /// Makes a session context; can be used one or zero times.
    /// The function also assigns a user to this context.
    ContextMutablePtr makeSessionContext();
    ContextMutablePtr makeSessionContext(const String & session_id_, std::chrono::steady_clock::duration timeout_, bool session_check_);
    ContextMutablePtr makeSessionContext(const String & session_name_, std::chrono::steady_clock::duration timeout_, bool session_check_);
    ContextMutablePtr sessionContext() { return session_context; }
    ContextPtr sessionContext() const { return session_context; }

@ -66,8 +69,11 @@ public:

    ContextMutablePtr makeQueryContext(ClientInfo && query_client_info) const;

private:
    std::shared_ptr<SessionLog> getSessionLog() const;
    ContextMutablePtr makeQueryContextImpl(const ClientInfo * client_info_to_copy, ClientInfo * client_info_to_move) const;

    mutable bool notified_session_log_about_login = false;
    const UUID session_id;
    const ContextPtr global_context;

    /// ClientInfo that will be copied to a session context when it's created.

@ -79,9 +85,9 @@ private:

    ContextMutablePtr session_context;
    mutable bool query_context_created = false;

    String session_id;
    std::shared_ptr<NamedSessionData> named_session;
    bool named_session_created = false;
};

}
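Taken together, these changes mean every Session owns a server-generated UUID from construction and reports its lifecycle to session_log: LoginFailure from authenticate() or getAuthenticationTypeOrLogInFailure(), LoginSuccess from the first query context, and Logout from the destructor. A rough, illustrative sketch of how a protocol handler drives this API (assuming the authenticate(user, password, address) overload):

#include <Interpreters/Session.h>

void handleConnection(DB::ContextPtr global_context, const Poco::Net::SocketAddress & peer)
{
    DB::Session session(global_context, DB::ClientInfo::Interface::TCP);
    session.authenticate("default", "", peer);       /// a LoginFailure entry is written if this throws
    auto query_context = session.makeQueryContext(); /// the first query context writes LoginSuccess
    /// ... execute queries with query_context ...
}                                                    /// ~Session writes Logout for a logged-in session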
src/Interpreters/SessionLog.cpp (new file, 261 lines)
@ -0,0 +1,261 @@

#include <Interpreters/SessionLog.h>

#include <Access/ContextAccess.h>
#include <Access/User.h>
#include <Access/EnabledRolesInfo.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeUUID.h>
#include <Common/IPv6ToBinary.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnTuple.h>
#include <Access/SettingsProfilesInfo.h>

#include <cassert>

namespace
{
using namespace DB;

inline DateTime64 time_in_microseconds(std::chrono::time_point<std::chrono::system_clock> timepoint)
{
    return std::chrono::duration_cast<std::chrono::microseconds>(timepoint.time_since_epoch()).count();
}

inline time_t time_in_seconds(std::chrono::time_point<std::chrono::system_clock> timepoint)
{
    return std::chrono::duration_cast<std::chrono::seconds>(timepoint.time_since_epoch()).count();
}

auto eventTime()
{
    const auto finish_time = std::chrono::system_clock::now();

    return std::make_pair(time_in_seconds(finish_time), time_in_microseconds(finish_time));
}

using AuthType = Authentication::Type;
using Interface = ClientInfo::Interface;

void fillColumnArray(const Strings & data, IColumn & column)
{
    auto & array = typeid_cast<ColumnArray &>(column);
    size_t size = 0;
    auto & data_col = array.getData();
    for (const auto & name : data)
    {
        data_col.insertData(name.data(), name.size());
        ++size;
    }
    auto & offsets = array.getOffsets();
    offsets.push_back(offsets.back() + size);
}

}

namespace DB
{

SessionLogElement::SessionLogElement(const UUID & session_id_, Type type_)
    : session_id(session_id_),
      type(type_)
{
    std::tie(event_time, event_time_microseconds) = eventTime();
}

NamesAndTypesList SessionLogElement::getNamesAndTypes()
{
    const auto event_type = std::make_shared<DataTypeEnum8>(
        DataTypeEnum8::Values
        {
            {"LoginFailure", static_cast<Int8>(SESSION_LOGIN_FAILURE)},
            {"LoginSuccess", static_cast<Int8>(SESSION_LOGIN_SUCCESS)},
            {"Logout", static_cast<Int8>(SESSION_LOGOUT)}
        });

#define AUTH_TYPE_NAME_AND_VALUE(v) std::make_pair(Authentication::TypeInfo::get(v).raw_name, static_cast<Int8>(v))
    const auto identified_with_column = std::make_shared<DataTypeEnum8>(
        DataTypeEnum8::Values
        {
            AUTH_TYPE_NAME_AND_VALUE(AuthType::NO_PASSWORD),
            AUTH_TYPE_NAME_AND_VALUE(AuthType::PLAINTEXT_PASSWORD),
            AUTH_TYPE_NAME_AND_VALUE(AuthType::SHA256_PASSWORD),
            AUTH_TYPE_NAME_AND_VALUE(AuthType::DOUBLE_SHA1_PASSWORD),
            AUTH_TYPE_NAME_AND_VALUE(AuthType::LDAP),
            AUTH_TYPE_NAME_AND_VALUE(AuthType::KERBEROS)
        });
#undef AUTH_TYPE_NAME_AND_VALUE

    const auto interface_type_column = std::make_shared<DataTypeEnum8>(
        DataTypeEnum8::Values
        {
            {"TCP", static_cast<Int8>(Interface::TCP)},
            {"HTTP", static_cast<Int8>(Interface::HTTP)},
            {"gRPC", static_cast<Int8>(Interface::GRPC)},
            {"MySQL", static_cast<Int8>(Interface::MYSQL)},
            {"PostgreSQL", static_cast<Int8>(Interface::POSTGRESQL)}
        });

    const auto lc_string_datatype = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());

    const auto changed_settings_type_column = std::make_shared<DataTypeArray>(
        std::make_shared<DataTypeTuple>(
            DataTypes({
                // setting name
                lc_string_datatype,
                // value
                std::make_shared<DataTypeString>()
            })));

    return
    {
        {"type", std::move(event_type)},
        {"session_id", std::make_shared<DataTypeUUID>()},
        {"session_name", std::make_shared<DataTypeString>()},
        {"event_date", std::make_shared<DataTypeDate>()},
        {"event_time", std::make_shared<DataTypeDateTime>()},
        {"event_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},

        {"user", std::make_shared<DataTypeString>()},
        {"auth_type", std::move(identified_with_column)},

        {"profiles", std::make_shared<DataTypeArray>(lc_string_datatype)},
        {"roles", std::make_shared<DataTypeArray>(lc_string_datatype)},
        {"changed_settings", std::move(changed_settings_type_column)},

        {"client_address", DataTypeFactory::instance().get("IPv6")},
        {"client_port", std::make_shared<DataTypeUInt16>()},
        {"interface", std::move(interface_type_column)},

        {"client_hostname", std::make_shared<DataTypeString>()},
        {"client_name", std::make_shared<DataTypeString>()},
        {"client_revision", std::make_shared<DataTypeUInt32>()},
        {"client_version_major", std::make_shared<DataTypeUInt32>()},
        {"client_version_minor", std::make_shared<DataTypeUInt32>()},
        {"client_version_patch", std::make_shared<DataTypeUInt32>()},

        {"failure_reason", std::make_shared<DataTypeString>()},
    };
}

void SessionLogElement::appendToBlock(MutableColumns & columns) const
{
    assert(type >= SESSION_LOGIN_FAILURE && type <= SESSION_LOGOUT);
    assert(user_identified_with >= Authentication::Type::NO_PASSWORD && user_identified_with <= Authentication::Type::MAX_TYPE);

    size_t i = 0;

    columns[i++]->insert(type);
    columns[i++]->insert(session_id);
    columns[i++]->insert(session_name);
    columns[i++]->insert(static_cast<DayNum>(DateLUT::instance().toDayNum(event_time).toUnderType()));
    columns[i++]->insert(event_time);
    columns[i++]->insert(event_time_microseconds);

    columns[i++]->insert(user);
    columns[i++]->insert(user_identified_with);

    fillColumnArray(profiles, *columns[i++]);
    fillColumnArray(roles, *columns[i++]);

    {
        auto & changed_settings_array_col = assert_cast<ColumnArray &>(*columns[i++]);
        auto & changed_settings_tuple_col = assert_cast<ColumnTuple &>(changed_settings_array_col.getData());
        auto & names_col = *changed_settings_tuple_col.getColumnPtr(0)->assumeMutable();
        auto & values_col = assert_cast<ColumnString &>(*changed_settings_tuple_col.getColumnPtr(1)->assumeMutable());

        size_t items_added = 0;
        for (const auto & kv : changed_settings)
        {
            names_col.insert(kv.first);
            values_col.insert(kv.second);
            ++items_added;
        }

        auto & offsets = changed_settings_array_col.getOffsets();
        offsets.push_back(changed_settings_tuple_col.size());
    }

    columns[i++]->insertData(IPv6ToBinary(client_info.current_address.host()).data(), 16);
    columns[i++]->insert(client_info.current_address.port());

    columns[i++]->insert(client_info.interface);

    columns[i++]->insertData(client_info.client_hostname.data(), client_info.client_hostname.length());
    columns[i++]->insertData(client_info.client_name.data(), client_info.client_name.length());
    columns[i++]->insert(client_info.client_tcp_protocol_version);
    columns[i++]->insert(client_info.client_version_major);
    columns[i++]->insert(client_info.client_version_minor);
    columns[i++]->insert(client_info.client_version_patch);

    columns[i++]->insertData(auth_failure_reason.data(), auth_failure_reason.length());
}

void SessionLog::addLoginSuccess(const UUID & session_id, std::optional<String> session_name, const Context & login_context)
{
    const auto access = login_context.getAccess();
    const auto & settings = login_context.getSettingsRef();
    const auto & client_info = login_context.getClientInfo();

    DB::SessionLogElement log_entry(session_id, SESSION_LOGIN_SUCCESS);
    log_entry.client_info = client_info;

    {
        const auto user = access->getUser();
        log_entry.user = user->getName();
        log_entry.user_identified_with = user->authentication.getType();
        log_entry.external_auth_server = user->authentication.getLDAPServerName();
    }

    if (session_name)
        log_entry.session_name = *session_name;

    if (const auto roles_info = access->getRolesInfo())
        log_entry.roles = roles_info->getCurrentRolesNames();

    if (const auto profile_info = access->getDefaultProfileInfo())
        log_entry.profiles = profile_info->getProfileNames();

    for (const auto & s : settings.allChanged())
        log_entry.changed_settings.emplace_back(s.getName(), s.getValueString());

    add(log_entry);
}

void SessionLog::addLoginFailure(
    const UUID & session_id,
    const ClientInfo & info,
    const String & user,
    const Exception & reason)
{
    SessionLogElement log_entry(session_id, SESSION_LOGIN_FAILURE);

    log_entry.user = user;
    log_entry.auth_failure_reason = reason.message();
    log_entry.client_info = info;
    log_entry.user_identified_with = Authentication::Type::NO_PASSWORD;

    add(log_entry);
}

void SessionLog::addLogOut(const UUID & session_id, const String & user, const ClientInfo & client_info)
{
    auto log_entry = SessionLogElement(session_id, SESSION_LOGOUT);
    log_entry.user = user;
    log_entry.client_info = client_info;

    add(log_entry);
}

}
src/Interpreters/SessionLog.h (new file, 74 lines)
@ -0,0 +1,74 @@

#pragma once

#include <Interpreters/SystemLog.h>
#include <Interpreters/ClientInfo.h>
#include <Access/Authentication.h>

namespace DB
{

enum SessionLogElementType : int8_t
{
    SESSION_LOGIN_FAILURE = 0,
    SESSION_LOGIN_SUCCESS = 1,
    SESSION_LOGOUT = 2,
};

class ContextAccess;

/** A struct which will be inserted as a row into the session_log table.
  *
  * Allows logging information about user sessions:
  * - auth attempts, auth result, auth method, etc.
  * - log out events
  */
struct SessionLogElement
{
    using Type = SessionLogElementType;

    SessionLogElement() = default;
    SessionLogElement(const UUID & session_id_, Type type_);
    SessionLogElement(const SessionLogElement &) = default;
    SessionLogElement & operator=(const SessionLogElement &) = default;
    SessionLogElement(SessionLogElement &&) = default;
    SessionLogElement & operator=(SessionLogElement &&) = default;

    UUID session_id;

    Type type = SESSION_LOGIN_FAILURE;

    String session_name;
    time_t event_time{};
    Decimal64 event_time_microseconds{};

    String user;
    Authentication::Type user_identified_with = Authentication::Type::NO_PASSWORD;
    String external_auth_server;
    Strings roles;
    Strings profiles;
    std::vector<std::pair<String, String>> changed_settings;

    ClientInfo client_info;
    String auth_failure_reason;

    static std::string name() { return "SessionLog"; }

    static NamesAndTypesList getNamesAndTypes();
    static NamesAndAliases getNamesAndAliases() { return {}; }

    void appendToBlock(MutableColumns & columns) const;
};


/// Instead of a typedef - to allow forward declaration.
class SessionLog : public SystemLog<SessionLogElement>
{
    using SystemLog<SessionLogElement>::SystemLog;

public:
    void addLoginSuccess(const UUID & session_id, std::optional<String> session_name, const Context & login_context);
    void addLoginFailure(const UUID & session_id, const ClientInfo & info, const String & user, const Exception & reason);
    void addLogOut(const UUID & session_id, const String & user, const ClientInfo & client_info);
};

}
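Callers never construct a SessionLog directly; they fetch it from the Context, which returns nullptr when the server config has no session_log section. A hedged sketch of the calling pattern this commit uses throughout:

#include <Interpreters/Context.h>
#include <Interpreters/SessionLog.h>

void recordFailedLogin(DB::ContextPtr context, const DB::UUID & session_id,
                       const DB::ClientInfo & info, const std::string & user, const DB::Exception & reason)
{
    /// The null check matters: session_log is optional in the server config.
    if (auto session_log = context->getSessionLog())
        session_log->addLoginFailure(session_id, info, user, reason);
}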
@ -402,8 +402,8 @@ void Set::checkTypesEqual(size_t set_type_idx, const DataTypePtr & other_type) c

    + data_types[set_type_idx]->getName() + " on the right", ErrorCodes::TYPE_MISMATCH);
}

MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector<KeyTuplePositionMapping> && index_mapping_)
    : indexes_mapping(std::move(index_mapping_))
MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector<KeyTuplePositionMapping> && indexes_mapping_)
    : has_all_keys(set_elements.size() == indexes_mapping_.size()), indexes_mapping(std::move(indexes_mapping_))
{
    std::sort(indexes_mapping.begin(), indexes_mapping.end(),
        [](const KeyTuplePositionMapping & l, const KeyTuplePositionMapping & r)

@ -548,11 +548,11 @@ BoolMask MergeTreeSetIndex::checkInRange(const std::vector<Range> & key_ranges,

        break;
    }
}
if (one_element_range)
if (one_element_range && has_all_keys)
{
    /// Here we know that there is one element in range.
    /// The main difference with the normal case is that we can definitely say that
    /// the condition in this range is always TRUE (can_be_false = 0) xor always FALSE (can_be_true = 0).
    /// the condition in this range is always TRUE (can_be_false = 0) or always FALSE (can_be_true = 0).

    /// Check if it's an empty range
    if (!left_included || !right_included)

@ -208,7 +208,7 @@ public:

    std::vector<FunctionBasePtr> functions;
};

MergeTreeSetIndex(const Columns & set_elements, std::vector<KeyTuplePositionMapping> && index_mapping_);
MergeTreeSetIndex(const Columns & set_elements, std::vector<KeyTuplePositionMapping> && indexes_mapping_);

size_t size() const { return ordered_set.at(0)->size(); }

@ -217,6 +217,8 @@ public:

    BoolMask checkInRange(const std::vector<Range> & key_ranges, const DataTypes & data_types) const;

private:
    // If all arguments in the tuple are key columns, we can optimize NOT IN when there is only one element.
    bool has_all_keys;
    Columns ordered_set;
    std::vector<KeyTuplePositionMapping> indexes_mapping;
@ -6,7 +6,7 @@

#include <Interpreters/QueryLog.h>
#include <Interpreters/QueryThreadLog.h>
#include <Interpreters/QueryViewsLog.h>
#include <Interpreters/SystemLog.h>
#include <Interpreters/SessionLog.h>
#include <Interpreters/TextLog.h>
#include <Interpreters/TraceLog.h>
#include <Interpreters/ZooKeeperLog.h>

@ -39,7 +39,13 @@ std::shared_ptr<TSystemLog> createSystemLog(

    const String & config_prefix)
{
    if (!config.has(config_prefix))
    {
        LOG_DEBUG(&Poco::Logger::get("SystemLog"),
            "Not creating {}.{} since the corresponding section '{}' is missing from the config",
            default_database_name, default_table_name, config_prefix);

        return {};
    }

    String database = config.getString(config_prefix + ".database", default_database_name);
    String table = config.getString(config_prefix + ".table", default_table_name);

@ -107,6 +113,7 @@ SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConf

        "opentelemetry_span_log");
    query_views_log = createSystemLog<QueryViewsLog>(global_context, "system", "query_views_log", config, "query_views_log");
    zookeeper_log = createSystemLog<ZooKeeperLog>(global_context, "system", "zookeeper_log", config, "zookeeper_log");
    session_log = createSystemLog<SessionLog>(global_context, "system", "session_log", config, "session_log");

    if (query_log)
        logs.emplace_back(query_log.get());

@ -130,6 +137,8 @@ SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConf

        logs.emplace_back(query_views_log.get());
    if (zookeeper_log)
        logs.emplace_back(zookeeper_log.get());
    if (session_log)
        logs.emplace_back(session_log.get());

    try
    {
@ -75,6 +75,7 @@ class AsynchronousMetricLog;

class OpenTelemetrySpanLog;
class QueryViewsLog;
class ZooKeeperLog;
class SessionLog;


class ISystemLog

@ -115,6 +116,8 @@ struct SystemLogs

    std::shared_ptr<QueryViewsLog> query_views_log;
    /// Used to log all actions of the ZooKeeper client
    std::shared_ptr<ZooKeeperLog> zookeeper_log;
    /// Login, logout and login-failure events
    std::shared_ptr<SessionLog> session_log;

    std::vector<ISystemLog *> logs;
};
@ -26,7 +26,8 @@ NamesAndTypesList TextLogElement::getNamesAndTypes()

    {"Notice", static_cast<Int8>(Message::PRIO_NOTICE)},
    {"Information", static_cast<Int8>(Message::PRIO_INFORMATION)},
    {"Debug", static_cast<Int8>(Message::PRIO_DEBUG)},
    {"Trace", static_cast<Int8>(Message::PRIO_TRACE)}
    {"Trace", static_cast<Int8>(Message::PRIO_TRACE)},
    {"Test", static_cast<Int8>(Message::PRIO_TEST)},
});

return
@ -148,6 +148,7 @@ SRCS(

    RowRefs.cpp
    SelectIntersectExceptQueryVisitor.cpp
    Session.cpp
    SessionLog.cpp
    Set.cpp
    SetVariants.cpp
    SortedBlocksWriter.cpp
@ -246,7 +246,7 @@ void MySQLHandler::authenticate(const String & user_name, const String & auth_pl

try
{
    // For compatibility with the JavaScript MySQL client, the Native41 authentication plugin is used when possible (if the password is specified using double SHA1). Otherwise the SHA256 plugin is used.
    if (session->getAuthenticationType(user_name) == DB::Authentication::SHA256_PASSWORD)
    if (session->getAuthenticationTypeOrLogInFailure(user_name) == DB::Authentication::SHA256_PASSWORD)
    {
        authPluginSSL();
    }
@ -174,7 +174,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(

if (query_info.projection->desc->is_minmax_count_projection)
{
    Pipe pipe(std::make_shared<SourceFromSingleChunk>(
        query_info.minmax_count_projection_block,
        query_info.minmax_count_projection_block.cloneEmpty(),
        Chunk(query_info.minmax_count_projection_block.getColumns(), query_info.minmax_count_projection_block.rows())));
    auto read_from_pipe = std::make_unique<ReadFromPreparedSource>(std::move(pipe));
    projection_plan->addStep(std::move(read_from_pipe));
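The one-line fix above is subtle: SourceFromSingleChunk takes a header Block plus a Chunk, and previously the populated block itself was passed as the header. A hedged sketch of the corrected shape (names follow the hunk):

/// The header must describe structure only; the rows travel in the Chunk.
const DB::Block & block = query_info.minmax_count_projection_block; /// assumed populated
auto source = std::make_shared<DB::SourceFromSingleChunk>(
    block.cloneEmpty(),                            /// columns and types, zero rows
    DB::Chunk(block.getColumns(), block.rows()));  /// the actual data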
@ -89,6 +89,7 @@ struct Settings;

M(Bool, replicated_can_become_leader, true, "If true, Replicated tables replicas on this node will try to acquire leadership.", 0) \
M(Seconds, zookeeper_session_expiration_check_period, 60, "ZooKeeper session expiration check period, in seconds.", 0) \
M(Bool, detach_old_local_parts_when_cloning_replica, true, "Do not remove old local parts when repairing lost replica.", 0) \
M(Bool, detach_not_byte_identical_parts, false, "Do not remove non byte-identical parts for ReplicatedMergeTree, instead detach them (may be useful for further analysis).", 0) \
M(UInt64, max_replicated_fetches_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited.", 0) \
M(UInt64, max_replicated_sends_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited.", 0) \
\
@ -17,6 +17,7 @@ class ASTStorage;
#define LIST_OF_MYSQL_SETTINGS(M) \
    M(UInt64, connection_pool_size, 16, "Size of connection pool (if all connections are in use, the query will wait until a connection is freed).", 0) \
    M(UInt64, connection_max_tries, 3, "Number of retries for pool with failover.", 0) \
    M(UInt64, connection_wait_timeout, 5, "Timeout (in seconds) for waiting for a free connection (in case there are already connection_pool_size active connections); 0 - do not wait.", 0) \
    M(Bool, connection_auto_close, true, "Auto-close connection after query execution, i.e. disable connection reuse.", 0) \

DECLARE_SETTINGS_TRAITS(MySQLSettingsTraits, LIST_OF_MYSQL_SETTINGS)
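connection_wait_timeout bounds how long a query blocks when every pooled connection is busy. A minimal sketch of the deadline-based waiting such a setting can drive (the toy Pool below is illustrative, not the mysqlxx implementation):

#include <chrono>
#include <cstddef>
#include <mutex>
#include <stdexcept>
#include <thread>
#include <vector>

// Toy pool: integers stand in for connections.
class Pool
{
public:
    explicit Pool(std::size_t size) : free_connections(size, 0) {}

    // A wait_timeout of zero disables waiting entirely.
    int get(std::chrono::seconds wait_timeout)
    {
        const auto deadline = std::chrono::steady_clock::now() + wait_timeout;
        for (;;)
        {
            {
                std::lock_guard lock(mutex);
                if (!free_connections.empty())
                {
                    int connection = free_connections.back();
                    free_connections.pop_back();
                    return connection;
                }
            }
            if (wait_timeout.count() == 0)
                throw std::runtime_error("Pool is full (wait is disabled)");
            if (std::chrono::steady_clock::now() >= deadline)
                throw std::runtime_error("Pool is full (wait timeout exceeded)");
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
    }

private:
    std::mutex mutex;
    std::vector<int> free_connections;
};

int main()
{
    Pool pool(1);
    int connection = pool.get(std::chrono::seconds(5)); // succeeds immediately
    (void)connection;
}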
@ -17,6 +17,8 @@ namespace DB
    M(UInt64, materialized_postgresql_max_block_size, 65536, "Number of rows collected before flushing data into a table.", 0) \
    M(String, materialized_postgresql_tables_list, "", "List of tables for the MaterializedPostgreSQL database engine.", 0) \
    M(Bool, materialized_postgresql_allow_automatic_update, false, "Allow to reload a table in the background when schema changes are detected.", 0) \
    M(String, materialized_postgresql_replication_slot, "", "A user-created replication slot.", 0) \
    M(String, materialized_postgresql_snapshot, "", "A user-provided snapshot, for the case when the user manages replication slots themselves.", 0) \

DECLARE_SETTINGS_TRAITS(MaterializedPostgreSQLSettingsTraits, LIST_OF_MATERIALIZED_POSTGRESQL_SETTINGS)
@ -34,25 +34,27 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler(
    const postgres::ConnectionInfo & connection_info_,
    ContextPtr context_,
    bool is_attach_,
    const size_t max_block_size_,
    bool allow_automatic_update_,
    bool is_materialized_postgresql_database_,
    const String tables_list_)
    : log(&Poco::Logger::get("PostgreSQLReplicationHandler(" +
        (is_materialized_postgresql_database_ ? remote_database_name_ : remote_database_name_ + '.' + tables_list_)
        + ")"))
    const MaterializedPostgreSQLSettings & replication_settings,
    bool is_materialized_postgresql_database_)
    : log(&Poco::Logger::get("PostgreSQLReplicationHandler"))
    , context(context_)
    , is_attach(is_attach_)
    , remote_database_name(remote_database_name_)
    , current_database_name(current_database_name_)
    , connection_info(connection_info_)
    , max_block_size(max_block_size_)
    , allow_automatic_update(allow_automatic_update_)
    , max_block_size(replication_settings.materialized_postgresql_max_block_size)
    , allow_automatic_update(replication_settings.materialized_postgresql_allow_automatic_update)
    , is_materialized_postgresql_database(is_materialized_postgresql_database_)
    , tables_list(tables_list_)
    , tables_list(replication_settings.materialized_postgresql_tables_list)
    , user_provided_snapshot(replication_settings.materialized_postgresql_snapshot)
    , milliseconds_to_wait(RESCHEDULE_MS)
{
    replication_slot = fmt::format("{}_ch_replication_slot", replication_identifier);
    replication_slot = replication_settings.materialized_postgresql_replication_slot;
    if (replication_slot.empty())
    {
        user_managed_slot = false;
        replication_slot = fmt::format("{}_ch_replication_slot", replication_identifier);
    }
    publication_name = fmt::format("{}_ch_publication", replication_identifier);

    startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); });
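The constructor now receives one MaterializedPostgreSQLSettings reference instead of several positional parameters, so adding a setting no longer ripples through every call site. A reduced sketch of the pattern (the struct and class names below are examples, not the real types):

#include <cstdint>
#include <iostream>
#include <string>

// Illustrative settings struct replacing a long positional parameter list.
struct ReplicationSettings
{
    uint64_t max_block_size = 65536;
    bool allow_automatic_update = false;
    std::string tables_list;
    std::string replication_slot;
    std::string snapshot;
};

class Handler
{
public:
    explicit Handler(const ReplicationSettings & settings)
        : max_block_size(settings.max_block_size)
        , tables_list(settings.tables_list)
    {
    }

    void describe() const
    {
        std::cout << "block size " << max_block_size
                  << ", tables: " << tables_list << '\n';
    }

private:
    uint64_t max_block_size;
    std::string tables_list;
};

int main()
{
    ReplicationSettings settings;
    settings.tables_list = "table1,table2";

    // Adding a new setting later changes neither this call nor the signature.
    Handler handler(settings);
    handler.describe();
}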
@ -119,7 +121,20 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error)

    auto initial_sync = [&]()
    {
        createReplicationSlot(tx, start_lsn, snapshot_name);
        LOG_TRACE(log, "Starting tables sync load");

        if (user_managed_slot)
        {
            if (user_provided_snapshot.empty())
                throw Exception(ErrorCodes::BAD_ARGUMENTS,
                                "A user-defined replication slot must come with a snapshot from EXPORT SNAPSHOT, "
                                "captured when the slot is created. Pass it to the `materialized_postgresql_snapshot` setting");
            snapshot_name = user_provided_snapshot;
        }
        else
        {
            createReplicationSlot(tx, start_lsn, snapshot_name);
        }

        /// Loading tables from a snapshot requires a certain transaction type, so we need to open a new transaction.
        /// But we cannot have more than one open transaction on the same connection. Therefore we have
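For a user-managed slot the snapshot can only be captured when the slot is created over a replication connection, which is why it must be handed in via the setting. A hedged libpqxx sketch of exporting it (the connection string and slot name are examples; CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT is the PostgreSQL replication-protocol command):

#include <iostream>
#include <pqxx/pqxx>

int main()
{
    // "replication=database" switches this connection into the
    // logical replication protocol for the given database.
    pqxx::connection conn("dbname=postgres user=postgres replication=database");
    pqxx::nontransaction tx(conn);

    // EXPORT_SNAPSHOT makes the command return a snapshot name that a
    // separate transaction can adopt via SET TRANSACTION SNAPSHOT.
    pqxx::result result = tx.exec(
        "CREATE_REPLICATION_SLOT my_slot LOGICAL pgoutput EXPORT_SNAPSHOT");

    std::string start_lsn = result[0]["consistent_point"].as<std::string>();
    std::string snapshot_name = result[0]["snapshot_name"].as<std::string>();

    std::cout << "slot created at " << start_lsn
              << ", snapshot " << snapshot_name << '\n';
}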
@ -149,12 +164,17 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error)
    /// Recreation of a replication slot imposes reloading of all tables.
    if (!isReplicationSlotExist(tx, start_lsn, /* temporary */false))
    {
        if (user_managed_slot)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replication slot `{}` is specified in settings, but it does not exist", replication_slot);

        initial_sync();
    }
    /// Always drop replication slot if it is CREATE query and not ATTACH.
    else if (!is_attach || new_publication)
    {
        dropReplicationSlot(tx);
        if (!user_managed_slot)
            dropReplicationSlot(tx);

        initial_sync();
    }
    /// Synchronization and initial load already took place - do not create any new tables, just fetch StoragePtr's
@ -390,6 +410,8 @@ bool PostgreSQLReplicationHandler::isReplicationSlotExist(pqxx::nontransaction &
void PostgreSQLReplicationHandler::createReplicationSlot(
        pqxx::nontransaction & tx, String & start_lsn, String & snapshot_name, bool temporary)
{
    assert(temporary || !user_managed_slot);

    String query_str, slot_name;
    if (temporary)
        slot_name = replication_slot + "_tmp";
@ -415,6 +437,8 @@ void PostgreSQLReplicationHandler::createReplicationSlot(

void PostgreSQLReplicationHandler::dropReplicationSlot(pqxx::nontransaction & tx, bool temporary)
{
    assert(temporary || !user_managed_slot);

    std::string slot_name;
    if (temporary)
        slot_name = replication_slot + "_tmp";
@ -470,17 +494,24 @@ void PostgreSQLReplicationHandler::shutdownFinal()
        connection.execWithRetry([&](pqxx::nontransaction & tx){ dropPublication(tx); });
        String last_committed_lsn;

        connection.execWithRetry([&](pqxx::nontransaction & tx)
        {
            if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false))
                dropReplicationSlot(tx, /* temporary */false);
        });

        connection.execWithRetry([&](pqxx::nontransaction & tx)
        {
            if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */true))
                dropReplicationSlot(tx, /* temporary */true);
        });

        if (user_managed_slot)
            return;

        connection.execWithRetry([&](pqxx::nontransaction & tx)
        {
            if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false))
                dropReplicationSlot(tx, /* temporary */false);
        });
    }
    catch (Exception & e)
    {
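execWithRetry shields the shutdown sequence from transient connection errors by re-running the transaction callback. A minimal sketch of such a helper (the signature, retry count and back-off are assumptions, not the actual implementation):

#include <chrono>
#include <iostream>
#include <stdexcept>
#include <thread>

// Hypothetical retry helper: run `f`, retrying on failure a few times.
template <typename F>
void execWithRetry(F && f, int max_tries = 3)
{
    for (int attempt = 1; attempt <= max_tries; ++attempt)
    {
        try
        {
            f();
            return;
        }
        catch (const std::exception & e)
        {
            std::cerr << "attempt " << attempt << " failed: " << e.what() << '\n';
            if (attempt == max_tries)
                throw; // give up after the last attempt
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
    }
}

int main()
{
    int calls = 0;
    execWithRetry([&]
    {
        if (++calls < 2)
            throw std::runtime_error("transient failure");
    });
    std::cout << "succeeded after " << calls << " calls\n";
}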
@ -1,6 +1,7 @@
#pragma once

#include "MaterializedPostgreSQLConsumer.h"
#include "MaterializedPostgreSQLSettings.h"
#include <Databases/PostgreSQL/fetchPostgreSQLTableStructure.h>
#include <Core/PostgreSQL/Utils.h>
#include <Parsers/ASTCreateQuery.h>
@ -22,10 +23,8 @@ public:
        const postgres::ConnectionInfo & connection_info_,
        ContextPtr context_,
        bool is_attach_,
        const size_t max_block_size_,
        bool allow_automatic_update_,
        bool is_materialized_postgresql_database_,
        const String tables_list = "");
        const MaterializedPostgreSQLSettings & replication_settings,
        bool is_materialized_postgresql_database_);

    /// Activate task to be run from a separate thread: wait until connection is available and call startReplication().
    void startup();
@ -117,6 +116,9 @@ private:
    /// A comma-separated list of tables, which are going to be replicated for the database engine. By default, the whole database is replicated.
    String tables_list;

    bool user_managed_slot = true;
    String user_provided_snapshot;

    String replication_slot, publication_name;

    /// Replication consumer. Manages decoding of replication stream and syncing into tables.
@ -65,6 +65,8 @@ StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL(
    setInMemoryMetadata(storage_metadata);

    String replication_identifier = remote_database_name + "_" + remote_table_name_;
    replication_settings->materialized_postgresql_tables_list = remote_table_name_;

    replication_handler = std::make_unique<PostgreSQLReplicationHandler>(
        replication_identifier,
        remote_database_name,
@ -72,9 +74,9 @@ StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL(
        connection_info,
        getContext(),
        is_attach,
        replication_settings->materialized_postgresql_max_block_size.value,
        /* allow_automatic_update */ false, /* is_materialized_postgresql_database */false,
        remote_table_name_);
        *replication_settings,
        /* is_materialized_postgresql_database */false);

    if (!is_attach)
    {
@ -1332,7 +1332,12 @@ void registerStorageDistributed(StorageFactory & factory)
        String remote_table = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();

        const auto & sharding_key = engine_args.size() >= 4 ? engine_args[3] : nullptr;
        const auto & storage_policy = engine_args.size() >= 5 ? engine_args[4]->as<ASTLiteral &>().value.safeGet<String>() : "default";
        String storage_policy = "default";
        if (engine_args.size() >= 5)
        {
            engine_args[4] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[4], local_context);
            storage_policy = engine_args[4]->as<ASTLiteral &>().value.safeGet<String>();
        }

        /// Check that sharding_key exists in the table and has numeric type.
        if (sharding_key)
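The old one-liner assumed the fifth engine argument was already a literal and would fail on a bare identifier; evaluating it first accepts both a quoted policy name and an unquoted one. A toy version of the normalize-then-read step (the AST types are simplified stand-ins):

#include <cassert>
#include <string>
#include <variant>

// Simplified AST nodes: an argument is either a bare identifier or a literal.
struct Identifier { std::string name; };
struct Literal { std::string value; };
using Node = std::variant<Identifier, Literal>;

// Normalize: a bare identifier is folded into a literal with the same text.
Node evaluateIdentifierAsLiteral(Node node)
{
    if (const auto * id = std::get_if<Identifier>(&node))
        return Literal{id->name};
    return node;
}

int main()
{
    Node arg = Identifier{"default"};          // user wrote a bare identifier
    arg = evaluateIdentifierAsLiteral(arg);    // now safe to read as a literal
    assert(std::get<Literal>(arg).value == "default");
}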
@ -61,7 +61,7 @@ protected:

        const Block & src = (*data)[current_index];
        Columns columns;
        columns.reserve(columns.size());
        columns.reserve(column_names_and_types.size());

        /// Add only required columns to `res`.
        for (const auto & elem : column_names_and_types)
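The replaced line reserved with the vector's own size, which is zero at that point, making the call a no-op; reserving with the source count pre-allocates once. For example:

#include <cassert>
#include <vector>

int main()
{
    std::vector<int> source{1, 2, 3, 4};

    std::vector<int> columns;
    columns.reserve(columns.size());        // bug: reserves 0, a no-op
    columns.reserve(source.size());         // fix: one allocation up front
    assert(columns.capacity() >= source.size());

    for (int x : source)
        columns.push_back(x);               // no reallocation during the loop
}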
@ -267,11 +267,15 @@ void registerStorageMySQL(StorageFactory & factory)
            throw Exception("connection_pool_size cannot be zero.", ErrorCodes::BAD_ARGUMENTS);

        auto addresses = parseRemoteDescriptionForExternalDatabase(host_port, max_addresses, 3306);
        mysqlxx::PoolWithFailover pool(remote_database, addresses,
            username, password,
        mysqlxx::PoolWithFailover pool(
            remote_database,
            addresses,
            username,
            password,
            MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
            mysql_settings.connection_pool_size,
            mysql_settings.connection_max_tries);
            mysql_settings.connection_max_tries,
            mysql_settings.connection_wait_timeout);

        bool replace_query = false;
        std::string on_duplicate_clause;
@ -1811,7 +1811,10 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry)

        write_part_log(ExecutionStatus::fromCurrentException());

        tryRemovePartImmediately(std::move(part));
        if (storage_settings_ptr->detach_not_byte_identical_parts)
            forgetPartAndMoveToDetached(std::move(part), "merge-not-byte-identical");
        else
            tryRemovePartImmediately(std::move(part));
        /// No need to delete the part from ZK because we can be sure that the commit transaction
        /// didn't go through.
@ -1935,7 +1938,10 @@ bool StorageReplicatedMergeTree::tryExecutePartMutation(const StorageReplicatedM

        write_part_log(ExecutionStatus::fromCurrentException());

        tryRemovePartImmediately(std::move(new_part));
        if (storage_settings_ptr->detach_not_byte_identical_parts)
            forgetPartAndMoveToDetached(std::move(new_part), "mutate-not-byte-identical");
        else
            tryRemovePartImmediately(std::move(new_part));
        /// No need to delete the part from ZK because we can be sure that the commit transaction
        /// didn't go through.
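Both failure paths above now branch on detach_not_byte_identical_parts: a part whose result differs byte-wise from the replica's copy can be moved into detached/ under a descriptive prefix instead of being deleted, keeping it around for analysis. A reduced sketch of the branch (the filesystem helpers are stand-ins for the real part-management code):

#include <filesystem>
#include <string>

namespace fs = std::filesystem;

// Stand-ins for the real part-management helpers.
void removeImmediately(const fs::path & part)
{
    fs::remove_all(part);
}

void moveToDetached(const fs::path & part, const std::string & prefix)
{
    fs::path detached = part.parent_path() / "detached";
    fs::create_directories(detached);
    fs::rename(part, detached / (prefix + "_" + part.filename().string()));
}

int main()
{
    bool detach_not_byte_identical_parts = true; // from the MergeTree settings
    fs::path part = "data/all_1_1_0";
    fs::create_directories(part);

    if (detach_not_byte_identical_parts)
        moveToDetached(part, "merge-not-byte-identical"); // keep it for analysis
    else
        removeImmediately(part);                          // previous behavior
}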
@ -73,7 +73,9 @@ NamesAndAliases StorageSystemProcesses::getNamesAndAliases()
    return
    {
        {"ProfileEvents.Names", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapKeys(ProfileEvents)"},
        {"ProfileEvents.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())}, "mapValues(ProfileEvents)"}
        {"ProfileEvents.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())}, "mapValues(ProfileEvents)"},
        {"Settings.Names", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapKeys(Settings)"},
        {"Settings.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapValues(Settings)"}
    };
}
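The new aliases expose the Settings map as two parallel arrays, mirroring the older ProfileEvents.Names/Values columns: mapKeys produces the names, mapValues the values. The underlying transformation is just:

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main()
{
    // A query's settings as a map, as stored in the Settings column.
    std::map<std::string, std::string> settings{
        {"max_threads", "8"},
        {"max_memory_usage", "10000000000"},
    };

    // mapKeys / mapValues equivalents: two parallel arrays.
    std::vector<std::string> names, values;
    for (const auto & [name, value] : settings)
    {
        names.push_back(name);   // -> Settings.Names
        values.push_back(value); // -> Settings.Values
    }

    for (size_t i = 0; i < names.size(); ++i)
        std::cout << names[i] << " = " << values[i] << '\n';
}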
@ -1,68 +0,0 @@
#include <Storages/System/StorageSystemViews.h>
#include <DataTypes/DataTypeString.h>
#include <Access/ContextAccess.h>
#include <Interpreters/Context.h>
#include <Interpreters/QueryViewsLog.h>
#include <DataTypes/DataTypeEnum.h>
#include <Storages/StorageMaterializedView.h>
#include <Storages/LiveView/StorageLiveView.h>

namespace DB
{

class Context;

NamesAndTypesList StorageSystemViews::getNamesAndTypes()
{
    auto view_type_datatype = std::make_shared<DataTypeEnum8>(DataTypeEnum8::Values{
        {"Default", static_cast<Int8>(QueryViewsLogElement::ViewType::DEFAULT)},
        {"Materialized", static_cast<Int8>(QueryViewsLogElement::ViewType::MATERIALIZED)},
        {"Live", static_cast<Int8>(QueryViewsLogElement::ViewType::LIVE)}});

    return {
        {"database", std::make_shared<DataTypeString>()},
        {"name", std::make_shared<DataTypeString>()},
        {"main_dependency_database", std::make_shared<DataTypeString>()},
        {"main_dependency_table", std::make_shared<DataTypeString>()},
        {"view_type", std::move(view_type_datatype)},
    };
}

void StorageSystemViews::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const
{
    const auto access = context->getAccess();
    const bool check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES);

    for (const auto & [table_id, view_ids] : DatabaseCatalog::instance().getViewDependencies())
    {
        const bool check_access_for_tables = check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, table_id.database_name);

        if (check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, table_id.database_name, table_id.table_name))
            continue;

        size_t col_num;
        for (const auto & view_id : view_ids)
        {
            auto view_ptr = DatabaseCatalog::instance().getTable(view_id, context);
            QueryViewsLogElement::ViewType type = QueryViewsLogElement::ViewType::DEFAULT;

            if (typeid_cast<const StorageMaterializedView *>(view_ptr.get()))
            {
                type = QueryViewsLogElement::ViewType::MATERIALIZED;
            }
            else if (typeid_cast<const StorageLiveView *>(view_ptr.get()))
            {
                type = QueryViewsLogElement::ViewType::LIVE;
            }

            col_num = 0;
            res_columns[col_num++]->insert(view_id.database_name);
            res_columns[col_num++]->insert(view_id.table_name);
            res_columns[col_num++]->insert(table_id.database_name);
            res_columns[col_num++]->insert(table_id.table_name);
            res_columns[col_num++]->insert(type);
        }
    }
}

}
@ -1,24 +0,0 @@
#pragma once

#include <common/shared_ptr_helper.h>
#include <Storages/System/IStorageSystemOneBlock.h>

namespace DB
{

class StorageSystemViews final : public shared_ptr_helper<StorageSystemViews>, public IStorageSystemOneBlock<StorageSystemViews>
{
    friend struct shared_ptr_helper<StorageSystemViews>;
protected:
    using IStorageSystemOneBlock::IStorageSystemOneBlock;

    void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const override;

public:
    std::string getName() const override { return "SystemViews"; }

    static NamesAndTypesList getNamesAndTypes();
};

}
@ -44,7 +44,6 @@
#include <Storages/System/StorageSystemTableEngines.h>
#include <Storages/System/StorageSystemTableFunctions.h>
#include <Storages/System/StorageSystemTables.h>
#include <Storages/System/StorageSystemViews.h>
#include <Storages/System/StorageSystemZooKeeper.h>
#include <Storages/System/StorageSystemContributors.h>
#include <Storages/System/StorageSystemErrors.h>
@ -96,7 +95,6 @@ void attachSystemTablesLocal(IDatabase & system_database)
    attach<StorageSystemZeros>(system_database, "zeros_mt", true);
    attach<StorageSystemDatabases>(system_database, "databases");
    attach<StorageSystemTables>(system_database, "tables");
    attach<StorageSystemViews>(system_database, "views");
    attach<StorageSystemColumns>(system_database, "columns");
    attach<StorageSystemFunctions>(system_database, "functions");
    attach<StorageSystemEvents>(system_database, "events");
Some files were not shown because too many files have changed in this diff.