Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)

Commit 8f6526a930
Merge branch 'master' into structure-to-schema
.github/workflows/master.yml (vendored, 2 changes)
@@ -3643,7 +3643,7 @@ jobs:
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/unit_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
-         CHECK_NAME=Unit tests (release-clang)
+         CHECK_NAME=Unit tests (release)
          REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
          EOF
      - name: Download json reports
.github/workflows/pull_request.yml (vendored, 2 changes)
@@ -4541,7 +4541,7 @@ jobs:
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/unit_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
-         CHECK_NAME=Unit tests (release-clang)
+         CHECK_NAME=Unit tests (release)
          REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
          EOF
      - name: Download json reports
@@ -23,7 +23,6 @@
* Added `Overlay` database engine to combine multiple databases into one. Added `Filesystem` database engine to represent a directory in the filesystem as a set of implicitly available tables with auto-detected formats and structures. A new `S3` database engine allows read-only interaction with S3 storage by representing a prefix as a set of tables. A new `HDFS` database engine allows interacting with HDFS storage in the same way. [#48821](https://github.com/ClickHouse/ClickHouse/pull/48821) ([alekseygolub](https://github.com/alekseygolub)).
* Add support for external disks in Keeper for storing snapshots and logs. [#50098](https://github.com/ClickHouse/ClickHouse/pull/50098) ([Antonio Andelic](https://github.com/antonio2368)).
* Add support for multi-directory selection (`{}`) globs. [#50559](https://github.com/ClickHouse/ClickHouse/pull/50559) ([Andrey Zvonov](https://github.com/zvonand)).
-* Support ZooKeeper `reconfig` command for ClickHouse Keeper with incremental reconfiguration, which can be enabled via the `keeper_server.enable_reconfiguration` setting. Supports adding servers, removing servers, and changing server priorities. [#49450](https://github.com/ClickHouse/ClickHouse/pull/49450) ([Mike Kot](https://github.com/myrrc)).
* The Kafka connector can fetch the Avro schema from the schema registry with basic authentication using URL-encoded credentials. [#49664](https://github.com/ClickHouse/ClickHouse/pull/49664) ([Ilya Golshtein](https://github.com/ilejn)).
* Add function `arrayJaccardIndex`, which computes the Jaccard similarity between two arrays. [#50076](https://github.com/ClickHouse/ClickHouse/pull/50076) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
* Add a column `is_obsolete` to `system.settings` and similar tables. Closes [#50819](https://github.com/ClickHouse/ClickHouse/issues/50819). [#50826](https://github.com/ClickHouse/ClickHouse/pull/50826) ([flynn](https://github.com/ucasfl)).
@@ -124,6 +123,7 @@
* (experimental MaterializedMySQL) Double-quoted comments are now supported in MaterializedMySQL. [#52355](https://github.com/ClickHouse/ClickHouse/pull/52355) ([Val Doroshchuk](https://github.com/valbok)).
* Upgrade Intel QPL from v1.1.0 to v1.2.0, upgrade Intel accel-config from v3.5 to v4.0, and fix an issue where a Device IOTLB miss had a big performance impact for IAA accelerators. [#52180](https://github.com/ClickHouse/ClickHouse/pull/52180) ([jasperzhu](https://github.com/jinjunzh)).
* The `session_timezone` setting (new in version 23.6) is demoted to experimental. [#52445](https://github.com/ClickHouse/ClickHouse/pull/52445) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Support ZooKeeper `reconfig` command for ClickHouse Keeper with incremental reconfiguration, which can be enabled via the `keeper_server.enable_reconfiguration` setting. Supports adding servers, removing servers, and changing server priorities. [#49450](https://github.com/ClickHouse/ClickHouse/pull/49450) ([Mike Kot](https://github.com/myrrc)). It is suspected that this feature is incomplete.

#### Build/Testing/Packaging Improvement
* Add experimental ClickHouse builds for Linux RISC-V 64 to CI. [#31398](https://github.com/ClickHouse/ClickHouse/pull/31398) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@@ -165,8 +165,14 @@ elseif(GLIBC_COMPATIBILITY)
    message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")
endif ()

-# Make sure the final executable has symbols exported
-set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")
+if (OS_LINUX)
+    # We should not export dynamic symbols, because:
+    # - The main clickhouse binary does not use dlopen,
+    #   and whatever is poisoning it by LD_PRELOAD should not link to our symbols.
+    # - The clickhouse-odbc-bridge and clickhouse-library-bridge binaries
+    #   should not expose their symbols to ODBC drivers and libraries.
+    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic")
+endif ()

if (OS_DARWIN)
    # The `-all_load` flag forces loading of all symbols from all libraries,
@@ -8,6 +8,7 @@
#include <functional>
#include <iosfwd>

+#include <base/defines.h>
#include <base/types.h>
#include <base/unaligned.h>
@@ -274,6 +275,8 @@ struct CRC32Hash
        if (size == 0)
            return 0;

+       chassert(pos);
+
        if (size < 8)
        {
            return static_cast<unsigned>(hashLessThan8(x.data, x.size));
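The `chassert(pos)` added here makes the non-null precondition explicit: the zero-length case returns early, so the pointer only needs to be valid once there are bytes to hash. A minimal standalone sketch of the same guard pattern, with a hypothetical `firstByte` helper and plain `assert` standing in for `chassert`:

```cpp
#include <cassert>
#include <cstddef>

// Hypothetical helper illustrating the guard above (not ClickHouse source).
inline unsigned char firstByte(const char * pos, size_t size)
{
    if (size == 0)
        return 0;   // empty input: the pointer may legitimately be null

    assert(pos);    // from here on, dereferencing must be safe

    return static_cast<unsigned char>(pos[0]);
}
```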
@@ -115,8 +115,15 @@
/// because SIGABRT is easier to debug than SIGTRAP (the second one makes gdb crazy)
#if !defined(chassert)
    #if defined(ABORT_ON_LOGICAL_ERROR)
+       // clang-format off
+       #include <base/types.h>
+       namespace DB
+       {
+           void abortOnFailedAssertion(const String & description);
+       }
        #define chassert(x) static_cast<bool>(x) ? void(0) : ::DB::abortOnFailedAssertion(#x)
        #define UNREACHABLE() abort()
+       // clang-format on
    #else
        /// Here sizeof() trick is used to suppress unused warning for result,
        /// since simple "(void)x" will evaluate the expression, while
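What the forward-declaration block above buys, in compressed form: with `ABORT_ON_LOGICAL_ERROR` defined, `chassert(x)` calls `DB::abortOnFailedAssertion` with the stringified condition (SIGABRT, per the comment, being easier to debug than SIGTRAP); otherwise it compiles away. A self-contained sketch of the mechanism, simplified from the real definitions:

```cpp
#include <cstdio>
#include <cstdlib>
#include <string>

// Simplified stand-in for the function the real header only forward-declares.
namespace DB
{
    inline void abortOnFailedAssertion(const std::string & description)
    {
        std::fprintf(stderr, "Logical error: %s\n", description.c_str());
        std::abort();
    }
}

#if defined(ABORT_ON_LOGICAL_ERROR)
    #define chassert(x) static_cast<bool>(x) ? void(0) : ::DB::abortOnFailedAssertion(#x)
#else
    // Release-style build: evaluate nothing, but suppress "unused" warnings
    // via the sizeof trick mentioned in the header comment.
    #define chassert(x) (void)sizeof(!(x))
#endif

int main()
{
    const char * pos = nullptr;
    chassert(pos != nullptr);  // no-op normally; aborts with a message under ABORT_ON_LOGICAL_ERROR
    return 0;
}
```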
@@ -57,7 +57,7 @@ public:
    URI();
        /// Creates an empty URI.

-   explicit URI(const std::string & uri, bool disable_url_encoding = false);
+   explicit URI(const std::string & uri, bool enable_url_encoding = true);
        /// Parses an URI from the given string. Throws a
        /// SyntaxException if the uri is not valid.
@@ -362,7 +362,7 @@ private:
    std::string _query;
    std::string _fragment;

-   bool _disable_url_encoding = false;
+   bool _enable_url_encoding = true;
};
@@ -36,8 +36,8 @@ URI::URI():
}

-URI::URI(const std::string& uri, bool decode_and_encode_path):
-   _port(0), _disable_url_encoding(decode_and_encode_path)
+URI::URI(const std::string& uri, bool enable_url_encoding):
+   _port(0), _enable_url_encoding(enable_url_encoding)
{
    parse(uri);
}
@@ -108,7 +108,7 @@ URI::URI(const URI& uri):
    _path(uri._path),
    _query(uri._query),
    _fragment(uri._fragment),
-   _disable_url_encoding(uri._disable_url_encoding)
+   _enable_url_encoding(uri._enable_url_encoding)
{
}
@@ -121,7 +121,7 @@ URI::URI(const URI& baseURI, const std::string& relativeURI):
    _path(baseURI._path),
    _query(baseURI._query),
    _fragment(baseURI._fragment),
-   _disable_url_encoding(baseURI._disable_url_encoding)
+   _enable_url_encoding(baseURI._enable_url_encoding)
{
    resolve(relativeURI);
}
@@ -153,7 +153,7 @@ URI& URI::operator = (const URI& uri)
        _path = uri._path;
        _query = uri._query;
        _fragment = uri._fragment;
-       _disable_url_encoding = uri._disable_url_encoding;
+       _enable_url_encoding = uri._enable_url_encoding;
    }
    return *this;
}
@@ -184,7 +184,7 @@ void URI::swap(URI& uri)
    std::swap(_path, uri._path);
    std::swap(_query, uri._query);
    std::swap(_fragment, uri._fragment);
-   std::swap(_disable_url_encoding, uri._disable_url_encoding);
+   std::swap(_enable_url_encoding, uri._enable_url_encoding);
}
@@ -687,18 +687,18 @@ void URI::decode(const std::string& str, std::string& decodedStr, bool plusAsSpa

void URI::encodePath(std::string & encodedStr) const
{
-   if (_disable_url_encoding)
-       encodedStr = _path;
-   else
+   if (_enable_url_encoding)
        encode(_path, RESERVED_PATH, encodedStr);
+   else
+       encodedStr = _path;
}

void URI::decodePath(const std::string & encodedStr)
{
-   if (_disable_url_encoding)
-       _path = encodedStr;
-   else
+   if (_enable_url_encoding)
        decode(encodedStr, _path);
+   else
+       _path = encodedStr;
}

bool URI::isWellKnownPort() const
@@ -22,8 +22,9 @@ macro(clickhouse_split_debug_symbols)
    # Splits debug symbols into separate file, leaves the binary untouched:
    COMMAND "${OBJCOPY_PATH}" --only-keep-debug "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}" "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug"
    COMMAND chmod 0644 "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug"
-   # Strips binary, sections '.note' & '.comment' are removed in line with Debian's stripping policy: www.debian.org/doc/debian-policy/ch-files.html, section '.clickhouse.hash' is needed for integrity check:
-   COMMAND "${STRIP_PATH}" --remove-section=.comment --remove-section=.note --keep-section=.clickhouse.hash "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}"
+   # Strips binary, sections '.note' & '.comment' are removed in line with Debian's stripping policy: www.debian.org/doc/debian-policy/ch-files.html, section '.clickhouse.hash' is needed for integrity check.
+   # Also, after we disabled the export of symbols for dynamic linking, we still need to keep a static symbol table for good stack traces.
+   COMMAND "${STRIP_PATH}" --strip-debug --remove-section=.comment --remove-section=.note "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}"
    # Associate stripped binary with debug symbols:
    COMMAND "${OBJCOPY_PATH}" --add-gnu-debuglink "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug" "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}"
    COMMENT "Stripping clickhouse binary" VERBATIM
@@ -161,5 +161,9 @@
    "docker/test/sqllogic": {
        "name": "clickhouse/sqllogic-test",
        "dependent": []
+   },
+   "docker/test/integration/nginx_dav": {
+       "name": "clickhouse/nginx-dav",
+       "dependent": []
    }
}
@@ -64,7 +64,7 @@ then
    ninja $NINJA_FLAGS clickhouse-keeper

    ls -la ./programs/
-   ldd ./programs/clickhouse-keeper
+   ldd ./programs/clickhouse-keeper ||:

    if [ -n "$MAKE_DEB" ]; then
        # No quotes because I want it to expand to nothing if empty.
@@ -80,19 +80,9 @@ else
    cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..
fi

-if [ "coverity" == "$COMBINED_OUTPUT" ]
-then
-    mkdir -p /workdir/cov-analysis
-
-    wget --post-data "token=$COVERITY_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /workdir/cov-analysis --strip-components 1
-    export PATH=$PATH:/workdir/cov-analysis/bin
-    cov-configure --config ./coverity.config --template --comptype clangcc --compiler "$CC"
-    SCAN_WRAPPER="cov-build --config ./coverity.config --dir cov-int"
-fi
-
-# No quotes because I want it to expand to nothing if empty.
-$SCAN_WRAPPER ninja $NINJA_FLAGS $BUILD_TARGET
+# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
+ninja $NINJA_FLAGS $BUILD_TARGET

ls -la ./programs
@@ -175,13 +165,6 @@ then
    mv "$COMBINED_OUTPUT.tar.zst" /output
fi

-if [ "coverity" == "$COMBINED_OUTPUT" ]
-then
-    # Coverity does not understand ZSTD.
-    tar -cvz -f "coverity-scan.tar.gz" cov-int
-    mv "coverity-scan.tar.gz" /output
-fi
-
ccache_status
ccache --evict-older-than 1d
@@ -253,11 +253,6 @@ def parse_env_variables(
    cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}")
    cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")

-   # Create combined output archive for performance tests.
-   if package_type == "coverity":
-       result.append("COMBINED_OUTPUT=coverity")
-       result.append('COVERITY_TOKEN="$COVERITY_TOKEN"')
-
    if sanitizer:
        result.append(f"SANITIZER={sanitizer}")
    if build_type:
@@ -356,7 +351,7 @@ def parse_args() -> argparse.Namespace:
    )
    parser.add_argument(
        "--package-type",
-       choices=["deb", "binary", "coverity"],
+       choices=["deb", "binary"],
        required=True,
    )
    parser.add_argument(
@@ -11,6 +11,7 @@ RUN apt-get update \
        pv \
        ripgrep \
        zstd \
+       locales \
        --yes --no-install-recommends

# Sanitizer options for services (clickhouse-server)
@@ -28,7 +29,10 @@ ENV TSAN_OPTIONS='halt_on_error=1 history_size=7 memory_limit_mb=46080 second_de
ENV UBSAN_OPTIONS='print_stacktrace=1'
ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'

-ENV TZ=Europe/Moscow
+RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
+ENV LC_ALL en_US.UTF-8
+
+ENV TZ=Europe/Amsterdam
RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone

CMD sleep 1
@@ -32,7 +32,7 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \
    && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
    && rm -rf /tmp/clickhouse-odbc-tmp

-ENV TZ=Europe/Moscow
+ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

ENV COMMIT_SHA=''
@@ -8,7 +8,7 @@ ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

ENV LANG=C.UTF-8
-ENV TZ=Europe/Moscow
+ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN apt-get update \
docker/test/integration/nginx_dav/Dockerfile (new file, 6 lines)
@@ -0,0 +1,6 @@
+FROM nginx:alpine-slim
+
+COPY default.conf /etc/nginx/conf.d/
+
+RUN mkdir /usr/share/nginx/files/ \
+    && chown nginx: /usr/share/nginx/files/ -R
docker/test/integration/nginx_dav/default.conf (new file, 25 lines)
@@ -0,0 +1,25 @@
+server {
+    listen 80;
+
+    #root /usr/share/nginx/test.com;
+    index index.html index.htm;
+
+    server_name test.com localhost;
+
+    location / {
+        expires max;
+        root /usr/share/nginx/files;
+        client_max_body_size 20m;
+        client_body_temp_path /usr/share/nginx/tmp;
+        dav_methods PUT; # Allowed methods, only PUT is necessary
+
+        create_full_put_path on; # nginx automatically creates nested directories
+        dav_access user:rw group:r all:r; # access permissions for files
+
+        limit_except GET {
+            allow all;
+        }
+    }
+
+    error_page 405 =200 $uri;
+}
@@ -13,4 +13,3 @@ services:
      - ${MEILI_SECURE_EXTERNAL_PORT:-7700}:${MEILI_SECURE_INTERNAL_PORT:-7700}
    environment:
      MEILI_MASTER_KEY: "password"
-
@@ -5,7 +5,7 @@ services:
  # Files will be put into /usr/share/nginx/files.

  nginx:
-   image: kssenii/nginx-test:1.1
+   image: clickhouse/nginx-dav:${DOCKER_NGINX_DAV_TAG:-latest}
    restart: always
    ports:
      - 80:80
@@ -64,15 +64,16 @@ export CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH=/clickhouse-odbc-bridge
export CLICKHOUSE_LIBRARY_BRIDGE_BINARY_PATH=/clickhouse-library-bridge

export DOCKER_BASE_TAG=${DOCKER_BASE_TAG:=latest}
-export DOCKER_HELPER_TAG=${DOCKER_HELPER_TAG:=latest}
-export DOCKER_MYSQL_GOLANG_CLIENT_TAG=${DOCKER_MYSQL_GOLANG_CLIENT_TAG:=latest}
export DOCKER_DOTNET_CLIENT_TAG=${DOCKER_DOTNET_CLIENT_TAG:=latest}
+export DOCKER_HELPER_TAG=${DOCKER_HELPER_TAG:=latest}
+export DOCKER_KERBERIZED_HADOOP_TAG=${DOCKER_KERBERIZED_HADOOP_TAG:=latest}
+export DOCKER_KERBEROS_KDC_TAG=${DOCKER_KERBEROS_KDC_TAG:=latest}
+export DOCKER_MYSQL_GOLANG_CLIENT_TAG=${DOCKER_MYSQL_GOLANG_CLIENT_TAG:=latest}
export DOCKER_MYSQL_JAVA_CLIENT_TAG=${DOCKER_MYSQL_JAVA_CLIENT_TAG:=latest}
export DOCKER_MYSQL_JS_CLIENT_TAG=${DOCKER_MYSQL_JS_CLIENT_TAG:=latest}
export DOCKER_MYSQL_PHP_CLIENT_TAG=${DOCKER_MYSQL_PHP_CLIENT_TAG:=latest}
+export DOCKER_NGINX_DAV_TAG=${DOCKER_NGINX_DAV_TAG:=latest}
export DOCKER_POSTGRESQL_JAVA_CLIENT_TAG=${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:=latest}
-export DOCKER_KERBEROS_KDC_TAG=${DOCKER_KERBEROS_KDC_TAG:=latest}
-export DOCKER_KERBERIZED_HADOOP_TAG=${DOCKER_KERBERIZED_HADOOP_TAG:=latest}

cd /ClickHouse/tests/integration
exec "$@"
@@ -11,7 +11,7 @@ ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

ENV LANG=C.UTF-8
-ENV TZ=Europe/Moscow
+ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN apt-get update \
@@ -52,7 +52,7 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \
    && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
    && rm -rf /tmp/clickhouse-odbc-tmp

-ENV TZ=Europe/Moscow
+ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

ENV NUM_TRIES=1
@@ -233,4 +233,10 @@ rowNumberInAllBlocks()
LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
[ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv

+# But OOMs in stress test are allowed
+if rg 'OOM in dmesg|Signal 9' /test_output/check_status.tsv
+then
+    sed -i 's/failure/success/' /test_output/check_status.tsv
+fi
+
collect_core_dumps
@@ -18,10 +18,14 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
    python3-pip \
    shellcheck \
    yamllint \
+   locales \
    && pip3 install black==23.1.0 boto3 codespell==2.2.1 mypy==1.3.0 PyGithub unidiff pylint==2.6.2 \
    && apt-get clean \
    && rm -rf /root/.cache/pip

+RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
+ENV LC_ALL en_US.UTF-8
+
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
@@ -231,4 +231,10 @@ rowNumberInAllBlocks()
LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
[ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv

+# But OOMs in stress test are allowed
+if rg 'OOM in dmesg|Signal 9' /test_output/check_status.tsv
+then
+    sed -i 's/failure/success/' /test_output/check_status.tsv
+fi
+
collect_core_dumps
@@ -106,4 +106,4 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
## Storage Settings {#storage-settings}

- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
-- [disable_url_encoding](/docs/en/operations/settings/settings.md#disable_url_encoding) - allows disabling decoding/encoding of the path in the URI. Disabled by default.
+- [enable_url_encoding](/docs/en/operations/settings/settings.md#enable_url_encoding) - allows enabling/disabling decoding/encoding of the path in the URI. Enabled by default.
@@ -84,6 +84,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
- `password` for the file on disk
- `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
- `structure_only`: if enabled, allows backing up or restoring only the CREATE statements, without the data of tables
+- `s3_storage_class`: the storage class used for S3 backup. For example, `STANDARD`

### Usage examples
@@ -512,7 +512,7 @@ Both the cache for `local_disk`, and temporary data will be stored in `/tiny_loc
        <type>cache</type>
        <disk>local_disk</disk>
        <path>/tiny_local_cache/</path>
-       <max_size>10M</max_size>
+       <max_size_rows>10M</max_size_rows>
        <max_file_segment_size>1M</max_file_segment_size>
        <cache_on_write_operations>1</cache_on_write_operations>
        <do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
@@ -1592,6 +1592,10 @@ To manually turn on metrics history collection [`system.metric_log`](../../opera
        <table>metric_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </metric_log>
</clickhouse>
```
@@ -1695,6 +1699,14 @@ Use the following parameters to configure logging:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
+- `max_size_rows` – Maximum size of the logs in rows. When the amount of non-flushed logs reaches max_size_rows, the logs are dumped to disk.
+  Default: 1048576.
+- `reserved_size_rows` – Pre-allocated memory size in rows for the logs.
+  Default: 8192.
+- `buffer_size_rows_flush_threshold` – Threshold in rows: when reached, flushing the logs to disk is launched in the background (see the sketch after this list).
+  Default: `max_size_rows / 2`.
+- `flush_on_crash` - Indicates whether the logs should be dumped to disk in case of a crash.
+  Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional)
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
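To make the interaction of these thresholds concrete, here is a small illustrative sketch (not ClickHouse source; names and types are simplified, and the real implementation flushes asynchronously): the buffer triggers a background-style flush once it holds `buffer_size_rows_flush_threshold` rows, and forces a dump once `max_size_rows` is reached.

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Illustrative only: a simplified in-memory log buffer with the two
// thresholds described above.
class SystemLogBuffer
{
public:
    SystemLogBuffer()
    {
        rows.reserve(reserved_size_rows);  // pre-allocation, like reserved_size_rows
    }

    void add(std::string row)
    {
        rows.push_back(std::move(row));
        if (rows.size() >= max_size_rows)
            flush("forced: max_size_rows reached");   // blocking dump
        else if (rows.size() >= buffer_size_rows_flush_threshold)
            flush("background: threshold reached");   // would be scheduled, not inline
    }

private:
    void flush(const std::string & reason)
    {
        std::cout << "flushing " << rows.size() << " rows (" << reason << ")\n";
        rows.clear();
    }

    static constexpr size_t max_size_rows = 1048576;
    static constexpr size_t buffer_size_rows_flush_threshold = max_size_rows / 2;
    static constexpr size_t reserved_size_rows = 8192;
    std::vector<std::string> rows;
};

int main()
{
    SystemLogBuffer log;
    for (size_t i = 0; i < 600000; ++i)
        log.add("event");  // triggers one threshold flush at 524288 rows
    return 0;
}
```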
@@ -1706,6 +1718,10 @@ Use the following parameters to configure logging:
        <table>part_log</table>
        <partition_by>toMonday(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </part_log>
```
@@ -1773,6 +1789,14 @@ Use the following parameters to configure logging:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
+- `max_size_rows` – Maximum size of the logs in rows. When the amount of non-flushed logs reaches max_size_rows, the logs are dumped to disk.
+  Default: 1048576.
+- `reserved_size_rows` – Pre-allocated memory size in rows for the logs.
+  Default: 8192.
+- `buffer_size_rows_flush_threshold` – Threshold in rows: when reached, flushing the logs to disk is launched in the background.
+  Default: `max_size_rows / 2`.
+- `flush_on_crash` - Indicates whether the logs should be dumped to disk in case of a crash.
+  Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional)
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@@ -1786,6 +1810,10 @@ If the table does not exist, ClickHouse will create it. If the structure of the
        <table>query_log</table>
        <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </query_log>
```
@@ -1831,6 +1859,14 @@ Use the following parameters to configure logging:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
+- `max_size_rows` – Maximum size of the logs in rows. When the amount of non-flushed logs reaches max_size_rows, the logs are dumped to disk.
+  Default: 1048576.
+- `reserved_size_rows` – Pre-allocated memory size in rows for the logs.
+  Default: 8192.
+- `buffer_size_rows_flush_threshold` – Threshold in rows: when reached, flushing the logs to disk is launched in the background.
+  Default: `max_size_rows / 2`.
+- `flush_on_crash` - Indicates whether the logs should be dumped to disk in case of a crash.
+  Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional)
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@@ -1844,6 +1880,10 @@ If the table does not exist, ClickHouse will create it. If the structure of the
        <table>query_thread_log</table>
        <partition_by>toMonday(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </query_thread_log>
```
@@ -1861,6 +1901,14 @@ Use the following parameters to configure logging:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
+- `max_size_rows` – Maximum size of the logs in rows. When the amount of non-flushed logs reaches max_size_rows, the logs are dumped to disk.
+  Default: 1048576.
+- `reserved_size_rows` – Pre-allocated memory size in rows for the logs.
+  Default: 8192.
+- `buffer_size_rows_flush_threshold` – Threshold in rows: when reached, flushing the logs to disk is launched in the background.
+  Default: `max_size_rows / 2`.
+- `flush_on_crash` - Indicates whether the logs should be dumped to disk in case of a crash.
+  Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional)
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@@ -1874,6 +1922,10 @@ If the table does not exist, ClickHouse will create it. If the structure of the
        <table>query_views_log</table>
        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </query_views_log>
```
@@ -1890,6 +1942,14 @@ Parameters:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
+- `max_size_rows` – Maximum size of the logs in rows. When the amount of non-flushed logs reaches max_size_rows, the logs are dumped to disk.
+  Default: 1048576.
+- `reserved_size_rows` – Pre-allocated memory size in rows for the logs.
+  Default: 8192.
+- `buffer_size_rows_flush_threshold` – Threshold in rows: when reached, flushing the logs to disk is launched in the background.
+  Default: `max_size_rows / 2`.
+- `flush_on_crash` - Indicates whether the logs should be dumped to disk in case of a crash.
+  Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional)
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@@ -1901,13 +1961,16 @@ Parameters:
        <database>system</database>
        <table>text_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
        <!-- <partition_by>event_date</partition_by> -->
        <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
    </text_log>
</clickhouse>
```

## trace_log {#server_configuration_parameters-trace_log}

Settings for the [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table operation.
@@ -1920,6 +1983,12 @@ Parameters:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/index.md) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
+- `max_size_rows` – Maximum size of the logs in rows. When the amount of non-flushed logs reaches max_size_rows, the logs are dumped to disk.
+  Default: 1048576.
+- `reserved_size_rows` – Pre-allocated memory size in rows for the logs.
+  Default: 8192.
+- `buffer_size_rows_flush_threshold` – Threshold in rows: when reached, flushing the logs to disk is launched in the background.
+  Default: `max_size_rows / 2`.
- `storage_policy` – Name of the storage policy to use for the table (optional)
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@@ -1931,6 +2000,10 @@ The default server configuration file `config.xml` contains the following settin
        <table>trace_log</table>
        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </trace_log>
```
|
||||
- `partition_by` — [Custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table. Can't be used if `engine` defined.
|
||||
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` defined.
|
||||
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
|
||||
- `max_size_rows` – Maximal size in lines for the logs. When non-flushed logs amount reaches max_size, logs dumped to the disk.
|
||||
Default: 1048576.
|
||||
- `reserved_size_rows` – Pre-allocated memory size in lines for the logs.
|
||||
Default: 8192.
|
||||
- `buffer_size_rows_flush_threshold` – Lines amount threshold, reaching it launches flushing logs to the disk in background.
|
||||
Default: `max_size_rows / 2`.
|
||||
- `flush_on_crash` - Indication whether logs should be dumped to the disk in case of a crash.
|
||||
Default: false.
|
||||
- `storage_policy` – Name of storage policy to use for the table (optional)
|
||||
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<clickhouse>
|
||||
<asynchronous_insert_log>
|
||||
@@ -1955,11 +2037,53 @@ Parameters:
        <table>asynchronous_insert_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        <partition_by>toYYYYMM(event_date)</partition_by>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
        <!-- <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine> -->
    </asynchronous_insert_log>
</clickhouse>
```
## crash_log {#server_configuration_parameters-crash_log}

Settings for the [crash_log](../../operations/system-tables/crash-log.md) system table operation.

Parameters:

- `database` — Database for storing a table.
- `table` — Table name.
- `partition_by` — [Custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table. Can't be used if `engine` is defined.
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/index.md) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
- `max_size_rows` – Maximum size of the logs in rows. When the amount of non-flushed logs reaches max_size_rows, the logs are dumped to disk.
  Default: 1048576.
- `reserved_size_rows` – Pre-allocated memory size in rows for the logs.
  Default: 8192.
- `buffer_size_rows_flush_threshold` – Threshold in rows: when reached, flushing the logs to disk is launched in the background.
  Default: `max_size_rows / 2`.
- `flush_on_crash` - Indicates whether the logs should be dumped to disk in case of a crash.
  Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional)
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).

The default server configuration file `config.xml` contains the following settings section:

``` xml
<crash_log>
    <database>system</database>
    <table>crash_log</table>
    <partition_by>toYYYYMM(event_date)</partition_by>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    <max_size_rows>1024</max_size_rows>
    <reserved_size_rows>1024</reserved_size_rows>
    <buffer_size_rows_flush_threshold>512</buffer_size_rows_flush_threshold>
    <flush_on_crash>false</flush_on_crash>
</crash_log>
```
## query_masking_rules {#query-masking-rules}

Regexp-based rules, which will be applied to queries as well as all log messages before storing them in server logs,

@@ -2164,6 +2288,8 @@ This section contains the following parameters:
- `session_timeout_ms` — Maximum timeout for the client session in milliseconds.
- `operation_timeout_ms` — Maximum timeout for one operation in milliseconds.
- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) that is used as the root for znodes used by the ClickHouse server. Optional.
+- `fallback_session_lifetime.min` - If the first ZooKeeper host resolved by the `zookeeper_load_balancing` strategy is unavailable, limit the lifetime of a ZooKeeper session to the fallback node. This is done for load-balancing purposes, to avoid excessive load on any one ZooKeeper host. This setting sets the minimal duration of the fallback session, in seconds. Optional. Default is 3 hours.
+- `fallback_session_lifetime.max` - The same, but for the maximum duration of the fallback session, in seconds. Optional. Default is 6 hours.
- `identity` — User and password that can be required by ZooKeeper to give access to requested znodes. Optional.
- `zookeeper_load_balancing` - Specifies the algorithm of ZooKeeper node selection.
    * random - randomly selects one of the ZooKeeper nodes.
@@ -327,3 +327,39 @@ The maximum amount of data consumed by temporary files on disk in bytes for all
Zero means unlimited.

Default value: 0.

## max_sessions_for_user {#max-sessions-per-user}

Maximum number of simultaneous sessions per authenticated user to the ClickHouse server.

Example:

``` xml
<profiles>
    <single_session_profile>
        <max_sessions_for_user>1</max_sessions_for_user>
    </single_session_profile>

    <two_sessions_profile>
        <max_sessions_for_user>2</max_sessions_for_user>
    </two_sessions_profile>

    <unlimited_sessions_profile>
        <max_sessions_for_user>0</max_sessions_for_user>
    </unlimited_sessions_profile>
</profiles>

<users>
    <!-- User Alice can connect to a ClickHouse server no more than once at a time. -->
    <Alice>
        <profile>single_session_profile</profile>
    </Alice>

    <!-- User Bob can use 2 simultaneous sessions. -->
    <Bob>
        <profile>two_sessions_profile</profile>
    </Bob>

    <!-- User Charles can use arbitrarily many simultaneous sessions. -->
    <Charles>
        <profile>unlimited_sessions_profile</profile>
    </Charles>
</users>
```

Default value: 0 (unlimited number of simultaneous sessions).
@@ -1168,7 +1168,7 @@ Enabled by default.

Compression method used in output Arrow format. Supported codecs: `lz4_frame`, `zstd`, `none` (uncompressed)

-Default value: `none`.
+Default value: `lz4_frame`.

## ORC format settings {#orc-format-settings}
@@ -39,7 +39,7 @@ Example:
        <max_threads>8</max_threads>
    </default>

-   <!-- Settings for quries from the user interface -->
+   <!-- Settings for queries from the user interface -->
    <web>
        <max_rows_to_read>1000000000</max_rows_to_read>
        <max_bytes_to_read>100000000000</max_bytes_to_read>
@@ -67,6 +67,8 @@ Example:
        <max_ast_depth>50</max_ast_depth>
        <max_ast_elements>100</max_ast_elements>

+       <max_sessions_for_user>4</max_sessions_for_user>
+
        <readonly>1</readonly>
    </web>
</profiles>
@@ -3468,11 +3468,11 @@ Possible values:

Default value: `0`.

-## disable_url_encoding {#disable_url_encoding}
+## enable_url_encoding {#enable_url_encoding}

-Allows disabling decoding/encoding of the path in the URI for [URL](../../engines/table-engines/special/url.md) engine tables.
+Allows enabling/disabling decoding/encoding of the path in the URI for [URL](../../engines/table-engines/special/url.md) engine tables.

-Disabled by default.
+Enabled by default.

## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
@@ -10,6 +10,7 @@ Columns:
- `event` ([String](../../sql-reference/data-types/string.md)) — Event name.
- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of events occurred.
- `description` ([String](../../sql-reference/data-types/string.md)) — Event description.
+- `name` ([String](../../sql-reference/data-types/string.md)) — Alias for `event`.

You can find all supported events in the source file [src/Common/ProfileEvents.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/ProfileEvents.cpp).
@@ -47,6 +47,10 @@ An example:
        <engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
        -->
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </query_log>
</clickhouse>
```
@@ -10,6 +10,7 @@ Columns:
- `metric` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Int64](../../sql-reference/data-types/int-uint.md)) — Metric value.
- `description` ([String](../../sql-reference/data-types/string.md)) — Metric description.
+- `name` ([String](../../sql-reference/data-types/string.md)) — Alias for `metric`.

You can find all supported metrics in the source file [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp).
@@ -140,8 +140,8 @@ Time shifts for multiple days. Some Pacific islands changed their timezone offse
- [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md)
- [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md)
- [Functions for working with arrays](../../sql-reference/functions/array-functions.md)
-- [The `date_time_input_format` setting](../../operations/settings/settings.md#settings-date_time_input_format)
-- [The `date_time_output_format` setting](../../operations/settings/settings.md#settings-date_time_output_format)
+- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#settings-date_time_input_format)
+- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#settings-date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
@@ -56,7 +56,7 @@ Character `|` inside patterns is used to specify failover addresses. They are it
## Storage Settings {#storage-settings}

- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
-- [disable_url_encoding](/docs/en/operations/settings/settings.md#disable_url_encoding) - allows disabling decoding/encoding of the path in the URI. Disabled by default.
+- [enable_url_encoding](/docs/en/operations/settings/settings.md#enable_url_encoding) - allows enabling/disabling decoding/encoding of the path in the URI. Enabled by default.

**See Also**
@@ -1058,6 +1058,10 @@ ClickHouse uses threads from the global thread pool
        <table>metric_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </metric_log>
</clickhouse>
```
@@ -1160,7 +1164,14 @@ ClickHouse uses threads from the global thread pool
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used together with `engine`.
- `engine` — sets the [MergeTree engine definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used together with `partition_by`.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
+- `max_size_rows` — maximum size of the log buffer in rows; when the buffer is completely full, the logs are dumped to disk.
+  Default value: 1048576.
+- `reserved_size_rows` — pre-allocated size of the log buffer in rows.
+  Default value: 8192.
+- `buffer_size_bytes_flush_threshold` — number of log rows at which flushing to disk starts in non-blocking mode.
+  Default value: `max_size / 2`.
+- `flush_on_crash` — whether the logs should be dumped to disk in case of an unexpected program termination.
+  Default value: false.
**Example**

``` xml
@@ -1169,6 +1180,10 @@ ClickHouse uses threads from the global thread pool
        <table>part_log</table>
        <partition_by>toMonday(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </part_log>
```
@@ -1219,10 +1234,18 @@ ClickHouse uses threads from the global thread pool
When configuring logging, the following parameters are used:

- `database` — database name;
-- `table` — the name of the table the log will be written to;
+- `table` — table name;
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used together with `engine`.
- `engine` — sets the [MergeTree engine definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used together with `partition_by`.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
+- `max_size_rows` — maximum size of the log buffer in rows; when the buffer is completely full, the logs are dumped to disk.
+  Default value: 1048576.
+- `reserved_size_rows` — pre-allocated size of the log buffer in rows.
+  Default value: 8192.
+- `buffer_size_bytes_flush_threshold` — number of log rows at which flushing to disk starts in non-blocking mode.
+  Default value: `max_size / 2`.
+- `flush_on_crash` — whether the logs should be dumped to disk in case of an unexpected program termination.
+  Default value: false.

If the table does not exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed and a new table is created automatically.
@@ -1234,6 +1257,10 @@ ClickHouse uses threads from the global thread pool
        <table>query_log</table>
        <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </query_log>
```
@@ -1246,10 +1273,18 @@ ClickHouse uses threads from the global thread pool
When configuring logging, the following parameters are used:

- `database` — database name;
-- `table` — the name of the table the log will be written to;
+- `table` — table name;
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used together with `engine`.
- `engine` — sets the [MergeTree engine definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used together with `partition_by`.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
+- `max_size_rows` — maximum size of the log buffer in rows; when the buffer is completely full, the logs are dumped to disk.
+  Default value: 1048576.
+- `reserved_size_rows` — pre-allocated size of the log buffer in rows.
+  Default value: 8192.
+- `buffer_size_bytes_flush_threshold` — number of log rows at which flushing to disk starts in non-blocking mode.
+  Default value: `max_size / 2`.
+- `flush_on_crash` — whether the logs should be dumped to disk in case of an unexpected program termination.
+  Default value: false.

If the table does not exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed and a new table is created automatically.
@@ -1261,6 +1296,10 @@ ClickHouse uses threads from the global thread pool
        <table>query_thread_log</table>
        <partition_by>toMonday(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </query_thread_log>
```
@@ -1272,11 +1311,19 @@ ClickHouse uses threads from the global thread pool

When configuring logging, the following parameters are used:

-- `database` — database name.
-- `table` — the name of the system table where the queries will be logged.
-- `partition_by` — sets a [custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md). Cannot be used if the `engine` parameter is set.
-- `engine` — sets the [MergeTree engine definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used if the `partition_by` parameter is set.
+- `database` — database name;
+- `table` — table name;
+- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used together with `engine`.
+- `engine` — sets the [MergeTree engine definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used together with `partition_by`.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
+- `max_size_rows` — maximum size of the log buffer in rows; when the buffer is completely full, the logs are dumped to disk.
+  Default value: 1048576.
+- `reserved_size_rows` — pre-allocated size of the log buffer in rows.
+  Default value: 8192.
+- `buffer_size_bytes_flush_threshold` — number of log rows at which flushing to disk starts in non-blocking mode.
+  Default value: `max_size / 2`.
+- `flush_on_crash` — whether the logs should be dumped to disk in case of an unexpected program termination.
+  Default value: false.

If the table does not exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed and a new table is created automatically.
@@ -1288,6 +1335,10 @@ ClickHouse uses threads from the global thread pool
        <table>query_views_log</table>
        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+       <max_size_rows>1048576</max_size_rows>
+       <reserved_size_rows>8192</reserved_size_rows>
+       <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+       <flush_on_crash>false</flush_on_crash>
    </query_views_log>
```
@@ -1298,11 +1349,19 @@ ClickHouse uses threads from the global thread pool
Parameters:

- `level` — maximum message level (`Trace` by default) that will be stored in the table.
-- `database` — the name of the database for storing the table.
-- `table` — the name of the table the text messages will be written to.
-- `partition_by` — sets a [custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md). Cannot be used together with `engine`.
+- `database` — database name;
+- `table` — table name;
+- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used together with `engine`.
- `engine` — sets the [MergeTree engine definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used together with `partition_by`.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
+- `max_size_rows` — maximum size of the log buffer in rows; when the buffer is completely full, the logs are dumped to disk.
+  Default value: 1048576.
+- `reserved_size_rows` — pre-allocated size of the log buffer in rows.
+  Default value: 8192.
+- `buffer_size_bytes_flush_threshold` — number of log rows at which flushing to disk starts in non-blocking mode.
+  Default value: `max_size / 2`.
+- `flush_on_crash` — whether the logs should be dumped to disk in case of an unexpected program termination.
+  Default value: false.

**Example**
```xml
@ -1312,6 +1371,10 @@ ClickHouse использует потоки из глобального пул
<database>system</database>
<table>text_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
<!-- <partition_by>event_date</partition_by> -->
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
</text_log>
@ -1323,13 +1386,21 @@ ClickHouse использует потоки из глобального пул

Settings for the [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table operation.

Parameters:

- `database` — name of the database.
- `table` — name of the table.
- `partition_by` — sets a [custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table. Cannot be used if `engine` is defined.
- `engine` — sets the [MergeTree Engine definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Cannot be used if `partition_by` is defined.
- `flush_interval_milliseconds` — interval for flushing the data from the in-memory buffer to the table.
- `max_size_rows` — maximal size of the log buffer in rows. When the buffer is full, the logs are dumped to disk.
  Default value: 1048576.
- `reserved_size_rows` — pre-allocated size of the log buffer in rows.
  Default value: 8192.
- `buffer_size_rows_flush_threshold` — number of rows in the buffer that triggers flushing the logs to disk in the background.
  Default value: `max_size_rows / 2`.
- `flush_on_crash` — whether the logs should be dumped to disk in case of a crash.
  Default value: false.

The default server configuration file `config.xml` contains the following settings section:

@ -1339,9 +1410,84 @@ Parameters:
<table>trace_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
</trace_log>
```

## asynchronous_insert_log {#server_configuration_parameters-asynchronous_insert_log}

Settings for the `asynchronous_insert_log` system table, which logs asynchronous inserts.

Parameters:

- `database` — name of the database.
- `table` — name of the table.
- `partition_by` — sets a [custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table. Cannot be used if `engine` is defined.
- `engine` — sets the [MergeTree Engine definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Cannot be used if `partition_by` is defined.
- `flush_interval_milliseconds` — interval for flushing the data from the in-memory buffer to the table.
- `max_size_rows` — maximal size of the log buffer in rows. When the buffer is full, the logs are dumped to disk.
  Default value: 1048576.
- `reserved_size_rows` — pre-allocated size of the log buffer in rows.
  Default value: 8192.
- `buffer_size_rows_flush_threshold` — number of rows in the buffer that triggers flushing the logs to disk in the background.
  Default value: `max_size_rows / 2`.
- `flush_on_crash` — whether the logs should be dumped to disk in case of a crash.
  Default value: false.

**Example**

```xml
<clickhouse>
<asynchronous_insert_log>
<database>system</database>
<table>asynchronous_insert_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<partition_by>toYYYYMM(event_date)</partition_by>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<!-- <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine> -->
</asynchronous_insert_log>
</clickhouse>
```

## crash_log {#server_configuration_parameters-crash_log}

Settings for the [crash_log](../../operations/system-tables/crash-log.md) system table operation.

Parameters:

- `database` — name of the database.
- `table` — name of the table.
- `partition_by` — sets a [custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table. Cannot be used if `engine` is defined.
- `engine` — sets the [MergeTree Engine definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Cannot be used if `partition_by` is defined.
- `flush_interval_milliseconds` — interval for flushing the data from the in-memory buffer to the table.
- `max_size_rows` — maximal size of the log buffer in rows. When the buffer is full, the logs are dumped to disk.
  Default value: 1024.
- `reserved_size_rows` — pre-allocated size of the log buffer in rows.
  Default value: 1024.
- `buffer_size_rows_flush_threshold` — number of rows in the buffer that triggers flushing the logs to disk in the background.
  Default value: `max_size_rows / 2`.
- `flush_on_crash` — whether the logs should be dumped to disk in case of a crash.
  Default value: true.

**Example**

``` xml
<crash_log>
<database>system</database>
<table>crash_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1024</max_size_rows>
<reserved_size_rows>1024</reserved_size_rows>
<buffer_size_rows_flush_threshold>512</buffer_size_rows_flush_threshold>
<flush_on_crash>true</flush_on_crash>
</crash_log>
```

## query_masking_rules {#query-masking-rules}

Regexp-based rules, which are applied to all queries as well as to all log messages before storing them in server logs,

@ -314,3 +314,40 @@ FORMAT Null;

When inserting data, ClickHouse calculates the number of partitions in the inserted block. If the number of partitions is more than `max_partitions_per_insert_block`, ClickHouse throws an exception with the following text:

> "Too many partitions for single INSERT block (more than " + toString(max_parts) + "). The limit is controlled by 'max_partitions_per_insert_block' setting. Large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc)."

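The check itself is simple to picture: count the distinct partition key values in the inserted block and compare against the limit. A minimal sketch, assuming a hypothetical per-row partition key representation:

```cpp
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

/// Sketch of the documented check: count distinct partition keys in a block and
/// throw above the limit. A limit of 0 means the check is disabled.
void checkPartitionsPerInsertBlock(
    const std::vector<std::string> & partition_key_per_row,
    size_t max_partitions_per_insert_block)
{
    std::set<std::string> partitions(partition_key_per_row.begin(), partition_key_per_row.end());
    if (max_partitions_per_insert_block && partitions.size() > max_partitions_per_insert_block)
        throw std::runtime_error(
            "Too many partitions for single INSERT block (more than "
            + std::to_string(max_partitions_per_insert_block) + ")");
}
```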
## max_sessions_for_user {#max-sessions-per-user}

The maximum number of simultaneous sessions per authenticated user.

Example:

``` xml
<profiles>
<single_session_profile>
<max_sessions_for_user>1</max_sessions_for_user>
</single_session_profile>
<two_sessions_profile>
<max_sessions_for_user>2</max_sessions_for_user>
</two_sessions_profile>
<unlimited_sessions_profile>
<max_sessions_for_user>0</max_sessions_for_user>
</unlimited_sessions_profile>
</profiles>
<users>
<!-- User Alice can connect to the ClickHouse server no more than once at a time. -->
<Alice>
<profile>single_session_profile</profile>
</Alice>
<!-- User Bob can use 2 simultaneous sessions. -->
<Bob>
<profile>two_sessions_profile</profile>
</Bob>
<!-- User Charles can use an arbitrary number of simultaneous sessions. -->
<Charles>
<profile>unlimited_sessions_profile</profile>
</Charles>
</users>
```

Default value: 0 (unlimited number of simultaneous sessions).

@ -39,7 +39,7 @@ SET profile = 'web'
<max_threads>8</max_threads>
</default>

<!-- Settings for queries from the user interface -->
<web>
<max_rows_to_read>1000000000</max_rows_to_read>
<max_bytes_to_read>100000000000</max_bytes_to_read>
@ -67,6 +67,7 @@ SET profile = 'web'
<max_ast_depth>50</max_ast_depth>
<max_ast_elements>100</max_ast_elements>

<max_sessions_for_user>4</max_sessions_for_user>
<readonly>1</readonly>
</web>
</profiles>
@ -45,6 +45,10 @@ sidebar_label: "Системные таблицы"
<engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
-->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_log>
</clickhouse>
```

@ -13,10 +13,6 @@ set (CLICKHOUSE_LIBRARY_BRIDGE_SOURCES
library-bridge.cpp
)

if (OS_LINUX)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic")
endif ()

clickhouse_add_executable(clickhouse-library-bridge ${CLICKHOUSE_LIBRARY_BRIDGE_SOURCES})

target_link_libraries(clickhouse-library-bridge PRIVATE
@ -15,12 +15,6 @@ set (CLICKHOUSE_ODBC_BRIDGE_SOURCES
validateODBCConnectionString.cpp
)

if (OS_LINUX)
# clickhouse-odbc-bridge is always a separate binary.
# Reason: it must not export symbols from SSL, mariadb-client, etc. to not break ABI compatibility with ODBC drivers.
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic")
endif ()

clickhouse_add_executable(clickhouse-odbc-bridge ${CLICKHOUSE_ODBC_BRIDGE_SOURCES})

target_link_libraries(clickhouse-odbc-bridge PRIVATE
@ -1035,6 +1035,11 @@ try
/// Initialize merge tree metadata cache
if (config().has("merge_tree_metadata_cache"))
{
global_context->addWarningMessage("The setting 'merge_tree_metadata_cache' is enabled."
" But the feature of 'metadata cache in RocksDB' is experimental and is not ready for production."
" The usage of this feature can lead to data corruption and loss. The setting should be disabled in production."
" See the corresponding report at https://github.com/ClickHouse/ClickHouse/issues/51182");

fs::create_directories(path / "rocksdb/");
size_t size = config().getUInt64("merge_tree_metadata_cache.lru_cache_size", 256 << 20);
bool continue_if_corrupted = config().getBool("merge_tree_metadata_cache.continue_if_corrupted", false);
@ -1686,17 +1691,26 @@ try
global_context->initializeTraceCollector();

/// Set up server-wide memory profiler (for total memory tracker).
if (server_settings.total_memory_profiler_step)
{
total_memory_tracker.setProfilerStep(server_settings.total_memory_profiler_step);
}

if (server_settings.total_memory_tracker_sample_probability > 0.0)
{
total_memory_tracker.setSampleProbability(server_settings.total_memory_tracker_sample_probability);
}

if (server_settings.total_memory_profiler_sample_min_allocation_size)
{
total_memory_tracker.setSampleMinAllocationSize(server_settings.total_memory_profiler_sample_min_allocation_size);
}

if (server_settings.total_memory_profiler_sample_max_allocation_size)
{
total_memory_tracker.setSampleMaxAllocationSize(server_settings.total_memory_profiler_sample_max_allocation_size);
}

}
#endif

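For intuition, `total_memory_profiler_step` means: collect one sample (in ClickHouse, a stack trace) each time cumulative allocations cross another `step` bytes. A simplified, single-threaded sketch of that counting logic; the class below is a stand-in, not the real MemoryTracker:

```cpp
#include <cstdint>
#include <iostream>

/// Stand-in for the real memory tracker: emit one sample each time cumulative
/// allocations cross another `profiler_step` bytes.
class TotalMemoryTrackerSketch
{
public:
    void setProfilerStep(uint64_t step) { profiler_step = step; }

    void onAllocation(uint64_t size)
    {
        allocated += size;
        if (profiler_step && allocated / profiler_step > samples_taken)
        {
            samples_taken = allocated / profiler_step;
            std::cout << "sample at " << allocated << " bytes\n"; /// stand-in for capturing a stack trace
        }
    }

private:
    uint64_t profiler_step = 0;
    uint64_t allocated = 0;
    uint64_t samples_taken = 0;
};
```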
@ -2031,27 +2045,26 @@ void Server::createServers(

for (const auto & protocol : protocols)
{
std::string prefix = "protocols." + protocol + ".";
std::string port_name = prefix + "port";
std::string description {"<undefined> protocol"};
if (config.has(prefix + "description"))
description = config.getString(prefix + "description");

if (!config.has(prefix + "port"))
continue;

if (!server_type.shouldStart(ServerType::Type::CUSTOM, port_name))
continue;

std::vector<std::string> hosts;
if (config.has(prefix + "host"))
hosts.push_back(config.getString(prefix + "host"));
else
hosts = listen_hosts;

for (const auto & host : hosts)
{
bool is_secure = false;
auto stack = buildProtocolStackFromConfig(config, protocol, http_params, async_metrics, is_secure);

@ -1026,6 +1026,14 @@

<!-- Interval of flushing data. -->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<!-- Maximal size in lines for the logs. When non-flushed logs amount reaches max_size, logs dumped to the disk. -->
<max_size_rows>1048576</max_size_rows>
<!-- Pre-allocated size in lines for the logs. -->
<reserved_size_rows>8192</reserved_size_rows>
<!-- Lines amount threshold, reaching it launches flushing logs to the disk in background. -->
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<!-- Indication whether logs should be dumped to the disk in case of a crash -->
<flush_on_crash>false</flush_on_crash>

<!-- example of using a different storage policy for a system table -->
<!-- storage_policy>local_ssd</storage_policy -->
@ -1039,6 +1047,11 @@

<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<!-- Indication whether logs should be dumped to the disk in case of a crash -->
<flush_on_crash>false</flush_on_crash>
</trace_log>

<!-- Query thread log. Has information about all threads participated in query execution.
@ -1048,6 +1061,10 @@
<table>query_thread_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_thread_log>

<!-- Query views log. Has information about all dependent views associated with a query.
@ -1066,6 +1083,10 @@
<table>part_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</part_log>

<!-- Uncomment to write text log into table.
@ -1075,6 +1096,10 @@
<database>system</database>
<table>text_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
<level></level>
</text_log>
-->
@ -1084,7 +1109,11 @@
<database>system</database>
<table>metric_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
<flush_on_crash>false</flush_on_crash>
</metric_log>

<!--
@ -1095,6 +1124,10 @@
<database>system</database>
<table>asynchronous_metric_log</table>
<flush_interval_milliseconds>7000</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</asynchronous_metric_log>

<!--
@ -1119,6 +1152,10 @@
<database>system</database>
<table>opentelemetry_span_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</opentelemetry_span_log>

@ -1130,6 +1167,10 @@

<partition_by />
<flush_interval_milliseconds>1000</flush_interval_milliseconds>
<max_size_rows>1024</max_size_rows>
<reserved_size_rows>1024</reserved_size_rows>
<buffer_size_rows_flush_threshold>512</buffer_size_rows_flush_threshold>
<flush_on_crash>true</flush_on_crash>
</crash_log>

<!-- Session log. Stores user log in (successful or not) and log out events.
@ -1142,6 +1183,10 @@

<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</session_log> -->

<!-- Profiling on Processors level. -->
@ -1151,6 +1196,10 @@

<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</processors_profile_log>

<!-- Log of asynchronous inserts. It allows to check status
@ -1161,6 +1210,10 @@
<table>asynchronous_insert_log</table>

<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
<partition_by>event_date</partition_by>
<ttl>event_date + INTERVAL 3 DAY</ttl>
</asynchronous_insert_log>
@ -1418,12 +1471,6 @@
<max_entry_size_in_rows>30000000</max_entry_size_in_rows>
</query_cache>

<!-- Uncomment if enable merge tree metadata cache -->
<!--merge_tree_metadata_cache>
<lru_cache_size>268435456</lru_cache_size>
<continue_if_corrupted>true</continue_if_corrupted>
</merge_tree_metadata_cache-->

<!-- This allows to disable exposing addresses in stack traces for security reasons.
Please be aware that it does not improve security much, but makes debugging much harder.
The addresses that are small offsets from zero will be displayed nevertheless to show nullptr dereferences.

@ -328,9 +328,6 @@ void ContextAccess::setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> &

enabled_row_policies = access_control->getEnabledRowPolicies(*params.user_id, roles_info->enabled_roles);

enabled_quota = access_control->getEnabledQuota(
*params.user_id, user_name, roles_info->enabled_roles, params.address, params.forwarded_address, params.quota_key);

enabled_settings = access_control->getEnabledSettings(
*params.user_id, user->settings, roles_info->enabled_roles, roles_info->settings_from_enabled_roles);

@ -416,19 +413,32 @@ RowPolicyFilterPtr ContextAccess::getRowPolicyFilter(const String & database, co
std::shared_ptr<const EnabledQuota> ContextAccess::getQuota() const
{
std::lock_guard lock{mutex};

if (!enabled_quota)
{
if (roles_info)
{
enabled_quota = access_control->getEnabledQuota(*params.user_id,
user_name,
roles_info->enabled_roles,
params.address,
params.forwarded_address,
params.quota_key);
}
else
{
static const auto unlimited_quota = EnabledQuota::getUnlimitedQuota();
return unlimited_quota;
}
}

return enabled_quota;
}


std::optional<QuotaUsage> ContextAccess::getQuotaUsage() const
{
return getQuota()->getUsage();
}

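The shape of this change is a lazy-initialization pattern: compute the quota on first access under the lock, and return a shared "unlimited" fallback, without caching it, while role information is not yet available. A standalone sketch of the same pattern with stub types:

```cpp
#include <memory>
#include <mutex>

/// Stub type standing in for EnabledQuota.
struct Quota { };

class AccessSketch
{
public:
    std::shared_ptr<const Quota> getQuota() const
    {
        std::lock_guard lock{mutex};
        if (!quota)
        {
            if (roles_ready)
                quota = std::make_shared<const Quota>(); /// stand-in for getEnabledQuota(...)
            else
            {
                static const auto unlimited = std::make_shared<const Quota>();
                return unlimited; /// not cached: roles may become available later
            }
        }
        return quota;
    }

    bool roles_ready = false;

private:
    mutable std::mutex mutex;
    mutable std::shared_ptr<const Quota> quota;
};
```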
@ -1,4 +1,5 @@
#include <string_view>
#include <unordered_map>
#include <Access/SettingsConstraints.h>
#include <Access/resolveSetting.h>
#include <Access/AccessControl.h>
@ -6,6 +7,7 @@
#include <Storages/MergeTree/MergeTreeSettings.h>
#include <Common/FieldVisitorToString.h>
#include <Common/FieldVisitorsAccurateComparison.h>
#include <Common/SettingSource.h>
#include <IO/WriteHelpers.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <boost/range/algorithm_ext/erase.hpp>
@ -20,6 +22,39 @@ namespace ErrorCodes
extern const int UNKNOWN_SETTING;
}

namespace
{
struct SettingSourceRestrictions
{
constexpr SettingSourceRestrictions() { allowed_sources.set(); }

constexpr SettingSourceRestrictions(std::initializer_list<SettingSource> allowed_sources_)
{
for (auto allowed_source : allowed_sources_)
setSourceAllowed(allowed_source, true);
}

constexpr bool isSourceAllowed(SettingSource source) { return allowed_sources[source]; }
constexpr void setSourceAllowed(SettingSource source, bool allowed) { allowed_sources[source] = allowed; }

std::bitset<SettingSource::COUNT> allowed_sources;
};

const std::unordered_map<std::string_view, SettingSourceRestrictions> SETTINGS_SOURCE_RESTRICTIONS = {
{"max_sessions_for_user", {SettingSource::PROFILE}},
};

SettingSourceRestrictions getSettingSourceRestrictions(std::string_view name)
{
auto settingConstraintIter = SETTINGS_SOURCE_RESTRICTIONS.find(name);
if (settingConstraintIter != SETTINGS_SOURCE_RESTRICTIONS.end())
return settingConstraintIter->second;
else
return SettingSourceRestrictions(); // allows everything
}

}

SettingsConstraints::SettingsConstraints(const AccessControl & access_control_) : access_control(&access_control_)
{
}
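The idea behind `SETTINGS_SOURCE_RESTRICTIONS` is a per-setting allowlist of sources, with "everything allowed" as the default for unlisted settings. A self-contained sketch of the same pattern; the enum values are stand-ins with the same shape as ClickHouse's `SettingSource`:

```cpp
#include <bitset>
#include <initializer_list>
#include <iostream>
#include <string_view>
#include <unordered_map>

/// Stand-in enum; COUNT doubles as the bitset size, as in the code above.
enum SettingSource { QUERY, PROFILE, ROLE, USER, COUNT };

struct Restrictions
{
    std::bitset<COUNT> allowed;
    Restrictions() { allowed.set(); } /// default: any source may change the setting
    Restrictions(std::initializer_list<SettingSource> sources)
    {
        for (auto source : sources)
            allowed[source] = true;
    }
};

const std::unordered_map<std::string_view, Restrictions> RESTRICTIONS = {
    {"max_sessions_for_user", {PROFILE}}, /// only settable from a settings profile
};

bool isAllowed(std::string_view setting, SettingSource source)
{
    auto it = RESTRICTIONS.find(setting);
    return it == RESTRICTIONS.end() || it->second.allowed[source];
}

int main()
{
    std::cout << isAllowed("max_sessions_for_user", QUERY) << '\n';   /// 0: rejected
    std::cout << isAllowed("max_sessions_for_user", PROFILE) << '\n'; /// 1: allowed
}
```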
@ -98,7 +133,7 @@ void SettingsConstraints::merge(const SettingsConstraints & other)
}


void SettingsConstraints::check(const Settings & current_settings, const SettingsProfileElements & profile_elements, SettingSource source) const
{
for (const auto & element : profile_elements)
{
@ -108,19 +143,19 @@ void SettingsConstraints::check(const Settings & current_settings, const Setting
if (element.value)
{
SettingChange value(element.setting_name, *element.value);
check(current_settings, value, source);
}

if (element.min_value)
{
SettingChange value(element.setting_name, *element.min_value);
check(current_settings, value, source);
}

if (element.max_value)
{
SettingChange value(element.setting_name, *element.max_value);
check(current_settings, value, source);
}

SettingConstraintWritability new_value = SettingConstraintWritability::WRITABLE;
@ -142,24 +177,24 @@ void SettingsConstraints::check(const Settings & current_settings, const Setting
}
}

void SettingsConstraints::check(const Settings & current_settings, const SettingChange & change, SettingSource source) const
{
checkImpl(current_settings, const_cast<SettingChange &>(change), THROW_ON_VIOLATION, source);
}

void SettingsConstraints::check(const Settings & current_settings, const SettingsChanges & changes, SettingSource source) const
{
for (const auto & change : changes)
check(current_settings, change, source);
}

void SettingsConstraints::check(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const
{
boost::range::remove_erase_if(
changes,
[&](SettingChange & change) -> bool
{
return !checkImpl(current_settings, const_cast<SettingChange &>(change), THROW_ON_VIOLATION, source);
});
}

@ -174,13 +209,13 @@ void SettingsConstraints::check(const MergeTreeSettings & current_settings, cons
check(current_settings, change);
}

void SettingsConstraints::clamp(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const
{
boost::range::remove_erase_if(
changes,
[&](SettingChange & change) -> bool
{
return !checkImpl(current_settings, change, CLAMP_ON_VIOLATION, source);
});
}

@ -215,7 +250,10 @@ bool getNewValueToCheck(const T & current_settings, SettingChange & change, Fiel
return true;
}

bool SettingsConstraints::checkImpl(const Settings & current_settings,
SettingChange & change,
ReactionOnViolation reaction,
SettingSource source) const
{
std::string_view setting_name = Settings::Traits::resolveName(change.name);

@ -247,7 +285,7 @@ bool SettingsConstraints::checkImpl(const Settings & current_settings, SettingCh
if (!getNewValueToCheck(current_settings, change, new_value, reaction == THROW_ON_VIOLATION))
return false;

return getChecker(current_settings, setting_name).check(change, new_value, reaction, source);
}

bool SettingsConstraints::checkImpl(const MergeTreeSettings & current_settings, SettingChange & change, ReactionOnViolation reaction) const
@ -255,10 +293,13 @@ bool SettingsConstraints::checkImpl(const MergeTreeSettings & current_settings,
Field new_value;
if (!getNewValueToCheck(current_settings, change, new_value, reaction == THROW_ON_VIOLATION))
return false;
return getMergeTreeChecker(change.name).check(change, new_value, reaction, SettingSource::QUERY);
}

bool SettingsConstraints::Checker::check(SettingChange & change,
const Field & new_value,
ReactionOnViolation reaction,
SettingSource source) const
{
if (!explain.empty())
{
@ -326,6 +367,14 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
change.value = max_value;
}

if (!getSettingSourceRestrictions(setting_name).isSourceAllowed(source))
{
if (reaction == THROW_ON_VIOLATION)
throw Exception(ErrorCodes::READONLY, "Setting {} is not allowed to be set by {}", setting_name, toString(source));
else
return false;
}

return true;
}

@ -2,6 +2,7 @@

#include <Access/SettingsProfileElement.h>
#include <Common/SettingsChanges.h>
#include <Common/SettingSource.h>
#include <unordered_map>

namespace Poco::Util
@ -73,17 +74,18 @@ public:
void merge(const SettingsConstraints & other);

/// Checks whether `change` violates these constraints and throws an exception if so.
void check(const Settings & current_settings, const SettingsProfileElements & profile_elements, SettingSource source) const;
void check(const Settings & current_settings, const SettingChange & change, SettingSource source) const;
void check(const Settings & current_settings, const SettingsChanges & changes, SettingSource source) const;
void check(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const;

/// Checks whether `change` violates these constraints and throws an exception if so. (setting short name is expected inside `changes`)
void check(const MergeTreeSettings & current_settings, const SettingChange & change) const;
void check(const MergeTreeSettings & current_settings, const SettingsChanges & changes) const;

/// Checks whether `change` violates these constraints and clamps the `change` if so.
void clamp(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const;


friend bool operator ==(const SettingsConstraints & left, const SettingsConstraints & right);
friend bool operator !=(const SettingsConstraints & left, const SettingsConstraints & right) { return !(left == right); }
@ -133,7 +135,10 @@ private:
{}

// Perform checking
bool check(SettingChange & change,
const Field & new_value,
ReactionOnViolation reaction,
SettingSource source) const;
};

struct StringHash
@ -145,7 +150,11 @@ private:
}
};

bool checkImpl(const Settings & current_settings,
SettingChange & change,
ReactionOnViolation reaction,
SettingSource source) const;

bool checkImpl(const MergeTreeSettings & current_settings, SettingChange & change, ReactionOnViolation reaction) const;

Checker getChecker(const Settings & current_settings, std::string_view setting_name) const;

@ -7,6 +7,7 @@

#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/TableFunctionNode.h>
#include <Analyzer/UnionNode.h>

#include <Interpreters/Context.h>
@ -90,26 +91,25 @@ private:
template <typename Derived>
using ConstInDepthQueryTreeVisitor = InDepthQueryTreeVisitor<Derived, true /*const_visitor*/>;

/** Same as InDepthQueryTreeVisitor (but has a different interface) and additionally keeps track of current scope context.
* This can be useful if your visitor has special logic that depends on current scope context.
*
* To specify behavior of the visitor you can implement following methods in derived class:
* 1. needChildVisit – this method allows to skip subtrees.
* 2. enterImpl – this method is called before children are processed.
* 3. leaveImpl – this method is called after children are processed.
*/
template <typename Derived, bool const_visitor = false>
class InDepthQueryTreeVisitorWithContext
{
public:
using VisitQueryTreeNodeType = QueryTreeNodePtr;

explicit InDepthQueryTreeVisitorWithContext(ContextPtr context, size_t initial_subquery_depth = 0)
: current_context(std::move(context))
, subquery_depth(initial_subquery_depth)
{}

/// Return true if visitor should visit child, false otherwise
bool needChildVisit(VisitQueryTreeNodeType & parent [[maybe_unused]], VisitQueryTreeNodeType & child [[maybe_unused]])
{
@ -146,18 +146,16 @@ public:

++subquery_depth;

getDerived().enterImpl(query_tree_node);

visitChildren(query_tree_node);

getDerived().leaveImpl(query_tree_node);
}

void enterImpl(VisitQueryTreeNodeType & node [[maybe_unused]])
{}

void leaveImpl(VisitQueryTreeNodeType & node [[maybe_unused]])
{}
private:
@ -171,17 +169,31 @@ private:
return *static_cast<Derived *>(this);
}

bool shouldSkipSubtree(
VisitQueryTreeNodeType & parent,
VisitQueryTreeNodeType & child,
size_t subtree_index)
{
bool need_visit_child = getDerived().needChildVisit(parent, child);
if (!need_visit_child)
return true;

if (auto * table_function_node = parent->as<TableFunctionNode>())
{
const auto & unresolved_indexes = table_function_node->getUnresolvedArgumentIndexes();
return std::find(unresolved_indexes.begin(), unresolved_indexes.end(), subtree_index) != unresolved_indexes.end();
}
return false;
}

void visitChildren(VisitQueryTreeNodeType & expression)
{
size_t index = 0;
for (auto & child : expression->getChildren())
{
if (child && !shouldSkipSubtree(expression, child, index))
visit(child);
++index;
}
}

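With this interface, a derived visitor overrides `enterImpl` for top-down work and `leaveImpl` for bottom-up work, replacing the old `visitImpl`/`shouldTraverseTopToBottom` pair. A hypothetical example visitor (not part of this commit) built against the class above:

```cpp
#include <vector>

#include <Analyzer/FunctionNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>

namespace DB
{

/// Illustrative only: collects the names of all function nodes in a query tree.
class CollectFunctionNamesVisitor : public InDepthQueryTreeVisitorWithContext<CollectFunctionNamesVisitor>
{
public:
    using Base = InDepthQueryTreeVisitorWithContext<CollectFunctionNamesVisitor>;
    using Base::Base;

    void enterImpl(QueryTreeNodePtr & node)
    {
        /// Top-down work: record every function name on the way down.
        if (auto * function_node = node->as<FunctionNode>())
            function_names.push_back(function_node->getFunctionName());
    }

    void leaveImpl(QueryTreeNodePtr & /*node*/)
    {
        /// Bottom-up work would go here (the old "traverse bottom to top" mode).
    }

    std::vector<String> function_names;
};

}
```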
@ -189,50 +201,4 @@ private:
size_t subquery_depth = 0;
};

template <typename Derived>
using ConstInDepthQueryTreeVisitorWithContext = InDepthQueryTreeVisitorWithContext<Derived, true /*const_visitor*/>;

/** Visitor that use another visitor to visit node only if condition for visiting node is true.
* For example, your visitor need to visit only query tree nodes or union nodes.
*
* Condition interface:
* struct Condition
* {
*     bool operator()(VisitQueryTreeNodeType & node)
*     {
*         return shouldNestedVisitorVisitNode(node);
*     }
* }
*/
template <typename Visitor, typename Condition, bool const_visitor = false>
class InDepthQueryTreeConditionalVisitor : public InDepthQueryTreeVisitor<InDepthQueryTreeConditionalVisitor<Visitor, Condition, const_visitor>, const_visitor>
{
public:
using Base = InDepthQueryTreeVisitor<InDepthQueryTreeConditionalVisitor<Visitor, Condition, const_visitor>, const_visitor>;
using VisitQueryTreeNodeType = typename Base::VisitQueryTreeNodeType;

explicit InDepthQueryTreeConditionalVisitor(Visitor & visitor_, Condition & condition_)
: visitor(visitor_)
, condition(condition_)
{
}

bool shouldTraverseTopToBottom() const
{
return visitor.shouldTraverseTopToBottom();
}

void visitImpl(VisitQueryTreeNodeType & query_tree_node)
{
if (condition(query_tree_node))
visitor.visit(query_tree_node);
}

Visitor & visitor;
Condition & condition;
};

template <typename Visitor, typename Condition>
using ConstInDepthQueryTreeConditionalVisitor = InDepthQueryTreeConditionalVisitor<Visitor, Condition, true /*const_visitor*/>;

}

@ -51,13 +51,7 @@ public:
using Base = InDepthQueryTreeVisitorWithContext<AggregateFunctionsArithmericOperationsVisitor>;
using Base::Base;

/// Traverse tree bottom to top
void leaveImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_arithmetic_operations_in_aggregate_functions)
return;
@ -22,7 +22,7 @@ public:
using Base = InDepthQueryTreeVisitorWithContext<RewriteArrayExistsToHasVisitor>;
using Base::Base;

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_rewrite_array_exists_to_has)
return;
@ -20,7 +20,7 @@ public:
using Base = InDepthQueryTreeVisitorWithContext<AutoFinalOnQueryPassVisitor>;
using Base::Base;

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().final)
return;
@ -50,7 +50,7 @@ public:
&& settings.max_hyperscan_regexp_total_length == 0;
}

void enterImpl(QueryTreeNodePtr & node)
{
auto * function_node = node->as<FunctionNode>();
if (!function_node || function_node->getFunctionName() != "or")
@ -688,7 +688,7 @@ public:
using Base = InDepthQueryTreeVisitorWithContext<ConvertQueryToCNFVisitor>;
using Base::Base;

void enterImpl(QueryTreeNodePtr & node)
{
auto * query_node = node->as<QueryNode>();
if (!query_node)
@ -22,7 +22,7 @@ public:
using Base = InDepthQueryTreeVisitorWithContext<CountDistinctVisitor>;
using Base::Base;

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().count_distinct_optimization)
return;
@ -193,7 +193,7 @@ public:
return true;
}

void enterImpl(QueryTreeNodePtr & node)
{
if (!isEnabled())
return;
@ -29,7 +29,7 @@ public:
using Base = InDepthQueryTreeVisitorWithContext<FunctionToSubcolumnsVisitor>;
using Base::Base;

void enterImpl(QueryTreeNodePtr & node) const
{
if (!getSettings().optimize_functions_to_subcolumns)
return;
@ -37,7 +37,7 @@ public:
, names_to_collect(names_to_collect_)
{}

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_syntax_fuse_functions)
return;
@ -46,7 +46,7 @@ public:
{
}

void enterImpl(const QueryTreeNodePtr & node)
{
auto * function_node = node->as<FunctionNode>();
if (!function_node || function_node->getFunctionName() != "grouping")
@ -23,7 +23,7 @@ public:
, multi_if_function_ptr(std::move(multi_if_function_ptr_))
{}

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_if_chain_to_multiif)
return;
@ -113,7 +113,7 @@ public:
using Base = InDepthQueryTreeVisitorWithContext<ConvertStringsToEnumVisitor>;
using Base::Base;

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_if_transform_strings_to_enum)
return;
@ -19,7 +19,7 @@ public:
: Base(std::move(context))
{}

void enterImpl(QueryTreeNodePtr & node)
{
auto * function_node = node->as<FunctionNode>();

@ -21,7 +21,7 @@ public:
, if_function_ptr(std::move(if_function_ptr_))
{}

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_multiif_to_if)
return;
@ -20,7 +20,7 @@ public:
using Base = InDepthQueryTreeVisitorWithContext<NormalizeCountVariantsVisitor>;
using Base::Base;

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_normalize_count_variants)
return;
@ -0,0 +1,221 @@
#include <Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.h>

#include <Functions/FunctionFactory.h>

#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/ColumnNode.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Common/DateLUT.h>
#include <Common/DateLUTImpl.h>

namespace DB
{

namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}

namespace
{

class OptimizeDateOrDateTimeConverterWithPreimageVisitor : public InDepthQueryTreeVisitorWithContext<OptimizeDateOrDateTimeConverterWithPreimageVisitor>
{
public:
using Base = InDepthQueryTreeVisitorWithContext<OptimizeDateOrDateTimeConverterWithPreimageVisitor>;

explicit OptimizeDateOrDateTimeConverterWithPreimageVisitor(ContextPtr context)
: Base(std::move(context))
{}

static bool needChildVisit(QueryTreeNodePtr & node, QueryTreeNodePtr & /*child*/)
{
const static std::unordered_set<String> relations = {
"equals",
"notEquals",
"less",
"greater",
"lessOrEquals",
"greaterOrEquals",
};

if (const auto * function = node->as<FunctionNode>())
{
return !relations.contains(function->getFunctionName());
}

return true;
}

void visitImpl(QueryTreeNodePtr & node) const
{
const static std::unordered_map<String, String> swap_relations = {
{"equals", "equals"},
{"notEquals", "notEquals"},
{"less", "greater"},
{"greater", "less"},
{"lessOrEquals", "greaterOrEquals"},
{"greaterOrEquals", "lessOrEquals"},
};

const auto * function = node->as<FunctionNode>();

if (!function || !swap_relations.contains(function->getFunctionName())) return;

if (function->getArguments().getNodes().size() != 2) return;

size_t func_id = function->getArguments().getNodes().size();

for (size_t i = 0; i < function->getArguments().getNodes().size(); i++)
{
if (const auto * func = function->getArguments().getNodes()[i]->as<FunctionNode>())
{
func_id = i;
}
}

if (func_id == function->getArguments().getNodes().size()) return;

size_t literal_id = 1 - func_id;
const auto * literal = function->getArguments().getNodes()[literal_id]->as<ConstantNode>();

if (!literal || literal->getValue().getType() != Field::Types::UInt64) return;

String comparator = literal_id > func_id ? function->getFunctionName(): swap_relations.at(function->getFunctionName());

const auto * func_node = function->getArguments().getNodes()[func_id]->as<FunctionNode>();
/// Currently we only handle single-argument functions.
if (!func_node || func_node->getArguments().getNodes().size() != 1) return;

const auto * column_id = func_node->getArguments().getNodes()[0]->as<ColumnNode>();
if (!column_id) return;

const auto * column_type = column_id->getColumnType().get();
if (!isDateOrDate32(column_type) && !isDateTime(column_type) && !isDateTime64(column_type)) return;

const auto & converter = FunctionFactory::instance().tryGet(func_node->getFunctionName(), getContext());
if (!converter) return;

ColumnsWithTypeAndName args;
args.emplace_back(column_id->getColumnType(), "tmp");
auto converter_base = converter->build(args);
if (!converter_base || !converter_base->hasInformationAboutPreimage()) return;

auto preimage_range = converter_base->getPreimage(*(column_id->getColumnType()), literal->getValue());
if (!preimage_range) return;

const auto new_node = generateOptimizedDateFilter(comparator, *column_id, *preimage_range);

if (!new_node) return;

node = new_node;
}

private:
QueryTreeNodePtr generateOptimizedDateFilter(const String & comparator, const ColumnNode & column_node, const std::pair<Field, Field>& range) const
{
const DateLUTImpl & date_lut = DateLUT::instance("UTC");

String start_date_or_date_time;
String end_date_or_date_time;

if (isDateOrDate32(column_node.getColumnType().get()))
{
start_date_or_date_time = date_lut.dateToString(range.first.get<DateLUTImpl::Time>());
end_date_or_date_time = date_lut.dateToString(range.second.get<DateLUTImpl::Time>());
}
else if (isDateTime(column_node.getColumnType().get()) || isDateTime64(column_node.getColumnType().get()))
{
start_date_or_date_time = date_lut.timeToString(range.first.get<DateLUTImpl::Time>());
end_date_or_date_time = date_lut.timeToString(range.second.get<DateLUTImpl::Time>());
}
else [[unlikely]] return {};

if (comparator == "equals")
{
const auto lhs = std::make_shared<FunctionNode>("greaterOrEquals");
lhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
lhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(start_date_or_date_time));
resolveOrdinaryFunctionNode(*lhs, lhs->getFunctionName());

const auto rhs = std::make_shared<FunctionNode>("less");
rhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
rhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
resolveOrdinaryFunctionNode(*rhs, rhs->getFunctionName());

const auto new_date_filter = std::make_shared<FunctionNode>("and");
new_date_filter->getArguments().getNodes() = {lhs, rhs};
resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());

return new_date_filter;
}
else if (comparator == "notEquals")
{
const auto lhs = std::make_shared<FunctionNode>("less");
lhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
lhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(start_date_or_date_time));
resolveOrdinaryFunctionNode(*lhs, lhs->getFunctionName());

const auto rhs = std::make_shared<FunctionNode>("greaterOrEquals");
rhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
rhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
resolveOrdinaryFunctionNode(*rhs, rhs->getFunctionName());

const auto new_date_filter = std::make_shared<FunctionNode>("or");
new_date_filter->getArguments().getNodes() = {lhs, rhs};
resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());

return new_date_filter;
}
else if (comparator == "greater")
{
const auto new_date_filter = std::make_shared<FunctionNode>("greaterOrEquals");
new_date_filter->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
new_date_filter->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());

return new_date_filter;
}
else if (comparator == "lessOrEquals")
{
const auto new_date_filter = std::make_shared<FunctionNode>("less");
new_date_filter->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
new_date_filter->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());

return new_date_filter;
}
else if (comparator == "less" || comparator == "greaterOrEquals")
{
const auto new_date_filter = std::make_shared<FunctionNode>(comparator);
new_date_filter->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
new_date_filter->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(start_date_or_date_time));
resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());

return new_date_filter;
}
else [[unlikely]]
{
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Expected equals, notEquals, less, lessOrEquals, greater, greaterOrEquals. Actual {}",
comparator);
}
}

void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
{
auto function = FunctionFactory::instance().get(function_name, getContext());
function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
}
};

}

void OptimizeDateOrDateTimeConverterWithPreimagePass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
OptimizeDateOrDateTimeConverterWithPreimageVisitor visitor(std::move(context));
visitor.visit(query_tree_node);
}

}
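A worked example of the preimage idea: a monotonic converter such as `toYear` maps a whole contiguous range of dates to a single value, so `toYear(c) = 2023` can be rewritten as a range predicate on `c` itself, which the primary key index can use. A standalone sketch that only builds the boundary strings for a `Date` column:

```cpp
#include <iostream>
#include <string>
#include <utility>

/// The preimage of toYear(c) = year for a Date column is the half-open range
/// [Jan 1 of `year`, Jan 1 of `year + 1`), so equality becomes two comparisons.
std::pair<std::string, std::string> yearPreimage(int year)
{
    return {std::to_string(year) + "-01-01", std::to_string(year + 1) + "-01-01"};
}

int main()
{
    auto [begin, end] = yearPreimage(2023);
    /// toYear(c) = 2023  ->  c >= '2023-01-01' AND c < '2024-01-01'
    std::cout << "c >= '" << begin << "' AND c < '" << end << "'\n";
}
```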
@@ -0,0 +1,24 @@
#pragma once

#include <Analyzer/IQueryTreePass.h>

namespace DB
{

/** Replace predicates that apply Date/DateTime converters with their preimages to improve performance.
  * Given a Date column c, toYear(c) = 2023 -> c >= '2023-01-01' AND c < '2024-01-01'
  * Or if c is a DateTime column, toYear(c) = 2023 -> c >= '2023-01-01 00:00:00' AND c < '2024-01-01 00:00:00'.
  * A similar optimization also applies to other converters.
  */
class OptimizeDateOrDateTimeConverterWithPreimagePass final : public IQueryTreePass
{
public:
    String getName() override { return "OptimizeDateOrDateTimeConverterWithPreimagePass"; }

    String getDescription() override { return "Replace predicate having Date/DateTime converters with their preimages"; }

    void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override;

};

}
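The rewrite replaces a converter comparison with a range check on the underlying column. As a minimal standalone sketch of the preimage idea (the helper below is hypothetical and exists only for illustration; the real pass builds FunctionNodes on the query tree, as in the visitor code above):

#include <iostream>
#include <string>

// The half-open preimage [start, end) of `toYear(c) = year` for a Date column:
// the rewritten predicate is `c >= start AND c < end`.
struct DateRange
{
    std::string start;
    std::string end;
};

DateRange preimageOfYearEquals(int year)
{
    return {std::to_string(year) + "-01-01", std::to_string(year + 1) + "-01-01"};
}

int main()
{
    auto range = preimageOfYearEquals(2023);
    // Prints: c >= '2023-01-01' AND c < '2024-01-01'
    std::cout << "c >= '" << range.start << "' AND c < '" << range.end << "'\n";
}

The half-open interval is also what lets `notEquals` decompose into `less(start) OR greaterOrEquals(end)` in the visitor above.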
@@ -26,7 +26,7 @@ public:
        return !child->as<FunctionNode>();
    }

    void visitImpl(QueryTreeNodePtr & node)
    void enterImpl(QueryTreeNodePtr & node)
    {
        if (!getSettings().optimize_group_by_function_keys)
            return;

@@ -28,7 +28,7 @@ public:
        return true;
    }

    void visitImpl(QueryTreeNodePtr & node)
    void enterImpl(QueryTreeNodePtr & node)
    {
        if (!getSettings().optimize_redundant_functions_in_order_by)
            return;
@@ -116,6 +116,7 @@ namespace ErrorCodes
    extern const int UNKNOWN_TABLE;
    extern const int ILLEGAL_COLUMN;
    extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH;
    extern const int FUNCTION_CANNOT_HAVE_PARAMETERS;
}

/** Query analyzer implementation overview. Please check documentation in QueryAnalysisPass.h first.

@@ -4896,6 +4897,12 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
            lambda_expression_untyped->formatASTForErrorMessage(),
            scope.scope_node->formatASTForErrorMessage());

        if (!parameters.empty())
        {
            throw Exception(
                ErrorCodes::FUNCTION_CANNOT_HAVE_PARAMETERS, "Function {} is not parametric", function_node.formatASTForErrorMessage());
        }

        auto lambda_expression_clone = lambda_expression_untyped->clone();

        IdentifierResolveScope lambda_scope(lambda_expression_clone, &scope /*parent_scope*/);

@@ -5012,9 +5019,13 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
    }

    FunctionOverloadResolverPtr function = UserDefinedExecutableFunctionFactory::instance().tryGet(function_name, scope.context, parameters);
    bool is_executable_udf = true;

    if (!function)
    {
        function = FunctionFactory::instance().tryGet(function_name, scope.context);
        is_executable_udf = false;
    }

    if (!function)
    {

@@ -5065,6 +5076,12 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
        return result_projection_names;
    }

    /// Executable UDFs may have parameters. They are checked in UserDefinedExecutableFunctionFactory.
    if (!parameters.empty() && !is_executable_udf)
    {
        throw Exception(ErrorCodes::FUNCTION_CANNOT_HAVE_PARAMETERS, "Function {} is not parametric", function_name);
    }

    /** For lambda arguments we need to initialize lambda argument types DataTypeFunction using the `getLambdaArgumentTypes` function.
      * Then each lambda argument is initialized with columns, where the column source is the lambda.
      * This information is important for later steps of query processing.

@@ -6434,7 +6451,7 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
    table_function_ptr->parseArguments(table_function_ast, scope_context);

    auto table_function_storage = scope_context->getQueryContext()->executeTableFunction(table_function_ast, table_function_ptr);
    table_function_node_typed.resolve(std::move(table_function_ptr), std::move(table_function_storage), scope_context);
    table_function_node_typed.resolve(std::move(table_function_ptr), std::move(table_function_storage), scope_context, std::move(skip_analysis_arguments_indexes));
}

/// Resolve array join node in scope
@@ -6477,7 +6494,9 @@ void QueryAnalyzer::resolveArrayJoin(QueryTreeNodePtr & array_join_node, Identif

    resolveExpressionNode(array_join_expression, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);

    auto result_type = array_join_expression->getResultType();
    auto process_array_join_expression = [&](QueryTreeNodePtr & expression)
    {
        auto result_type = expression->getResultType();
        bool is_array_type = isArray(result_type);
        bool is_map_type = isMap(result_type);

@@ -6485,7 +6504,7 @@ void QueryAnalyzer::resolveArrayJoin(QueryTreeNodePtr & array_join_node, Identif
            throw Exception(ErrorCodes::TYPE_MISMATCH,
                "ARRAY JOIN {} requires expression {} with Array or Map type. Actual {}. In scope {}",
                array_join_node_typed.formatASTForErrorMessage(),
                array_join_expression->formatASTForErrorMessage(),
                expression->formatASTForErrorMessage(),
                result_type->getName(),
                scope.scope_node->formatASTForErrorMessage());

@@ -6523,9 +6542,21 @@ void QueryAnalyzer::resolveArrayJoin(QueryTreeNodePtr & array_join_node, Identif
        array_join_column_names.emplace(array_join_column_name);

        NameAndTypePair array_join_column(array_join_column_name, result_type);
        auto array_join_column_node = std::make_shared<ColumnNode>(std::move(array_join_column), array_join_expression, array_join_node);
        auto array_join_column_node = std::make_shared<ColumnNode>(std::move(array_join_column), expression, array_join_node);
        array_join_column_node->setAlias(array_join_expression_alias);
        array_join_column_expressions.push_back(std::move(array_join_column_node));
    };

    // Support ARRAY JOIN COLUMNS(...). The COLUMNS transformer is resolved to a list of columns.
    if (auto * columns_list = array_join_expression->as<ListNode>())
    {
        for (auto & array_join_subexpression : columns_list->getNodes())
            process_array_join_expression(array_join_subexpression);
    }
    else
    {
        process_array_join_expression(array_join_expression);
    }
}

/** Allow to resolve ARRAY JOIN columns from aliases with types after ARRAY JOIN only after the ARRAY JOIN expression list is resolved, because

@@ -6537,11 +6568,9 @@ void QueryAnalyzer::resolveArrayJoin(QueryTreeNodePtr & array_join_node, Identif
  * And it is expected that `value_element` inside the projection expression list will be resolved as the `value_element` expression
  * with type after ARRAY JOIN.
  */
    for (size_t i = 0; i < array_join_nodes_size; ++i)
    array_join_nodes = std::move(array_join_column_expressions);
    for (auto & array_join_column_expression : array_join_nodes)
    {
        auto & array_join_column_expression = array_join_nodes[i];
        array_join_column_expression = std::move(array_join_column_expressions[i]);

        auto it = scope.alias_name_to_expression_node.find(array_join_column_expression->getAlias());
        if (it != scope.alias_name_to_expression_node.end())
        {
@@ -26,7 +26,7 @@ public:
    using Base = InDepthQueryTreeVisitorWithContext<RewriteAggregateFunctionWithIfVisitor>;
    using Base::Base;

    void visitImpl(QueryTreeNodePtr & node)
    void enterImpl(QueryTreeNodePtr & node)
    {
        if (!getSettings().optimize_rewrite_aggregate_function_with_if)
            return;

@@ -24,7 +24,7 @@ public:
    using Base = InDepthQueryTreeVisitorWithContext<ShardNumColumnToFunctionVisitor>;
    using Base::Base;

    void visitImpl(QueryTreeNodePtr & node) const
    void enterImpl(QueryTreeNodePtr & node) const
    {
        auto * column_node = node->as<ColumnNode>();
        if (!column_node)

@@ -26,7 +26,7 @@ public:
    using Base = InDepthQueryTreeVisitorWithContext<SumIfToCountIfVisitor>;
    using Base::Base;

    void visitImpl(QueryTreeNodePtr & node)
    void enterImpl(QueryTreeNodePtr & node)
    {
        if (!getSettings().optimize_rewrite_sum_if_to_count_if)
            return;

@@ -31,7 +31,7 @@ public:
    using Base = InDepthQueryTreeVisitorWithContext<UniqInjectiveFunctionsEliminationVisitor>;
    using Base::Base;

    void visitImpl(QueryTreeNodePtr & node)
    void enterImpl(QueryTreeNodePtr & node)
    {
        if (!getSettings().optimize_injective_functions_inside_uniq)
            return;

@@ -42,6 +42,7 @@
#include <Analyzer/Passes/CrossToInnerJoinPass.h>
#include <Analyzer/Passes/ShardNumColumnToFunctionPass.h>
#include <Analyzer/Passes/ConvertQueryToCNFPass.h>
#include <Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.h>

namespace DB
{

@@ -278,6 +279,7 @@ void addQueryTreePasses(QueryTreePassManager & manager)
    manager.addPass(std::make_unique<AutoFinalOnQueryPass>());
    manager.addPass(std::make_unique<CrossToInnerJoinPass>());
    manager.addPass(std::make_unique<ShardNumColumnToFunctionPass>());
    manager.addPass(std::make_unique<OptimizeDateOrDateTimeConverterWithPreimagePass>());
}

}
@@ -27,12 +27,13 @@ TableFunctionNode::TableFunctionNode(String table_function_name_)
    children[arguments_child_index] = std::make_shared<ListNode>();
}

void TableFunctionNode::resolve(TableFunctionPtr table_function_value, StoragePtr storage_value, ContextPtr context)
void TableFunctionNode::resolve(TableFunctionPtr table_function_value, StoragePtr storage_value, ContextPtr context, std::vector<size_t> unresolved_arguments_indexes_)
{
    table_function = std::move(table_function_value);
    storage = std::move(storage_value);
    storage_id = storage->getStorageID();
    storage_snapshot = storage->getStorageSnapshot(storage->getInMemoryMetadataPtr(), context);
    unresolved_arguments_indexes = std::move(unresolved_arguments_indexes_);
}

const StorageID & TableFunctionNode::getStorageID() const

@@ -132,6 +133,7 @@ QueryTreeNodePtr TableFunctionNode::cloneImpl() const
    result->storage_snapshot = storage_snapshot;
    result->table_expression_modifiers = table_expression_modifiers;
    result->settings_changes = settings_changes;
    result->unresolved_arguments_indexes = unresolved_arguments_indexes;

    return result;
}

@@ -98,7 +98,7 @@ public:
    }

    /// Resolve table function with table function, storage and context
    void resolve(TableFunctionPtr table_function_value, StoragePtr storage_value, ContextPtr context);
    void resolve(TableFunctionPtr table_function_value, StoragePtr storage_value, ContextPtr context, std::vector<size_t> unresolved_arguments_indexes_);

    /// Get storage id, throws exception if function node is not resolved
    const StorageID & getStorageID() const;

@@ -106,6 +106,11 @@ public:
    /// Get storage snapshot, throws exception if function node is not resolved
    const StorageSnapshotPtr & getStorageSnapshot() const;

    const std::vector<size_t> & getUnresolvedArgumentIndexes() const
    {
        return unresolved_arguments_indexes;
    }

    /// Return true if table function node has table expression modifiers, false otherwise
    bool hasTableExpressionModifiers() const
    {

@@ -164,6 +169,7 @@ private:
    StoragePtr storage;
    StorageID storage_id;
    StorageSnapshotPtr storage_snapshot;
    std::vector<size_t> unresolved_arguments_indexes;
    std::optional<TableExpressionModifiers> table_expression_modifiers;
    SettingsChanges settings_changes;
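The new member records which table-function arguments the analyzer deliberately left unresolved, so later stages can treat those positions as opaque. A standalone sketch of the same bookkeeping (mock types and argument values, not the ClickHouse classes):

#include <cstdio>
#include <string>
#include <vector>

// Mock: a node keeps the positions of arguments that were skipped during analysis.
struct MockTableFunctionNode
{
    std::vector<std::string> arguments;
    std::vector<size_t> unresolved_arguments_indexes;
};

int main()
{
    // Hypothetical arguments; index 1 is passed through without analysis.
    MockTableFunctionNode node{{"'url'", "opaque_expression", "'CSV'"}, {1}};
    for (size_t index : node.unresolved_arguments_indexes)
        std::printf("argument #%zu (%s) is passed through unanalyzed\n", index, node.arguments[index].c_str());
}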
@@ -30,6 +30,7 @@ public:
    String compression_method;
    int compression_level = -1;
    String password;
    String s3_storage_class;
    ContextPtr context;
    bool is_internal_backup = false;
    std::shared_ptr<IBackupCoordination> backup_coordination;
@@ -88,7 +88,7 @@ namespace
    request.SetMaxKeys(1);
    auto outcome = client.ListObjects(request);
    if (!outcome.IsSuccess())
        throw Exception::createDeprecated(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
        throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
    return outcome.GetResult().GetContents();
}

@@ -178,7 +178,7 @@ void BackupReaderS3::copyFileToDisk(const String & path_in_backup, size_t file_s


BackupWriterS3::BackupWriterS3(
    const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_)
    const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const String & storage_class_name, const ContextPtr & context_)
    : BackupWriterDefault(&Poco::Logger::get("BackupWriterS3"), context_)
    , s3_uri(s3_uri_)
    , client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))

@@ -188,6 +188,7 @@ BackupWriterS3::BackupWriterS3(
    request_settings.updateFromSettings(context_->getSettingsRef());
    request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint
    request_settings.allow_native_copy = allow_s3_native_copy;
    request_settings.setStorageClassName(storage_class_name);
}

void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,

@@ -271,7 +272,7 @@ void BackupWriterS3::removeFile(const String & file_name)
    request.SetKey(fs::path(s3_uri.key) / file_name);
    auto outcome = client->DeleteObject(request);
    if (!outcome.IsSuccess() && !isNotFoundError(outcome.GetError().GetErrorType()))
        throw Exception::createDeprecated(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
        throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
}

void BackupWriterS3::removeFiles(const Strings & file_names)

@@ -329,7 +330,7 @@ void BackupWriterS3::removeFilesBatch(const Strings & file_names)

    auto outcome = client->DeleteObjects(request);
    if (!outcome.IsSuccess() && !isNotFoundError(outcome.GetError().GetErrorType()))
        throw Exception::createDeprecated(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
        throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
}
}


@@ -38,7 +38,7 @@ private:
class BackupWriterS3 : public BackupWriterDefault
{
public:
    BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_);
    BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const String & storage_class_name, const ContextPtr & context_);
    ~BackupWriterS3() override;

    bool fileExists(const String & file_name) override;
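The recurring one-line change in these hunks replaces a generic exception with one that preserves the SDK's error type, so callers can distinguish, say, a throttling error from a missing key. A standalone sketch of the pattern with mock types (the real code wraps the AWS SDK's outcome objects in DB::S3Exception):

#include <cstdio>
#include <stdexcept>
#include <string>

enum class MockS3ErrorType { NO_SUCH_KEY, SLOW_DOWN };

// Mock typed exception that keeps the S3 error kind for the caller.
struct MockS3Exception : std::runtime_error
{
    MockS3ErrorType type;
    MockS3Exception(const std::string & message, MockS3ErrorType type_)
        : std::runtime_error(message), type(type_) {}
};

int main()
{
    try
    {
        throw MockS3Exception("Please reduce your request rate.", MockS3ErrorType::SLOW_DOWN);
    }
    catch (const MockS3Exception & e)
    {
        // Retry logic can now branch on the preserved error type.
        if (e.type == MockS3ErrorType::SLOW_DOWN)
            std::printf("retryable: %s\n", e.what());
    }
}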
@@ -21,6 +21,7 @@ namespace ErrorCodes
    M(String, id) \
    M(String, compression_method) \
    M(String, password) \
    M(String, s3_storage_class) \
    M(Bool, structure_only) \
    M(Bool, async) \
    M(Bool, decrypt_files_from_encrypted_disks) \

@@ -25,6 +25,9 @@ struct BackupSettings
    /// Password used to encrypt the backup.
    String password;

    /// S3 storage class.
    String s3_storage_class = "";

    /// If this is set to true then only create queries will be written to backup,
    /// without the data of tables.
    bool structure_only = false;

@@ -344,6 +344,7 @@ void BackupsWorker::doBackup(
    backup_create_params.compression_method = backup_settings.compression_method;
    backup_create_params.compression_level = backup_settings.compression_level;
    backup_create_params.password = backup_settings.password;
    backup_create_params.s3_storage_class = backup_settings.s3_storage_class;
    backup_create_params.is_internal_backup = backup_settings.internal;
    backup_create_params.backup_coordination = backup_coordination;
    backup_create_params.backup_uuid = backup_settings.backup_uuid;

@@ -112,7 +112,7 @@ void registerBackupEngineS3(BackupFactory & factory)
    }
    else
    {
        auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.allow_s3_native_copy, params.context);
        auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.allow_s3_native_copy, params.s3_storage_class, params.context);
        return std::make_unique<BackupImpl>(
            backup_name_for_logging,
            archive_params,
@@ -124,6 +124,9 @@ void Suggest::load(ContextPtr context, const ConnectionParameters & connection_p
            if (e.code() == ErrorCodes::DEADLOCK_AVOIDED)
                continue;

            /// The client can successfully connect to the server and
            /// get ErrorCodes::USER_SESSION_LIMIT_EXCEEDED for the suggestion connection.

            /// We should not use std::cerr here, because this method works concurrently with the main thread.
            /// WriteBufferFromFileDescriptor will write directly to the file descriptor, avoiding a data race on std::cerr.
@@ -564,15 +564,22 @@ void ColumnNullable::updatePermutationImpl(IColumn::PermutationSortDirection dir
    else
        getNestedColumn().updatePermutation(direction, stability, limit, null_direction_hint, res, new_ranges);

    equal_ranges = std::move(new_ranges);

    if (unlikely(stability == PermutationSortStability::Stable))
    {
        for (auto & null_range : null_ranges)
            ::sort(res.begin() + null_range.first, res.begin() + null_range.second);
    }

    if (is_nulls_last || null_ranges.empty())
    {
        equal_ranges = std::move(new_ranges);
        std::move(null_ranges.begin(), null_ranges.end(), std::back_inserter(equal_ranges));
    }
    else
    {
        equal_ranges = std::move(null_ranges);
        std::move(new_ranges.begin(), new_ranges.end(), std::back_inserter(equal_ranges));
    }
}

void ColumnNullable::getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
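The fix keeps the equal ranges in the order the rows actually appear in after sorting: null ranges come after the value ranges only when NULLs sort last. A standalone sketch of that merge decision (plain pairs instead of ClickHouse's internal range type):

#include <cstdio>
#include <utility>
#include <vector>

using Ranges = std::vector<std::pair<size_t, size_t>>;

// Concatenate value ranges and null ranges in row order, mirroring the branch above.
Ranges mergeEqualRanges(Ranges value_ranges, Ranges null_ranges, bool is_nulls_last)
{
    Ranges result;
    if (is_nulls_last || null_ranges.empty())
    {
        result = std::move(value_ranges);
        result.insert(result.end(), null_ranges.begin(), null_ranges.end());
    }
    else
    {
        result = std::move(null_ranges);
        result.insert(result.end(), value_ranges.begin(), value_ranges.end());
    }
    return result;
}

int main()
{
    auto ranges = mergeEqualRanges({{2, 5}, {5, 8}}, {{0, 2}}, /*is_nulls_last=*/ false);
    for (auto [begin, end] : ranges)
        std::printf("[%zu, %zu)\n", begin, end);  // nulls-first: [0,2) [2,5) [5,8)
}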
@@ -439,7 +439,7 @@ void ColumnSparse::compareColumn(const IColumn & rhs, size_t rhs_row_num,
    PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
    int direction, int nan_direction_hint) const
{
    if (row_indexes)
    if (row_indexes || !typeid_cast<const ColumnSparse *>(&rhs))
    {
        /// TODO: implement without conversion to full column.
        auto this_full = convertToFullColumnIfSparse();
@@ -41,9 +41,25 @@ namespace DB
    }
}

std::mutex CaresPTRResolver::mutex;
struct AresChannelRAII
{
    AresChannelRAII()
    {
        if (ares_init(&channel) != ARES_SUCCESS)
        {
            throw DB::Exception(DB::ErrorCodes::DNS_ERROR, "Failed to initialize c-ares channel");
        }
    }

CaresPTRResolver::CaresPTRResolver(CaresPTRResolver::provider_token) : channel(nullptr)
    ~AresChannelRAII()
    {
        ares_destroy(channel);
    }

    ares_channel channel;
};
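The RAII wrapper replaces the previous design, in which one shared channel lived in the resolver and a static mutex serialized every lookup; now each resolve call owns a short-lived channel, so no cross-call locking is needed. A minimal standalone sketch of the acquire-in-constructor, release-in-destructor pattern (mock resource functions, not c-ares):

#include <cstdio>
#include <stdexcept>

// Mock stand-ins for ares_init / ares_destroy.
bool mock_init(int & handle) { handle = 42; return true; }
void mock_destroy(int handle) { std::printf("destroyed handle %d\n", handle); }

struct MockChannelRAII
{
    MockChannelRAII()
    {
        if (!mock_init(handle))
            throw std::runtime_error("Failed to initialize channel");
    }

    // The destructor runs on every exit path, including exceptions,
    // so the channel can never leak.
    ~MockChannelRAII() { mock_destroy(handle); }

    int handle = 0;
};

int main()
{
    MockChannelRAII channel_raii;
    std::printf("using handle %d\n", channel_raii.handle);
}   // channel destroyed here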
CaresPTRResolver::CaresPTRResolver(provider_token)
{
    /*
     * ares_library_init is not thread safe. Currently, the only other usage of c-ares seems to be in grpc.

@@ -57,34 +73,22 @@ namespace DB
     * */
    static const auto library_init_result = ares_library_init(ARES_LIB_INIT_ALL);

    if (library_init_result != ARES_SUCCESS || ares_init(&channel) != ARES_SUCCESS)
    if (library_init_result != ARES_SUCCESS)
    {
        throw DB::Exception(DB::ErrorCodes::DNS_ERROR, "Failed to initialize c-ares");
    }
}

CaresPTRResolver::~CaresPTRResolver()
{
    ares_destroy(channel);
    /*
     * Library initialization is currently done only once, in the constructor. Multiple instances of CaresPTRResolver
     * will be used over the lifetime of ClickHouse, so it's problematic to have de-initialization here.
     * From a practical standpoint, it makes little to no sense to de-init a DNS library, since DNS requests will happen
     * until the end of the program. Hence, ares_library_cleanup() will not be called.
     * */
}

std::unordered_set<std::string> CaresPTRResolver::resolve(const std::string & ip)
{
    std::lock_guard guard(mutex);
    AresChannelRAII channel_raii;

    std::unordered_set<std::string> ptr_records;

    resolve(ip, ptr_records);
    resolve(ip, ptr_records, channel_raii.channel);

    if (!wait_and_process())
    if (!wait_and_process(channel_raii.channel))
    {
        cancel_requests();
        throw DB::Exception(DB::ErrorCodes::DNS_ERROR, "Failed to complete reverse DNS query for IP {}", ip);
    }

@@ -93,22 +97,21 @@ namespace DB

std::unordered_set<std::string> CaresPTRResolver::resolve_v6(const std::string & ip)
{
    std::lock_guard guard(mutex);
    AresChannelRAII channel_raii;

    std::unordered_set<std::string> ptr_records;

    resolve_v6(ip, ptr_records);
    resolve_v6(ip, ptr_records, channel_raii.channel);

    if (!wait_and_process())
    if (!wait_and_process(channel_raii.channel))
    {
        cancel_requests();
        throw DB::Exception(DB::ErrorCodes::DNS_ERROR, "Failed to complete reverse DNS query for IP {}", ip);
    }

    return ptr_records;
}

void CaresPTRResolver::resolve(const std::string & ip, std::unordered_set<std::string> & response)
void CaresPTRResolver::resolve(const std::string & ip, std::unordered_set<std::string> & response, ares_channel channel)
{
    in_addr addr;

@@ -117,7 +120,7 @@ namespace DB
    ares_gethostbyaddr(channel, reinterpret_cast<const void*>(&addr), sizeof(addr), AF_INET, callback, &response);
}

void CaresPTRResolver::resolve_v6(const std::string & ip, std::unordered_set<std::string> & response)
void CaresPTRResolver::resolve_v6(const std::string & ip, std::unordered_set<std::string> & response, ares_channel channel)
{
    in6_addr addr;
    inet_pton(AF_INET6, ip.c_str(), &addr);

@@ -125,15 +128,15 @@ namespace DB
    ares_gethostbyaddr(channel, reinterpret_cast<const void*>(&addr), sizeof(addr), AF_INET6, callback, &response);
}

bool CaresPTRResolver::wait_and_process()
bool CaresPTRResolver::wait_and_process(ares_channel channel)
{
    int sockets[ARES_GETSOCK_MAXNUM];
    pollfd pollfd[ARES_GETSOCK_MAXNUM];

    while (true)
    {
        auto readable_sockets = get_readable_sockets(sockets, pollfd);
        auto timeout = calculate_timeout();
        auto readable_sockets = get_readable_sockets(sockets, pollfd, channel);
        auto timeout = calculate_timeout(channel);

        int number_of_fds_ready = 0;
        if (!readable_sockets.empty())

@@ -158,11 +161,11 @@ namespace DB

        if (number_of_fds_ready > 0)
        {
            process_readable_sockets(readable_sockets);
            process_readable_sockets(readable_sockets, channel);
        }
        else
        {
            process_possible_timeout();
            process_possible_timeout(channel);
            break;
        }
    }

@@ -170,12 +173,12 @@ namespace DB
    return true;
}

void CaresPTRResolver::cancel_requests()
void CaresPTRResolver::cancel_requests(ares_channel channel)
{
    ares_cancel(channel);
}

std::span<pollfd> CaresPTRResolver::get_readable_sockets(int * sockets, pollfd * pollfd)
std::span<pollfd> CaresPTRResolver::get_readable_sockets(int * sockets, pollfd * pollfd, ares_channel channel)
{
    int sockets_bitmask = ares_getsock(channel, sockets, ARES_GETSOCK_MAXNUM);

@@ -205,7 +208,7 @@ namespace DB
    return std::span<struct pollfd>(pollfd, number_of_sockets_to_poll);
}

int64_t CaresPTRResolver::calculate_timeout()
int64_t CaresPTRResolver::calculate_timeout(ares_channel channel)
{
    timeval tv;
    if (auto * tvp = ares_timeout(channel, nullptr, &tv))

@@ -218,14 +221,14 @@ namespace DB
    return 0;
}

void CaresPTRResolver::process_possible_timeout()
void CaresPTRResolver::process_possible_timeout(ares_channel channel)
{
    /* Call ares_process() unconditionally here, even if we simply timed out
       above, as otherwise the ares name resolve won't time out! */
    ares_process_fd(channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
}

void CaresPTRResolver::process_readable_sockets(std::span<pollfd> readable_sockets)
void CaresPTRResolver::process_readable_sockets(std::span<pollfd> readable_sockets, ares_channel channel)
{
    for (auto readable_socket : readable_sockets)
    {

@@ -28,32 +28,35 @@ namespace DB

public:
    explicit CaresPTRResolver(provider_token);
    ~CaresPTRResolver() override;

    /*
     * Library initialization is currently done only once, in the constructor. Multiple instances of CaresPTRResolver
     * will be used over the lifetime of ClickHouse, so it's problematic to have de-initialization here.
     * From a practical standpoint, it makes little to no sense to de-init a DNS library, since DNS requests will happen
     * until the end of the program. Hence, ares_library_cleanup() will not be called.
     * */
    ~CaresPTRResolver() override = default;

    std::unordered_set<std::string> resolve(const std::string & ip) override;

    std::unordered_set<std::string> resolve_v6(const std::string & ip) override;

private:
    bool wait_and_process();
    bool wait_and_process(ares_channel channel);

    void cancel_requests();
    void cancel_requests(ares_channel channel);

    void resolve(const std::string & ip, std::unordered_set<std::string> & response);
    void resolve(const std::string & ip, std::unordered_set<std::string> & response, ares_channel channel);

    void resolve_v6(const std::string & ip, std::unordered_set<std::string> & response);
    void resolve_v6(const std::string & ip, std::unordered_set<std::string> & response, ares_channel channel);

    std::span<pollfd> get_readable_sockets(int * sockets, pollfd * pollfd);
    std::span<pollfd> get_readable_sockets(int * sockets, pollfd * pollfd, ares_channel channel);

    int64_t calculate_timeout();
    int64_t calculate_timeout(ares_channel channel);

    void process_possible_timeout();
    void process_possible_timeout(ares_channel channel);

    void process_readable_sockets(std::span<pollfd> readable_sockets);

    ares_channel channel;

    static std::mutex mutex;
    void process_readable_sockets(std::span<pollfd> readable_sockets, ares_channel channel);
};
}
@@ -328,7 +328,7 @@ void ConfigProcessor::mergeRecursive(XMLDocumentPtr config, Node * config_root,
    }
}

void ConfigProcessor::merge(XMLDocumentPtr config, XMLDocumentPtr with)
bool ConfigProcessor::merge(XMLDocumentPtr config, XMLDocumentPtr with)
{
    Node * config_root = getRootNode(config.get());
    Node * with_root = getRootNode(with.get());

@@ -343,11 +343,15 @@ void ConfigProcessor::merge(XMLDocumentPtr config, XMLDocumentPtr with)
        && !((config_root_node_name == "yandex" || config_root_node_name == "clickhouse")
            && (merged_root_node_name == "yandex" || merged_root_node_name == "clickhouse")))
    {
        if (config_root_node_name != "clickhouse" && config_root_node_name != "yandex")
            return false;

        throw Poco::Exception("Root element doesn't have the corresponding root element as the config file."
            " It must be <" + config_root->nodeName() + ">");
    }

    mergeRecursive(config, config_root, with_root);
    return true;
}

void ConfigProcessor::doIncludesRecursive(

@@ -645,7 +649,12 @@ XMLDocumentPtr ConfigProcessor::processConfig(
        with = dom_parser.parse(merge_file);
    }

    merge(config, with);
    if (!merge(config, with))
    {
        LOG_DEBUG(log, "Merging bypassed - configuration file '{}' doesn't belong to configuration '{}' - merging root node name '{}' doesn't match '{}'",
            merge_file, path, getRootNode(with.get())->nodeName(), getRootNode(config.get())->nodeName());
        continue;
    }

    contributing_files.push_back(merge_file);
}

@@ -144,7 +144,9 @@ private:

    void mergeRecursive(XMLDocumentPtr config, Poco::XML::Node * config_root, const Poco::XML::Node * with_root);

    void merge(XMLDocumentPtr config, XMLDocumentPtr with);
    /// If the config root node name is not 'clickhouse' and the merging config's root node name doesn't match it, merging is bypassed and false is returned.
    /// For compatibility, the root node 'yandex' is considered equal to 'clickhouse'.
    bool merge(XMLDocumentPtr config, XMLDocumentPtr with);

    void doIncludesRecursive(
        XMLDocumentPtr config,
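The compatibility rule above boils down to one predicate: two root names are mergeable when they match, or when both are one of the interchangeable names 'clickhouse' and 'yandex'. A standalone sketch of that check (hypothetical helper names; the real code compares Poco XML nodes):

#include <cstdio>
#include <string>

bool isClickHouseRoot(const std::string & name)
{
    return name == "clickhouse" || name == "yandex";
}

// Mergeable when names match exactly, or when both are ClickHouse-style roots.
bool rootsMergeable(const std::string & config_root, const std::string & with_root)
{
    return config_root == with_root || (isClickHouseRoot(config_root) && isClickHouseRoot(with_root));
}

int main()
{
    std::printf("%d\n", rootsMergeable("clickhouse", "yandex"));   // 1: compatibility aliases
    std::printf("%d\n", rootsMergeable("clickhouse", "profiles")); // 0: merging is bypassed
}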
@@ -582,6 +582,7 @@
    M(697, CANNOT_RESTORE_TO_NONENCRYPTED_DISK) \
    M(698, INVALID_REDIS_STORAGE_TYPE) \
    M(699, INVALID_REDIS_TABLE_STRUCTURE) \
    M(700, USER_SESSION_LIMIT_EXCEEDED) \
    \
    M(999, KEEPER_EXCEPTION) \
    M(1000, POCO_EXCEPTION) \
@@ -229,7 +229,7 @@ void MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryT
    }

    std::bernoulli_distribution sample(sample_probability);
    if (unlikely(sample_probability > 0.0 && sample(thread_local_rng)))
    if (unlikely(sample_probability > 0.0 && isSizeOkForSampling(size) && sample(thread_local_rng)))
    {
        MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
        DB::TraceSender::send(DB::TraceType::MemorySample, StackTrace(), {.size = size});

@@ -413,7 +413,7 @@ void MemoryTracker::free(Int64 size)
    }

    std::bernoulli_distribution sample(sample_probability);
    if (unlikely(sample_probability > 0.0 && sample(thread_local_rng)))
    if (unlikely(sample_probability > 0.0 && isSizeOkForSampling(size) && sample(thread_local_rng)))
    {
        MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
        DB::TraceSender::send(DB::TraceType::MemorySample, StackTrace(), {.size = -size});

@@ -534,6 +534,12 @@ void MemoryTracker::setOrRaiseProfilerLimit(Int64 value)
    ;
}

bool MemoryTracker::isSizeOkForSampling(UInt64 size) const
{
    /// We can avoid comparing min_allocation_size_bytes with zero, because a 0-byte allocation/deallocation cannot happen.
    return ((max_allocation_size_bytes == 0 || size <= max_allocation_size_bytes) && size >= min_allocation_size_bytes);
}

bool canEnqueueBackgroundTask()
{
    auto limit = background_memory_tracker.getSoftLimit();

@@ -67,6 +67,12 @@ private:
    /// To randomly sample allocations and deallocations in trace_log.
    double sample_probability = 0;

    /// Randomly sample only allocations larger than or equal to this size.
    UInt64 min_allocation_size_bytes = 0;

    /// Randomly sample only allocations smaller than or equal to this size.
    UInt64 max_allocation_size_bytes = 0;

    /// Singly-linked list. All information will be passed to subsequent memory trackers also (it allows to implement trackers hierarchy).
    /// In terms of tree nodes it is the list of parents. Lifetime of these trackers should "include" lifetime of current tracker.
    std::atomic<MemoryTracker *> parent {};

@@ -88,6 +94,8 @@ private:

    void setOrRaiseProfilerLimit(Int64 value);

    bool isSizeOkForSampling(UInt64 size) const;

    /// allocImpl(...) and free(...) should not be used directly
    friend struct CurrentMemoryTracker;
    void allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryTracker * query_tracker = nullptr);

@@ -166,6 +174,16 @@ public:
        sample_probability = value;
    }

    void setSampleMinAllocationSize(UInt64 value)
    {
        min_allocation_size_bytes = value;
    }

    void setSampleMaxAllocationSize(UInt64 value)
    {
        max_allocation_size_bytes = value;
    }

    void setProfilerStep(Int64 value)
    {
        profiler_step = value;
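The size filter is a simple window check: a zero max means "no upper bound", and the lower bound can be compared unconditionally because zero-byte allocations never reach the tracker. A standalone sketch of the predicate plus the sampling decision (simplified; the real code uses thread_local_rng and sends a MemorySample to trace_log):

#include <cstdint>
#include <cstdio>
#include <random>

struct SamplingWindow
{
    uint64_t min_allocation_size_bytes = 0;
    uint64_t max_allocation_size_bytes = 0;  // 0 means "no upper bound"

    bool isSizeOk(uint64_t size) const
    {
        return (max_allocation_size_bytes == 0 || size <= max_allocation_size_bytes)
            && size >= min_allocation_size_bytes;
    }
};

int main()
{
    SamplingWindow window{.min_allocation_size_bytes = 1024, .max_allocation_size_bytes = 1 << 20};
    std::mt19937 rng{std::random_device{}()};
    std::bernoulli_distribution sample(0.01);  // sample_probability

    uint64_t size = 4096;
    // An allocation is reported only if it falls inside the window AND wins the coin flip.
    if (window.isSizeOk(size) && sample(rng))
        std::printf("would send MemorySample for %llu bytes\n", static_cast<unsigned long long>(size));
}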
src/Common/SettingSource.h (new file, 43 lines)
@@ -0,0 +1,43 @@
#pragma once

#include <string_view>

namespace DB
{
    enum SettingSource
    {
        /// Query or session change:
        /// SET <setting> = <value>
        /// SELECT ... SETTINGS [<setting> = <value>]
        QUERY,

        /// Profile creation or altering:
        /// CREATE SETTINGS PROFILE ... SETTINGS [<setting> = <value>]
        /// ALTER SETTINGS PROFILE ... SETTINGS [<setting> = <value>]
        PROFILE,

        /// Role creation or altering:
        /// CREATE ROLE ... SETTINGS [<setting> = <value>]
        /// ALTER ROLE ... SETTINGS [<setting> = <value>]
        ROLE,

        /// User creation or altering:
        /// CREATE USER ... SETTINGS [<setting> = <value>]
        /// ALTER USER ... SETTINGS [<setting> = <value>]
        USER,

        COUNT,
    };

    constexpr std::string_view toString(SettingSource source)
    {
        switch (source)
        {
            case SettingSource::QUERY: return "query";
            case SettingSource::PROFILE: return "profile";
            case SettingSource::USER: return "user";
            case SettingSource::ROLE: return "role";
            default: return "unknown";
        }
    }
}
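Since toString is constexpr, the enum-to-name mapping can be checked at compile time. A small usage sketch (assuming the header above is available on the include path as <Common/SettingSource.h>):

#include <cstdio>
#include <string_view>
#include <Common/SettingSource.h>

// Verified at compile time thanks to constexpr.
static_assert(DB::toString(DB::SettingSource::ROLE) == "role");

int main()
{
    std::string_view name = DB::toString(DB::SettingSource::PROFILE);
    std::printf("setting came from: %.*s\n", static_cast<int>(name.size()), name.data());
}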
@@ -1,7 +1,6 @@
#if defined(__ELF__) && !defined(OS_FREEBSD)

#include <Common/SymbolIndex.h>
#include <base/hex.h>

#include <algorithm>
#include <optional>

@@ -62,9 +61,11 @@ Otherwise you will get only exported symbols from program headers.
#endif

#define __msan_unpoison_string(X) // NOLINT
#define __msan_unpoison(X, Y) // NOLINT
#if defined(ch_has_feature)
#    if ch_has_feature(memory_sanitizer)
#        undef __msan_unpoison_string
#        undef __msan_unpoison
#        include <sanitizer/msan_interface.h>
#    endif
#endif

@@ -98,10 +99,13 @@ void collectSymbolsFromProgramHeaders(
    /* Iterate over all headers of the current shared lib
     * (first call is for the executable itself)
     */
    __msan_unpoison(&info->dlpi_phnum, sizeof(info->dlpi_phnum));
    __msan_unpoison(&info->dlpi_phdr, sizeof(info->dlpi_phdr));
    for (size_t header_index = 0; header_index < info->dlpi_phnum; ++header_index)
    {
        /* Further processing is only needed if the dynamic section is reached
         */
        __msan_unpoison(&info->dlpi_phdr[header_index], sizeof(info->dlpi_phdr[header_index]));
        if (info->dlpi_phdr[header_index].p_type != PT_DYNAMIC)
            continue;

@@ -109,6 +113,7 @@ void collectSymbolsFromProgramHeaders(
         * Its address is the shared lib's address + the virtual address
         */
        const ElfW(Dyn) * dyn_begin = reinterpret_cast<const ElfW(Dyn) *>(info->dlpi_addr + info->dlpi_phdr[header_index].p_vaddr);
        __msan_unpoison(&dyn_begin, sizeof(dyn_begin));

        /// For unknown reasons, addresses are sometimes relative, sometimes absolute.
        auto correct_address = [](ElfW(Addr) base, ElfW(Addr) ptr)

@@ -122,17 +127,16 @@ void collectSymbolsFromProgramHeaders(
         */

        size_t sym_cnt = 0;
        for (const auto * it = dyn_begin; it->d_tag != DT_NULL; ++it)
        {
        const auto * it = dyn_begin;
        while (true)
        {
            __msan_unpoison(it, sizeof(*it));
            if (it->d_tag == DT_NULL)
                break;

            ElfW(Addr) base_address = correct_address(info->dlpi_addr, it->d_un.d_ptr);

            // TODO: this branch leads to an invalid address of the hash table. Needs further investigation.
            // if (it->d_tag == DT_HASH)
            // {
            //     const ElfW(Word) * hash = reinterpret_cast<const ElfW(Word) *>(base_address);
            //     sym_cnt = hash[1];
            //     break;
            // }
            if (it->d_tag == DT_GNU_HASH)
            {
                /// This code is based on Musl-libc.

@@ -142,8 +146,14 @@ void collectSymbolsFromProgramHeaders(

                const ElfW(Word) * hash = reinterpret_cast<const ElfW(Word) *>(base_address);

                __msan_unpoison(&hash[0], sizeof(*hash));
                __msan_unpoison(&hash[1], sizeof(*hash));
                __msan_unpoison(&hash[2], sizeof(*hash));

                buckets = hash + 4 + (hash[2] * sizeof(size_t) / 4);

                __msan_unpoison(buckets, hash[0] * sizeof(buckets[0]));

                for (ElfW(Word) i = 0; i < hash[0]; ++i)
                    if (buckets[i] > sym_cnt)
                        sym_cnt = buckets[i];

@@ -152,6 +162,7 @@ void collectSymbolsFromProgramHeaders(
                {
                    sym_cnt -= hash[1];
                    hashval = buckets + hash[0] + sym_cnt;
                    __msan_unpoison(&hashval, sizeof(hashval));
                    do
                    {
                        ++sym_cnt;

@@ -161,6 +172,9 @@ void collectSymbolsFromProgramHeaders(

                break;
            }

            ++it;
        }
    }

    if (!sym_cnt)

@@ -190,6 +204,8 @@ void collectSymbolsFromProgramHeaders(
        /* Get the pointer to the first entry of the symbol table */
        const ElfW(Sym) * elf_sym = reinterpret_cast<const ElfW(Sym) *>(base_address);

        __msan_unpoison(elf_sym, sym_cnt * sizeof(*elf_sym));

        /* Iterate over the symbol table */
        for (ElfW(Word) sym_index = 0; sym_index < ElfW(Word)(sym_cnt); ++sym_index)
        {

@@ -197,6 +213,7 @@ void collectSymbolsFromProgramHeaders(
             * This is located at the address of st_name relative to the beginning of the string table.
             */
            const char * sym_name = &strtab[elf_sym[sym_index].st_name];
            __msan_unpoison_string(sym_name);

            if (!sym_name)
                continue;

@@ -223,13 +240,18 @@ void collectSymbolsFromProgramHeaders(
#if !defined USE_MUSL
String getBuildIDFromProgramHeaders(dl_phdr_info * info)
{
    __msan_unpoison(&info->dlpi_phnum, sizeof(info->dlpi_phnum));
    __msan_unpoison(&info->dlpi_phdr, sizeof(info->dlpi_phdr));
    for (size_t header_index = 0; header_index < info->dlpi_phnum; ++header_index)
    {
        const ElfPhdr & phdr = info->dlpi_phdr[header_index];
        __msan_unpoison(&phdr, sizeof(phdr));
        if (phdr.p_type != PT_NOTE)
            continue;

        return Elf::getBuildID(reinterpret_cast<const char *>(info->dlpi_addr + phdr.p_vaddr), phdr.p_memsz);
        std::string_view view(reinterpret_cast<const char *>(info->dlpi_addr + phdr.p_vaddr), phdr.p_memsz);
        __msan_unpoison(view.data(), view.size());
        return Elf::getBuildID(view.data(), view.size());
    }
    return {};
}

@@ -318,6 +340,7 @@ void collectSymbolsFromELF(
    build_id = our_build_id;
#else
    /// MSan does not know that the program segments in memory are initialized.
    __msan_unpoison(info, sizeof(*info));
    __msan_unpoison_string(info->dlpi_name);

    object_name = info->dlpi_name;
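The rewritten loop walks the ELF dynamic section entry by entry, unpoisoning each entry before reading its tag and stopping at the DT_NULL terminator. A standalone mock of that walk shape (plain struct instead of ElfW(Dyn), and no sanitizer calls):

#include <cstdio>

// Mock dynamic-section entry; the real code uses ElfW(Dyn) and __msan_unpoison.
struct MockDyn { int d_tag; };
constexpr int MOCK_DT_NULL = 0;
constexpr int MOCK_DT_GNU_HASH = 0x6ffffef5;

int main()
{
    MockDyn dyn_table[] = {{5}, {6}, {MOCK_DT_GNU_HASH}, {MOCK_DT_NULL}};

    const auto * it = dyn_table;
    while (true)
    {
        // In the instrumented build each entry is unpoisoned here, before the read.
        if (it->d_tag == MOCK_DT_NULL)
            break;  // terminator reached

        if (it->d_tag == MOCK_DT_GNU_HASH)
            std::printf("found DT_GNU_HASH entry\n");

        ++it;
    }
}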
@@ -31,30 +31,25 @@ namespace ErrorCodes
    extern const int TIMEOUT_EXCEEDED;
}

namespace
{
    constexpr size_t DBMS_SYSTEM_LOG_QUEUE_SIZE = 1048576;
}

ISystemLog::~ISystemLog() = default;


template <typename LogElement>
SystemLogQueue<LogElement>::SystemLogQueue(
    const String & table_name_,
    size_t flush_interval_milliseconds_,
    bool turn_off_logger_)
    : log(&Poco::Logger::get("SystemLogQueue (" + table_name_ + ")"))
    , flush_interval_milliseconds(flush_interval_milliseconds_)
SystemLogQueue<LogElement>::SystemLogQueue(const SystemLogQueueSettings & settings_)
    : log(&Poco::Logger::get("SystemLogQueue (" + settings_.database + "." + settings_.table + ")"))
    , settings(settings_)
{
    if (turn_off_logger_)
    queue.reserve(settings.reserved_size_rows);

    if (settings.turn_off_logger)
        log->setLevel(0);
}

static thread_local bool recursive_push_call = false;

template <typename LogElement>
void SystemLogQueue<LogElement>::push(const LogElement & element)
void SystemLogQueue<LogElement>::push(LogElement && element)
{
    /// It is possible that the method will be called recursively.
    /// Better to drop these events to avoid complications.

@@ -70,7 +65,7 @@ void SystemLogQueue<LogElement>::push(const LogElement & element)
    MemoryTrackerBlockerInThread temporarily_disable_memory_tracker;

    /// Should not log messages under mutex.
    bool queue_is_half_full = false;
    bool buffer_size_rows_flush_threshold_exceeded = false;

    {
        std::unique_lock lock(mutex);

@@ -78,9 +73,9 @@ void SystemLogQueue<LogElement>::push(const LogElement & element)
        if (is_shutdown)
            return;

        if (queue.size() == DBMS_SYSTEM_LOG_QUEUE_SIZE / 2)
        if (queue.size() == settings.buffer_size_rows_flush_threshold)
        {
            queue_is_half_full = true;
            buffer_size_rows_flush_threshold_exceeded = true;

            // The queue is more than half full, time to flush.
            // We only check for strict equality, because messages are added one

@@ -94,7 +89,7 @@ void SystemLogQueue<LogElement>::push(const LogElement & element)
            flush_event.notify_all();
        }

        if (queue.size() >= DBMS_SYSTEM_LOG_QUEUE_SIZE)
        if (queue.size() >= settings.max_size_rows)
        {
            // Ignore all further entries until the queue is flushed.
            // Log a message about that. Don't spam it -- this might be especially

@@ -108,27 +103,28 @@ void SystemLogQueue<LogElement>::push(const LogElement & element)
            // TextLog sets its logger level to 0, so this log is a noop and
            // there is no recursive logging.
            lock.unlock();
            LOG_ERROR(log, "Queue is full for system log '{}' at {}", demangle(typeid(*this).name()), queue_front_index);
            LOG_ERROR(log, "Queue is full for system log '{}' at {}. max_size_rows {}",
                demangle(typeid(*this).name()),
                queue_front_index,
                settings.max_size_rows);
        }

        return;
    }

    queue.push_back(element);
    queue.push_back(std::move(element));
    }

    if (queue_is_half_full)
        LOG_INFO(log, "Queue is half full for system log '{}'.", demangle(typeid(*this).name()));
    if (buffer_size_rows_flush_threshold_exceeded)
        LOG_INFO(log, "Queue is half full for system log '{}'. buffer_size_rows_flush_threshold {}",
            demangle(typeid(*this).name()), settings.buffer_size_rows_flush_threshold);
}

template <typename LogElement>
void SystemLogBase<LogElement>::flush(bool force)
void SystemLogQueue<LogElement>::handleCrash()
{
    uint64_t this_thread_requested_offset = queue->notifyFlush(force);
    if (this_thread_requested_offset == uint64_t(-1))
        return;

    queue->waitFlush(this_thread_requested_offset);
    if (settings.notify_flush_on_crash)
        notifyFlush(/* force */ true);
}

template <typename LogElement>

@@ -185,11 +181,13 @@ void SystemLogQueue<LogElement>::confirm(uint64_t to_flush_end)
}

template <typename LogElement>
typename SystemLogQueue<LogElement>::Index SystemLogQueue<LogElement>::pop(std::vector<LogElement> & output, bool & should_prepare_tables_anyway, bool & exit_this_thread)
typename SystemLogQueue<LogElement>::Index SystemLogQueue<LogElement>::pop(std::vector<LogElement> & output,
    bool & should_prepare_tables_anyway,
    bool & exit_this_thread)
{
    std::unique_lock lock(mutex);
    flush_event.wait_for(lock,
        std::chrono::milliseconds(flush_interval_milliseconds),
        std::chrono::milliseconds(settings.flush_interval_milliseconds),
        [&] ()
        {
            return requested_flush_up_to > flushed_up_to || is_shutdown || is_force_prepare_tables;

@@ -219,13 +217,28 @@ void SystemLogQueue<LogElement>::shutdown()

template <typename LogElement>
SystemLogBase<LogElement>::SystemLogBase(
    const String & table_name_,
    size_t flush_interval_milliseconds_,
    const SystemLogQueueSettings & settings_,
    std::shared_ptr<SystemLogQueue<LogElement>> queue_)
    : queue(queue_ ? queue_ : std::make_shared<SystemLogQueue<LogElement>>(table_name_, flush_interval_milliseconds_))
    : queue(queue_ ? queue_ : std::make_shared<SystemLogQueue<LogElement>>(settings_))
{
}

template <typename LogElement>
void SystemLogBase<LogElement>::flush(bool force)
{
    uint64_t this_thread_requested_offset = queue->notifyFlush(force);
    if (this_thread_requested_offset == uint64_t(-1))
        return;

    queue->waitFlush(this_thread_requested_offset);
}

template <typename LogElement>
void SystemLogBase<LogElement>::handleCrash()
{
    queue->handleCrash();
}

template <typename LogElement>
void SystemLogBase<LogElement>::startup()
{

@@ -234,9 +247,9 @@ void SystemLogBase<LogElement>::startup()
}

template <typename LogElement>
void SystemLogBase<LogElement>::add(const LogElement & element)
void SystemLogBase<LogElement>::add(LogElement element)
{
    queue->push(element);
    queue->push(std::move(element));
}

template <typename LogElement>

@@ -62,6 +62,9 @@ public:

    virtual void stopFlushThread() = 0;

    /// Handles crash: flushes the log without blocking if notify_flush_on_crash is set.
    virtual void handleCrash() = 0;

    virtual ~ISystemLog();

    virtual void savingThreadFunction() = 0;

@@ -73,26 +76,38 @@ protected:
    bool is_shutdown = false;
};

struct SystemLogQueueSettings
{
    String database;
    String table;
    size_t reserved_size_rows;
    size_t max_size_rows;
    size_t buffer_size_rows_flush_threshold;
    size_t flush_interval_milliseconds;
    bool notify_flush_on_crash;
    bool turn_off_logger;
};

template <typename LogElement>
class SystemLogQueue
{
    using Index = uint64_t;

public:
    SystemLogQueue(
        const String & table_name_,
        size_t flush_interval_milliseconds_,
        bool turn_off_logger_ = false);
    SystemLogQueue(const SystemLogQueueSettings & settings_);

    void shutdown();

    // producer methods
    void push(const LogElement & element);
    void push(LogElement && element);
    Index notifyFlush(bool should_prepare_tables_anyway);
    void waitFlush(Index expected_flushed_up_to);

    /// Handles crash: flushes the log without blocking if notify_flush_on_crash is set.
    void handleCrash();

    // consumer methods
    Index pop(std::vector<LogElement> & output, bool & should_prepare_tables_anyway, bool & exit_this_thread);
    void confirm(Index to_flush_end);

private:

@@ -120,7 +135,8 @@ private:
    bool is_shutdown = false;

    std::condition_variable flush_event;
    const size_t flush_interval_milliseconds;

    const SystemLogQueueSettings settings;
};


@@ -131,8 +147,7 @@ public:
    using Self = SystemLogBase;

    SystemLogBase(
        const String & table_name_,
        size_t flush_interval_milliseconds_,
        const SystemLogQueueSettings & settings_,
        std::shared_ptr<SystemLogQueue<LogElement>> queue_ = nullptr);

    void startup() override;

@@ -140,17 +155,25 @@ public:
    /** Append a record to the log.
      * Writing to the table will be done asynchronously, and in case of failure the record could be lost.
      */
    void add(const LogElement & element);
    void add(LogElement element);

    /// Flush data in the buffer to disk. Block the thread until the data is stored on disk.
    void flush(bool force) override;

    /// Handles crash: flushes the log without blocking if notify_flush_on_crash is set.
    void handleCrash() override;

    /// Non-blocking flush of the data in the buffer to disk.
    void notifyFlush(bool force);

    String getName() const override { return LogElement::name(); }

    static const char * getDefaultOrderBy() { return "event_date, event_time"; }
    static consteval size_t getDefaultMaxSize() { return 1048576; }
    static consteval size_t getDefaultReservedSize() { return 8192; }
    static consteval size_t getDefaultFlushIntervalMilliseconds() { return 7500; }
    static consteval bool shouldNotifyFlushOnCrash() { return false; }
    static consteval bool shouldTurnOffLogger() { return false; }

protected:
    std::shared_ptr<SystemLogQueue<LogElement>> queue;
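The consteval defaults above suggest how a concrete log assembles its queue settings; a hedged sketch of that wiring (the struct is mirrored locally for a self-contained example, and the database/table values are hypothetical):

#include <string>

// Local mirror of the SystemLogQueueSettings struct above, for illustration only.
struct SystemLogQueueSettings
{
    std::string database;
    std::string table;
    size_t reserved_size_rows;
    size_t max_size_rows;
    size_t buffer_size_rows_flush_threshold;
    size_t flush_interval_milliseconds;
    bool notify_flush_on_crash;
    bool turn_off_logger;
};

int main()
{
    SystemLogQueueSettings settings
    {
        .database = "system",                         // hypothetical placement
        .table = "query_log",                         // hypothetical table
        .reserved_size_rows = 8192,                   // getDefaultReservedSize()
        .max_size_rows = 1048576,                     // getDefaultMaxSize()
        .buffer_size_rows_flush_threshold = 1048576 / 2,
        .flush_interval_milliseconds = 7500,          // getDefaultFlushIntervalMilliseconds()
        .notify_flush_on_crash = false,               // shouldNotifyFlushOnCrash()
        .turn_off_logger = false,                     // shouldTurnOffLogger()
    };
    return settings.max_size_rows > settings.reserved_size_rows ? 0 : 1;
}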
@@ -136,6 +136,8 @@ using ResponseCallback = std::function<void(const Response &)>;
struct Response
{
    Error error = Error::ZOK;
    int64_t zxid = 0;

    Response() = default;
    Response(const Response &) = default;
    Response & operator=(const Response &) = default;

@@ -490,8 +492,6 @@ public:
    /// Useful to check owner of ephemeral node.
    virtual int64_t getSessionID() const = 0;

    virtual Poco::Net::SocketAddress getConnectedAddress() const = 0;

    /// If the method throws an exception, callbacks won't be called.
    ///
    /// After the method is executed successfully, you must wait for callbacks

@@ -564,6 +564,10 @@ public:

    virtual const DB::KeeperFeatureFlags * getKeeperFeatureFlags() const { return nullptr; }

    /// A ZooKeeper session can have an optional deadline set on it.
    /// After it has been reached, the session needs to be finalized.
    virtual bool hasReachedDeadline() const = 0;

    /// Expire session and finish all pending requests
    virtual void finalize(const String & reason) = 0;
};
@@ -195,6 +195,7 @@ struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest
std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
    CreateResponse response;
    response.zxid = zxid;
    Undo undo;

    if (container.contains(path))

@@ -257,9 +258,10 @@ std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Contai
    return { std::make_shared<CreateResponse>(response), undo };
}

std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Container & container, int64_t) const
std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
    RemoveResponse response;
    response.zxid = zxid;
    Undo undo;

    auto it = container.find(path);

@@ -296,9 +298,10 @@ std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Contai
    return { std::make_shared<RemoveResponse>(response), undo };
}

std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Container & container, int64_t) const
std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
    ExistsResponse response;
    response.zxid = zxid;

    auto it = container.find(path);
    if (it != container.end())

@@ -314,9 +317,10 @@ std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Contai
    return { std::make_shared<ExistsResponse>(response), {} };
}

std::pair<ResponsePtr, Undo> TestKeeperGetRequest::process(TestKeeper::Container & container, int64_t) const
std::pair<ResponsePtr, Undo> TestKeeperGetRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
    GetResponse response;
    response.zxid = zxid;

    auto it = container.find(path);
    if (it == container.end())

@@ -336,6 +340,7 @@ std::pair<ResponsePtr, Undo> TestKeeperGetRequest::process(TestKeeper::Container
std::pair<ResponsePtr, Undo> TestKeeperSetRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
    SetResponse response;
    response.zxid = zxid;
    Undo undo;

    auto it = container.find(path);

@@ -370,9 +375,10 @@ std::pair<ResponsePtr, Undo> TestKeeperSetRequest::process(TestKeeper::Container
    return { std::make_shared<SetResponse>(response), undo };
}

std::pair<ResponsePtr, Undo> TestKeeperListRequest::process(TestKeeper::Container & container, int64_t) const
std::pair<ResponsePtr, Undo> TestKeeperListRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
    ListResponse response;
    response.zxid = zxid;

    auto it = container.find(path);
    if (it == container.end())

@@ -414,9 +420,10 @@ std::pair<ResponsePtr, Undo> TestKeeperListRequest::process(TestKeeper::Containe
    return { std::make_shared<ListResponse>(response), {} };
}

std::pair<ResponsePtr, Undo> TestKeeperCheckRequest::process(TestKeeper::Container & container, int64_t) const
std::pair<ResponsePtr, Undo> TestKeeperCheckRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
    CheckResponse response;
    response.zxid = zxid;
    auto it = container.find(path);
    if (it == container.end())
    {

@@ -434,10 +441,11 @@ std::pair<ResponsePtr, Undo> TestKeeperCheckRequest::process(TestKeeper::Contain
    return { std::make_shared<CheckResponse>(response), {} };
}

std::pair<ResponsePtr, Undo> TestKeeperSyncRequest::process(TestKeeper::Container & /*container*/, int64_t) const
std::pair<ResponsePtr, Undo> TestKeeperSyncRequest::process(TestKeeper::Container & /*container*/, int64_t zxid) const
{
    SyncResponse response;
    response.path = path;
    response.zxid = zxid;

    return { std::make_shared<SyncResponse>(std::move(response)), {} };
}

@@ -456,6 +464,7 @@ std::pair<ResponsePtr, Undo> TestKeeperReconfigRequest::process(TestKeeper::Cont
std::pair<ResponsePtr, Undo> TestKeeperMultiRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
    MultiResponse response;
    response.zxid = zxid;
    response.responses.reserve(requests.size());
    std::vector<Undo> undo_actions;


@@ -39,8 +39,8 @@ public:
    ~TestKeeper() override;

    bool isExpired() const override { return expired; }
    bool hasReachedDeadline() const override { return false; }
    int64_t getSessionID() const override { return 0; }
    Poco::Net::SocketAddress getConnectedAddress() const override { return connected_zk_address; }

@@ -135,8 +135,6 @@ private:

    zkutil::ZooKeeperArgs args;

    Poco::Net::SocketAddress connected_zk_address;

    std::mutex push_request_mutex;
    std::atomic<bool> expired{false};
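The recurring one-line change across these handlers is that every response now records the zxid of the transaction that produced it, instead of only some of them. A standalone mock of the shape (not the real Coordination types):

#include <cstdint>
#include <cstdio>

// Mock of a response after the change: zxid lives in the base struct.
struct MockResponse
{
    int64_t zxid = 0;
};

struct MockGetResponse : MockResponse
{
    // ... payload fields would go here ...
};

MockGetResponse processGet(int64_t zxid)
{
    MockGetResponse response;
    response.zxid = zxid;   // previously omitted in several handlers
    return response;
}

int main()
{
    std::printf("zxid = %lld\n", static_cast<long long>(processGet(7).zxid));
}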
Some files were not shown because too many files have changed in this diff.