Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-26 01:22:04 +00:00

Merge branch 'master' into kssenii-patch-6

This commit is contained in: commit dbc11b83fd

.github/workflows/master.yml (vendored): 2 changed lines
@@ -3643,7 +3643,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/unit_tests_asan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Unit tests (release-clang)
+CHECK_NAME=Unit tests (release)
 REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
 EOF
 - name: Download json reports
.github/workflows/pull_request.yml (vendored): 2 changed lines
@@ -4541,7 +4541,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/unit_tests_asan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Unit tests (release-clang)
+CHECK_NAME=Unit tests (release)
 REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
 EOF
 - name: Download json reports
@@ -23,7 +23,6 @@
 * Added `Overlay` database engine to combine multiple databases into one. Added `Filesystem` database engine to represent a directory in the filesystem as a set of implicitly available tables with auto-detected formats and structures. A new `S3` database engine allows to read-only interact with s3 storage by representing a prefix as a set of tables. A new `HDFS` database engine allows to interact with HDFS storage in the same way. [#48821](https://github.com/ClickHouse/ClickHouse/pull/48821) ([alekseygolub](https://github.com/alekseygolub)).
 * Add support for external disks in Keeper for storing snapshots and logs. [#50098](https://github.com/ClickHouse/ClickHouse/pull/50098) ([Antonio Andelic](https://github.com/antonio2368)).
 * Add support for multi-directory selection (`{}`) globs. [#50559](https://github.com/ClickHouse/ClickHouse/pull/50559) ([Andrey Zvonov](https://github.com/zvonand)).
-* Support ZooKeeper `reconfig` command for ClickHouse Keeper with incremental reconfiguration which can be enabled via `keeper_server.enable_reconfiguration` setting. Support adding servers, removing servers, and changing server priorities. [#49450](https://github.com/ClickHouse/ClickHouse/pull/49450) ([Mike Kot](https://github.com/myrrc)).
 * Kafka connector can fetch Avro schema from schema registry with basic authentication using url-encoded credentials. [#49664](https://github.com/ClickHouse/ClickHouse/pull/49664) ([Ilya Golshtein](https://github.com/ilejn)).
 * Add function `arrayJaccardIndex` which computes the Jaccard similarity between two arrays. [#50076](https://github.com/ClickHouse/ClickHouse/pull/50076) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
 * Add a column `is_obsolete` to `system.settings` and similar tables. Closes [#50819](https://github.com/ClickHouse/ClickHouse/issues/50819). [#50826](https://github.com/ClickHouse/ClickHouse/pull/50826) ([flynn](https://github.com/ucasfl)).
@@ -124,6 +123,7 @@
 * (experimental MaterializedMySQL) Now double quoted comments are supported in MaterializedMySQL. [#52355](https://github.com/ClickHouse/ClickHouse/pull/52355) ([Val Doroshchuk](https://github.com/valbok)).
 * Upgrade Intel QPL from v1.1.0 to v1.2.0 2. Upgrade Intel accel-config from v3.5 to v4.0 3. Fixed issue that Device IOTLB miss has big perf. impact for IAA accelerators. [#52180](https://github.com/ClickHouse/ClickHouse/pull/52180) ([jasperzhu](https://github.com/jinjunzh)).
 * The `session_timezone` setting (new in version 23.6) is demoted to experimental. [#52445](https://github.com/ClickHouse/ClickHouse/pull/52445) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Support ZooKeeper `reconfig` command for ClickHouse Keeper with incremental reconfiguration which can be enabled via `keeper_server.enable_reconfiguration` setting. Support adding servers, removing servers, and changing server priorities. [#49450](https://github.com/ClickHouse/ClickHouse/pull/49450) ([Mike Kot](https://github.com/myrrc)). It is suspected that this feature is incomplete.
 
 #### Build/Testing/Packaging Improvement
 * Add experimental ClickHouse builds for Linux RISC-V 64 to CI. [#31398](https://github.com/ClickHouse/ClickHouse/pull/31398) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@@ -8,6 +8,7 @@
 #include <functional>
 #include <iosfwd>
 
+#include <base/defines.h>
 #include <base/types.h>
 #include <base/unaligned.h>
 
@@ -274,6 +275,8 @@ struct CRC32Hash
     if (size == 0)
         return 0;
 
+    chassert(pos);
+
     if (size < 8)
     {
         return static_cast<unsigned>(hashLessThan8(x.data, x.size));
@@ -115,8 +115,15 @@
 /// because SIGABRT is easier to debug than SIGTRAP (the second one makes gdb crazy)
 #if !defined(chassert)
     #if defined(ABORT_ON_LOGICAL_ERROR)
+        // clang-format off
+        #include <base/types.h>
+        namespace DB
+        {
+            void abortOnFailedAssertion(const String & description);
+        }
         #define chassert(x) static_cast<bool>(x) ? void(0) : ::DB::abortOnFailedAssertion(#x)
         #define UNREACHABLE() abort()
+        // clang-format on
    #else
        /// Here sizeof() trick is used to suppress unused warning for result,
        /// since simple "(void)x" will evaluate the expression, while
|
@ -57,7 +57,7 @@ public:
|
|||||||
URI();
|
URI();
|
||||||
/// Creates an empty URI.
|
/// Creates an empty URI.
|
||||||
|
|
||||||
explicit URI(const std::string & uri, bool disable_url_encoding = false);
|
explicit URI(const std::string & uri, bool enable_url_encoding = true);
|
||||||
/// Parses an URI from the given string. Throws a
|
/// Parses an URI from the given string. Throws a
|
||||||
/// SyntaxException if the uri is not valid.
|
/// SyntaxException if the uri is not valid.
|
||||||
|
|
||||||
@ -362,7 +362,7 @@ private:
|
|||||||
std::string _query;
|
std::string _query;
|
||||||
std::string _fragment;
|
std::string _fragment;
|
||||||
|
|
||||||
bool _disable_url_encoding = false;
|
bool _enable_url_encoding = true;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
@@ -36,8 +36,8 @@ URI::URI():
 }
 
 
-URI::URI(const std::string& uri, bool decode_and_encode_path):
-    _port(0), _disable_url_encoding(decode_and_encode_path)
+URI::URI(const std::string& uri, bool enable_url_encoding):
+    _port(0), _enable_url_encoding(enable_url_encoding)
 {
     parse(uri);
 }
@@ -108,7 +108,7 @@ URI::URI(const URI& uri):
     _path(uri._path),
     _query(uri._query),
     _fragment(uri._fragment),
-    _disable_url_encoding(uri._disable_url_encoding)
+    _enable_url_encoding(uri._enable_url_encoding)
 {
 }
 
@@ -121,7 +121,7 @@ URI::URI(const URI& baseURI, const std::string& relativeURI):
     _path(baseURI._path),
     _query(baseURI._query),
     _fragment(baseURI._fragment),
-    _disable_url_encoding(baseURI._disable_url_encoding)
+    _enable_url_encoding(baseURI._enable_url_encoding)
 {
     resolve(relativeURI);
 }
@@ -153,7 +153,7 @@ URI& URI::operator = (const URI& uri)
         _path = uri._path;
         _query = uri._query;
         _fragment = uri._fragment;
-        _disable_url_encoding = uri._disable_url_encoding;
+        _enable_url_encoding = uri._enable_url_encoding;
     }
     return *this;
 }
@@ -184,7 +184,7 @@ void URI::swap(URI& uri)
     std::swap(_path, uri._path);
     std::swap(_query, uri._query);
     std::swap(_fragment, uri._fragment);
-    std::swap(_disable_url_encoding, uri._disable_url_encoding);
+    std::swap(_enable_url_encoding, uri._enable_url_encoding);
 }
 
 
@@ -687,18 +687,18 @@ void URI::decode(const std::string& str, std::string& decodedStr, bool plusAsSpace)
 
 void URI::encodePath(std::string & encodedStr) const
 {
-    if (_disable_url_encoding)
-        encodedStr = _path;
-    else
+    if (_enable_url_encoding)
         encode(_path, RESERVED_PATH, encodedStr);
+    else
+        encodedStr = _path;
 }
 
 void URI::decodePath(const std::string & encodedStr)
 {
-    if (_disable_url_encoding)
-        _path = encodedStr;
-    else
+    if (_enable_url_encoding)
         decode(encodedStr, _path);
+    else
+        _path = encodedStr;
 }
 
 bool URI::isWellKnownPort() const
@@ -161,5 +161,9 @@
     "docker/test/sqllogic": {
         "name": "clickhouse/sqllogic-test",
         "dependent": []
+    },
+    "docker/test/integration/nginx_dav": {
+        "name": "clickhouse/nginx-dav",
+        "dependent": []
     }
 }
@@ -32,7 +32,7 @@ ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
 RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
 ENV LC_ALL en_US.UTF-8
 
-ENV TZ=Europe/Moscow
+ENV TZ=Europe/Amsterdam
 RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
 
 CMD sleep 1
@@ -32,7 +32,7 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \
     && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
     && rm -rf /tmp/clickhouse-odbc-tmp
 
-ENV TZ=Europe/Moscow
+ENV TZ=Europe/Amsterdam
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 
 ENV COMMIT_SHA=''
@@ -8,7 +8,7 @@ ARG apt_archive="http://archive.ubuntu.com"
 RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
 
 ENV LANG=C.UTF-8
-ENV TZ=Europe/Moscow
+ENV TZ=Europe/Amsterdam
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 
 RUN apt-get update \
docker/test/integration/nginx_dav/Dockerfile (new file): 6 lines

@@ -0,0 +1,6 @@
+FROM nginx:alpine-slim
+
+COPY default.conf /etc/nginx/conf.d/
+
+RUN mkdir /usr/share/nginx/files/ \
+    && chown nginx: /usr/share/nginx/files/ -R
docker/test/integration/nginx_dav/default.conf (new file): 25 lines

@@ -0,0 +1,25 @@
+server {
+    listen 80;
+
+    #root /usr/share/nginx/test.com;
+    index index.html index.htm;
+
+    server_name test.com localhost;
+
+    location / {
+        expires max;
+        root /usr/share/nginx/files;
+        client_max_body_size 20m;
+        client_body_temp_path /usr/share/nginx/tmp;
+        dav_methods PUT; # Allowed methods, only PUT is necessary
+
+        create_full_put_path on; # nginx automatically creates nested directories
+        dav_access user:rw group:r all:r; # access permissions for files
+
+        limit_except GET {
+            allow all;
+        }
+    }
+
+    error_page 405 =200 $uri;
+}
@@ -1,16 +1,15 @@
 version: '2.3'
 services:
   meili1:
     image: getmeili/meilisearch:v0.27.0
     restart: always
     ports:
       - ${MEILI_EXTERNAL_PORT:-7700}:${MEILI_INTERNAL_PORT:-7700}
 
   meili_secure:
     image: getmeili/meilisearch:v0.27.0
     restart: always
     ports:
       - ${MEILI_SECURE_EXTERNAL_PORT:-7700}:${MEILI_SECURE_INTERNAL_PORT:-7700}
     environment:
       MEILI_MASTER_KEY: "password"
-
@@ -9,10 +9,10 @@ services:
       DATADIR: /mysql/
     expose:
       - ${MYSQL_PORT:-3306}
     command: --server_id=100
       --log-bin='mysql-bin-1.log'
       --default-time-zone='+3:00'
       --gtid-mode="ON"
       --enforce-gtid-consistency
       --log-error-verbosity=3
       --log-error=/mysql/error.log
@@ -21,4 +21,4 @@ services:
     volumes:
       - type: ${MYSQL_LOGS_FS:-tmpfs}
         source: ${MYSQL_LOGS:-}
         target: /mysql/
@@ -9,9 +9,9 @@ services:
       DATADIR: /mysql/
     expose:
       - ${MYSQL8_PORT:-3306}
     command: --server_id=100 --log-bin='mysql-bin-1.log'
       --default_authentication_plugin='mysql_native_password'
       --default-time-zone='+3:00' --gtid-mode="ON"
       --enforce-gtid-consistency
       --log-error-verbosity=3
       --log-error=/mysql/error.log
@@ -20,4 +20,4 @@ services:
     volumes:
       - type: ${MYSQL8_LOGS_FS:-tmpfs}
         source: ${MYSQL8_LOGS:-}
         target: /mysql/
@@ -9,10 +9,10 @@ services:
       DATADIR: /mysql/
     expose:
       - ${MYSQL_CLUSTER_PORT:-3306}
     command: --server_id=100
       --log-bin='mysql-bin-2.log'
       --default-time-zone='+3:00'
       --gtid-mode="ON"
       --enforce-gtid-consistency
       --log-error-verbosity=3
       --log-error=/mysql/2_error.log
@@ -31,10 +31,10 @@ services:
       DATADIR: /mysql/
     expose:
       - ${MYSQL_CLUSTER_PORT:-3306}
     command: --server_id=100
       --log-bin='mysql-bin-3.log'
       --default-time-zone='+3:00'
       --gtid-mode="ON"
       --enforce-gtid-consistency
       --log-error-verbosity=3
       --log-error=/mysql/3_error.log
@@ -53,10 +53,10 @@ services:
       DATADIR: /mysql/
     expose:
       - ${MYSQL_CLUSTER_PORT:-3306}
     command: --server_id=100
       --log-bin='mysql-bin-4.log'
       --default-time-zone='+3:00'
       --gtid-mode="ON"
       --enforce-gtid-consistency
       --log-error-verbosity=3
       --log-error=/mysql/4_error.log
@@ -65,4 +65,4 @@ services:
     volumes:
       - type: ${MYSQL_CLUSTER_LOGS_FS:-tmpfs}
         source: ${MYSQL_CLUSTER_LOGS:-}
         target: /mysql/
@@ -5,7 +5,7 @@ services:
 # Files will be put into /usr/share/nginx/files.
 
   nginx:
-    image: kssenii/nginx-test:1.1
+    image: clickhouse/nginx-dav:${DOCKER_NGINX_DAV_TAG:-latest}
     restart: always
     ports:
       - 80:80
@@ -12,9 +12,9 @@ services:
       timeout: 5s
       retries: 5
     networks:
       default:
         aliases:
           - postgre-sql.local
     environment:
       POSTGRES_HOST_AUTH_METHOD: "trust"
       POSTGRES_PASSWORD: mysecretpassword
@@ -12,7 +12,7 @@ services:
     command: ["zkServer.sh", "start-foreground"]
     entrypoint: /zookeeper-ssl-entrypoint.sh
     volumes:
       - type: bind
         source: /misc/zookeeper-ssl-entrypoint.sh
         target: /zookeeper-ssl-entrypoint.sh
       - type: bind
@@ -37,7 +37,7 @@ services:
     command: ["zkServer.sh", "start-foreground"]
     entrypoint: /zookeeper-ssl-entrypoint.sh
     volumes:
       - type: bind
         source: /misc/zookeeper-ssl-entrypoint.sh
         target: /zookeeper-ssl-entrypoint.sh
       - type: bind
@@ -61,7 +61,7 @@ services:
     command: ["zkServer.sh", "start-foreground"]
     entrypoint: /zookeeper-ssl-entrypoint.sh
     volumes:
       - type: bind
         source: /misc/zookeeper-ssl-entrypoint.sh
         target: /zookeeper-ssl-entrypoint.sh
       - type: bind
@@ -64,15 +64,16 @@ export CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH=/clickhouse-odbc-bridge
 export CLICKHOUSE_LIBRARY_BRIDGE_BINARY_PATH=/clickhouse-library-bridge
 
 export DOCKER_BASE_TAG=${DOCKER_BASE_TAG:=latest}
-export DOCKER_HELPER_TAG=${DOCKER_HELPER_TAG:=latest}
-export DOCKER_MYSQL_GOLANG_CLIENT_TAG=${DOCKER_MYSQL_GOLANG_CLIENT_TAG:=latest}
 export DOCKER_DOTNET_CLIENT_TAG=${DOCKER_DOTNET_CLIENT_TAG:=latest}
+export DOCKER_HELPER_TAG=${DOCKER_HELPER_TAG:=latest}
+export DOCKER_KERBERIZED_HADOOP_TAG=${DOCKER_KERBERIZED_HADOOP_TAG:=latest}
+export DOCKER_KERBEROS_KDC_TAG=${DOCKER_KERBEROS_KDC_TAG:=latest}
+export DOCKER_MYSQL_GOLANG_CLIENT_TAG=${DOCKER_MYSQL_GOLANG_CLIENT_TAG:=latest}
 export DOCKER_MYSQL_JAVA_CLIENT_TAG=${DOCKER_MYSQL_JAVA_CLIENT_TAG:=latest}
 export DOCKER_MYSQL_JS_CLIENT_TAG=${DOCKER_MYSQL_JS_CLIENT_TAG:=latest}
 export DOCKER_MYSQL_PHP_CLIENT_TAG=${DOCKER_MYSQL_PHP_CLIENT_TAG:=latest}
+export DOCKER_NGINX_DAV_TAG=${DOCKER_NGINX_DAV_TAG:=latest}
 export DOCKER_POSTGRESQL_JAVA_CLIENT_TAG=${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:=latest}
-export DOCKER_KERBEROS_KDC_TAG=${DOCKER_KERBEROS_KDC_TAG:=latest}
-export DOCKER_KERBERIZED_HADOOP_TAG=${DOCKER_KERBERIZED_HADOOP_TAG:=latest}
 
 cd /ClickHouse/tests/integration
 exec "$@"
@@ -11,7 +11,7 @@ ARG apt_archive="http://archive.ubuntu.com"
 RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
 
 ENV LANG=C.UTF-8
-ENV TZ=Europe/Moscow
+ENV TZ=Europe/Amsterdam
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 
 RUN apt-get update \
@@ -52,7 +52,7 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \
     && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
     && rm -rf /tmp/clickhouse-odbc-tmp
 
-ENV TZ=Europe/Moscow
+ENV TZ=Europe/Amsterdam
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 
 ENV NUM_TRIES=1
@@ -233,4 +233,10 @@ rowNumberInAllBlocks()
 LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
 [ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv
 
+# But OOMs in stress test are allowed
+if rg 'OOM in dmesg|Signal 9' /test_output/check_status.tsv
+then
+    sed -i 's/failure/success/' /test_output/check_status.tsv
+fi
+
 collect_core_dumps
@@ -231,4 +231,10 @@ rowNumberInAllBlocks()
 LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv || echo "failure\tCannot parse test_results.tsv" > /test_output/check_status.tsv
 [ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv
 
+# But OOMs in stress test are allowed
+if rg 'OOM in dmesg|Signal 9' /test_output/check_status.tsv
+then
+    sed -i 's/failure/success/' /test_output/check_status.tsv
+fi
+
 collect_core_dumps
@@ -106,4 +106,4 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
 ## Storage Settings {#storage-settings}
 
 - [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
-- [disable_url_encoding](/docs/en/operations/settings/settings.md#disable_url_encoding) -allows to disable decoding/encoding path in uri. Disabled by default.
+- [enable_url_encoding](/docs/en/operations/settings/settings.md#enable_url_encoding) - allows to enable/disable decoding/encoding path in uri. Enabled by default.
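For context, a minimal sketch of how the renamed setting is used with `URL`-engine reads; the endpoint and file name below are placeholders for illustration, not part of the commit:

```bash
# Illustrative only: keep a percent-encoded path exactly as written
# by turning the setting off (it is on by default).
clickhouse-client --query "
    SELECT * FROM url('http://example.com/a%20b.tsv', 'TSVWithNames')
    SETTINGS enable_url_encoding = 0"
```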
@@ -1723,6 +1723,34 @@ You can select data from a ClickHouse table and save them into some file in the
 ``` bash
 $ clickhouse-client --query = "SELECT * FROM test.hits FORMAT CapnProto SETTINGS format_schema = 'schema:Message'"
 ```
 
+### Using autogenerated schema {#using-autogenerated-capn-proto-schema}
+
+If you don't have an external CapnProto schema for your data, you can still output/input data in CapnProto format using an autogenerated schema.
+For example:
+
+```sql
+SELECT * FROM test.hits format CapnProto SETTINGS format_capn_proto_use_autogenerated_schema=1
+```
+
+In this case ClickHouse will autogenerate a CapnProto schema according to the table structure using the function [structureToCapnProtoSchema](../sql-reference/functions/other-functions.md#structure_to_capn_proto_schema) and will use this schema to serialize data in CapnProto format.
+
+You can also read a CapnProto file with autogenerated schema (in this case the file must be created using the same schema):
+
+```bash
+$ cat hits.bin | clickhouse-client --query "INSERT INTO test.hits SETTINGS format_capn_proto_use_autogenerated_schema=1 FORMAT CapnProto"
+```
+
+The setting [format_capn_proto_use_autogenerated_schema](../operations/settings/settings-formats.md#format_capn_proto_use_autogenerated_schema) is enabled by default and applies if [format_schema](../operations/settings/settings-formats.md#formatschema-format-schema) is not set.
+
+You can also save the autogenerated schema to a file during input/output using the setting [output_format_schema](../operations/settings/settings-formats.md#outputformatschema-output-format-schema). For example:
+
+```sql
+SELECT * FROM test.hits format CapnProto SETTINGS format_capn_proto_use_autogenerated_schema=1, output_format_schema='path/to/schema/schema.capnp'
+```
+
+In this case the autogenerated CapnProto schema will be saved in the file `path/to/schema/schema.capnp`.
+
 ## Prometheus {#prometheus}
 
 Expose metrics in [Prometheus text-based exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format).
@@ -1861,6 +1889,33 @@ ClickHouse inputs and outputs protobuf messages in the `length-delimited` format
 It means before every message should be written its length as a [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
 See also [how to read/write length-delimited protobuf messages in popular languages](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages).
 
+### Using autogenerated schema {#using-autogenerated-protobuf-schema}
+
+If you don't have an external Protobuf schema for your data, you can still output/input data in Protobuf format using an autogenerated schema.
+For example:
+
+```sql
+SELECT * FROM test.hits format Protobuf SETTINGS format_protobuf_use_autogenerated_schema=1
+```
+
+In this case ClickHouse will autogenerate a Protobuf schema according to the table structure using the function [structureToProtobufSchema](../sql-reference/functions/other-functions.md#structure_to_protobuf_schema) and will use this schema to serialize data in Protobuf format.
+
+You can also read a Protobuf file with autogenerated schema (in this case the file must be created using the same schema):
+
+```bash
+$ cat hits.bin | clickhouse-client --query "INSERT INTO test.hits SETTINGS format_protobuf_use_autogenerated_schema=1 FORMAT Protobuf"
+```
+
+The setting [format_protobuf_use_autogenerated_schema](../operations/settings/settings-formats.md#format_protobuf_use_autogenerated_schema) is enabled by default and applies if [format_schema](../operations/settings/settings-formats.md#formatschema-format-schema) is not set.
+
+You can also save the autogenerated schema to a file during input/output using the setting [output_format_schema](../operations/settings/settings-formats.md#outputformatschema-output-format-schema). For example:
+
+```sql
+SELECT * FROM test.hits format Protobuf SETTINGS format_protobuf_use_autogenerated_schema=1, output_format_schema='path/to/schema/schema.proto'
+```
+
+In this case the autogenerated Protobuf schema will be saved in the file `path/to/schema/schema.proto`.
+
 ## ProtobufSingle {#protobufsingle}
 
 Same as [Protobuf](#protobuf) but for storing/parsing single Protobuf message without length delimiters.
@@ -84,6 +84,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
 - `password` for the file on disk
 - `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
 - `structure_only`: if enabled, allows to only backup or restore the CREATE statements without the data of tables
+- `s3_storage_class`: the storage class used for S3 backup. For example, `STANDARD`
 
 ### Usage examples
 
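A hedged sketch of how the new `s3_storage_class` setting might be passed; the bucket, credentials, and table name are placeholders, and the exact spelling should be checked against the backup docs:

```bash
# Hypothetical invocation; endpoint, credentials, and table are placeholders.
clickhouse-client --query "
    BACKUP TABLE test.hits
    TO S3('https://s3.amazonaws.com/mybucket/backup1', '<key-id>', '<secret>')
    SETTINGS s3_storage_class = 'STANDARD'"
```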
@@ -0,0 +1,26 @@
+---
+slug: /en/operations/optimizing-performance/profile-guided-optimization
+sidebar_position: 54
+sidebar_label: Profile Guided Optimization (PGO)
+---
+import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.md';
+
+# Profile Guided Optimization
+
+Profile-Guided Optimization (PGO) is a compiler optimization technique where a program is optimized based on its runtime profile.
+
+In our tests, PGO helps ClickHouse achieve better performance: we see improvements of up to 15% in QPS on the ClickBench test suite. The more detailed results are available [here](https://pastebin.com/xbue3HMU). The performance benefits depend on your typical workload; you can get better or worse results.
+
+You can read more about PGO in ClickHouse in the corresponding GitHub [issue](https://github.com/ClickHouse/ClickHouse/issues/44567).
+
+## How to build ClickHouse with PGO?
+
+There are two major kinds of PGO: [Instrumentation](https://clang.llvm.org/docs/UsersManual.html#profiling-with-instrumentation) and [Sampling](https://clang.llvm.org/docs/UsersManual.html#using-sampling-profilers) (also known as AutoFDO). This guide describes Instrumentation PGO with ClickHouse.
+
+1. Build ClickHouse in Instrumented mode. In Clang this can be done by passing the `-fprofile-instr-generate` option to `CXXFLAGS`.
+2. Run the instrumented ClickHouse on a sample workload. Here you need to use your usual workload; one approach is to use [ClickBench](https://github.com/ClickHouse/ClickBench) as a sample workload. ClickHouse in instrumentation mode can run slowly, so be ready for that and do not run the instrumented ClickHouse in performance-critical environments.
+3. Recompile ClickHouse once again with the `-fprofile-instr-use` compiler flag and the profiles collected in the previous step.
+
+A more detailed guide on how to apply PGO is in the Clang [documentation](https://clang.llvm.org/docs/UsersManual.html#profile-guided-optimization).
+
+If you are going to collect a sample workload directly from a production environment, we recommend trying to use Sampling PGO.
@@ -2288,6 +2288,8 @@ This section contains the following parameters:
 - `session_timeout_ms` — Maximum timeout for the client session in milliseconds.
 - `operation_timeout_ms` — Maximum timeout for one operation in milliseconds.
 - `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) that is used as the root for znodes used by the ClickHouse server. Optional.
+- `fallback_session_lifetime.min` - If the first zookeeper host resolved by zookeeper_load_balancing strategy is unavailable, limit the lifetime of a zookeeper session to the fallback node. This is done for load-balancing purposes to avoid excessive load on one of zookeeper hosts. This setting sets the minimal duration of the fallback session. Set in seconds. Optional. Default is 3 hours.
+- `fallback_session_lifetime.max` - If the first zookeeper host resolved by zookeeper_load_balancing strategy is unavailable, limit the lifetime of a zookeeper session to the fallback node. This is done for load-balancing purposes to avoid excessive load on one of zookeeper hosts. This setting sets the maximum duration of the fallback session. Set in seconds. Optional. Default is 6 hours.
 - `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional.
 - zookeeper_load_balancing - Specifies the algorithm of ZooKeeper node selection.
   * random - randomly selects one of ZooKeeper nodes.
|
|||||||
Zero means unlimited.
|
Zero means unlimited.
|
||||||
|
|
||||||
Default value: 0.
|
Default value: 0.
|
||||||
|
|
||||||
|
## max_sessions_for_user {#max-sessions-per-user}
|
||||||
|
|
||||||
|
Maximum number of simultaneous sessions per authenticated user to the ClickHouse server.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
``` xml
|
||||||
|
<profiles>
|
||||||
|
<single_session_profile>
|
||||||
|
<max_sessions_for_user>1</max_sessions_for_user>
|
||||||
|
</single_session_profile>
|
||||||
|
<two_sessions_profile>
|
||||||
|
<max_sessions_for_user>2</max_sessions_for_user>
|
||||||
|
</two_sessions_profile>
|
||||||
|
<unlimited_sessions_profile>
|
||||||
|
<max_sessions_for_user>0</max_sessions_for_user>
|
||||||
|
</unlimited_sessions_profile>
|
||||||
|
</profiles>
|
||||||
|
<users>
|
||||||
|
<!-- User Alice can connect to a ClickHouse server no more than once at a time. -->
|
||||||
|
<Alice>
|
||||||
|
<profile>single_session_user</profile>
|
||||||
|
</Alice>
|
||||||
|
<!-- User Bob can use 2 simultaneous sessions. -->
|
||||||
|
<Bob>
|
||||||
|
<profile>two_sessions_profile</profile>
|
||||||
|
</Bob>
|
||||||
|
<!-- User Charles can use arbitrarily many of simultaneous sessions. -->
|
||||||
|
<Charles>
|
||||||
|
<profile>unlimited_sessions_profile</profile>
|
||||||
|
</Charles>
|
||||||
|
</users>
|
||||||
|
```
|
||||||
|
|
||||||
|
Default value: 0 (Infinite count of simultaneous sessions).
|
||||||
|
@ -321,6 +321,10 @@ If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` ar
|
|||||||
|
|
||||||
This parameter is useful when you are using formats that require a schema definition, such as [Cap’n Proto](https://capnproto.org/) or [Protobuf](https://developers.google.com/protocol-buffers/). The value depends on the format.
|
This parameter is useful when you are using formats that require a schema definition, such as [Cap’n Proto](https://capnproto.org/) or [Protobuf](https://developers.google.com/protocol-buffers/). The value depends on the format.
|
||||||
|
|
||||||
|
## output_format_schema {#output-format-schema}
|
||||||
|
|
||||||
|
The path to the file where the automatically generated schema will be saved in [Cap’n Proto](../../interfaces/formats.md#capnproto-capnproto) or [Protobuf](../../interfaces/formats.md#protobuf-protobuf) formats.
|
||||||
|
|
||||||
## output_format_enable_streaming {#output_format_enable_streaming}
|
## output_format_enable_streaming {#output_format_enable_streaming}
|
||||||
|
|
||||||
Enable streaming in output formats that support it.
|
Enable streaming in output formats that support it.
|
||||||
@ -1330,6 +1334,11 @@ When serializing Nullable columns with Google wrappers, serialize default values
|
|||||||
|
|
||||||
Disabled by default.
|
Disabled by default.
|
||||||
|
|
||||||
|
### format_protobuf_use_autogenerated_schema {#format_capn_proto_use_autogenerated_schema}
|
||||||
|
|
||||||
|
Use autogenerated Protobuf schema when [format_schema](#formatschema-format-schema) is not set.
|
||||||
|
The schema is generated from ClickHouse table structure using function [structureToProtobufSchema](../../sql-reference/functions/other-functions.md#structure_to_protobuf_schema)
|
||||||
|
|
||||||
## Avro format settings {#avro-format-settings}
|
## Avro format settings {#avro-format-settings}
|
||||||
|
|
||||||
### input_format_avro_allow_missing_fields {#input_format_avro_allow_missing_fields}
|
### input_format_avro_allow_missing_fields {#input_format_avro_allow_missing_fields}
|
||||||
@ -1626,6 +1635,11 @@ Possible values:
|
|||||||
|
|
||||||
Default value: `'by_values'`.
|
Default value: `'by_values'`.
|
||||||
|
|
||||||
|
### format_capn_proto_use_autogenerated_schema {#format_capn_proto_use_autogenerated_schema}
|
||||||
|
|
||||||
|
Use autogenerated CapnProto schema when [format_schema](#formatschema-format-schema) is not set.
|
||||||
|
The schema is generated from ClickHouse table structure using function [structureToCapnProtoSchema](../../sql-reference/functions/other-functions.md#structure_to_capnproto_schema)
|
||||||
|
|
||||||
## MySQLDump format settings {#musqldump-format-settings}
|
## MySQLDump format settings {#musqldump-format-settings}
|
||||||
|
|
||||||
### input_format_mysql_dump_table_name (#input_format_mysql_dump_table_name)
|
### input_format_mysql_dump_table_name (#input_format_mysql_dump_table_name)
|
||||||
|
@@ -39,7 +39,7 @@ Example:
     <max_threads>8</max_threads>
 </default>
 
-<!-- Settings for quries from the user interface -->
+<!-- Settings for queries from the user interface -->
 <web>
     <max_rows_to_read>1000000000</max_rows_to_read>
     <max_bytes_to_read>100000000000</max_bytes_to_read>
@@ -67,6 +67,8 @@ Example:
     <max_ast_depth>50</max_ast_depth>
     <max_ast_elements>100</max_ast_elements>
 
+    <max_sessions_for_user>4</max_sessions_for_user>
+
     <readonly>1</readonly>
 </web>
 </profiles>
@@ -3468,11 +3468,11 @@ Possible values:
 
 Default value: `0`.
 
-## disable_url_encoding {#disable_url_encoding}
+## enable_url_encoding {#enable_url_encoding}
 
-Allows to disable decoding/encoding path in uri in [URL](../../engines/table-engines/special/url.md) engine tables.
+Allows to enable/disable decoding/encoding path in uri in [URL](../../engines/table-engines/special/url.md) engine tables.
 
-Disabled by default.
+Enabled by default.
 
 ## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
 
@@ -10,6 +10,7 @@ Columns:
 - `event` ([String](../../sql-reference/data-types/string.md)) — Event name.
 - `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of events occurred.
 - `description` ([String](../../sql-reference/data-types/string.md)) — Event description.
+- `name` ([String](../../sql-reference/data-types/string.md)) — Alias for `event`.
 
 You can find all supported events in source file [src/Common/ProfileEvents.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/ProfileEvents.cpp).
 
@@ -10,6 +10,7 @@ Columns:
 - `metric` ([String](../../sql-reference/data-types/string.md)) — Metric name.
 - `value` ([Int64](../../sql-reference/data-types/int-uint.md)) — Metric value.
 - `description` ([String](../../sql-reference/data-types/string.md)) — Metric description.
+- `name` ([String](../../sql-reference/data-types/string.md)) — Alias for `metric`.
 
 You can find all supported metrics in source file [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp).
 
@@ -48,7 +48,7 @@ Columns:
 - `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends it’s `read_rows` value, and the server-initiator of the query summarizes all received and local values. The cache volumes do not affect this value.
 - `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of rows read at all replicas. Each replica sends it’s `read_bytes` value, and the server-initiator of the query summarizes all received and local values. The cache volumes do not affect this value.
 - `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
-- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
+- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written bytes (uncompressed). For other queries, the column value is 0.
 - `result_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in a result of the `SELECT` query, or a number of rows in the `INSERT` query.
 - `result_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — RAM volume in bytes used to store a query result.
 - `memory_usage` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Memory consumption by the query.
|
|||||||
- `rmr <path>` -- Recursively deletes path. Confirmation required
|
- `rmr <path>` -- Recursively deletes path. Confirmation required
|
||||||
- `flwc <command>` -- Executes four-letter-word command
|
- `flwc <command>` -- Executes four-letter-word command
|
||||||
- `help` -- Prints this message
|
- `help` -- Prints this message
|
||||||
|
- `get_stat [path]` -- Returns the node's stat (default `.`)
|
||||||
|
- `find_super_nodes <threshold> [path]` -- Finds nodes with number of children larger than some threshold for the given path (default `.`)
|
||||||
|
- `delete_stable_backups` -- Deletes ClickHouse nodes used for backups that are now inactive
|
||||||
|
- `find_big_family [path] [n]` -- Returns the top n nodes with the biggest family in the subtree (default path = `.` and n = 10)
|
||||||
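A quick sketch of the new commands in an interactive `clickhouse-keeper-client` session; the connection flags and paths are assumptions for illustration, and only the command syntax comes from the list above:

```bash
# Connection flags are illustrative; adjust to your Keeper endpoint.
clickhouse-keeper-client --host localhost --port 9181

# Inside the client prompt:
#   get_stat /clickhouse                   # stat of a single node
#   find_super_nodes 100000 /clickhouse    # nodes with more than 100000 children
#   find_big_family /clickhouse 10         # top 10 biggest subtrees
```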
@@ -140,8 +140,8 @@ Time shifts for multiple days. Some pacific islands changed their timezone offse
 - [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md)
 - [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md)
 - [Functions for working with arrays](../../sql-reference/functions/array-functions.md)
-- [The `date_time_input_format` setting](../../operations/settings/settings.md#settings-date_time_input_format)
-- [The `date_time_output_format` setting](../../operations/settings/settings.md#settings-date_time_output_format)
+- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#settings-date_time_input_format)
+- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#settings-date_time_output_format)
 - [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
 - [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
 - [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
@@ -2552,3 +2552,187 @@ Result:
 
 This function can be used together with [generateRandom](../../sql-reference/table-functions/generate.md) to generate completely random tables.
+
+## structureToCapnProtoSchema {#structure_to_capn_proto_schema}
+
+Converts ClickHouse table structure to CapnProto schema.
+
+**Syntax**
+
+``` sql
+structureToCapnProtoSchema(structure[, root_struct_name])
+```
+
+**Arguments**
+
+- `structure` — Table structure in a format `column1_name column1_type, column2_name column2_type, ...`.
+- `root_struct_name` — Name for root struct in CapnProto schema. Default value: `Message`.
+
+**Returned value**
+
+- CapnProto schema
+
+Type: [String](../../sql-reference/data-types/string.md).
+
+**Examples**
+
+Query:
+
+``` sql
+SELECT structureToCapnProtoSchema('column1 String, column2 UInt32, column3 Array(String)') FORMAT RawBLOB
+```
+
+Result:
+
+``` text
+@0xf96402dd754d0eb7;
+
+struct Message
+{
+    column1 @0 : Data;
+    column2 @1 : UInt32;
+    column3 @2 : List(Data);
+}
+```
+
+Query:
+
+``` sql
+SELECT structureToCapnProtoSchema('column1 Nullable(String), column2 Tuple(element1 UInt32, element2 Array(String)), column3 Map(String, String)') FORMAT RawBLOB
+```
+
+Result:
+
+``` text
+@0xd1c8320fecad2b7f;
+
+struct Message
+{
+    struct Column1
+    {
+        union
+        {
+            value @0 : Data;
+            null @1 : Void;
+        }
+    }
+    column1 @0 : Column1;
+    struct Column2
+    {
+        element1 @0 : UInt32;
+        element2 @1 : List(Data);
+    }
+    column2 @1 : Column2;
+    struct Column3
+    {
+        struct Entry
+        {
+            key @0 : Data;
+            value @1 : Data;
+        }
+        entries @0 : List(Entry);
+    }
+    column3 @2 : Column3;
+}
+```
+
+Query:
+
+``` sql
+SELECT structureToCapnProtoSchema('column1 String, column2 UInt32', 'Root') FORMAT RawBLOB
+```
+
+Result:
+
+``` text
+@0x96ab2d4ab133c6e1;
+
+struct Root
+{
+    column1 @0 : Data;
+    column2 @1 : UInt32;
+}
+```
+
+## structureToProtobufSchema {#structure_to_protobuf_schema}
+
+Converts ClickHouse table structure to Protobuf schema.
+
+**Syntax**
+
+``` sql
+structureToProtobufSchema(structure[, root_message_name])
+```
+
+**Arguments**
+
+- `structure` — Table structure in a format `column1_name column1_type, column2_name column2_type, ...`.
+- `root_message_name` — Name for root message in Protobuf schema. Default value: `Message`.
+
+**Returned value**
+
+- Protobuf schema
+
+Type: [String](../../sql-reference/data-types/string.md).
+
+**Examples**
+
+Query:
+
+``` sql
+SELECT structureToProtobufSchema('column1 String, column2 UInt32, column3 Array(String)') FORMAT RawBLOB
+```
+
+Result:
+
+``` text
+syntax = "proto3";
+
+message Message
+{
+    bytes column1 = 1;
+    uint32 column2 = 2;
+    repeated bytes column3 = 3;
+}
+```
+
+Query:
+
+``` sql
+SELECT structureToProtobufSchema('column1 Nullable(String), column2 Tuple(element1 UInt32, element2 Array(String)), column3 Map(String, String)') FORMAT RawBLOB
+```
+
+Result:
+
+``` text
+syntax = "proto3";
+
+message Message
+{
+    bytes column1 = 1;
+    message Column2
+    {
+        uint32 element1 = 1;
+        repeated bytes element2 = 2;
+    }
+    Column2 column2 = 2;
+    map<string, bytes> column3 = 3;
+}
+```
+
+Query:
+
+``` sql
+SELECT structureToProtobufSchema('column1 String, column2 UInt32', 'Root') FORMAT RawBLOB
+```
+
+Result:
+
+``` text
+syntax = "proto3";
+
+message Root
+{
+    bytes column1 = 1;
+    uint32 column2 = 2;
+}
+```
@@ -56,7 +56,7 @@ Character `|` inside patterns is used to specify failover addresses. They are it
 ## Storage Settings {#storage-settings}
 
 - [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
-- [disable_url_encoding](/docs/en/operations/settings/settings.md#disable_url_encoding) - allows to disable decoding/encoding path in uri. Disabled by default.
+- [enable_url_encoding](/docs/en/operations/settings/settings.md#enable_url_encoding) - allows to enable/disable decoding/encoding path in uri. Enabled by default.
 
 **See Also**
@@ -314,3 +314,40 @@ FORMAT Null;
 When inserting data, ClickHouse calculates the number of partitions in the inserted block. If the number of partitions is greater than `max_partitions_per_insert_block`, ClickHouse raises an exception with the following text:
 
 > "Too many partitions for single INSERT block (more than" + toString(max_parts) + "). The limit is controlled by 'max_partitions_per_insert_block' setting. Large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc)."
+
+## max_sessions_for_user {#max-sessions-per-user}
+
+The maximum number of simultaneous sessions per authenticated user.
+
+Example:
+
+``` xml
+<profiles>
+    <single_session_profile>
+        <max_sessions_for_user>1</max_sessions_for_user>
+    </single_session_profile>
+    <two_sessions_profile>
+        <max_sessions_for_user>2</max_sessions_for_user>
+    </two_sessions_profile>
+    <unlimited_sessions_profile>
+        <max_sessions_for_user>0</max_sessions_for_user>
+    </unlimited_sessions_profile>
+</profiles>
+<users>
+    <!-- User Alice can connect to the ClickHouse server no more than once at a time. -->
+    <Alice>
+        <profile>single_session_profile</profile>
+    </Alice>
+    <!-- User Bob can use 2 simultaneous sessions. -->
+    <Bob>
+        <profile>two_sessions_profile</profile>
+    </Bob>
+    <!-- User Charles can have any number of simultaneous sessions. -->
+    <Charles>
+        <profile>unlimited_sessions_profile</profile>
+    </Charles>
+</users>
+```
+
+Default value: 0 (unlimited number of sessions).
@@ -39,7 +39,7 @@ SET profile = 'web'
     <max_threads>8</max_threads>
 </default>
 
-<!-- Settings for quries from the user interface -->
+<!-- Settings for queries from the user interface -->
 <web>
     <max_rows_to_read>1000000000</max_rows_to_read>
     <max_bytes_to_read>100000000000</max_bytes_to_read>
@@ -67,6 +67,7 @@ SET profile = 'web'
     <max_ast_depth>50</max_ast_depth>
     <max_ast_elements>100</max_ast_elements>
 
+    <max_sessions_for_user>4</max_sessions_for_user>
     <readonly>1</readonly>
 </web>
 </profiles>
@@ -1,5 +1,6 @@
 #include "Commands.h"
 
+#include <queue>
 #include "KeeperClient.h"
 
 
@@ -24,8 +25,18 @@ void LSCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) con
     else
         path = client->cwd;
 
-    for (const auto & child : client->zookeeper->getChildren(path))
-        std::cout << child << " ";
+    auto children = client->zookeeper->getChildren(path);
+    std::sort(children.begin(), children.end());
+
+    bool need_space = false;
+    for (const auto & child : children)
+    {
+        if (std::exchange(need_space, true))
+            std::cout << " ";
+
+        std::cout << child;
+    }
+
     std::cout << "\n";
 }
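
Aside: the rewritten loop above relies on `std::exchange` to emit a separator before every element except the first. A minimal standalone sketch of the same idiom (illustrative only, not part of the patch):

``` cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main()
{
    std::vector<std::string> children = {"a", "b", "c"};

    bool need_space = false;
    for (const auto & child : children)
    {
        // std::exchange returns the previous value (false on the first
        // iteration) and sets the flag to true for all later iterations.
        if (std::exchange(need_space, true))
            std::cout << " ";
        std::cout << child;
    }
    std::cout << "\n"; // prints: a b c
}
```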
@@ -130,6 +141,173 @@ void GetCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) co
     std::cout << client->zookeeper->get(client->getAbsolutePath(query->args[0].safeGet<String>())) << "\n";
 }
 
+bool GetStatCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
+{
+    String arg;
+    if (!parseKeeperPath(pos, expected, arg))
+        return true;
+
+    node->args.push_back(std::move(arg));
+    return true;
+}
+
+void GetStatCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
+{
+    Coordination::Stat stat;
+    String path;
+    if (!query->args.empty())
+        path = client->getAbsolutePath(query->args[0].safeGet<String>());
+    else
+        path = client->cwd;
+
+    client->zookeeper->get(path, &stat);
+
+    std::cout << "cZxid = " << stat.czxid << "\n";
+    std::cout << "mZxid = " << stat.mzxid << "\n";
+    std::cout << "pZxid = " << stat.pzxid << "\n";
+    std::cout << "ctime = " << stat.ctime << "\n";
+    std::cout << "mtime = " << stat.mtime << "\n";
+    std::cout << "version = " << stat.version << "\n";
+    std::cout << "cversion = " << stat.cversion << "\n";
+    std::cout << "aversion = " << stat.aversion << "\n";
+    std::cout << "ephemeralOwner = " << stat.ephemeralOwner << "\n";
+    std::cout << "dataLength = " << stat.dataLength << "\n";
+    std::cout << "numChildren = " << stat.numChildren << "\n";
+}
+
+bool FindSuperNodes::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
+{
+    ASTPtr threshold;
+    if (!ParserUnsignedInteger{}.parse(pos, threshold, expected))
+        return false;
+
+    node->args.push_back(threshold->as<ASTLiteral &>().value);
+
+    String path;
+    if (!parseKeeperPath(pos, expected, path))
+        path = ".";
+
+    node->args.push_back(std::move(path));
+    return true;
+}
+
+void FindSuperNodes::execute(const ASTKeeperQuery * query, KeeperClient * client) const
+{
+    auto threshold = query->args[0].safeGet<UInt64>();
+    auto path = client->getAbsolutePath(query->args[1].safeGet<String>());
+
+    Coordination::Stat stat;
+    client->zookeeper->get(path, &stat);
+
+    if (stat.numChildren >= static_cast<Int32>(threshold))
+    {
+        std::cout << static_cast<String>(path) << "\t" << stat.numChildren << "\n";
+        return;
+    }
+
+    auto children = client->zookeeper->getChildren(path);
+    std::sort(children.begin(), children.end());
+    for (const auto & child : children)
+    {
+        auto next_query = *query;
+        next_query.args[1] = DB::Field(path / child);
+        execute(&next_query, client);
+    }
+}
+
+bool DeleteStableBackups::parse(IParser::Pos & /* pos */, std::shared_ptr<ASTKeeperQuery> & /* node */, Expected & /* expected */) const
+{
+    return true;
+}
+
+void DeleteStableBackups::execute(const ASTKeeperQuery * /* query */, KeeperClient * client) const
+{
+    client->askConfirmation(
+        "You are going to delete all inactive backups in /clickhouse/backups.",
+        [client]
+        {
+            fs::path backup_root = "/clickhouse/backups";
+            auto backups = client->zookeeper->getChildren(backup_root);
+            std::sort(backups.begin(), backups.end());
+
+            for (const auto & child : backups)
+            {
+                auto backup_path = backup_root / child;
+                std::cout << "Found backup " << backup_path << ", checking if it's active\n";
+
+                String stage_path = backup_path / "stage";
+                auto stages = client->zookeeper->getChildren(stage_path);
+
+                bool is_active = false;
+                for (const auto & stage : stages)
+                {
+                    if (startsWith(stage, "alive"))
+                    {
+                        is_active = true;
+                        break;
+                    }
+                }
+
+                if (is_active)
+                {
+                    std::cout << "Backup " << backup_path << " is active, not going to delete\n";
+                    continue;
+                }
+
+                std::cout << "Backup " << backup_path << " is not active, deleting it\n";
+                client->zookeeper->removeRecursive(backup_path);
+            }
+        });
+}
+
+bool FindBigFamily::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
+{
+    String path;
+    if (!parseKeeperPath(pos, expected, path))
+        path = ".";
+
+    node->args.push_back(std::move(path));
+
+    ASTPtr count;
+    if (ParserUnsignedInteger{}.parse(pos, count, expected))
+        node->args.push_back(count->as<ASTLiteral &>().value);
+    else
+        node->args.push_back(UInt64(10));
+
+    return true;
+}
+
+void FindBigFamily::execute(const ASTKeeperQuery * query, KeeperClient * client) const
+{
+    auto path = client->getAbsolutePath(query->args[0].safeGet<String>());
+    auto n = query->args[1].safeGet<UInt64>();
+
+    std::vector<std::tuple<Int32, String>> result;
+
+    std::queue<fs::path> queue;
+    queue.push(path);
+    while (!queue.empty())
+    {
+        auto next_path = queue.front();
+        queue.pop();
+
+        auto children = client->zookeeper->getChildren(next_path);
+        std::transform(children.cbegin(), children.cend(), children.begin(), [&](const String & child) { return next_path / child; });
+
+        auto response = client->zookeeper->get(children);
+
+        for (size_t i = 0; i < response.size(); ++i)
+        {
+            result.emplace_back(response[i].stat.numChildren, children[i]);
+            queue.push(children[i]);
+        }
+    }
+
+    std::sort(result.begin(), result.end(), std::greater());
+    for (UInt64 i = 0; i < std::min(result.size(), static_cast<size_t>(n)); ++i)
+        std::cout << std::get<1>(result[i]) << "\t" << std::get<0>(result[i]) << "\n";
+}
+
 bool RMCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
 {
     String arg;
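
Aside: `FindBigFamily` ranks every node of the subtree by its direct child count: a breadth-first walk collects a `(numChildren, path)` tuple per node, the vector is sorted descending, and the first `n` entries are printed. A self-contained sketch of that ranking step (the data is made up for illustration):

``` cpp
#include <algorithm>
#include <functional>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>

int main()
{
    // (child count, path) pairs as the BFS in FindBigFamily collects them.
    std::vector<std::tuple<int, std::string>> result = {
        {3, "/a"}, {17, "/b"}, {5, "/a/x"}, {17, "/c"}};

    // Sort by count descending; equal counts fall back to the path, also descending.
    std::sort(result.begin(), result.end(), std::greater());

    const size_t n = 2;
    for (size_t i = 0; i < std::min(result.size(), n); ++i)
        std::cout << std::get<1>(result[i]) << "\t" << std::get<0>(result[i]) << "\n";
    // prints: /c 17, then /b 17
}
```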
@@ -170,7 +348,7 @@ bool HelpCommand::parse(IParser::Pos & /* pos */, std::shared_ptr<ASTKeeperQuery
 void HelpCommand::execute(const ASTKeeperQuery * /* query */, KeeperClient * /* client */) const
 {
     for (const auto & pair : KeeperClient::commands)
-        std::cout << pair.second->getHelpMessage() << "\n";
+        std::cout << pair.second->generateHelpString() << "\n";
 }
 
 bool FourLetterWordCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
@@ -21,6 +21,12 @@ public:
     virtual String getName() const = 0;
 
     virtual ~IKeeperClientCommand() = default;
 
+    String generateHelpString() const
+    {
+        return fmt::vformat(getHelpMessage(), fmt::make_format_args(getName()));
+    }
+
 };
 
 using Command = std::shared_ptr<IKeeperClientCommand>;
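
Aside: `generateHelpString` treats each command's help message as a runtime {fmt} format string and substitutes the command name for the `{}` placeholder, so the name only has to be written once, in `getName()`. A minimal sketch of that call outside the class hierarchy (the free function here is illustrative):

``` cpp
#include <fmt/format.h>
#include <iostream>
#include <string>

std::string generateHelpString(const std::string & help_message, const std::string & name)
{
    // vformat accepts the pattern at runtime, so the "{}" placeholder stored
    // in the help text is only resolved here, when the name is known.
    return fmt::vformat(help_message, fmt::make_format_args(name));
}

int main()
{
    std::cout << generateHelpString("{} [path] -- Lists the nodes for the given path (default: cwd)", "ls") << "\n";
    // prints: ls [path] -- Lists the nodes for the given path (default: cwd)
}
```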
@@ -34,7 +40,7 @@ class LSCommand : public IKeeperClientCommand
 
     void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
 
-    String getHelpMessage() const override { return "ls [path] -- Lists the nodes for the given path (default: cwd)"; }
+    String getHelpMessage() const override { return "{} [path] -- Lists the nodes for the given path (default: cwd)"; }
 };
 
 class CDCommand : public IKeeperClientCommand
@@ -45,7 +51,7 @@ class CDCommand : public IKeeperClientCommand
 
     void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
 
-    String getHelpMessage() const override { return "cd [path] -- Change the working path (default `.`)"; }
+    String getHelpMessage() const override { return "{} [path] -- Change the working path (default `.`)"; }
 };
 
 class SetCommand : public IKeeperClientCommand
@@ -58,7 +64,7 @@ class SetCommand : public IKeeperClientCommand
 
     String getHelpMessage() const override
     {
-        return "set <path> <value> [version] -- Updates the node's value. Only update if version matches (default: -1)";
+        return "{} <path> <value> [version] -- Updates the node's value. Only update if version matches (default: -1)";
     }
 };
 
@@ -70,7 +76,7 @@ class CreateCommand : public IKeeperClientCommand
 
     void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
 
-    String getHelpMessage() const override { return "create <path> <value> -- Creates new node"; }
+    String getHelpMessage() const override { return "{} <path> <value> -- Creates new node"; }
 };
 
 class GetCommand : public IKeeperClientCommand
@@ -81,9 +87,63 @@ class GetCommand : public IKeeperClientCommand
 
     void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
 
-    String getHelpMessage() const override { return "get <path> -- Returns the node's value"; }
+    String getHelpMessage() const override { return "{} <path> -- Returns the node's value"; }
 };
 
+class GetStatCommand : public IKeeperClientCommand
+{
+    String getName() const override { return "get_stat"; }
+
+    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
+
+    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
+
+    String getHelpMessage() const override { return "{} [path] -- Returns the node's stat (default `.`)"; }
+};
+
+class FindSuperNodes : public IKeeperClientCommand
+{
+    String getName() const override { return "find_super_nodes"; }
+
+    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
+
+    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
+
+    String getHelpMessage() const override
+    {
+        return "{} <threshold> [path] -- Finds nodes with number of children larger than some threshold for the given path (default `.`)";
+    }
+};
+
+class DeleteStableBackups : public IKeeperClientCommand
+{
+    String getName() const override { return "delete_stable_backups"; }
+
+    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
+
+    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
+
+    String getHelpMessage() const override
+    {
+        return "{} -- Deletes ClickHouse nodes used for backups that are now inactive";
+    }
+};
+
+class FindBigFamily : public IKeeperClientCommand
+{
+    String getName() const override { return "find_big_family"; }
+
+    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
+
+    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
+
+    String getHelpMessage() const override
+    {
+        return "{} [path] [n] -- Returns the top n nodes with the biggest family in the subtree (default path = `.` and n = 10)";
+    }
+};
+
 class RMCommand : public IKeeperClientCommand
 {
     String getName() const override { return "rm"; }
@@ -92,7 +152,7 @@ class RMCommand : public IKeeperClientCommand
 
     void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
 
-    String getHelpMessage() const override { return "remove <path> -- Remove the node"; }
+    String getHelpMessage() const override { return "{} <path> -- Remove the node"; }
 };
 
 class RMRCommand : public IKeeperClientCommand
@@ -103,7 +163,7 @@ class RMRCommand : public IKeeperClientCommand
 
     void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
 
-    String getHelpMessage() const override { return "rmr <path> -- Recursively deletes path. Confirmation required"; }
+    String getHelpMessage() const override { return "{} <path> -- Recursively deletes path. Confirmation required"; }
 };
 
 class HelpCommand : public IKeeperClientCommand
@@ -114,7 +174,7 @@ class HelpCommand : public IKeeperClientCommand
 
     void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
 
-    String getHelpMessage() const override { return "help -- Prints this message"; }
+    String getHelpMessage() const override { return "{} -- Prints this message"; }
 };
 
 class FourLetterWordCommand : public IKeeperClientCommand
@@ -125,7 +185,7 @@ class FourLetterWordCommand : public IKeeperClientCommand
 
     void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
 
-    String getHelpMessage() const override { return "flwc <command> -- Executes four-letter-word command"; }
+    String getHelpMessage() const override { return "{} <command> -- Executes four-letter-word command"; }
 };
 
 }
@@ -177,6 +177,10 @@ void KeeperClient::initialize(Poco::Util::Application & /* self */)
         std::make_shared<SetCommand>(),
         std::make_shared<CreateCommand>(),
         std::make_shared<GetCommand>(),
+        std::make_shared<GetStatCommand>(),
+        std::make_shared<FindSuperNodes>(),
+        std::make_shared<DeleteStableBackups>(),
+        std::make_shared<FindBigFamily>(),
         std::make_shared<RMCommand>(),
         std::make_shared<RMRCommand>(),
         std::make_shared<HelpCommand>(),
@@ -58,6 +58,7 @@ bool KeeperParser::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
         return false;
 
     String command_name(pos->begin, pos->end);
+    std::transform(command_name.begin(), command_name.end(), command_name.begin(), [](unsigned char c) { return std::tolower(c); });
     Command command;
 
     auto iter = KeeperClient::commands.find(command_name);
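
Aside: lowercasing the token before the map lookup makes keeper-client command names case-insensitive. The same transform in isolation:

``` cpp
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

int main()
{
    std::string command_name = "GET_STAT";
    // Lowercase in place; the unsigned char cast avoids undefined behavior
    // for characters with negative values.
    std::transform(command_name.begin(), command_name.end(), command_name.begin(),
                   [](unsigned char c) { return std::tolower(c); });
    std::cout << command_name << "\n"; // prints: get_stat
}
```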
@@ -1691,17 +1691,26 @@ try
         global_context->initializeTraceCollector();
 
         /// Set up server-wide memory profiler (for total memory tracker).
-        UInt64 total_memory_profiler_step = config().getUInt64("total_memory_profiler_step", 0);
-        if (total_memory_profiler_step)
+        if (server_settings.total_memory_profiler_step)
         {
-            total_memory_tracker.setProfilerStep(total_memory_profiler_step);
+            total_memory_tracker.setProfilerStep(server_settings.total_memory_profiler_step);
         }
 
-        double total_memory_tracker_sample_probability = config().getDouble("total_memory_tracker_sample_probability", 0);
-        if (total_memory_tracker_sample_probability > 0.0)
+        if (server_settings.total_memory_tracker_sample_probability > 0.0)
         {
-            total_memory_tracker.setSampleProbability(total_memory_tracker_sample_probability);
+            total_memory_tracker.setSampleProbability(server_settings.total_memory_tracker_sample_probability);
         }
+
+        if (server_settings.total_memory_profiler_sample_min_allocation_size)
+        {
+            total_memory_tracker.setSampleMinAllocationSize(server_settings.total_memory_profiler_sample_min_allocation_size);
+        }
+
+        if (server_settings.total_memory_profiler_sample_max_allocation_size)
+        {
+            total_memory_tracker.setSampleMaxAllocationSize(server_settings.total_memory_profiler_sample_max_allocation_size);
+        }
     }
 #endif
@@ -2036,27 +2045,26 @@ void Server::createServers(
 
     for (const auto & protocol : protocols)
     {
-        if (!server_type.shouldStart(ServerType::Type::CUSTOM, protocol))
+        std::string prefix = "protocols." + protocol + ".";
+        std::string port_name = prefix + "port";
+
+        std::string description {"<undefined> protocol"};
+        if (config.has(prefix + "description"))
+            description = config.getString(prefix + "description");
+
+        if (!config.has(prefix + "port"))
+            continue;
+
+        if (!server_type.shouldStart(ServerType::Type::CUSTOM, port_name))
             continue;
 
         std::vector<std::string> hosts;
-        if (config.has("protocols." + protocol + ".host"))
-            hosts.push_back(config.getString("protocols." + protocol + ".host"));
+        if (config.has(prefix + "host"))
+            hosts.push_back(config.getString(prefix + "host"));
         else
             hosts = listen_hosts;
 
         for (const auto & host : hosts)
         {
-            std::string conf_name = "protocols." + protocol;
-            std::string prefix = conf_name + ".";
-
-            if (!config.has(prefix + "port"))
-                continue;
-
-            std::string description {"<undefined> protocol"};
-            if (config.has(prefix + "description"))
-                description = config.getString(prefix + "description");
-            std::string port_name = prefix + "port";
-
             bool is_secure = false;
             auto stack = buildProtocolStackFromConfig(config, protocol, http_params, async_metrics, is_secure);
@@ -328,9 +328,6 @@ void ContextAccess::setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> &
 
     enabled_row_policies = access_control->getEnabledRowPolicies(*params.user_id, roles_info->enabled_roles);
 
-    enabled_quota = access_control->getEnabledQuota(
-        *params.user_id, user_name, roles_info->enabled_roles, params.address, params.forwarded_address, params.quota_key);
-
     enabled_settings = access_control->getEnabledSettings(
         *params.user_id, user->settings, roles_info->enabled_roles, roles_info->settings_from_enabled_roles);
@@ -416,19 +413,32 @@ RowPolicyFilterPtr ContextAccess::getRowPolicyFilter(const String & database, co
 std::shared_ptr<const EnabledQuota> ContextAccess::getQuota() const
 {
     std::lock_guard lock{mutex};
-    if (enabled_quota)
-        return enabled_quota;
-    static const auto unlimited_quota = EnabledQuota::getUnlimitedQuota();
-    return unlimited_quota;
+
+    if (!enabled_quota)
+    {
+        if (roles_info)
+        {
+            enabled_quota = access_control->getEnabledQuota(*params.user_id,
+                                                            user_name,
+                                                            roles_info->enabled_roles,
+                                                            params.address,
+                                                            params.forwarded_address,
+                                                            params.quota_key);
+        }
+        else
+        {
+            static const auto unlimited_quota = EnabledQuota::getUnlimitedQuota();
+            return unlimited_quota;
+        }
+    }
+
+    return enabled_quota;
 }
 
 
 std::optional<QuotaUsage> ContextAccess::getQuotaUsage() const
 {
-    std::lock_guard lock{mutex};
-    if (enabled_quota)
-        return enabled_quota->getUsage();
-    return {};
+    return getQuota()->getUsage();
 }
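
Aside: the quota is now resolved lazily: the first `getQuota()` call under the mutex materializes it from the current roles and caches it, and `getQuotaUsage()` simply delegates. A stripped-down sketch of that lazy-caching shape (types simplified, not the real ClickHouse interfaces):

``` cpp
#include <memory>
#include <mutex>

struct Quota { /* ... */ };

class Access
{
public:
    std::shared_ptr<const Quota> getQuota() const
    {
        std::lock_guard lock{mutex};
        if (!quota)
            quota = std::make_shared<const Quota>(); // stands in for the real lookup
        return quota;
    }

private:
    mutable std::mutex mutex;
    mutable std::shared_ptr<const Quota> quota; // cache filled on first use
};
```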
@@ -1,4 +1,5 @@
 #include <string_view>
+#include <unordered_map>
 #include <Access/SettingsConstraints.h>
 #include <Access/resolveSetting.h>
 #include <Access/AccessControl.h>
@@ -6,6 +7,7 @@
 #include <Storages/MergeTree/MergeTreeSettings.h>
 #include <Common/FieldVisitorToString.h>
 #include <Common/FieldVisitorsAccurateComparison.h>
+#include <Common/SettingSource.h>
 #include <IO/WriteHelpers.h>
 #include <Poco/Util/AbstractConfiguration.h>
 #include <boost/range/algorithm_ext/erase.hpp>
@@ -20,6 +22,39 @@ namespace ErrorCodes
     extern const int UNKNOWN_SETTING;
 }
 
+namespace
+{
+struct SettingSourceRestrictions
+{
+    constexpr SettingSourceRestrictions() { allowed_sources.set(); }
+
+    constexpr SettingSourceRestrictions(std::initializer_list<SettingSource> allowed_sources_)
+    {
+        for (auto allowed_source : allowed_sources_)
+            setSourceAllowed(allowed_source, true);
+    }
+
+    constexpr bool isSourceAllowed(SettingSource source) { return allowed_sources[source]; }
+    constexpr void setSourceAllowed(SettingSource source, bool allowed) { allowed_sources[source] = allowed; }
+
+    std::bitset<SettingSource::COUNT> allowed_sources;
+};
+
+const std::unordered_map<std::string_view, SettingSourceRestrictions> SETTINGS_SOURCE_RESTRICTIONS = {
+    {"max_sessions_for_user", {SettingSource::PROFILE}},
+};
+
+SettingSourceRestrictions getSettingSourceRestrictions(std::string_view name)
+{
+    auto settingConstraintIter = SETTINGS_SOURCE_RESTRICTIONS.find(name);
+    if (settingConstraintIter != SETTINGS_SOURCE_RESTRICTIONS.end())
+        return settingConstraintIter->second;
+    else
+        return SettingSourceRestrictions(); // allows everything
+}
+
+}
+
 SettingsConstraints::SettingsConstraints(const AccessControl & access_control_) : access_control(&access_control_)
 {
 }
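
Aside: the restriction table defaults to "allow every source": only settings listed in the map get a narrowed bitset, anything else falls back to a default-constructed entry with all bits set. A compilable sketch of that lookup behavior with a simplified enum (names here are illustrative, not the real ClickHouse types):

``` cpp
#include <bitset>
#include <initializer_list>
#include <iostream>
#include <string_view>
#include <unordered_map>

enum SettingSource { QUERY, PROFILE, COUNT };

struct SourceRestrictions
{
    SourceRestrictions() { allowed.set(); } // default: every source allowed
    SourceRestrictions(std::initializer_list<SettingSource> sources)
    {
        for (auto s : sources)
            allowed[s] = true;
    }
    bool isAllowed(SettingSource s) const { return allowed[s]; }
    std::bitset<COUNT> allowed;
};

const std::unordered_map<std::string_view, SourceRestrictions> RESTRICTIONS = {
    {"max_sessions_for_user", {PROFILE}},
};

bool isSourceAllowed(std::string_view name, SettingSource source)
{
    auto it = RESTRICTIONS.find(name);
    return it == RESTRICTIONS.end() || it->second.isAllowed(source);
}

int main()
{
    std::cout << isSourceAllowed("max_sessions_for_user", QUERY) << "\n";   // 0: only a profile may set it
    std::cout << isSourceAllowed("max_sessions_for_user", PROFILE) << "\n"; // 1
    std::cout << isSourceAllowed("max_threads", QUERY) << "\n";             // 1: unlisted settings allow all
}
```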
@@ -98,7 +133,7 @@ void SettingsConstraints::merge(const SettingsConstraints & other)
 }
 
-void SettingsConstraints::check(const Settings & current_settings, const SettingsProfileElements & profile_elements) const
+void SettingsConstraints::check(const Settings & current_settings, const SettingsProfileElements & profile_elements, SettingSource source) const
 {
     for (const auto & element : profile_elements)
     {
@@ -108,19 +143,19 @@ void SettingsConstraints::check(const Settings & current_settings, const Setting
         if (element.value)
         {
             SettingChange value(element.setting_name, *element.value);
-            check(current_settings, value);
+            check(current_settings, value, source);
         }
 
         if (element.min_value)
         {
             SettingChange value(element.setting_name, *element.min_value);
-            check(current_settings, value);
+            check(current_settings, value, source);
         }
 
         if (element.max_value)
         {
             SettingChange value(element.setting_name, *element.max_value);
-            check(current_settings, value);
+            check(current_settings, value, source);
         }
 
         SettingConstraintWritability new_value = SettingConstraintWritability::WRITABLE;
@@ -142,24 +177,24 @@ void SettingsConstraints::check(const Settings & current_settings, const Setting
     }
 }
 
-void SettingsConstraints::check(const Settings & current_settings, const SettingChange & change) const
+void SettingsConstraints::check(const Settings & current_settings, const SettingChange & change, SettingSource source) const
 {
-    checkImpl(current_settings, const_cast<SettingChange &>(change), THROW_ON_VIOLATION);
+    checkImpl(current_settings, const_cast<SettingChange &>(change), THROW_ON_VIOLATION, source);
 }
 
-void SettingsConstraints::check(const Settings & current_settings, const SettingsChanges & changes) const
+void SettingsConstraints::check(const Settings & current_settings, const SettingsChanges & changes, SettingSource source) const
 {
     for (const auto & change : changes)
-        check(current_settings, change);
+        check(current_settings, change, source);
 }
 
-void SettingsConstraints::check(const Settings & current_settings, SettingsChanges & changes) const
+void SettingsConstraints::check(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const
 {
     boost::range::remove_erase_if(
         changes,
         [&](SettingChange & change) -> bool
         {
-            return !checkImpl(current_settings, const_cast<SettingChange &>(change), THROW_ON_VIOLATION);
+            return !checkImpl(current_settings, const_cast<SettingChange &>(change), THROW_ON_VIOLATION, source);
         });
 }
 
@@ -174,13 +209,13 @@ void SettingsConstraints::check(const MergeTreeSettings & current_settings, cons
         check(current_settings, change);
 }
 
-void SettingsConstraints::clamp(const Settings & current_settings, SettingsChanges & changes) const
+void SettingsConstraints::clamp(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const
 {
     boost::range::remove_erase_if(
         changes,
         [&](SettingChange & change) -> bool
         {
-            return !checkImpl(current_settings, change, CLAMP_ON_VIOLATION);
+            return !checkImpl(current_settings, change, CLAMP_ON_VIOLATION, source);
         });
 }
@@ -215,7 +250,10 @@ bool getNewValueToCheck(const T & current_settings, SettingChange & change, Fiel
     return true;
 }
 
-bool SettingsConstraints::checkImpl(const Settings & current_settings, SettingChange & change, ReactionOnViolation reaction) const
+bool SettingsConstraints::checkImpl(const Settings & current_settings,
+                                    SettingChange & change,
+                                    ReactionOnViolation reaction,
+                                    SettingSource source) const
 {
     std::string_view setting_name = Settings::Traits::resolveName(change.name);
 
@@ -247,7 +285,7 @@ bool SettingsConstraints::checkImpl(const Settings & current_settings, SettingCh
     if (!getNewValueToCheck(current_settings, change, new_value, reaction == THROW_ON_VIOLATION))
         return false;
 
-    return getChecker(current_settings, setting_name).check(change, new_value, reaction);
+    return getChecker(current_settings, setting_name).check(change, new_value, reaction, source);
 }
 
 bool SettingsConstraints::checkImpl(const MergeTreeSettings & current_settings, SettingChange & change, ReactionOnViolation reaction) const
@@ -255,10 +293,13 @@ bool SettingsConstraints::checkImpl(const MergeTreeSettings & current_settings,
     Field new_value;
     if (!getNewValueToCheck(current_settings, change, new_value, reaction == THROW_ON_VIOLATION))
         return false;
-    return getMergeTreeChecker(change.name).check(change, new_value, reaction);
+    return getMergeTreeChecker(change.name).check(change, new_value, reaction, SettingSource::QUERY);
 }
 
-bool SettingsConstraints::Checker::check(SettingChange & change, const Field & new_value, ReactionOnViolation reaction) const
+bool SettingsConstraints::Checker::check(SettingChange & change,
+                                         const Field & new_value,
+                                         ReactionOnViolation reaction,
+                                         SettingSource source) const
 {
     if (!explain.empty())
     {
@@ -326,6 +367,14 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
         change.value = max_value;
     }
 
+    if (!getSettingSourceRestrictions(setting_name).isSourceAllowed(source))
+    {
+        if (reaction == THROW_ON_VIOLATION)
+            throw Exception(ErrorCodes::READONLY, "Setting {} is not allowed to be set by {}", setting_name, toString(source));
+        else
+            return false;
+    }
+
     return true;
 }
@@ -2,6 +2,7 @@
 
 #include <Access/SettingsProfileElement.h>
 #include <Common/SettingsChanges.h>
+#include <Common/SettingSource.h>
 #include <unordered_map>
 
 namespace Poco::Util
@@ -73,17 +74,18 @@ public:
     void merge(const SettingsConstraints & other);
 
     /// Checks whether `change` violates these constraints and throws an exception if so.
-    void check(const Settings & current_settings, const SettingsProfileElements & profile_elements) const;
-    void check(const Settings & current_settings, const SettingChange & change) const;
-    void check(const Settings & current_settings, const SettingsChanges & changes) const;
-    void check(const Settings & current_settings, SettingsChanges & changes) const;
+    void check(const Settings & current_settings, const SettingsProfileElements & profile_elements, SettingSource source) const;
+    void check(const Settings & current_settings, const SettingChange & change, SettingSource source) const;
+    void check(const Settings & current_settings, const SettingsChanges & changes, SettingSource source) const;
+    void check(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const;
 
     /// Checks whether `change` violates these constraints and throws an exception if so. (setting short name is expected inside `changes`)
     void check(const MergeTreeSettings & current_settings, const SettingChange & change) const;
     void check(const MergeTreeSettings & current_settings, const SettingsChanges & changes) const;
 
     /// Checks whether `change` violates these and clamps the `change` if so.
-    void clamp(const Settings & current_settings, SettingsChanges & changes) const;
+    void clamp(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const;
 
     friend bool operator ==(const SettingsConstraints & left, const SettingsConstraints & right);
     friend bool operator !=(const SettingsConstraints & left, const SettingsConstraints & right) { return !(left == right); }
@@ -133,7 +135,10 @@ private:
         {}
 
         // Perform checking
-        bool check(SettingChange & change, const Field & new_value, ReactionOnViolation reaction) const;
+        bool check(SettingChange & change,
+                   const Field & new_value,
+                   ReactionOnViolation reaction,
+                   SettingSource source) const;
     };
 
     struct StringHash
@@ -145,7 +150,11 @@ private:
         }
     };
 
-    bool checkImpl(const Settings & current_settings, SettingChange & change, ReactionOnViolation reaction) const;
+    bool checkImpl(const Settings & current_settings,
+                   SettingChange & change,
+                   ReactionOnViolation reaction,
+                   SettingSource source) const;
 
     bool checkImpl(const MergeTreeSettings & current_settings, SettingChange & change, ReactionOnViolation reaction) const;
 
     Checker getChecker(const Settings & current_settings, std::string_view setting_name) const;
@@ -0,0 +1,221 @@
+#include <Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.h>
+
+#include <Functions/FunctionFactory.h>
+
+#include <Analyzer/InDepthQueryTreeVisitor.h>
+#include <Analyzer/ColumnNode.h>
+#include <Analyzer/ConstantNode.h>
+#include <Analyzer/FunctionNode.h>
+#include <Common/DateLUT.h>
+#include <Common/DateLUTImpl.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+namespace
+{
+
+class OptimizeDateOrDateTimeConverterWithPreimageVisitor : public InDepthQueryTreeVisitorWithContext<OptimizeDateOrDateTimeConverterWithPreimageVisitor>
+{
+public:
+    using Base = InDepthQueryTreeVisitorWithContext<OptimizeDateOrDateTimeConverterWithPreimageVisitor>;
+
+    explicit OptimizeDateOrDateTimeConverterWithPreimageVisitor(ContextPtr context)
+        : Base(std::move(context))
+    {}
+
+    static bool needChildVisit(QueryTreeNodePtr & node, QueryTreeNodePtr & /*child*/)
+    {
+        const static std::unordered_set<String> relations = {
+            "equals",
+            "notEquals",
+            "less",
+            "greater",
+            "lessOrEquals",
+            "greaterOrEquals",
+        };
+
+        if (const auto * function = node->as<FunctionNode>())
+        {
+            return !relations.contains(function->getFunctionName());
+        }
+
+        return true;
+    }
+
+    void enterImpl(QueryTreeNodePtr & node) const
+    {
+        const static std::unordered_map<String, String> swap_relations = {
+            {"equals", "equals"},
+            {"notEquals", "notEquals"},
+            {"less", "greater"},
+            {"greater", "less"},
+            {"lessOrEquals", "greaterOrEquals"},
+            {"greaterOrEquals", "lessOrEquals"},
+        };
+
+        const auto * function = node->as<FunctionNode>();
+
+        if (!function || !swap_relations.contains(function->getFunctionName())) return;
+
+        if (function->getArguments().getNodes().size() != 2) return;
+
+        size_t func_id = function->getArguments().getNodes().size();
+
+        for (size_t i = 0; i < function->getArguments().getNodes().size(); i++)
+        {
+            if (const auto * func = function->getArguments().getNodes()[i]->as<FunctionNode>())
+            {
+                func_id = i;
+            }
+        }
+
+        if (func_id == function->getArguments().getNodes().size()) return;
+
+        size_t literal_id = 1 - func_id;
+        const auto * literal = function->getArguments().getNodes()[literal_id]->as<ConstantNode>();
+
+        if (!literal || literal->getValue().getType() != Field::Types::UInt64) return;
+
+        String comparator = literal_id > func_id ? function->getFunctionName() : swap_relations.at(function->getFunctionName());
+
+        const auto * func_node = function->getArguments().getNodes()[func_id]->as<FunctionNode>();
+        /// Currently we only handle single-argument functions.
+        if (!func_node || func_node->getArguments().getNodes().size() != 1) return;
+
+        const auto * column_id = func_node->getArguments().getNodes()[0]->as<ColumnNode>();
+        if (!column_id) return;
+
+        const auto * column_type = column_id->getColumnType().get();
+        if (!isDateOrDate32(column_type) && !isDateTime(column_type) && !isDateTime64(column_type)) return;
+
+        const auto & converter = FunctionFactory::instance().tryGet(func_node->getFunctionName(), getContext());
+        if (!converter) return;
+
+        ColumnsWithTypeAndName args;
+        args.emplace_back(column_id->getColumnType(), "tmp");
+        auto converter_base = converter->build(args);
+        if (!converter_base || !converter_base->hasInformationAboutPreimage()) return;
+
+        auto preimage_range = converter_base->getPreimage(*(column_id->getColumnType()), literal->getValue());
+        if (!preimage_range) return;
+
+        const auto new_node = generateOptimizedDateFilter(comparator, *column_id, *preimage_range);
+
+        if (!new_node) return;
+
+        node = new_node;
+    }
+
+private:
+    QueryTreeNodePtr generateOptimizedDateFilter(const String & comparator, const ColumnNode & column_node, const std::pair<Field, Field> & range) const
+    {
+        const DateLUTImpl & date_lut = DateLUT::instance("UTC");
+
+        String start_date_or_date_time;
+        String end_date_or_date_time;
+
+        if (isDateOrDate32(column_node.getColumnType().get()))
+        {
+            start_date_or_date_time = date_lut.dateToString(range.first.get<DateLUTImpl::Time>());
+            end_date_or_date_time = date_lut.dateToString(range.second.get<DateLUTImpl::Time>());
+        }
+        else if (isDateTime(column_node.getColumnType().get()) || isDateTime64(column_node.getColumnType().get()))
+        {
+            start_date_or_date_time = date_lut.timeToString(range.first.get<DateLUTImpl::Time>());
+            end_date_or_date_time = date_lut.timeToString(range.second.get<DateLUTImpl::Time>());
+        }
+        else [[unlikely]] return {};
+
+        if (comparator == "equals")
+        {
+            const auto lhs = std::make_shared<FunctionNode>("greaterOrEquals");
+            lhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
+            lhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(start_date_or_date_time));
+            resolveOrdinaryFunctionNode(*lhs, lhs->getFunctionName());
+
+            const auto rhs = std::make_shared<FunctionNode>("less");
+            rhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
+            rhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
+            resolveOrdinaryFunctionNode(*rhs, rhs->getFunctionName());
+
+            const auto new_date_filter = std::make_shared<FunctionNode>("and");
+            new_date_filter->getArguments().getNodes() = {lhs, rhs};
+            resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());
+
+            return new_date_filter;
+        }
+        else if (comparator == "notEquals")
+        {
+            const auto lhs = std::make_shared<FunctionNode>("less");
+            lhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
+            lhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(start_date_or_date_time));
+            resolveOrdinaryFunctionNode(*lhs, lhs->getFunctionName());
+
+            const auto rhs = std::make_shared<FunctionNode>("greaterOrEquals");
+            rhs->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
+            rhs->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
+            resolveOrdinaryFunctionNode(*rhs, rhs->getFunctionName());
+
+            const auto new_date_filter = std::make_shared<FunctionNode>("or");
+            new_date_filter->getArguments().getNodes() = {lhs, rhs};
+            resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());
+
+            return new_date_filter;
+        }
+        else if (comparator == "greater")
+        {
+            const auto new_date_filter = std::make_shared<FunctionNode>("greaterOrEquals");
+            new_date_filter->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
+            new_date_filter->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
+            resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());
+
+            return new_date_filter;
+        }
+        else if (comparator == "lessOrEquals")
+        {
+            const auto new_date_filter = std::make_shared<FunctionNode>("less");
+            new_date_filter->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
+            new_date_filter->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(end_date_or_date_time));
+            resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());
+
+            return new_date_filter;
+        }
+        else if (comparator == "less" || comparator == "greaterOrEquals")
+        {
+            const auto new_date_filter = std::make_shared<FunctionNode>(comparator);
+            new_date_filter->getArguments().getNodes().push_back(std::make_shared<ColumnNode>(column_node.getColumn(), column_node.getColumnSource()));
+            new_date_filter->getArguments().getNodes().push_back(std::make_shared<ConstantNode>(start_date_or_date_time));
+            resolveOrdinaryFunctionNode(*new_date_filter, new_date_filter->getFunctionName());
+
+            return new_date_filter;
+        }
+        else [[unlikely]]
+        {
+            throw Exception(ErrorCodes::LOGICAL_ERROR,
+                "Expected equals, notEquals, less, lessOrEquals, greater, greaterOrEquals. Actual {}",
+                comparator);
+        }
+    }
+
+    void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
+    {
+        auto function = FunctionFactory::instance().get(function_name, getContext());
+        function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
+    }
+};
+
+}
+
+void OptimizeDateOrDateTimeConverterWithPreimagePass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
+{
+    OptimizeDateOrDateTimeConverterWithPreimageVisitor visitor(std::move(context));
+    visitor.visit(query_tree_node);
+}
+
+}
@@ -0,0 +1,24 @@
+#pragma once
+
+#include <Analyzer/IQueryTreePass.h>
+
+namespace DB
+{
+
+/** Replace predicate having Date/DateTime converters with their preimages to improve performance.
+  * Given a Date column c, toYear(c) = 2023 -> c >= '2023-01-01' AND c < '2024-01-01'
+  * Or if c is a DateTime column, toYear(c) = 2023 -> c >= '2023-01-01 00:00:00' AND c < '2024-01-01 00:00:00'.
+  * The similar optimization also applies to other converters.
+  */
+class OptimizeDateOrDateTimeConverterWithPreimagePass final : public IQueryTreePass
+{
+public:
+    String getName() override { return "OptimizeDateOrDateTimeConverterWithPreimagePass"; }
+
+    String getDescription() override { return "Replace predicate having Date/DateTime converters with their preimages"; }
+
+    void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override;
+
+};
+
+}
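The doc comment in this new header describes the rewrite as a mapping onto a half-open date range. As a minimal standalone sketch of that preimage idea (assumed names, std::chrono in place of the DateLUT machinery the pass actually uses):

    #include <chrono>
    #include <utility>

    // Half-open [begin, end) range of days whose image under toYear() is {year}:
    // toYear(d) == year  <=>  begin <= d < end.
    std::pair<std::chrono::sys_days, std::chrono::sys_days> yearPreimage(int year)
    {
        using namespace std::chrono;
        return {sys_days{January / 1 / year}, sys_days{January / 1 / (year + 1)}};
    }

The half-open upper bound is what lets `equals` become `greaterOrEquals(start) AND less(end)` in the visitor code above without special-casing leap years.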
@@ -6494,55 +6494,69 @@ void QueryAnalyzer::resolveArrayJoin(QueryTreeNodePtr & array_join_node, Identif

         resolveExpressionNode(array_join_expression, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);

-        auto result_type = array_join_expression->getResultType();
-        bool is_array_type = isArray(result_type);
-        bool is_map_type = isMap(result_type);
-
-        if (!is_array_type && !is_map_type)
-            throw Exception(ErrorCodes::TYPE_MISMATCH,
-                "ARRAY JOIN {} requires expression {} with Array or Map type. Actual {}. In scope {}",
-                array_join_node_typed.formatASTForErrorMessage(),
-                array_join_expression->formatASTForErrorMessage(),
-                result_type->getName(),
-                scope.scope_node->formatASTForErrorMessage());
-
-        if (is_map_type)
-            result_type = assert_cast<const DataTypeMap &>(*result_type).getNestedType();
-
-        result_type = assert_cast<const DataTypeArray &>(*result_type).getNestedType();
-
-        String array_join_column_name;
-
-        if (!array_join_expression_alias.empty())
+        auto process_array_join_expression = [&](QueryTreeNodePtr & expression)
         {
-            array_join_column_name = array_join_expression_alias;
-        }
-        else if (auto * array_join_expression_inner_column = array_join_expression->as<ColumnNode>())
-        {
-            array_join_column_name = array_join_expression_inner_column->getColumnName();
-        }
-        else if (!identifier_full_name.empty())
-        {
-            array_join_column_name = identifier_full_name;
-        }
+            auto result_type = expression->getResultType();
+            bool is_array_type = isArray(result_type);
+            bool is_map_type = isMap(result_type);
+
+            if (!is_array_type && !is_map_type)
+                throw Exception(ErrorCodes::TYPE_MISMATCH,
+                    "ARRAY JOIN {} requires expression {} with Array or Map type. Actual {}. In scope {}",
+                    array_join_node_typed.formatASTForErrorMessage(),
+                    expression->formatASTForErrorMessage(),
+                    result_type->getName(),
+                    scope.scope_node->formatASTForErrorMessage());
+
+            if (is_map_type)
+                result_type = assert_cast<const DataTypeMap &>(*result_type).getNestedType();
+
+            result_type = assert_cast<const DataTypeArray &>(*result_type).getNestedType();
+
+            String array_join_column_name;
+
+            if (!array_join_expression_alias.empty())
+            {
+                array_join_column_name = array_join_expression_alias;
+            }
+            else if (auto * array_join_expression_inner_column = array_join_expression->as<ColumnNode>())
+            {
+                array_join_column_name = array_join_expression_inner_column->getColumnName();
+            }
+            else if (!identifier_full_name.empty())
+            {
+                array_join_column_name = identifier_full_name;
+            }
+            else
+            {
+                array_join_column_name = "__array_join_expression_" + std::to_string(array_join_expressions_counter);
+                ++array_join_expressions_counter;
+            }
+
+            if (array_join_column_names.contains(array_join_column_name))
+                throw Exception(ErrorCodes::BAD_ARGUMENTS,
+                    "ARRAY JOIN {} multiple columns with name {}. In scope {}",
+                    array_join_node_typed.formatASTForErrorMessage(),
+                    array_join_column_name,
+                    scope.scope_node->formatASTForErrorMessage());
+            array_join_column_names.emplace(array_join_column_name);
+
+            NameAndTypePair array_join_column(array_join_column_name, result_type);
+            auto array_join_column_node = std::make_shared<ColumnNode>(std::move(array_join_column), expression, array_join_node);
+            array_join_column_node->setAlias(array_join_expression_alias);
+            array_join_column_expressions.push_back(std::move(array_join_column_node));
+        };
+
+        // Support ARRAY JOIN COLUMNS(...). COLUMNS transformer is resolved to list of columns.
+        if (auto * columns_list = array_join_expression->as<ListNode>())
+        {
+            for (auto & array_join_subexpression : columns_list->getNodes())
+                process_array_join_expression(array_join_subexpression);
+        }
         else
         {
-            array_join_column_name = "__array_join_expression_" + std::to_string(array_join_expressions_counter);
-            ++array_join_expressions_counter;
+            process_array_join_expression(array_join_expression);
         }
-
-        if (array_join_column_names.contains(array_join_column_name))
-            throw Exception(ErrorCodes::BAD_ARGUMENTS,
-                "ARRAY JOIN {} multiple columns with name {}. In scope {}",
-                array_join_node_typed.formatASTForErrorMessage(),
-                array_join_column_name,
-                scope.scope_node->formatASTForErrorMessage());
-        array_join_column_names.emplace(array_join_column_name);
-
-        NameAndTypePair array_join_column(array_join_column_name, result_type);
-        auto array_join_column_node = std::make_shared<ColumnNode>(std::move(array_join_column), array_join_expression, array_join_node);
-        array_join_column_node->setAlias(array_join_expression_alias);
-        array_join_column_expressions.push_back(std::move(array_join_column_node));
     }

     /** Allow to resolve ARRAY JOIN columns from aliases with types after ARRAY JOIN only after ARRAY JOIN expression list is resolved, because
@@ -6554,11 +6568,9 @@ void QueryAnalyzer::resolveArrayJoin(QueryTreeNodePtr & array_join_node, Identif
      * And it is expected that `value_element` inside projection expression list will be resolved as `value_element` expression
      * with type after ARRAY JOIN.
      */
-    for (size_t i = 0; i < array_join_nodes_size; ++i)
+    array_join_nodes = std::move(array_join_column_expressions);
+    for (auto & array_join_column_expression : array_join_nodes)
     {
-        auto & array_join_column_expression = array_join_nodes[i];
-        array_join_column_expression = std::move(array_join_column_expressions[i]);
-
         auto it = scope.alias_name_to_expression_node.find(array_join_column_expression->getAlias());
        if (it != scope.alias_name_to_expression_node.end())
        {
@@ -42,6 +42,7 @@
 #include <Analyzer/Passes/CrossToInnerJoinPass.h>
 #include <Analyzer/Passes/ShardNumColumnToFunctionPass.h>
 #include <Analyzer/Passes/ConvertQueryToCNFPass.h>
+#include <Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.h>

 namespace DB
 {
@@ -278,6 +279,7 @@ void addQueryTreePasses(QueryTreePassManager & manager)
     manager.addPass(std::make_unique<AutoFinalOnQueryPass>());
     manager.addPass(std::make_unique<CrossToInnerJoinPass>());
     manager.addPass(std::make_unique<ShardNumColumnToFunctionPass>());
+    manager.addPass(std::make_unique<OptimizeDateOrDateTimeConverterWithPreimagePass>());
 }

 }
@@ -30,6 +30,7 @@ public:
         String compression_method;
         int compression_level = -1;
         String password;
+        String s3_storage_class;
         ContextPtr context;
         bool is_internal_backup = false;
         std::shared_ptr<IBackupCoordination> backup_coordination;
@@ -88,7 +88,7 @@ namespace
         request.SetMaxKeys(1);
         auto outcome = client.ListObjects(request);
         if (!outcome.IsSuccess())
-            throw Exception::createDeprecated(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
+            throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
         return outcome.GetResult().GetContents();
     }
@@ -178,7 +178,7 @@ void BackupReaderS3::copyFileToDisk(const String & path_in_backup, size_t file_s


 BackupWriterS3::BackupWriterS3(
-    const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_)
+    const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const String & storage_class_name, const ContextPtr & context_)
     : BackupWriterDefault(&Poco::Logger::get("BackupWriterS3"), context_)
     , s3_uri(s3_uri_)
     , client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
@@ -188,6 +188,7 @@ BackupWriterS3::BackupWriterS3(
     request_settings.updateFromSettings(context_->getSettingsRef());
     request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint
     request_settings.allow_native_copy = allow_s3_native_copy;
+    request_settings.setStorageClassName(storage_class_name);
 }

 void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
@@ -271,7 +272,7 @@ void BackupWriterS3::removeFile(const String & file_name)
     request.SetKey(fs::path(s3_uri.key) / file_name);
     auto outcome = client->DeleteObject(request);
     if (!outcome.IsSuccess() && !isNotFoundError(outcome.GetError().GetErrorType()))
-        throw Exception::createDeprecated(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
+        throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
 }

 void BackupWriterS3::removeFiles(const Strings & file_names)
@@ -329,7 +330,7 @@ void BackupWriterS3::removeFilesBatch(const Strings & file_names)

         auto outcome = client->DeleteObjects(request);
         if (!outcome.IsSuccess() && !isNotFoundError(outcome.GetError().GetErrorType()))
-            throw Exception::createDeprecated(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
+            throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType());
     }
 }
@@ -38,7 +38,7 @@ private:
 class BackupWriterS3 : public BackupWriterDefault
 {
 public:
-    BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_);
+    BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const String & storage_class_name, const ContextPtr & context_);
     ~BackupWriterS3() override;

     bool fileExists(const String & file_name) override;
@@ -21,6 +21,7 @@ namespace ErrorCodes
     M(String, id) \
     M(String, compression_method) \
     M(String, password) \
+    M(String, s3_storage_class) \
     M(Bool, structure_only) \
     M(Bool, async) \
     M(Bool, decrypt_files_from_encrypted_disks) \
@@ -25,6 +25,9 @@ struct BackupSettings
     /// Password used to encrypt the backup.
     String password;

+    /// S3 storage class.
+    String s3_storage_class = "";
+
     /// If this is set to true then only create queries will be written to backup,
     /// without the data of tables.
     bool structure_only = false;
@@ -344,6 +344,7 @@ void BackupsWorker::doBackup(
         backup_create_params.compression_method = backup_settings.compression_method;
         backup_create_params.compression_level = backup_settings.compression_level;
         backup_create_params.password = backup_settings.password;
+        backup_create_params.s3_storage_class = backup_settings.s3_storage_class;
         backup_create_params.is_internal_backup = backup_settings.internal;
         backup_create_params.backup_coordination = backup_coordination;
         backup_create_params.backup_uuid = backup_settings.backup_uuid;
@@ -112,7 +112,7 @@ void registerBackupEngineS3(BackupFactory & factory)
         }
         else
         {
-            auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.allow_s3_native_copy, params.context);
+            auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.allow_s3_native_copy, params.s3_storage_class, params.context);
             return std::make_unique<BackupImpl>(
                 backup_name_for_logging,
                 archive_params,
@@ -124,6 +124,9 @@ void Suggest::load(ContextPtr context, const ConnectionParameters & connection_p
             if (e.code() == ErrorCodes::DEADLOCK_AVOIDED)
                 continue;

+            /// Client can successfully connect to the server and
+            /// get ErrorCodes::USER_SESSION_LIMIT_EXCEEDED for suggestion connection.
+
             /// We should not use std::cerr here, because this method works concurrently with the main thread.
             /// WriteBufferFromFileDescriptor will write directly to the file descriptor, avoiding data race on std::cerr.
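The added comment documents one more error the suggestion loader may legitimately receive. For context, a standalone sketch of the retry shape this hunk sits inside (the exception type and loop bound here are illustrative assumptions, not the client's real API):

    #include <functional>

    struct TransientError {}; // stands in for e.g. DEADLOCK_AVOIDED

    // Retry the attempt on transient errors; anything else propagates.
    inline void retryOnTransient(const std::function<void()> & attempt, int max_tries)
    {
        for (int i = 0; i < max_tries; ++i)
        {
            try
            {
                attempt();
                return;
            }
            catch (const TransientError &)
            {
                // Transient failure: loop and try again.
            }
        }
    }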
@@ -564,15 +564,22 @@ void ColumnNullable::updatePermutationImpl(IColumn::PermutationSortDirection dir
     else
         getNestedColumn().updatePermutation(direction, stability, limit, null_direction_hint, res, new_ranges);

-    equal_ranges = std::move(new_ranges);
-
     if (unlikely(stability == PermutationSortStability::Stable))
     {
         for (auto & null_range : null_ranges)
             ::sort(res.begin() + null_range.first, res.begin() + null_range.second);
     }

-    std::move(null_ranges.begin(), null_ranges.end(), std::back_inserter(equal_ranges));
+    if (is_nulls_last || null_ranges.empty())
+    {
+        equal_ranges = std::move(new_ranges);
+        std::move(null_ranges.begin(), null_ranges.end(), std::back_inserter(equal_ranges));
+    }
+    else
+    {
+        equal_ranges = std::move(null_ranges);
+        std::move(new_ranges.begin(), new_ranges.end(), std::back_inserter(equal_ranges));
+    }
 }

 void ColumnNullable::getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
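The fix above keeps the combined list of equal ranges ordered by position: with NULLS LAST the null ranges follow the ranges produced by the nested column, otherwise they precede them. The same merge step in isolation (a minimal sketch with simplified types, not the IColumn API):

    #include <cstddef>
    #include <iterator>
    #include <utility>
    #include <vector>

    using EqualRanges = std::vector<std::pair<size_t, size_t>>;

    // Concatenate value ranges and null ranges in the order dictated by the
    // null placement, mirroring the branch added in updatePermutationImpl.
    EqualRanges combineRanges(EqualRanges value_ranges, EqualRanges null_ranges, bool is_nulls_last)
    {
        EqualRanges result;
        if (is_nulls_last || null_ranges.empty())
        {
            result = std::move(value_ranges);
            std::move(null_ranges.begin(), null_ranges.end(), std::back_inserter(result));
        }
        else
        {
            result = std::move(null_ranges);
            std::move(value_ranges.begin(), value_ranges.end(), std::back_inserter(result));
        }
        return result;
    }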
@@ -439,7 +439,7 @@ void ColumnSparse::compareColumn(const IColumn & rhs, size_t rhs_row_num,
                                  PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
                                  int direction, int nan_direction_hint) const
 {
-    if (row_indexes)
+    if (row_indexes || !typeid_cast<const ColumnSparse *>(&rhs))
     {
         /// TODO: implement without conversion to full column.
         auto this_full = convertToFullColumnIfSparse();
@@ -582,6 +582,7 @@
     M(697, CANNOT_RESTORE_TO_NONENCRYPTED_DISK) \
     M(698, INVALID_REDIS_STORAGE_TYPE) \
     M(699, INVALID_REDIS_TABLE_STRUCTURE) \
+    M(700, USER_SESSION_LIMIT_EXCEEDED) \
     \
     M(999, KEEPER_EXCEPTION) \
     M(1000, POCO_EXCEPTION) \
@@ -229,7 +229,7 @@ void MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryT
     }

     std::bernoulli_distribution sample(sample_probability);
-    if (unlikely(sample_probability > 0.0 && sample(thread_local_rng)))
+    if (unlikely(sample_probability > 0.0 && isSizeOkForSampling(size) && sample(thread_local_rng)))
     {
         MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
         DB::TraceSender::send(DB::TraceType::MemorySample, StackTrace(), {.size = size});
@@ -413,7 +413,7 @@ void MemoryTracker::free(Int64 size)
     }

     std::bernoulli_distribution sample(sample_probability);
-    if (unlikely(sample_probability > 0.0 && sample(thread_local_rng)))
+    if (unlikely(sample_probability > 0.0 && isSizeOkForSampling(size) && sample(thread_local_rng)))
     {
         MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
         DB::TraceSender::send(DB::TraceType::MemorySample, StackTrace(), {.size = -size});
@@ -534,6 +534,12 @@ void MemoryTracker::setOrRaiseProfilerLimit(Int64 value)
         ;
 }

+bool MemoryTracker::isSizeOkForSampling(UInt64 size) const
+{
+    /// We can avoid comparison min_allocation_size_bytes with zero, because we cannot have 0 bytes allocation/deallocation
+    return ((max_allocation_size_bytes == 0 || size <= max_allocation_size_bytes) && size >= min_allocation_size_bytes);
+}
+
 bool canEnqueueBackgroundTask()
 {
     auto limit = background_memory_tracker.getSoftLimit();
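Both call sites now consult isSizeOkForSampling() before the Bernoulli trial, so only allocations inside the configured size window can be sampled into trace_log. The combined gate as a self-contained sketch (illustrative names, not the MemoryTracker interface):

    #include <cstdint>
    #include <random>

    // Size window first (max_size == 0 means "no upper bound"),
    // then the random trial, as in allocImpl()/free() above.
    bool shouldSample(std::mt19937_64 & rng, double probability,
                      uint64_t size, uint64_t min_size, uint64_t max_size)
    {
        if (size < min_size || (max_size != 0 && size > max_size))
            return false;
        std::bernoulli_distribution sample(probability);
        return probability > 0.0 && sample(rng);
    }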
@@ -67,6 +67,12 @@ private:
     /// To randomly sample allocations and deallocations in trace_log.
     double sample_probability = 0;

+    /// Randomly sample allocations only larger or equal to this size
+    UInt64 min_allocation_size_bytes = 0;
+
+    /// Randomly sample allocations only smaller or equal to this size
+    UInt64 max_allocation_size_bytes = 0;
+
     /// Singly-linked list. All information will be passed to subsequent memory trackers also (it allows to implement trackers hierarchy).
     /// In terms of tree nodes it is the list of parents. Lifetime of these trackers should "include" lifetime of current tracker.
     std::atomic<MemoryTracker *> parent {};
@@ -88,6 +94,8 @@ private:

     void setOrRaiseProfilerLimit(Int64 value);

+    bool isSizeOkForSampling(UInt64 size) const;
+
     /// allocImpl(...) and free(...) should not be used directly
     friend struct CurrentMemoryTracker;
     void allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryTracker * query_tracker = nullptr);
@@ -166,6 +174,16 @@ public:
         sample_probability = value;
     }

+    void setSampleMinAllocationSize(UInt64 value)
+    {
+        min_allocation_size_bytes = value;
+    }
+
+    void setSampleMaxAllocationSize(UInt64 value)
+    {
+        max_allocation_size_bytes = value;
+    }
+
     void setProfilerStep(Int64 value)
     {
         profiler_step = value;
src/Common/SettingSource.h (new file, 43 lines)
@@ -0,0 +1,43 @@
+#pragma once
+
+#include <string_view>
+
+namespace DB
+{
+    enum SettingSource
+    {
+        /// Query or session change:
+        /// SET <setting> = <value>
+        /// SELECT ... SETTINGS [<setting> = <value]
+        QUERY,
+
+        /// Profile creation or altering:
+        /// CREATE SETTINGS PROFILE ... SETTINGS [<setting> = <value]
+        /// ALTER SETTINGS PROFILE ... SETTINGS [<setting> = <value]
+        PROFILE,
+
+        /// Role creation or altering:
+        /// CREATE ROLE ... SETTINGS [<setting> = <value>]
+        /// ALTER ROLE ... SETTINGS [<setting> = <value]
+        ROLE,
+
+        /// User creation or altering:
+        /// CREATE USER ... SETTINGS [<setting> = <value>]
+        /// ALTER USER ... SETTINGS [<setting> = <value]
+        USER,
+
+        COUNT,
+    };
+
+    constexpr std::string_view toString(SettingSource source)
+    {
+        switch (source)
+        {
+            case SettingSource::QUERY: return "query";
+            case SettingSource::PROFILE: return "profile";
+            case SettingSource::USER: return "user";
+            case SettingSource::ROLE: return "role";
+            default: return "unknown";
+        }
+    }
+}
@@ -136,6 +136,8 @@ using ResponseCallback = std::function<void(const Response &)>;
 struct Response
 {
     Error error = Error::ZOK;
+    int64_t zxid = 0;
+
     Response() = default;
     Response(const Response &) = default;
     Response & operator=(const Response &) = default;
@@ -490,8 +492,6 @@ public:
     /// Useful to check owner of ephemeral node.
     virtual int64_t getSessionID() const = 0;

-    virtual Poco::Net::SocketAddress getConnectedAddress() const = 0;
-
     /// If the method will throw an exception, callbacks won't be called.
     ///
     /// After the method is executed successfully, you must wait for callbacks
@@ -564,6 +564,10 @@ public:

     virtual const DB::KeeperFeatureFlags * getKeeperFeatureFlags() const { return nullptr; }

+    /// A ZooKeeper session can have an optional deadline set on it.
+    /// After it has been reached, the session needs to be finalized.
+    virtual bool hasReachedDeadline() const = 0;
+
     /// Expire session and finish all pending requests
     virtual void finalize(const String & reason) = 0;
 };
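hasReachedDeadline() is a pure query: a session with no deadline never expires client-side; one with a deadline expires once the clock passes it. A minimal standalone model of that contract (the type name and clock choice here are assumptions, not the interface's):

    #include <chrono>
    #include <optional>

    struct SessionDeadline
    {
        using Clock = std::chrono::steady_clock;
        std::optional<Clock::time_point> deadline; // empty => never expires

        bool hasReachedDeadline() const
        {
            return deadline.has_value() && Clock::now() >= *deadline;
        }
    };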
@@ -195,6 +195,7 @@ struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest
 std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Container & container, int64_t zxid) const
 {
     CreateResponse response;
+    response.zxid = zxid;
     Undo undo;

     if (container.contains(path))
@@ -257,9 +258,10 @@ std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Contai
     return { std::make_shared<CreateResponse>(response), undo };
 }

-std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Container & container, int64_t) const
+std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Container & container, int64_t zxid) const
 {
     RemoveResponse response;
+    response.zxid = zxid;
     Undo undo;

     auto it = container.find(path);
@@ -296,9 +298,10 @@ std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Contai
     return { std::make_shared<RemoveResponse>(response), undo };
 }

-std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Container & container, int64_t) const
+std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Container & container, int64_t zxid) const
 {
     ExistsResponse response;
+    response.zxid = zxid;

     auto it = container.find(path);
     if (it != container.end())
@@ -314,9 +317,10 @@ std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Contai
     return { std::make_shared<ExistsResponse>(response), {} };
 }

-std::pair<ResponsePtr, Undo> TestKeeperGetRequest::process(TestKeeper::Container & container, int64_t) const
+std::pair<ResponsePtr, Undo> TestKeeperGetRequest::process(TestKeeper::Container & container, int64_t zxid) const
 {
     GetResponse response;
+    response.zxid = zxid;

     auto it = container.find(path);
     if (it == container.end())
@@ -336,6 +340,7 @@ std::pair<ResponsePtr, Undo> TestKeeperGetRequest::process(TestKeeper::Container
 std::pair<ResponsePtr, Undo> TestKeeperSetRequest::process(TestKeeper::Container & container, int64_t zxid) const
 {
     SetResponse response;
+    response.zxid = zxid;
     Undo undo;

     auto it = container.find(path);
@@ -370,9 +375,10 @@ std::pair<ResponsePtr, Undo> TestKeeperSetRequest::process(TestKeeper::Container
     return { std::make_shared<SetResponse>(response), undo };
 }

-std::pair<ResponsePtr, Undo> TestKeeperListRequest::process(TestKeeper::Container & container, int64_t) const
+std::pair<ResponsePtr, Undo> TestKeeperListRequest::process(TestKeeper::Container & container, int64_t zxid) const
 {
     ListResponse response;
+    response.zxid = zxid;

     auto it = container.find(path);
     if (it == container.end())
@@ -414,9 +420,10 @@ std::pair<ResponsePtr, Undo> TestKeeperListRequest::process(TestKeeper::Containe
     return { std::make_shared<ListResponse>(response), {} };
 }

-std::pair<ResponsePtr, Undo> TestKeeperCheckRequest::process(TestKeeper::Container & container, int64_t) const
+std::pair<ResponsePtr, Undo> TestKeeperCheckRequest::process(TestKeeper::Container & container, int64_t zxid) const
 {
     CheckResponse response;
+    response.zxid = zxid;
     auto it = container.find(path);
     if (it == container.end())
     {
@@ -434,10 +441,11 @@ std::pair<ResponsePtr, Undo> TestKeeperCheckRequest::process(TestKeeper::Contain
     return { std::make_shared<CheckResponse>(response), {} };
 }

-std::pair<ResponsePtr, Undo> TestKeeperSyncRequest::process(TestKeeper::Container & /*container*/, int64_t) const
+std::pair<ResponsePtr, Undo> TestKeeperSyncRequest::process(TestKeeper::Container & /*container*/, int64_t zxid) const
 {
     SyncResponse response;
     response.path = path;
+    response.zxid = zxid;

     return { std::make_shared<SyncResponse>(std::move(response)), {} };
 }
@@ -456,6 +464,7 @@ std::pair<ResponsePtr, Undo> TestKeeperReconfigRequest::process(TestKeeper::Cont
 std::pair<ResponsePtr, Undo> TestKeeperMultiRequest::process(TestKeeper::Container & container, int64_t zxid) const
 {
     MultiResponse response;
+    response.zxid = zxid;
     response.responses.reserve(requests.size());
     std::vector<Undo> undo_actions;
@@ -39,8 +39,8 @@ public:
     ~TestKeeper() override;

     bool isExpired() const override { return expired; }
+    bool hasReachedDeadline() const override { return false; }
     int64_t getSessionID() const override { return 0; }
-    Poco::Net::SocketAddress getConnectedAddress() const override { return connected_zk_address; }


     void create(
@@ -135,8 +135,6 @@ private:

     zkutil::ZooKeeperArgs args;

-    Poco::Net::SocketAddress connected_zk_address;
-
     std::mutex push_request_mutex;
     std::atomic<bool> expired{false};
@@ -112,31 +112,17 @@ void ZooKeeper::init(ZooKeeperArgs args_)
             throw KeeperException("Cannot use any of provided ZooKeeper nodes", Coordination::Error::ZCONNECTIONLOSS);
         }

-        impl = std::make_unique<Coordination::ZooKeeper>(nodes, args, zk_log);
+        impl = std::make_unique<Coordination::ZooKeeper>(nodes, args, zk_log, [this](size_t node_idx, const Coordination::ZooKeeper::Node & node)
+        {
+            connected_zk_host = node.address.host().toString();
+            connected_zk_port = node.address.port();
+            connected_zk_index = node_idx;
+        });

         if (args.chroot.empty())
             LOG_TRACE(log, "Initialized, hosts: {}", fmt::join(args.hosts, ","));
         else
             LOG_TRACE(log, "Initialized, hosts: {}, chroot: {}", fmt::join(args.hosts, ","), args.chroot);
-
-        Poco::Net::SocketAddress address = impl->getConnectedAddress();
-
-        connected_zk_host = address.host().toString();
-        connected_zk_port = address.port();
-
-        connected_zk_index = 0;
-
-        if (args.hosts.size() > 1)
-        {
-            for (size_t i = 0; i < args.hosts.size(); i++)
-            {
-                if (args.hosts[i] == address.toString())
-                {
-                    connected_zk_index = i;
-                    break;
-                }
-            }
-        }
     }
     else if (args.implementation == "testkeeper")
     {
@@ -521,6 +521,7 @@ public:
     void setZooKeeperLog(std::shared_ptr<DB::ZooKeeperLog> zk_log_);

     UInt32 getSessionUptime() const { return static_cast<UInt32>(session_uptime.elapsedSeconds()); }
+    bool hasReachedDeadline() const { return impl->hasReachedDeadline(); }

     void setServerCompletelyStarted();
@@ -204,6 +204,14 @@ void ZooKeeperArgs::initFromKeeperSection(const Poco::Util::AbstractConfiguratio
                 throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Unknown load balancing: {}", load_balancing_str);
             get_priority_load_balancing.load_balancing = *load_balancing;
         }
+        else if (key == "fallback_session_lifetime")
+        {
+            fallback_session_lifetime = SessionLifetimeConfiguration
+            {
+                .min_sec = config.getUInt(config_name + "." + key + ".min"),
+                .max_sec = config.getUInt(config_name + "." + key + ".max"),
+            };
+        }
         else
             throw KeeperException(std::string("Unknown key ") + key + " in config file", Coordination::Error::ZBADARGUMENTS);
     }
@@ -11,8 +11,17 @@ namespace Poco::Util
 namespace zkutil
 {

+constexpr UInt32 ZK_MIN_FALLBACK_SESSION_DEADLINE_SEC = 3 * 60 * 60;
+constexpr UInt32 ZK_MAX_FALLBACK_SESSION_DEADLINE_SEC = 6 * 60 * 60;
+
 struct ZooKeeperArgs
 {
+    struct SessionLifetimeConfiguration
+    {
+        UInt32 min_sec = ZK_MIN_FALLBACK_SESSION_DEADLINE_SEC;
+        UInt32 max_sec = ZK_MAX_FALLBACK_SESSION_DEADLINE_SEC;
+        bool operator == (const SessionLifetimeConfiguration &) const = default;
+    };
     ZooKeeperArgs(const Poco::Util::AbstractConfiguration & config, const String & config_name);

     /// hosts_string -- comma separated [secure://]host:port list
@@ -36,6 +45,7 @@ struct ZooKeeperArgs
     UInt64 send_sleep_ms = 0;
     UInt64 recv_sleep_ms = 0;

+    SessionLifetimeConfiguration fallback_session_lifetime = {};
     DB::GetPriorityForLoadBalancing get_priority_load_balancing;

 private:
@@ -642,6 +642,8 @@ void ZooKeeperMultiResponse::readImpl(ReadBuffer & in)

             if (op_error == Error::ZOK || op_num == OpNum::Error)
                 dynamic_cast<ZooKeeperResponse &>(*response).readImpl(in);
+
+            response->zxid = zxid;
         }

         /// Footer.
@@ -28,7 +28,6 @@ using LogElements = std::vector<ZooKeeperLogElement>;
 struct ZooKeeperResponse : virtual Response
 {
     XID xid = 0;
-    int64_t zxid = 0;

     UInt64 response_created_time_ns = 0;
@@ -313,8 +313,8 @@ ZooKeeper::~ZooKeeper()
 ZooKeeper::ZooKeeper(
     const Nodes & nodes,
     const zkutil::ZooKeeperArgs & args_,
-    std::shared_ptr<ZooKeeperLog> zk_log_)
-    : args(args_)
+    std::shared_ptr<ZooKeeperLog> zk_log_, std::optional<ConnectedCallback> && connected_callback_)
+    : args(args_), connected_callback(std::move(connected_callback_))
 {
     log = &Poco::Logger::get("ZooKeeperClient");
     std::atomic_store(&zk_log, std::move(zk_log_));
@@ -395,8 +395,9 @@ void ZooKeeper::connect(
     WriteBufferFromOwnString fail_reasons;
     for (size_t try_no = 0; try_no < num_tries; ++try_no)
     {
-        for (const auto & node : nodes)
+        for (size_t i = 0; i < nodes.size(); ++i)
         {
+            const auto & node = nodes[i];
             try
             {
                 /// Reset the state of previous attempt.
@@ -443,9 +444,25 @@ void ZooKeeper::connect(
                 e.addMessage("while receiving handshake from ZooKeeper");
                 throw;
             }

             connected = true;
-            connected_zk_address = node.address;
+
+            if (connected_callback.has_value())
+                (*connected_callback)(i, node);
+
+            if (i != 0)
+            {
+                std::uniform_int_distribution<UInt32> fallback_session_lifetime_distribution
+                {
+                    args.fallback_session_lifetime.min_sec,
+                    args.fallback_session_lifetime.max_sec,
+                };
+                UInt32 session_lifetime_seconds = fallback_session_lifetime_distribution(thread_local_rng);
+                client_session_deadline = clock::now() + std::chrono::seconds(session_lifetime_seconds);
+
+                LOG_DEBUG(log, "Connected to a suboptimal ZooKeeper host ({}, index {})."
+                    " To preserve balance in ZooKeeper usage, this ZooKeeper session will expire in {} seconds",
+                    node.address.toString(), i, session_lifetime_seconds);
+            }

             break;
         }
@@ -462,7 +479,6 @@ void ZooKeeper::connect(
     if (!connected)
     {
         WriteBufferFromOwnString message;
-        connected_zk_address = Poco::Net::SocketAddress();

         message << "All connection tries failed while connecting to ZooKeeper. nodes: ";
         bool first = true;
@@ -1060,6 +1076,7 @@ void ZooKeeper::pushRequest(RequestInfo && info)
 {
     try
     {
+        checkSessionDeadline();
         info.time = clock::now();
         if (zk_log)
         {
@@ -1482,6 +1499,17 @@ void ZooKeeper::setupFaultDistributions()
     inject_setup.test_and_set();
 }

+void ZooKeeper::checkSessionDeadline() const
+{
+    if (unlikely(hasReachedDeadline()))
+        throw Exception(Error::ZSESSIONEXPIRED, "Session expired (force expiry client-side)");
+}
+
+bool ZooKeeper::hasReachedDeadline() const
+{
+    return client_session_deadline.has_value() && clock::now() >= client_session_deadline.value();
+}
+
 void ZooKeeper::maybeInjectSendFault()
 {
     if (unlikely(inject_setup.test() && send_inject_fault && send_inject_fault.value()(thread_local_rng)))
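The suboptimal-host branch above gives the session a randomized lifetime so that clients drift back to the preferred host at staggered times rather than reconnecting all at once. The core of that trick in isolation (a standalone sketch; function and parameter names are illustrative):

    #include <chrono>
    #include <cstdint>
    #include <random>

    // Pick a deadline uniformly inside the configured [min_sec, max_sec] window.
    std::chrono::steady_clock::time_point pickFallbackDeadline(
        std::mt19937 & rng, uint32_t min_sec, uint32_t max_sec)
    {
        std::uniform_int_distribution<uint32_t> lifetime{min_sec, max_sec};
        return std::chrono::steady_clock::now() + std::chrono::seconds(lifetime(rng));
    }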
@@ -107,6 +107,7 @@ public:
     };

     using Nodes = std::vector<Node>;
+    using ConnectedCallback = std::function<void(size_t, const Node&)>;

     /** Connection to nodes is performed in order. If you want, shuffle them manually.
      * Operation timeout couldn't be greater than session timeout.
@@ -115,7 +116,8 @@ public:
     ZooKeeper(
         const Nodes & nodes,
         const zkutil::ZooKeeperArgs & args_,
-        std::shared_ptr<ZooKeeperLog> zk_log_);
+        std::shared_ptr<ZooKeeperLog> zk_log_,
+        std::optional<ConnectedCallback> && connected_callback_ = {});

     ~ZooKeeper() override;

@@ -123,11 +125,13 @@ public:
     /// If expired, you can only destroy the object. All other methods will throw exception.
     bool isExpired() const override { return requests_queue.isFinished(); }

+    /// A ZooKeeper session can have an optional deadline set on it.
+    /// After it has been reached, the session needs to be finalized.
+    bool hasReachedDeadline() const override;
+
     /// Useful to check owner of ephemeral node.
     int64_t getSessionID() const override { return session_id; }

-    Poco::Net::SocketAddress getConnectedAddress() const override { return connected_zk_address; }
-
     void executeGenericRequest(
         const ZooKeeperRequestPtr & request,
         ResponseCallback callback);
@@ -213,9 +217,9 @@ public:

 private:
     ACLs default_acls;
-    Poco::Net::SocketAddress connected_zk_address;

     zkutil::ZooKeeperArgs args;
+    std::optional<ConnectedCallback> connected_callback = {};

     /// Fault injection
     void maybeInjectSendFault();
@@ -252,6 +256,7 @@ private:
         clock::time_point time;
     };

+    std::optional<clock::time_point> client_session_deadline {};
     using RequestsQueue = ConcurrentBoundedQueue<RequestInfo>;

     RequestsQueue requests_queue{1024};
@@ -324,6 +329,8 @@ private:

     void initFeatureFlags();

+    void checkSessionDeadline() const;
+
     CurrentMetrics::Increment active_session_metric_increment{CurrentMetrics::ZooKeeperSession};
     std::shared_ptr<ZooKeeperLog> zk_log;
@@ -153,7 +153,10 @@ Pool::Entry Pool::get(uint64_t wait_timeout)
         for (auto & connection : connections)
         {
             if (connection->ref_count == 0)
+            {
+                logger.test("Found free connection in pool, returning it to the caller");
                 return Entry(connection, this);
+            }
         }

         logger.trace("(%s): Trying to allocate a new connection.", getDescription());
@@ -26,7 +26,7 @@ namespace mysqlxx
  *
  *    void thread()
  *    {
  *        mysqlxx::Pool::Entry connection = pool.Get();
  *        std::string s = connection->query("SELECT 'Hello, world!' AS world").use().fetch()["world"].getString();
  *    }
  * TODO: simplify with PoolBase.
@ -320,8 +320,6 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
|
|||||||
request_info.time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
|
request_info.time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
|
||||||
request_info.session_id = session_id;
|
request_info.session_id = session_id;
|
||||||
|
|
||||||
std::lock_guard lock(push_request_mutex);
|
|
||||||
|
|
||||||
if (shutdown_called)
|
if (shutdown_called)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
@ -423,13 +421,10 @@ void KeeperDispatcher::shutdown()
|
|||||||
try
|
try
|
||||||
{
|
{
|
||||||
{
|
{
|
||||||
std::lock_guard lock(push_request_mutex);
|
if (shutdown_called.exchange(true))
|
||||||
|
|
||||||
if (shutdown_called)
|
|
||||||
return;
|
return;
|
||||||
|
|
||||||
LOG_DEBUG(log, "Shutting down storage dispatcher");
|
LOG_DEBUG(log, "Shutting down storage dispatcher");
|
||||||
shutdown_called = true;
|
|
||||||
|
|
||||||
if (session_cleaner_thread.joinable())
|
if (session_cleaner_thread.joinable())
|
||||||
session_cleaner_thread.join();
|
session_cleaner_thread.join();
|
||||||
@ -582,12 +577,9 @@ void KeeperDispatcher::sessionCleanerTask()
|
|||||||
.time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count(),
|
.time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count(),
|
||||||
.request = std::move(request),
|
.request = std::move(request),
|
||||||
};
|
};
|
||||||
{
|
if (!requests_queue->push(std::move(request_info)))
|
||||||
std::lock_guard lock(push_request_mutex);
|
LOG_INFO(log, "Cannot push close request to queue while cleaning outdated sessions");
|
||||||
if (!requests_queue->push(std::move(request_info)))
|
CurrentMetrics::add(CurrentMetrics::KeeperOutstandingRequets);
|
||||||
LOG_INFO(log, "Cannot push close request to queue while cleaning outdated sessions");
|
|
||||||
CurrentMetrics::add(CurrentMetrics::KeeperOutstandingRequets);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Remove session from registered sessions
|
/// Remove session from registered sessions
|
||||||
finishSession(dead_session);
|
finishSession(dead_session);
|
||||||
@ -607,6 +599,10 @@ void KeeperDispatcher::sessionCleanerTask()
|
|||||||
|
|
||||||
void KeeperDispatcher::finishSession(int64_t session_id)
|
void KeeperDispatcher::finishSession(int64_t session_id)
|
||||||
{
|
{
|
||||||
|
/// shutdown() method will cleanup sessions if needed
|
||||||
|
if (shutdown_called)
|
||||||
|
return;
|
||||||
|
|
||||||
{
|
{
|
||||||
std::lock_guard lock(session_to_response_callback_mutex);
|
std::lock_guard lock(session_to_response_callback_mutex);
|
||||||
auto session_it = session_to_response_callback.find(session_id);
|
auto session_it = session_to_response_callback.find(session_id);
|
||||||
@ -698,12 +694,9 @@ int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
     }
 
     /// Push new session request to queue
-    {
-        std::lock_guard lock(push_request_mutex);
-        if (!requests_queue->tryPush(std::move(request_info), session_timeout_ms))
-            throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Cannot push session id request to queue within session timeout");
-        CurrentMetrics::add(CurrentMetrics::KeeperOutstandingRequets);
-    }
+    if (!requests_queue->tryPush(std::move(request_info), session_timeout_ms))
+        throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Cannot push session id request to queue within session timeout");
+    CurrentMetrics::add(CurrentMetrics::KeeperOutstandingRequets);
 
     if (future.wait_for(std::chrono::milliseconds(session_timeout_ms)) != std::future_status::ready)
         throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Cannot receive session id within session timeout");
@ -871,10 +864,7 @@ uint64_t KeeperDispatcher::getSnapDirSize() const
 Keeper4LWInfo KeeperDispatcher::getKeeper4LWInfo() const
 {
     Keeper4LWInfo result = server->getPartiallyFilled4LWInfo();
-    {
-        std::lock_guard lock(push_request_mutex);
-        result.outstanding_requests_count = requests_queue->size();
-    }
+    result.outstanding_requests_count = requests_queue->size();
     {
         std::lock_guard lock(session_to_response_callback_mutex);
         result.alive_connections_count = session_to_response_callback.size();
@ -27,8 +27,6 @@ using ZooKeeperResponseCallback = std::function<void(const Coordination::ZooKeep
 class KeeperDispatcher
 {
 private:
-    mutable std::mutex push_request_mutex;
-
     using RequestsQueue = ConcurrentBoundedQueue<KeeperStorage::RequestForSession>;
     using SessionToResponseCallback = std::unordered_map<int64_t, ZooKeeperResponseCallback>;
    using ClusterUpdateQueue = ConcurrentBoundedQueue<ClusterUpdateAction>;
@ -794,8 +794,14 @@ bool KeeperServer::applyConfigUpdate(const ClusterUpdateAction & action)
     std::lock_guard _{server_write_mutex};
 
     if (const auto * add = std::get_if<AddRaftServer>(&action))
-        return raft_instance->get_srv_config(add->id) != nullptr
-            || raft_instance->add_srv(static_cast<nuraft::srv_config>(*add))->get_accepted();
+    {
+        if (raft_instance->get_srv_config(add->id) != nullptr)
+            return true;
+
+        auto resp = raft_instance->add_srv(static_cast<nuraft::srv_config>(*add));
+        resp->get();
+        return resp->get_accepted();
+    }
     else if (const auto * remove = std::get_if<RemoveRaftServer>(&action))
     {
         if (remove->id == raft_instance->get_leader())
@ -807,8 +813,12 @@ bool KeeperServer::applyConfigUpdate(const ClusterUpdateAction & action)
             return false;
         }
 
-        return raft_instance->get_srv_config(remove->id) == nullptr
-            || raft_instance->remove_srv(remove->id)->get_accepted();
+        if (raft_instance->get_srv_config(remove->id) == nullptr)
+            return true;
+
+        auto resp = raft_instance->remove_srv(remove->id);
+        resp->get();
+        return resp->get_accepted();
     }
     else if (const auto * update = std::get_if<UpdateRaftServerPriority>(&action))
     {
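In both branches the result handle is now waited on (`resp->get()`) before `get_accepted()` is read; presumably `add_srv`/`remove_srv` return an asynchronous result whose accepted flag is only meaningful after the command completes. A standalone sketch of this wait-then-check pattern using `std::future` (illustrative, not the NuRaft API):

#include <future>
#include <iostream>
#include <thread>

// Wait-then-check sketch: block until the async command has actually been
// processed, then read its acceptance flag. Mirrors calling resp->get()
// before resp->get_accepted() in the hunks above.
struct CommandResult
{
    std::shared_future<void> done;
    bool accepted = false;

    void get() { done.get(); }              // wait for completion
    bool get_accepted() const { return accepted; }
};

int main()
{
    std::promise<void> p;
    CommandResult resp{p.get_future().share(), /*accepted=*/false};

    std::thread worker([&] {
        resp.accepted = true; // decided while the command executes
        p.set_value();        // publishes `accepted` to the waiter
    });

    resp.get();                               // wait first...
    std::cout << resp.get_accepted() << "\n"; // ...then the flag is meaningful
    worker.join();
}
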
@ -83,8 +83,12 @@ namespace DB
     M(UInt64, background_schedule_pool_size, 128, "The maximum number of threads that will be used for constantly executing some lightweight periodic operations.", 0) \
     M(UInt64, background_message_broker_schedule_pool_size, 16, "The maximum number of threads that will be used for executing background operations for message streaming.", 0) \
     M(UInt64, background_distributed_schedule_pool_size, 16, "The maximum number of threads that will be used for executing distributed sends.", 0) \
-    M(Bool, display_secrets_in_show_and_select, false, "Allow showing secrets in SHOW and SELECT queries via a format setting and a grant", 0)
+    M(Bool, display_secrets_in_show_and_select, false, "Allow showing secrets in SHOW and SELECT queries via a format setting and a grant", 0) \
+    \
+    M(UInt64, total_memory_profiler_step, 0, "Whenever server memory usage becomes larger than every next step in number of bytes the memory profiler will collect the allocating stack trace. Zero means disabled memory profiler. Values lower than a few megabytes will slow down server.", 0) \
+    M(Double, total_memory_tracker_sample_probability, 0, "Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation (can be changed with `memory_profiler_sample_min_allocation_size` and `memory_profiler_sample_max_allocation_size`). Note that sampling happens only when the amount of untracked memory exceeds 'max_untracked_memory'. You may want to set 'max_untracked_memory' to 0 for extra fine grained sampling.", 0) \
+    M(UInt64, total_memory_profiler_sample_min_allocation_size, 0, "Collect random allocations of size greater or equal than specified value with probability equal to `total_memory_profiler_sample_probability`. 0 means disabled. You may want to set 'max_untracked_memory' to 0 to make this threshold to work as expected.", 0) \
+    M(UInt64, total_memory_profiler_sample_max_allocation_size, 0, "Collect random allocations of size less or equal than specified value with probability equal to `total_memory_profiler_sample_probability`. 0 means disabled. You may want to set 'max_untracked_memory' to 0 to make this threshold to work as expected.", 0)
 
 DECLARE_SETTINGS_TRAITS(ServerSettingsTraits, SERVER_SETTINGS)
 
@ -386,6 +386,8 @@ class IColumn;
     M(UInt64, max_temporary_columns, 0, "If a query generates more than the specified number of temporary columns in memory as a result of intermediate calculation, exception is thrown. Zero value means unlimited. This setting is useful to prevent too complex queries.", 0) \
     M(UInt64, max_temporary_non_const_columns, 0, "Similar to the 'max_temporary_columns' setting but applies only to non-constant columns. This makes sense, because constant columns are cheap and it is reasonable to allow more of them.", 0) \
     \
+    M(UInt64, max_sessions_for_user, 0, "Maximum number of simultaneous sessions for a user.", 0) \
+    \
     M(UInt64, max_subquery_depth, 100, "If a query has more than specified number of nested subqueries, throw an exception. This allows you to have a sanity check to protect the users of your cluster from going insane with their queries.", 0) \
     M(UInt64, max_analyze_depth, 5000, "Maximum number of analyses performed by interpreter.", 0) \
     M(UInt64, max_ast_depth, 1000, "Maximum depth of query syntax tree. Checked after parsing.", 0) \
@ -427,7 +429,9 @@ class IColumn;
     M(UInt64, memory_overcommit_ratio_denominator_for_user, 1_GiB, "It represents soft memory limit on the global level. This value is used to compute query overcommit ratio.", 0) \
     M(UInt64, max_untracked_memory, (4 * 1024 * 1024), "Small allocations and deallocations are grouped in thread local variable and tracked or profiled only when amount (in absolute value) becomes larger than specified value. If the value is higher than 'memory_profiler_step' it will be effectively lowered to 'memory_profiler_step'.", 0) \
     M(UInt64, memory_profiler_step, (4 * 1024 * 1024), "Whenever query memory usage becomes larger than every next step in number of bytes the memory profiler will collect the allocating stack trace. Zero means disabled memory profiler. Values lower than a few megabytes will slow down query processing.", 0) \
-    M(Float, memory_profiler_sample_probability, 0., "Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation. Note that sampling happens only when the amount of untracked memory exceeds 'max_untracked_memory'. You may want to set 'max_untracked_memory' to 0 for extra fine grained sampling.", 0) \
+    M(Float, memory_profiler_sample_probability, 0., "Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation (can be changed with `memory_profiler_sample_min_allocation_size` and `memory_profiler_sample_max_allocation_size`). Note that sampling happens only when the amount of untracked memory exceeds 'max_untracked_memory'. You may want to set 'max_untracked_memory' to 0 for extra fine grained sampling.", 0) \
+    M(UInt64, memory_profiler_sample_min_allocation_size, 0, "Collect random allocations of size greater or equal than specified value with probability equal to `memory_profiler_sample_probability`. 0 means disabled. You may want to set 'max_untracked_memory' to 0 to make this threshold to work as expected.", 0) \
+    M(UInt64, memory_profiler_sample_max_allocation_size, 0, "Collect random allocations of size less or equal than specified value with probability equal to `memory_profiler_sample_probability`. 0 means disabled. You may want to set 'max_untracked_memory' to 0 to make this threshold to work as expected.", 0) \
     M(Bool, trace_profile_events, false, "Send to system.trace_log profile event and value of increment on each increment with 'ProfileEvent' trace_type", 0) \
     \
     M(UInt64, memory_usage_overcommit_max_wait_microseconds, 5'000'000, "Maximum time thread will wait for memory to be freed in the case of memory overcommit. If timeout is reached and memory is not freed, exception is thrown.", 0) \
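Going by the setting descriptions above, the new min/max allocation-size settings bound which allocations are eligible for sampling. A sketch of that gating logic as the descriptions read (my reading, not the actual MemoryTracker code):

#include <cstdint>
#include <random>

// An allocation is sampled with probability `sample_probability` only if
// its size falls within [min_size, max_size]; a threshold of 0 disables
// that bound, matching the "0 means disabled" wording of the settings.
bool shouldSampleAllocation(
    uint64_t size,
    double sample_probability,
    uint64_t min_size,
    uint64_t max_size,
    std::mt19937_64 & rng)
{
    if (sample_probability <= 0.0)
        return false;
    if (min_size && size < min_size)
        return false;
    if (max_size && size > max_size)
        return false;
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    return dist(rng) < sample_probability;
}
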
@ -622,7 +626,7 @@ class IColumn;
     M(Bool, engine_file_allow_create_multiple_files, false, "Enables or disables creating a new file on each insert in file engine tables if format has suffix.", 0) \
     M(Bool, engine_file_skip_empty_files, false, "Allows to skip empty files in file table engine", 0) \
     M(Bool, engine_url_skip_empty_files, false, "Allows to skip empty files in url table engine", 0) \
-    M(Bool, disable_url_encoding, false, " Allows to disable decoding/encoding path in uri in URL table engine", 0) \
+    M(Bool, enable_url_encoding, true, " Allows to enable/disable decoding/encoding path in uri in URL table engine", 0) \
     M(Bool, allow_experimental_database_replicated, false, "Allow to create databases with Replicated engine", 0) \
     M(UInt64, database_replicated_initial_query_timeout_sec, 300, "How long initial DDL query should wait for Replicated database to precess previous DDL queue entries", 0) \
     M(Bool, database_replicated_enforce_synchronous_settings, false, "Enforces synchronous waiting for some queries (see also database_atomic_wait_for_drop_and_detach_synchronously, mutation_sync, alter_sync). Not recommended to enable these settings.", 0) \
@ -1007,6 +1011,10 @@ class IColumn;
     \
     M(CapnProtoEnumComparingMode, format_capn_proto_enum_comparising_mode, FormatSettings::CapnProtoEnumComparingMode::BY_VALUES, "How to map ClickHouse Enum and CapnProto Enum", 0) \
     \
+    M(Bool, format_capn_proto_use_autogenerated_schema, true, "Use autogenerated CapnProto schema when format_schema is not set", 0) \
+    M(Bool, format_protobuf_use_autogenerated_schema, true, "Use autogenerated Protobuf when format_schema is not set", 0) \
+    M(String, output_format_schema, "", "The path to the file where the automatically generated schema will be saved", 0) \
+    \
     M(String, input_format_mysql_dump_table_name, "", "Name of the table in MySQL dump from which to read data", 0) \
     M(Bool, input_format_mysql_dump_map_column_names, true, "Match columns from table in MySQL dump and columns from ClickHouse table by names", 0) \
     \
@ -37,6 +37,7 @@ public:
 
     bool canBeInsideNullable() const override { return false; }
     bool supportsSparseSerialization() const override { return true; }
+    bool canBeInsideSparseColumns() const override { return false; }
 
     MutableColumnPtr createColumn() const override;
     MutableColumnPtr createColumn(const ISerialization & serialization) const override;
@ -110,6 +110,7 @@ public:
 
     /// TODO: support more types.
     virtual bool supportsSparseSerialization() const { return !haveSubtypes(); }
+    virtual bool canBeInsideSparseColumns() const { return supportsSparseSerialization(); }
 
     SerializationPtr getDefaultSerialization() const;
     SerializationPtr getSparseSerialization() const;
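The new `canBeInsideSparseColumns()` defaults to `supportsSparseSerialization()`, so existing types keep their behaviour, while the earlier hunk overrides it to `false` for a type that supports sparse serialization but must not be stored inside sparse columns. A minimal sketch of this default-delegation pattern:

#include <iostream>

// Two-knob pattern: the new virtual delegates to the old one by default,
// so a single type can opt out of being placed inside sparse columns
// without giving up sparse serialization itself.
struct IDataTypeLike
{
    virtual ~IDataTypeLike() = default;
    virtual bool supportsSparseSerialization() const { return true; }
    virtual bool canBeInsideSparseColumns() const { return supportsSparseSerialization(); }
};

struct SpecialType : IDataTypeLike
{
    bool supportsSparseSerialization() const override { return true; }
    bool canBeInsideSparseColumns() const override { return false; } // opt out
};

int main()
{
    SpecialType t;
    std::cout << t.supportsSparseSerialization() << " "
              << t.canBeInsideSparseColumns() << "\n"; // prints "1 0"
}
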
@ -65,6 +65,7 @@ void DatabaseMaterializedMySQL::setException(const std::exception_ptr & exceptio
 
 void DatabaseMaterializedMySQL::startupTables(ThreadPool & thread_pool, LoadingStrictnessLevel mode)
 {
+    LOG_TRACE(log, "Starting MaterializeMySQL tables");
     DatabaseAtomic::startupTables(thread_pool, mode);
 
     if (mode < LoadingStrictnessLevel::FORCE_ATTACH)
@ -122,6 +123,7 @@ void DatabaseMaterializedMySQL::alterTable(ContextPtr context_, const StorageID
 
 void DatabaseMaterializedMySQL::drop(ContextPtr context_)
 {
+    LOG_TRACE(log, "Dropping MaterializeMySQL database");
     /// Remove metadata info
     fs::path metadata(getMetadataPath() + "/.metadata");
 
@ -11,6 +11,7 @@
 #include <Databases/DatabaseAtomic.h>
 #include <Databases/MySQL/MaterializedMySQLSettings.h>
 #include <Databases/MySQL/MaterializedMySQLSyncThread.h>
+#include <Common/logger_useful.h>
 
 namespace DB
 {
@ -1,3 +1,4 @@
+#include "Common/logger_useful.h"
 #include "config.h"
 
 #if USE_MYSQL
@ -499,7 +500,10 @@ bool MaterializedMySQLSyncThread::prepareSynchronized(MaterializeMetadata & meta
         {
             throw;
         }
-        catch (const mysqlxx::ConnectionFailed &) {}
+        catch (const mysqlxx::ConnectionFailed & ex)
+        {
+            LOG_TRACE(log, "Connection to MySQL failed {}", ex.displayText());
+        }
         catch (const mysqlxx::BadQuery & e)
         {
             // Lost connection to MySQL server during query
@ -17,13 +17,13 @@ namespace ErrorCodes
     extern const int UNKNOWN_ELEMENT_IN_CONFIG;
 }
 
-void DictionaryFactory::registerLayout(const std::string & layout_type, LayoutCreateFunction create_layout, bool is_layout_complex)
+void DictionaryFactory::registerLayout(const std::string & layout_type, LayoutCreateFunction create_layout, bool is_layout_complex, bool has_layout_complex)
 {
     auto it = registered_layouts.find(layout_type);
     if (it != registered_layouts.end())
         throw Exception(ErrorCodes::LOGICAL_ERROR, "DictionaryFactory: the layout name '{}' is not unique", layout_type);
 
-    RegisteredLayout layout { .layout_create_function = create_layout, .is_layout_complex = is_layout_complex };
+    RegisteredLayout layout { .layout_create_function = create_layout, .is_layout_complex = is_layout_complex, .has_layout_complex = has_layout_complex };
     registered_layouts.emplace(layout_type, std::move(layout));
 }
 
@ -89,6 +89,25 @@ bool DictionaryFactory::isComplex(const std::string & layout_type) const
     return it->second.is_layout_complex;
 }
 
+bool DictionaryFactory::convertToComplex(std::string & layout_type) const
+{
+    auto it = registered_layouts.find(layout_type);
+
+    if (it == registered_layouts.end())
+    {
+        throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG,
+                        "Unknown dictionary layout type: {}",
+                        layout_type);
+    }
+
+    if (!it->second.is_layout_complex && it->second.has_layout_complex)
+    {
+        layout_type = "complex_key_" + layout_type;
+        return true;
+    }
+    return false;
+}
+
 
 DictionaryFactory & DictionaryFactory::instance()
 {
@ -55,13 +55,18 @@ public:
 
     bool isComplex(const std::string & layout_type) const;
 
-    void registerLayout(const std::string & layout_type, LayoutCreateFunction create_layout, bool is_layout_complex);
+    /// If the argument `layout_type` is not complex layout and has corresponding complex layout,
+    /// change `layout_type` to corresponding complex and return true; otherwise do nothing and return false.
+    bool convertToComplex(std::string & layout_type) const;
+
+    void registerLayout(const std::string & layout_type, LayoutCreateFunction create_layout, bool is_layout_complex, bool has_layout_complex = true);
 
 private:
     struct RegisteredLayout
     {
         LayoutCreateFunction layout_create_function;
         bool is_layout_complex;
+        bool has_layout_complex;
     };
 
     using LayoutRegistry = std::unordered_map<std::string, RegisteredLayout>;
@ -683,7 +683,7 @@ void registerDictionaryFlat(DictionaryFactory & factory)
         return std::make_unique<FlatDictionary>(dict_id, dict_struct, std::move(source_ptr), configuration);
     };
 
-    factory.registerLayout("flat", create_layout, false);
+    factory.registerLayout("flat", create_layout, false, false);
 }
 
 
@ -19,6 +19,7 @@
 #include <Functions/FunctionFactory.h>
 #include <Common/isLocalAddress.h>
 #include <Interpreters/Context.h>
+#include <DataTypes/DataTypeFactory.h>
 
 
 namespace DB
@ -614,6 +615,16 @@ getDictionaryConfigurationFromAST(const ASTCreateQuery & query, ContextPtr conte
 
     checkPrimaryKey(all_attr_names_and_types, pk_attrs);
 
+    /// If the pk size is 1 and pk's DataType is not number, we should convert to complex.
+    /// NOTE: the data type of Numeric key(simple layout) is UInt64, so if the type is not under UInt64, type casting will lead to precision loss.
+    DataTypePtr first_key_type = DataTypeFactory::instance().get(all_attr_names_and_types.find(pk_attrs[0])->second.type);
+    if ((pk_attrs.size() > 1 || (pk_attrs.size() == 1 && !isNumber(first_key_type)))
+        && !complex
+        && DictionaryFactory::instance().convertToComplex(dictionary_layout->layout_type))
+    {
+        complex = true;
+    }
+
     buildPrimaryKeyConfiguration(xml_document, structure_element, complex, pk_attrs, query.dictionary_attributes_list);
 
     buildLayoutConfiguration(xml_document, current_dictionary, query.dictionary->dict_settings, dictionary_layout);
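Taken together with the `DictionaryFactory` hunks, this auto-upgrades a simple layout to its `complex_key_*` counterpart when the primary key is composite or non-numeric; `flat` is registered with `has_layout_complex = false` because it has no complex counterpart. A standalone sketch of the conversion rule (names mirror the diff; the registry contents below are illustrative):

#include <iostream>
#include <stdexcept>
#include <string>
#include <unordered_map>

// A simple layout with a registered complex counterpart is renamed to
// "complex_key_<layout>"; layouts like "flat" are left untouched.
struct RegisteredLayout
{
    bool is_layout_complex;
    bool has_layout_complex;
};

std::unordered_map<std::string, RegisteredLayout> registered_layouts = {
    {"flat", {false, false}},
    {"hashed", {false, true}},
    {"complex_key_hashed", {true, true}},
};

bool convertToComplex(std::string & layout_type)
{
    auto it = registered_layouts.find(layout_type);
    if (it == registered_layouts.end())
        throw std::runtime_error("Unknown dictionary layout type: " + layout_type);

    if (!it->second.is_layout_complex && it->second.has_layout_complex)
    {
        layout_type = "complex_key_" + layout_type;
        return true;
    }
    return false;
}

int main()
{
    std::string a = "hashed", b = "flat";
    std::cout << convertToComplex(a) << " " << a << "\n"; // 1 complex_key_hashed
    std::cout << convertToComplex(b) << " " << b << "\n"; // 0 flat
}
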
@ -14,7 +14,7 @@ namespace ErrorCodes
 }
 
 WriteBufferFromTemporaryFile::WriteBufferFromTemporaryFile(TemporaryFileOnDiskHolder && tmp_file_)
-    : WriteBufferFromFile(tmp_file_->getPath(), DBMS_DEFAULT_BUFFER_SIZE, O_RDWR | O_TRUNC | O_CREAT, /* throttler= */ {}, 0600)
+    : WriteBufferFromFile(tmp_file_->getAbsolutePath(), DBMS_DEFAULT_BUFFER_SIZE, O_RDWR | O_TRUNC | O_CREAT, /* throttler= */ {}, 0600)
     , tmp_file(std::move(tmp_file_))
 {
 }
@ -3,6 +3,7 @@
 
 #include <IO/ReadBufferFromString.h>
 #include <IO/ReadBufferFromFile.h>
+#include <IO/ReadBufferFromEmptyFile.h>
 #include <IO/ReadHelpers.h>
 #include <IO/WriteBufferFromFile.h>
 #include <IO/WriteHelpers.h>
@ -485,8 +486,15 @@ std::unique_ptr<ReadBufferFromFileBase> DiskObjectStorage::readFile(
     std::optional<size_t> read_hint,
     std::optional<size_t> file_size) const
 {
+    auto storage_objects = metadata_storage->getStorageObjects(path);
+
+    const bool file_can_be_empty = !file_size.has_value() || *file_size == 0;
+
+    if (storage_objects.empty() && file_can_be_empty)
+        return std::make_unique<ReadBufferFromEmptyFile>();
+
     return object_storage->readObjects(
-        metadata_storage->getStorageObjects(path),
+        storage_objects,
         object_storage->getAdjustedSettingsFromMetadataFile(settings, path),
         read_hint,
         file_size);
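`readFile` now fetches the storage objects once and short-circuits to an empty read buffer when the metadata resolves to no objects and the file may legitimately be empty. A simplified sketch of the guard in isolation (types reduced; not the real `DiskObjectStorage` API):

#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include <vector>

// If metadata resolves to zero storage objects and the caller either gave
// no size hint or expects an empty file, return an "empty file" reader
// instead of asking the object storage to read nothing.
struct ReadBuffer { virtual ~ReadBuffer() = default; };
struct ReadBufferFromEmptyFile : ReadBuffer {};
struct ReadBufferFromObjects : ReadBuffer
{
    explicit ReadBufferFromObjects(std::vector<std::string> objects_) : objects(std::move(objects_)) {}
    std::vector<std::string> objects;
};

std::unique_ptr<ReadBuffer> readFile(
    const std::vector<std::string> & storage_objects,
    std::optional<size_t> file_size)
{
    const bool file_can_be_empty = !file_size.has_value() || *file_size == 0;

    if (storage_objects.empty() && file_can_be_empty)
        return std::make_unique<ReadBufferFromEmptyFile>();

    return std::make_unique<ReadBufferFromObjects>(storage_objects);
}
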
@ -135,7 +135,7 @@ private:
         return result;
     }
 
-    throw Exception(ErrorCodes::S3_ERROR, "Could not list objects in bucket {} with prefix {}, S3 exception: {}, message: {}",
+    throw S3Exception(outcome.GetError().GetErrorType(), "Could not list objects in bucket {} with prefix {}, S3 exception: {}, message: {}",
         quoteString(request.GetBucket()), quoteString(request.GetPrefix()),
         backQuote(outcome.GetError().GetExceptionName()), quoteString(outcome.GetError().GetMessage()));
 }
@ -54,7 +54,7 @@ TemporaryFileOnDisk::TemporaryFileOnDisk(const DiskPtr & disk_, const String & p
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file name is empty");
     }
 
-String TemporaryFileOnDisk::getPath() const
+String TemporaryFileOnDisk::getAbsolutePath() const
 {
     return std::filesystem::path(disk->getPath()) / relative_path;
 }
Some files were not shown because too many files have changed in this diff.