Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-29 19:12:03 +00:00)

Commit 771710b377: Merge branch 'master' into concurrency-control-controllable

.github/workflows/backport_branches.yml (vendored, 3 changes)
@@ -3,6 +3,9 @@ name: BackportPR
 env:
   # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
+  # Export system tables to ClickHouse Cloud
+  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
+  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

 on:  # yamllint disable-line rule:truthy
   push:
.github/workflows/master.yml (vendored, 3 changes)
@@ -3,6 +3,9 @@ name: MasterCI
 env:
   # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
+  # Export system tables to ClickHouse Cloud
+  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
+  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

 on:  # yamllint disable-line rule:truthy
   push:
.github/workflows/pull_request.yml (vendored, 3 changes)
@@ -3,6 +3,9 @@ name: PullRequestCI
 env:
   # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
+  # Export system tables to ClickHouse Cloud
+  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
+  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

 on:  # yamllint disable-line rule:truthy
   pull_request:
.github/workflows/release_branches.yml (vendored, 3 changes)
@@ -3,6 +3,9 @@ name: ReleaseBranchCI
 env:
   # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
+  # Export system tables to ClickHouse Cloud
+  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
+  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

 on:  # yamllint disable-line rule:truthy
   push:
@@ -52,7 +52,6 @@
 * Add new setting `disable_url_encoding` that allows to disable decoding/encoding path in uri in URL engine. [#52337](https://github.com/ClickHouse/ClickHouse/pull/52337) ([Kruglov Pavel](https://github.com/Avogar)).

 #### Performance Improvement
-* Writing parquet files is 10x faster, it's multi-threaded now. Almost the same speed as reading. [#49367](https://github.com/ClickHouse/ClickHouse/pull/49367) ([Michael Kolupaev](https://github.com/al13n321)).
 * Enable automatic selection of the sparse serialization format by default. It improves performance. The format is supported since version 22.1. After this change, downgrading to versions older than 22.1 might not be possible. You can turn off the usage of the sparse serialization format by providing the `ratio_of_defaults_for_sparse_serialization = 1` setting for your MergeTree tables. [#49631](https://github.com/ClickHouse/ClickHouse/pull/49631) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Enable `move_all_conditions_to_prewhere` and `enable_multiple_prewhere_read_steps` settings by default. [#46365](https://github.com/ClickHouse/ClickHouse/pull/46365) ([Alexander Gololobov](https://github.com/davenger)).
 * Improves performance of some queries by tuning allocator. [#46416](https://github.com/ClickHouse/ClickHouse/pull/46416) ([Azat Khuzhin](https://github.com/azat)).

@@ -114,6 +113,7 @@
 * Now interserver port will be closed only after tables are shut down. [#52498](https://github.com/ClickHouse/ClickHouse/pull/52498) ([alesapin](https://github.com/alesapin)).

 #### Experimental Feature
+* Writing parquet files is 10x faster, it's multi-threaded now. Almost the same speed as reading. [#49367](https://github.com/ClickHouse/ClickHouse/pull/49367) ([Michael Kolupaev](https://github.com/al13n321)). This is controlled by the setting `output_format_parquet_use_custom_encoder` which is disabled by default, because the feature is non-ideal.
 * Added support for [PRQL](https://prql-lang.org/) as a query language. [#50686](https://github.com/ClickHouse/ClickHouse/pull/50686) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
 * Allow to add disk name for custom disks. Previously custom disks would use an internal generated disk name. Now it will be possible with `disk = disk_<name>(...)` (e.g. disk will have name `name`) . [#51552](https://github.com/ClickHouse/ClickHouse/pull/51552) ([Kseniia Sumarokova](https://github.com/kssenii)). This syntax can be changed in this release.
 * (experimental MaterializedMySQL) Fixed crash when `mysqlxx::Pool::Entry` is used after it was disconnected. [#52063](https://github.com/ClickHouse/ClickHouse/pull/52063) ([Val Doroshchuk](https://github.com/valbok)).
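As context for the sparse-serialization changelog entry above, here is a minimal sketch of the opt-out it describes. The table and its columns are hypothetical; only the setting name comes from the entry itself:

``` sql
-- Hypothetical MergeTree table; setting the ratio to 1 turns the
-- now-default sparse serialization format back off for this table.
CREATE TABLE t
(
    id UInt64,
    v String
)
ENGINE = MergeTree
ORDER BY id
SETTINGS ratio_of_defaults_for_sparse_serialization = 1;
```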
@@ -208,9 +208,6 @@ option(OMIT_HEAVY_DEBUG_SYMBOLS
     "Do not generate debugger info for heavy modules (ClickHouse functions and dictionaries, some contrib)"
     ${OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT})

-if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
-    set(USE_DEBUG_HELPERS ON)
-endif()
 option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})

 option(BUILD_STANDALONE_KEEPER "Build keeper as small standalone binary" OFF)
@@ -7,8 +7,6 @@
 #include <base/find_symbols.h>
 #include <base/preciseExp10.h>
-
-#include <iostream>

 #define JSON_MAX_DEPTH 100

@@ -12,7 +12,6 @@
 #include <tuple>
 #include <limits>

-#include <boost/multiprecision/cpp_bin_float.hpp>
 #include <boost/math/special_functions/fpclassify.hpp>

 // NOLINTBEGIN(*)

@@ -22,6 +21,7 @@
 #define CONSTEXPR_FROM_DOUBLE constexpr
 using FromDoubleIntermediateType = long double;
 #else
+#include <boost/multiprecision/cpp_bin_float.hpp>
 /// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
 #define CONSTEXPR_FROM_DOUBLE
 using FromDoubleIntermediateType = boost::multiprecision::cpp_bin_float_double_extended;
@@ -19,7 +19,6 @@
 #include "Poco/UTF16Encoding.h"
 #include "Poco/Buffer.h"
 #include "Poco/Exception.h"
-#include <iostream>


 using Poco::Buffer;
@@ -16,7 +16,6 @@
 #include "Poco/TaskManager.h"
 #include "Poco/Exception.h"

-#include <iostream>
 #include <array>


@@ -14,7 +14,6 @@

 #include "Poco/JSON/Object.h"
-#include <iostream>
 #include <sstream>


 using Poco::Dynamic::Var;
@@ -26,7 +26,6 @@
 #include "Poco/CountingStream.h"
 #include "Poco/RegularExpression.h"
 #include <sstream>
-#include <iostream>


 using Poco::NumberFormatter;
@@ -1,5 +1,5 @@
 ## ClickHouse Dockerfiles

-This directory contain Dockerfiles for `clickhouse-client` and `clickhouse-server`. They are updated in each release.
+This directory contain Dockerfiles for `clickhouse-server`. They are updated in each release.

 Also there is bunch of images for testing and CI. They are listed in `images.json` file and updated on each commit to master. If you need to add another image, place information about it into `images.json`.
@@ -1,34 +0,0 @@
-FROM ubuntu:18.04
-
-# ARG for quick switch to a given ubuntu mirror
-ARG apt_archive="http://archive.ubuntu.com"
-RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
-
-ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
-ARG version=22.1.1.*
-
-RUN apt-get update \
-    && apt-get install --yes --no-install-recommends \
-        apt-transport-https \
-        ca-certificates \
-        dirmngr \
-        gnupg \
-    && mkdir -p /etc/apt/sources.list.d \
-    && apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 \
-    && echo $repository > /etc/apt/sources.list.d/clickhouse.list \
-    && apt-get update \
-    && env DEBIAN_FRONTEND=noninteractive \
-        apt-get install --allow-unauthenticated --yes --no-install-recommends \
-            clickhouse-client=$version \
-            clickhouse-common-static=$version \
-            locales \
-            tzdata \
-    && rm -rf /var/lib/apt/lists/* /var/cache/debconf \
-    && apt-get clean
-
-RUN locale-gen en_US.UTF-8
-ENV LANG en_US.UTF-8
-ENV LANGUAGE en_US:en
-ENV LC_ALL en_US.UTF-8
-
-ENTRYPOINT ["/usr/bin/clickhouse-client"]
@@ -1,7 +0,0 @@
-# ClickHouse Client Docker Image
-
-For more information see [ClickHouse Server Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/).
-
-## License
-
-View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
     esac

 ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
-ARG VERSION="23.7.3.14"
+ARG VERSION="23.7.4.5"
 ARG PACKAGES="clickhouse-keeper"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -58,33 +58,6 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
     rustup target add aarch64-apple-darwin && \
     rustup target add powerpc64le-unknown-linux-gnu

-# Create vendor cache for cargo.
-#
-# Note, that the config.toml for the root is used, you will not be able to
-# install any other crates, except those which had been vendored (since if
-# there is "replace-with" for some source, then cargo will not look to other
-# remotes except this).
-#
-# Notes for the command itself:
-# - --chown is required to preserve the rights
-# - unstable-options for -C
-# - chmod is required to fix the permissions, since builds are running from a different user
-# - copy of the Cargo.lock is required for proper dependencies versions
-# - cargo vendor --sync is requried to overcome [1] bug.
-#
-# [1]: https://github.com/rust-lang/wg-cargo-std-aware/issues/23
-COPY --chown=root:root /rust /rust/packages
-RUN cargo -Z unstable-options -C /rust/packages vendor > $CARGO_HOME/config.toml && \
-    cp "$(rustc --print=sysroot)"/lib/rustlib/src/rust/Cargo.lock "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/ && \
-    cargo -Z unstable-options -C /rust/packages vendor --sync "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.toml && \
-    rm "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.lock && \
-    sed -i "s#\"vendor\"#\"/rust/vendor\"#" $CARGO_HOME/config.toml && \
-    cat $CARGO_HOME/config.toml && \
-    mv /rust/packages/vendor /rust/vendor && \
-    chmod -R o=r+X /rust/vendor && \
-    ls -R -l /rust/packages && \
-    rm -r /rust/packages
-
 # NOTE: Seems like gcc-11 is too new for ubuntu20 repository
 # A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work):
 RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
@@ -1 +0,0 @@
-../../../rust
@@ -22,7 +22,7 @@ def check_image_exists_locally(image_name: str) -> bool:
         output = subprocess.check_output(
             f"docker images -q {image_name} 2> /dev/null", shell=True
         )
-        return output != ""
+        return output != b""
     except subprocess.CalledProcessError:
         return False

@@ -46,7 +46,7 @@ def build_image(image_name: str, filepath: Path) -> None:
     )


-def pre_build(repo_path: Path, env_variables: List[str]):
+def pre_build(repo_path: Path, env_variables: List[str]) -> None:
     if "WITH_PERFORMANCE=1" in env_variables:
         current_branch = subprocess.check_output(
             "git branch --show-current", shell=True, encoding="utf-8"

@@ -80,9 +80,12 @@ def run_docker_image_with_env(
     output_dir: Path,
     env_variables: List[str],
     ch_root: Path,
+    cargo_cache_dir: Path,
     ccache_dir: Optional[Path],
-):
+) -> None:
     output_dir.mkdir(parents=True, exist_ok=True)
+    cargo_cache_dir.mkdir(parents=True, exist_ok=True)

     env_part = " -e ".join(env_variables)
     if env_part:
         env_part = " -e " + env_part

@@ -104,7 +107,7 @@ def run_docker_image_with_env(
     cmd = (
         f"docker run --network=host --user={user} --rm {ccache_mount}"
         f"--volume={output_dir}:/output --volume={ch_root}:/build {env_part} "
-        f"{interactive} {image_name}"
+        f"--volume={cargo_cache_dir}:/rust/cargo/registry {interactive} {image_name}"
     )

     logging.info("Will build ClickHouse pkg with cmd: '%s'", cmd)

@@ -129,9 +132,10 @@ def parse_env_variables(
     version: str,
     official: bool,
     additional_pkgs: bool,
+    with_profiler: bool,
     with_coverage: bool,
     with_binaries: str,
-):
+) -> List[str]:
     DARWIN_SUFFIX = "-darwin"
     DARWIN_ARM_SUFFIX = "-darwin-aarch64"
     ARM_SUFFIX = "-aarch64"

@@ -322,6 +326,9 @@ def parse_env_variables(
         # utils are not included into clickhouse-bundle, so build everything
         build_target = "all"

+    if with_profiler:
+        cmake_flags.append("-DENABLE_BUILD_PROFILING=1")
+
     if with_coverage:
         cmake_flags.append("-DWITH_COVERAGE=1")

@@ -412,10 +419,18 @@ def parse_args() -> argparse.Namespace:
         action="store_true",
         help="if set, the build fails on errors writing cache to S3",
     )
+    parser.add_argument(
+        "--cargo-cache-dir",
+        default=Path(os.getenv("CARGO_HOME", "") or Path.home() / ".cargo")
+        / "registry",
+        type=dir_name,
+        help="a directory to preserve the rust cargo crates",
+    )
     parser.add_argument("--force-build-image", action="store_true")
     parser.add_argument("--version")
     parser.add_argument("--official", action="store_true")
     parser.add_argument("--additional-pkgs", action="store_true")
+    parser.add_argument("--with-profiler", action="store_true")
     parser.add_argument("--with-coverage", action="store_true")
     parser.add_argument(
         "--with-binaries", choices=("programs", "tests", ""), default=""

@@ -451,7 +466,7 @@ def parse_args() -> argparse.Namespace:
     return args


-def main():
+def main() -> None:
     logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
     args = parse_args()

@@ -479,6 +494,7 @@ def main():
         args.version,
         args.official,
         args.additional_pkgs,
+        args.with_profiler,
         args.with_coverage,
         args.with_binaries,
     )

@@ -490,6 +506,7 @@ def main():
         args.output_dir,
         env_prepared,
         ch_root,
+        args.cargo_cache_dir,
         args.ccache_dir,
     )
     logging.info("Output placed into %s", args.output_dir)
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.7.3.14"
+ARG VERSION="23.7.4.5"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.7.3.14"
+ARG VERSION="23.7.4.5"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # set non-empty deb_location_url url to create a docker image
@@ -19,13 +19,13 @@ RUN apt-get update \
 # and MEMORY_LIMIT_EXCEEDED exceptions in Functional tests (total memory limit in Functional tests is ~55.24 GiB).
 # TSAN will flush shadow memory when reaching this limit.
 # It may cause false-negatives, but it's better than OOM.
-RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'" >> /etc/environment
+RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'" >> /etc/environment
 RUN echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment
 RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment
 RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment
 # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run")
 # (but w/o verbosity for TSAN, otherwise test.reference will not match)
-ENV TSAN_OPTIONS='halt_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'
+ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'
 ENV UBSAN_OPTIONS='print_stacktrace=1'
 ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'

@@ -130,7 +130,7 @@ COPY misc/ /misc/

 # Same options as in test/base/Dockerfile
 # (in case you need to override them in tests)
-ENV TSAN_OPTIONS='halt_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'
+ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'
 ENV UBSAN_OPTIONS='print_stacktrace=1'
 ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'

@@ -665,9 +665,8 @@ create view partial_query_times as select * from
 -- Report for backward-incompatible ('partial') queries that we could only run on the new server (e.g.
 -- queries with new functions added in the tested PR).
 create table partial_queries_report engine File(TSV, 'report/partial-queries-report.tsv')
-    settings output_format_decimal_trailing_zeros = 1
-    as select toDecimal64(time_median, 3) time,
-        toDecimal64(time_stddev / time_median, 3) relative_time_stddev,
+    as select round(time_median, 3) time,
+        round(time_stddev / time_median, 3) relative_time_stddev,
         test, query_index, query_display_name
     from partial_query_times
     join query_display_names using (test, query_index)

@@ -739,28 +738,26 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
     ;

 create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv')
-    settings output_format_decimal_trailing_zeros = 1
     as with
         -- server_time is sometimes reported as zero (if it's less than 1 ms),
         -- so we have to work around this to not get an error about conversion
         -- of NaN to decimal.
         (left > right ? left / right : right / left) as times_change_float,
         isFinite(times_change_float) as times_change_finite,
-        toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
+        round(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
         times_change_finite
             ? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
             : '--' as times_change_str
     select
-        toDecimal64(left, 3), toDecimal64(right, 3), times_change_str,
-        toDecimal64(diff, 3), toDecimal64(stat_threshold, 3),
+        round(left, 3), round(right, 3), times_change_str,
+        round(diff, 3), round(stat_threshold, 3),
         changed_fail, test, query_index, query_display_name
     from queries where changed_show order by abs(diff) desc;

 create table unstable_queries_report engine File(TSV, 'report/unstable-queries.tsv')
-    settings output_format_decimal_trailing_zeros = 1
     as select
-        toDecimal64(left, 3), toDecimal64(right, 3), toDecimal64(diff, 3),
-        toDecimal64(stat_threshold, 3), unstable_fail, test, query_index, query_display_name
+        round(left, 3), round(right, 3), round(diff, 3),
+        round(stat_threshold, 3), unstable_fail, test, query_index, query_display_name
     from queries where unstable_show order by stat_threshold desc;

@@ -789,11 +786,10 @@ create view total_speedup as
     ;

 create table test_perf_changes_report engine File(TSV, 'report/test-perf-changes.tsv')
-    settings output_format_decimal_trailing_zeros = 1
     as with
         (times_speedup >= 1
-            ? '-' || toString(toDecimal64(times_speedup, 3)) || 'x'
-            : '+' || toString(toDecimal64(1 / times_speedup, 3)) || 'x')
+            ? '-' || toString(round(times_speedup, 3)) || 'x'
+            : '+' || toString(round(1 / times_speedup, 3)) || 'x')
         as times_speedup_str
     select test, times_speedup_str, queries, bad, changed, unstable
     -- Not sure what's the precedence of UNION ALL vs WHERE & ORDER BY, hence all

@@ -817,11 +813,10 @@ create view total_client_time_per_query as select *
     'test text, query_index int, client float, server float');

 create table slow_on_client_report engine File(TSV, 'report/slow-on-client.tsv')
-    settings output_format_decimal_trailing_zeros = 1
-    as select client, server, toDecimal64(client/server, 3) p,
+    as select client, server, round(client/server, 3) p,
         test, query_display_name
     from total_client_time_per_query left join query_display_names using (test, query_index)
-    where p > toDecimal64(1.02, 3) order by p desc;
+    where p > round(1.02, 3) order by p desc;

 create table wall_clock_time_per_test engine Memory as select *
     from file('wall-clock-times.tsv', TSV, 'test text, real float, user float, system float');

@@ -899,15 +894,14 @@ create view test_times_view_total as
     ;

 create table test_times_report engine File(TSV, 'report/test-times.tsv')
-    settings output_format_decimal_trailing_zeros = 1
     as select
         test,
-        toDecimal64(real, 3),
-        toDecimal64(total_client_time, 3),
+        round(real, 3),
+        round(total_client_time, 3),
         queries,
-        toDecimal64(query_max, 3),
-        toDecimal64(avg_real_per_query, 3),
-        toDecimal64(query_min, 3),
+        round(query_max, 3),
+        round(avg_real_per_query, 3),
+        round(query_min, 3),
         runs
     from (
         select * from test_times_view

@@ -919,21 +913,20 @@ create table test_times_report engine File(TSV, 'report/test-times.tsv')

 -- report for all queries page, only main metric
 create table all_tests_report engine File(TSV, 'report/all-queries.tsv')
-    settings output_format_decimal_trailing_zeros = 1
     as with
         -- server_time is sometimes reported as zero (if it's less than 1 ms),
         -- so we have to work around this to not get an error about conversion
         -- of NaN to decimal.
         (left > right ? left / right : right / left) as times_change_float,
         isFinite(times_change_float) as times_change_finite,
-        toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
+        round(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
         times_change_finite
             ? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
             : '--' as times_change_str
     select changed_fail, unstable_fail,
-        toDecimal64(left, 3), toDecimal64(right, 3), times_change_str,
-        toDecimal64(isFinite(diff) ? diff : 0, 3),
-        toDecimal64(isFinite(stat_threshold) ? stat_threshold : 0, 3),
+        round(left, 3), round(right, 3), times_change_str,
+        round(isFinite(diff) ? diff : 0, 3),
+        round(isFinite(stat_threshold) ? stat_threshold : 0, 3),
         test, query_index, query_display_name
     from queries order by test, query_index;

@@ -1044,27 +1037,6 @@ create table unstable_run_traces engine File(TSVWithNamesAndTypes,
     order by count() desc
     ;

-create table metric_devation engine File(TSVWithNamesAndTypes,
-        'report/metric-deviation.$version.tsv')
-    settings output_format_decimal_trailing_zeros = 1
-    -- first goes the key used to split the file with grep
-    as select test, query_index, query_display_name,
-        toDecimal64(d, 3) d, q, metric
-    from (
-        select
-            test, query_index,
-            (q[3] - q[1])/q[2] d,
-            quantilesExact(0, 0.5, 1)(value) q, metric
-        from (select * from unstable_run_metrics
-            union all select * from unstable_run_traces
-            union all select * from unstable_run_metrics_2) mm
-        group by test, query_index, metric
-        having isFinite(d) and d > 0.5 and q[3] > 5
-    ) metrics
-    left join query_display_names using (test, query_index)
-    order by test, query_index, d desc
-    ;
-
 create table stacks engine File(TSV, 'report/stacks.$version.tsv') as
     select
         -- first goes the key used to split the file with grep

@@ -1173,9 +1145,8 @@ create table metrics engine File(TSV, 'metrics/metrics.tsv') as

 -- Show metrics that have changed
 create table changes engine File(TSV, 'metrics/changes.tsv')
-    settings output_format_decimal_trailing_zeros = 1
     as select metric, left, right,
-        toDecimal64(diff, 3), toDecimal64(times_diff, 3)
+        round(diff, 3), round(times_diff, 3)
     from (
         select metric, median(left) as left, median(right) as right,
             (right - left) / left diff,

@@ -1226,7 +1197,6 @@ create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv')
         '$SHA_TO_TEST' :: LowCardinality(String) AS commit_sha,
         '${CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME:-Performance}' :: LowCardinality(String) AS check_name,
         '$(sed -n 's/.*<!--status: \(.*\)-->/\1/p' report.html)' :: LowCardinality(String) AS check_status,
-        -- TODO toDateTime() can't parse output of 'date', so no time for now.
         (($(date +%s) - $CHPC_CHECK_START_TIMESTAMP) * 1000) :: UInt64 AS check_duration_ms,
         fromUnixTimestamp($CHPC_CHECK_START_TIMESTAMP) check_start_time,
         test_name :: LowCardinality(String) AS test_name ,
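The `toDecimal64` to `round` rewrite above (together with dropping `output_format_decimal_trailing_zeros`) sidesteps the NaN-to-Decimal conversion problem the script's own comments mention. A small standalone illustration, not part of the report script:

``` sql
SELECT round(0 / 0, 3);        -- returns nan: round() passes non-finite Float64 through
SELECT toDecimal64(0 / 0, 3);  -- throws an exception: NaN cannot be converted to Decimal
```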
@@ -3,7 +3,7 @@
     <default>
         <allow_introspection_functions>1</allow_introspection_functions>
         <log_queries>1</log_queries>
-        <metrics_perf_events_enabled>1</metrics_perf_events_enabled>
+        <metrics_perf_events_enabled>0</metrics_perf_events_enabled>
        <!--
            If a test takes too long by mistake, the entire test task can
            time out and the author won't get a proper message. Put some cap
@@ -369,6 +369,7 @@ for query_index in queries_to_run:
             "max_execution_time": args.prewarm_max_query_seconds,
             "query_profiler_real_time_period_ns": 10000000,
             "query_profiler_cpu_time_period_ns": 10000000,
+            "metrics_perf_events_enabled": 1,
             "memory_profiler_step": "4Mi",
         },
     )

@@ -503,6 +504,7 @@ for query_index in queries_to_run:
         settings={
             "query_profiler_real_time_period_ns": 10000000,
             "query_profiler_cpu_time_period_ns": 10000000,
+            "metrics_perf_events_enabled": 1,
         },
     )
     print(
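The two perf.py hunks above move `metrics_perf_events_enabled` from the server-wide profile into per-query settings. The same can be done ad hoc for any query; a minimal sketch (illustrative query, assuming a server where perf events are available):

``` sql
-- Illustrative only: enable perf-event metrics collection for this one query.
SELECT count()
FROM numbers(10000000)
SETTINGS metrics_perf_events_enabled = 1;
```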
docs/changelogs/v23.7.4.5-stable.md (new file, 17 lines)
@@ -0,0 +1,17 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.7.4.5-stable (bd2fcd44553) FIXME as compared to v23.7.3.14-stable (bd9a510550c)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Disable the new parquet encoder [#53130](https://github.com/ClickHouse/ClickHouse/pull/53130) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Revert changes in `ZstdDeflatingAppendableWriteBuffer` [#53111](https://github.com/ClickHouse/ClickHouse/pull/53111) ([Antonio Andelic](https://github.com/antonio2368)).
+
@@ -42,20 +42,20 @@ sudo apt-get install git cmake ccache python3 ninja-build nasm yasm gawk lsb-rel

 ### Install and Use the Clang compiler

-On Ubuntu/Debian you can use LLVM's automatic installation script, see [here](https://apt.llvm.org/).
+On Ubuntu/Debian, you can use LLVM's automatic installation script; see [here](https://apt.llvm.org/).

 ``` bash
 sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
 ```

-Note: in case of troubles, you can also use this:
+Note: in case of trouble, you can also use this:

 ```bash
 sudo apt-get install software-properties-common
 sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
 ```

-For other Linux distribution - check the availability of LLVM's [prebuild packages](https://releases.llvm.org/download.html).
+For other Linux distributions - check the availability of LLVM's [prebuild packages](https://releases.llvm.org/download.html).

 As of April 2023, clang-16 or higher will work.
 GCC as a compiler is not supported.

@@ -92,8 +92,12 @@ cmake -S . -B build
 cmake --build build  # or: `cd build; ninja`
 ```

+:::tip
+In case `cmake` isn't able to detect the number of available logical cores, the build will be done by one thread. To overcome this, you can tweak `cmake` to use a specific number of threads with `-j` flag, for example, `cmake --build build -j 16`. Alternatively, you can generate build files with a specific number of jobs in advance to avoid always setting the flag: `cmake -DPARALLEL_COMPILE_JOBS=16 -S . -B build`, where `16` is the desired number of threads.
+:::
+
 To create an executable, run `cmake --build build --target clickhouse` (or: `cd build; ninja clickhouse`).
-This will create executable `build/programs/clickhouse` which can be used with `client` or `server` arguments.
+This will create an executable `build/programs/clickhouse`, which can be used with `client` or `server` arguments.

 ## Building on Any Linux {#how-to-build-clickhouse-on-any-linux}

@@ -107,7 +111,7 @@ The build requires the following components:
 - Yasm
 - Gawk

-If all the components are installed, you may build in the same way as the steps above.
+If all the components are installed, you may build it in the same way as the steps above.

 Example for OpenSUSE Tumbleweed:

@@ -123,7 +127,7 @@ Example for Fedora Rawhide:

 ``` bash
 sudo yum update
-sudo yum --nogpg install git cmake make clang python3 ccache nasm yasm gawk
+sudo yum --nogpg install git cmake make clang python3 ccache lld nasm yasm gawk
 git clone --recursive https://github.com/ClickHouse/ClickHouse.git
 mkdir build
 cmake -S . -B build
@@ -21,7 +21,7 @@ CREATE TABLE azure_blob_storage_table (name String, value UInt32)

 - `connection_string|storage_account_url` — connection_string includes account name & key ([Create connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)) or you could also provide the storage account url here and account name & account key as separate parameters (see parameters account_name & account_key)
 - `container_name` - Container name
-- `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
+- `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
 - `account_name` - if storage_account_url is used, then account name can be specified here
 - `account_key` - if storage_account_url is used, then account key can be specified here
 - `format` — The [format](/docs/en/interfaces/formats.md) of the file.
@@ -37,7 +37,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)

 ### Engine parameters

-- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
+- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
 - `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
 - `format` — The [format](../../../interfaces/formats.md#formats) of the file.
 - `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).

@@ -164,6 +164,7 @@ For more information about virtual columns see [here](../../../engines/table-eng
 `path` argument can specify multiple files using bash-like wildcards. For being processed file should exist and match to the whole path pattern. Listing of files is determined during `SELECT` (not at `CREATE` moment).

 - `*` — Substitutes any number of any characters except `/` including empty string.
+- `**` — Substitutes any number of any character include `/` including empty string.
 - `?` — Substitutes any single character.
 - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
 - `{N..M}` — Substitutes any number in range from N to M including both borders. N and M can have leading zeroes e.g. `000..078`.
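A hedged sketch of the newly documented `**` wildcard, using the `s3` table function with `NOSIGN` as described above; the bucket URL is hypothetical:

``` sql
-- Recursively match Parquet files at any depth under data/ (hypothetical public bucket).
SELECT count()
FROM s3('https://example-bucket.s3.amazonaws.com/data/**.parquet', NOSIGN, 'Parquet');
```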
@@ -27,7 +27,7 @@ CREATE TABLE s3_queue_engine_table (name String, value UInt32)

 **Engine parameters**

-- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
+- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
 - `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
 - `format` — The [format](../../../interfaces/formats.md#formats) of the file.
 - `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).

@@ -213,6 +213,7 @@ For more information about virtual columns see [here](../../../engines/table-eng
 `path` argument can specify multiple files using bash-like wildcards. For being processed file should exist and match to the whole path pattern. Listing of files is determined during `SELECT` (not at `CREATE` moment).

 - `*` — Substitutes any number of any characters except `/` including empty string.
+- `**` — Substitutes any number of any characters include `/` including empty string.
 - `?` — Substitutes any single character.
 - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
 - `{N..M}` — Substitutes any number in range from N to M including both borders. N and M can have leading zeroes e.g. `000..078`.
@@ -84,6 +84,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
 - `password` for the file on disk
 - `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
 - `structure_only`: if enabled, allows to only backup or restore the CREATE statements without the data of tables
+- `storage_policy`: storage policy for the tables being restored. See [Using Multiple Block Devices for Data Storage](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes). This setting is only applicable to the `RESTORE` command. The specified storage policy applies only to tables with an engine from the `MergeTree` family.
 - `s3_storage_class`: the storage class used for S3 backup. For example, `STANDARD`

 ### Usage examples
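A hedged sketch of the new `storage_policy` setting documented above; the table, backup destination, and policy name are hypothetical:

``` sql
-- Restore a MergeTree table onto a specific (hypothetical) storage policy.
RESTORE TABLE data AS data_restored
FROM Disk('backups', '1.zip')
SETTINGS storage_policy = 'policy_with_local_disks';
```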
@@ -7,6 +7,10 @@ pagination_next: en/operations/settings/settings

 # Settings Overview

+:::note
+XML-based Settings Profiles and [configuration files](https://clickhouse.com/docs/en/operations/configuration-files) are currently not supported for ClickHouse Cloud. To specify settings for your ClickHouse Cloud service, you must use [SQL-driven Settings Profiles](https://clickhouse.com/docs/en/operations/access-rights#settings-profiles-management).
+:::
+
 There are two main groups of ClickHouse settings:

 - Global server settings
@@ -298,7 +298,7 @@ Default value: `THROW`.
 - [JOIN clause](../../sql-reference/statements/select/join.md#select-join)
 - [Join table engine](../../engines/table-engines/special/join.md)

-## max_partitions_per_insert_block {#max-partitions-per-insert-block}
+## max_partitions_per_insert_block {#settings-max_partitions_per_insert_block}

 Limits the maximum number of partitions in a single inserted block.

@@ -309,9 +309,18 @@ Default value: 100.

 **Details**

-When inserting data, ClickHouse calculates the number of partitions in the inserted block. If the number of partitions is more than `max_partitions_per_insert_block`, ClickHouse throws an exception with the following text:
+When inserting data, ClickHouse calculates the number of partitions in the inserted block. If the number of partitions is more than `max_partitions_per_insert_block`, ClickHouse either logs a warning or throws an exception based on `throw_on_max_partitions_per_insert_block`. Exceptions have the following text:

-> “Too many partitions for single INSERT block (more than” + toString(max_parts) + “). The limit is controlled by ‘max_partitions_per_insert_block’ setting. A large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).”
+> “Too many partitions for a single INSERT block (`partitions_count` partitions, limit is ” + toString(max_partitions) + “). The limit is controlled by the ‘max_partitions_per_insert_block’ setting. A large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).”

+## throw_on_max_partitions_per_insert_block {#settings-throw_on_max_partition_per_insert_block}
+
+Allows you to control behaviour when `max_partitions_per_insert_block` is reached.
+
+- `true`  - When an insert block reaches `max_partitions_per_insert_block`, an exception is raised.
+- `false` - Logs a warning when `max_partitions_per_insert_block` is reached.
+
+Default value: `true`
+
 ## max_temporary_data_on_disk_size_for_user {#settings_max_temporary_data_on_disk_size_for_user}

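A minimal sketch of the two settings working together; the `events` and `staging_events` tables are hypothetical. With the flag set to `0`, an INSERT that exceeds the partition limit is only logged, not rejected:

``` sql
-- Hypothetical tables: demote the partition-count limit from an error to a warning.
INSERT INTO events
SETTINGS max_partitions_per_insert_block = 100,
         throw_on_max_partitions_per_insert_block = 0
SELECT * FROM staging_events;
```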
@@ -11,7 +11,7 @@ A client application to interact with clickhouse-keeper by its native protocol.

 - `-q QUERY`, `--query=QUERY` — Query to execute. If this parameter is not passed, `clickhouse-keeper-client` will start in interactive mode.
 - `-h HOST`, `--host=HOST` — Server host. Default value: `localhost`.
-- `-p N`, `--port=N` — Server port. Default value: 2181
+- `-p N`, `--port=N` — Server port. Default value: 9181
 - `--connection-timeout=TIMEOUT` — Set connection timeout in seconds. Default value: 10s.
 - `--session-timeout=TIMEOUT` — Set session timeout in seconds. Default value: 10s.
 - `--operation-timeout=TIMEOUT` — Set operation timeout in seconds. Default value: 10s.

@@ -21,8 +21,8 @@ A client application to interact with clickhouse-keeper by its native protocol.
 ## Example {#clickhouse-keeper-client-example}

 ```bash
-./clickhouse-keeper-client -h localhost:2181 --connection-timeout 30 --session-timeout 30 --operation-timeout 30
-Connected to ZooKeeper at [::1]:2181 with session_id 137
+./clickhouse-keeper-client -h localhost:9181 --connection-timeout 30 --session-timeout 30 --operation-timeout 30
+Connected to ZooKeeper at [::1]:9181 with session_id 137
 / :) ls
 keeper foo bar
 / :) cd keeper

@@ -45,9 +45,14 @@ keeper foo bar
 - `ls [path]` -- Lists the nodes for the given path (default: cwd)
 - `cd [path]` -- Change the working path (default `.`)
 - `set <path> <value> [version]` -- Updates the node's value. Only update if version matches (default: -1)
-- `create <path> <value>` -- Creates new node
+- `create <path> <value> [mode]` -- Creates new node with the set value
+- `touch <path>` -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists
 - `get <path>` -- Returns the node's value
 - `remove <path>` -- Remove the node
 - `rmr <path>` -- Recursively deletes path. Confirmation required
 - `flwc <command>` -- Executes four-letter-word command
 - `help` -- Prints this message
+- `get_stat [path]` -- Returns the node's stat (default `.`)
+- `find_super_nodes <threshold> [path]` -- Finds nodes with number of children larger than some threshold for the given path (default `.`)
+- `delete_stale_backups` -- Deletes ClickHouse nodes used for backups that are now inactive
+- `find_big_family [path] [n]` -- Returns the top n nodes with the biggest family in the subtree (default path = `.` and n = 10)
@@ -6,42 +6,42 @@ sidebar_label: UUID

 # UUID

-A universally unique identifier (UUID) is a 16-byte number used to identify records. For detailed information about the UUID, see [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+A Universally Unique Identifier (UUID) is a 16-byte value used to identify records. For detailed information about UUIDs, see [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier).

-The example of UUID type value is represented below:
+While different UUID variants exist (see [here](https://datatracker.ietf.org/doc/html/draft-ietf-uuidrev-rfc4122bis)), ClickHouse does not validate that inserted UUIDs conform to a particular variant. UUIDs are internally treated as a sequence of 16 random bytes with [8-4-4-4-12 representation](https://en.wikipedia.org/wiki/Universally_unique_identifier#Textual_representation) at SQL level.
+
+Example UUID value:

 ``` text
 61f0c404-5cb3-11e7-907b-a6006ad3dba0
 ```

-If you do not specify the UUID column value when inserting a new record, the UUID value is filled with zero:
+The default UUID is all-zero. It is used, for example, when a new record is inserted but no value for a UUID column is specified:

 ``` text
 00000000-0000-0000-0000-000000000000
 ```

-## How to Generate
+## Generating UUIDs

-To generate the UUID value, ClickHouse provides the [generateUUIDv4](../../sql-reference/functions/uuid-functions.md) function.
+ClickHouse provides the [generateUUIDv4](../../sql-reference/functions/uuid-functions.md) function to generate random UUID version 4 values.

 ## Usage Example

 **Example 1**

-This example demonstrates creating a table with the UUID type column and inserting a value into the table.
+This example demonstrates the creation of a table with a UUID column and the insertion of a value into the table.

 ``` sql
 CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog
 ```

 ``` sql
 INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1'
 ```

 ``` sql
 SELECT * FROM t_uuid
 ```

 Result:

 ``` text
 ┌────────────────────────────────────x─┬─y─────────┐
 │ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │

@@ -50,13 +50,11 @@ SELECT * FROM t_uuid

 **Example 2**

-In this example, the UUID column value is not specified when inserting a new record.
+In this example, no UUID column value is specified when the record is inserted, i.e. the default UUID value is inserted:

 ``` sql
 INSERT INTO t_uuid (y) VALUES ('Example 2')
 ```

 ``` sql
 SELECT * FROM t_uuid
 ```

@@ -18,7 +18,7 @@ file(path[, default])

 **Arguments**

-- `path` — The path of the file relative to [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Supports wildcards `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` are numbers and `'abc', 'def'` are strings.
+- `path` — The path of the file relative to [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Supports wildcards `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` are numbers and `'abc', 'def'` are strings.
 - `default` — The value returned if the file does not exist or cannot be accessed. Supported data types: [String](../../sql-reference/data-types/string.md) and [NULL](../../sql-reference/syntax.md#null-literal).

 **Example**
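A short sketch of the `default` argument described above; the file path is hypothetical, and the literal is returned when the file is absent:

``` sql
-- 'nonexistent.txt' is a hypothetical path under user_files_path.
SELECT file('nonexistent.txt', 'file is missing') AS contents;
```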
@@ -729,6 +729,30 @@ Returns whether string `str` ends with `suffix`.
 endsWith(str, suffix)
 ```

+## endsWithUTF8
+
+Returns whether string `str` ends with `suffix`, the difference between `endsWithUTF8` and `endsWith` is that `endsWithUTF8` match `str` and `suffix` by UTF-8 characters.
+
+**Syntax**
+
+```sql
+endsWithUTF8(str, suffix)
+```
+
+**Example**
+
+``` sql
+SELECT endsWithUTF8('中国', '\xbd'), endsWith('中国', '\xbd')
+```
+
+Result:
+
+```result
+┌─endsWithUTF8('中国', '½')─┬─endsWith('中国', '½')─┐
+│                         0 │                     1 │
+└──────────────────────────┴──────────────────────┘
+```
+
 ## startsWith

 Returns whether string `str` starts with `prefix`.

@@ -745,6 +769,25 @@ startsWith(str, prefix)
 SELECT startsWith('Spider-Man', 'Spi');
 ```

+## startsWithUTF8
+
+Returns whether string `str` starts with `prefix`, the difference between `startsWithUTF8` and `startsWith` is that `startsWithUTF8` match `str` and `suffix` by UTF-8 characters.
+
+
+**Example**
+
+``` sql
+SELECT startsWithUTF8('中国', '\xe4'), startsWith('中国', '\xe4')
+```
+
+Result:
+
+```result
+┌─startsWithUTF8('中国', 'ä')─┬─startsWith('中国', 'ä')─┐
+│                           0 │                       1 │
+└─────────────────────────────┴─────────────────────────┘
+```
+
 ## trim

 Removes the specified characters from the start or end of a string. If not specified otherwise, the function removes whitespace (ASCII-character 32).
@@ -11,6 +11,7 @@ Syntax:

 ``` sql
 CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
+    [IN access_storage_type]
     [KEYED BY {user_name | ip_address | client_key | client_key,user_name | client_key,ip_address} | NOT KEYED]
     [FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
         {MAX { {queries | query_selects | query_inserts | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |

@@ -11,6 +11,7 @@ Syntax:

 ``` sql
 CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] [, name2 [ON CLUSTER cluster_name2] ...]
+    [IN access_storage_type]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | PROFILE 'profile_name'] [,...]
 ```

@@ -16,6 +16,7 @@ Syntax:
 ``` sql
 CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1|db1.*
         [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2|db2.* ...]
+    [IN access_storage_type]
     [FOR SELECT] USING condition
     [AS {PERMISSIVE | RESTRICTIVE}]
     [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}]

@@ -12,7 +12,7 @@ Syntax:
 ``` sql
 CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
     [, name2 [ON CLUSTER cluster_name2] ...]
+    [IN access_storage_type]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | INHERIT 'profile_name'] [,...]
 ```

@@ -14,6 +14,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
     [, name2 [ON CLUSTER cluster_name2] ...]
     [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'}]
     [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
+    [IN access_storage_type]
     [DEFAULT ROLE role [,...]]
     [DEFAULT DATABASE database | NONE]
     [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]]
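A hedged sketch of the `[IN access_storage_type]` clause added across the CREATE statements above, using one of the storage names listed in the MOVE statement documentation later in this commit; the role name is hypothetical:

``` sql
-- Hypothetical role, created directly in the local_directory access storage.
CREATE ROLE accountant IN local_directory;
```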
@@ -49,7 +49,7 @@ Deletes a user.
 Syntax:

 ``` sql
-DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
+DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
 ```

 ## DROP ROLE

@@ -59,7 +59,7 @@ Deletes a role. The deleted role is revoked from all the entities where it was a
 Syntax:

 ``` sql
-DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
+DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
 ```

 ## DROP ROW POLICY

@@ -69,7 +69,7 @@ Deletes a row policy. Deleted row policy is revoked from all the entities where
 Syntax:

 ``` sql
-DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name]
+DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
 ```

 ## DROP QUOTA

@@ -79,7 +79,7 @@ Deletes a quota. The deleted quota is revoked from all the entities where it was
 Syntax:

 ``` sql
-DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
+DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
 ```

 ## DROP SETTINGS PROFILE

@@ -89,7 +89,7 @@ Deletes a settings profile. The deleted settings profile is revoked from all the
 Syntax:

 ``` sql
-DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
+DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
 ```

 ## DROP VIEW
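Correspondingly, a minimal sketch of the new `[FROM access_storage_type]` clause; the user name is hypothetical:

``` sql
-- Hypothetical user, dropped only from the local_directory access storage.
DROP USER IF EXISTS test_user FROM local_directory;
```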
docs/en/sql-reference/statements/move.md (new file, 32 lines)
@@ -0,0 +1,32 @@
+---
+slug: /en/sql-reference/statements/move
+sidebar_position: 54
+sidebar_label: MOVE
+---
+
+# MOVE access entity statement
+
+This statement allows to move an access entity from one access storage to another.
+
+Syntax:
+
+```sql
+MOVE {USER, ROLE, QUOTA, SETTINGS PROFILE, ROW POLICY} name1 [, name2, ...] TO access_storage_type
+```
+
+Currently, there are five access storages in ClickHouse:
+- `local_directory`
+- `memory`
+- `replicated`
+- `users_xml` (ro)
+- `ldap` (ro)
+
+Examples:
+
+```sql
+MOVE USER test TO local_directory
+```
+
+```sql
+MOVE ROLE test TO memory
+```
@ -19,7 +19,7 @@ azureBlobStorage(- connection_string|storage_account_url, container_name, blobpa
|
||||
|
||||
- `connection_string|storage_account_url` — connection_string includes account name & key ([Create connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)) or you could also provide the storage account url here and account name & account key as separate parameters (see parameters account_name & account_key)
|
||||
- `container_name` - Container name
|
||||
- `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
|
||||
- `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
|
||||
- `account_name` - if storage_account_url is used, then account name can be specified here
|
||||
- `account_key` - if storage_account_url is used, then account key can be specified here
|
||||
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
|
||||
|
@ -19,7 +19,7 @@ azureBlobStorageCluster(cluster_name, connection_string|storage_account_url, container_name, ...

- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
- `connection_string|storage_account_url` — connection_string includes the account name & key ([Create connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)). Alternatively, you can provide the storage account URL here and the account name & account key as separate parameters (see the account_name & account_key parameters).
- `container_name` - Container name
- `blobpath` - file path. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `blobpath` - file path. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `account_name` - if storage_account_url is used, then the account name can be specified here
- `account_key` - if storage_account_url is used, then the account key can be specified here
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
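The cluster variant takes the cluster name as its first argument; a hypothetical spread of the same recursive read across a cluster (all names are placeholders):

```sql
SELECT count()
FROM azureBlobStorageCluster('my_cluster',
    'https://myaccount.blob.core.windows.net', 'mycontainer',
    'data/**/*.csv', 'myaccount', '<account_key>', 'CSVWithNames');
```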
@ -22,7 +22,7 @@ The GCS Table Function integrates with Google Cloud Storage by using the GCS XML

**Arguments**

- `path` — Bucket url with path to file. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `path` — Bucket url with path to file. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.

:::note GCS
The GCS path is in this format as the endpoint for the Google XML API is different than the JSON API:
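As a sketch, a recursive read with `**`, assuming a publicly readable bucket (the bucket and layout are hypothetical):

```sql
SELECT count()
FROM gcs('https://storage.googleapis.com/my-bucket/data/**/*.tsv', 'TSVWithNames');
```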
@ -17,7 +17,7 @@ hdfsCluster(cluster_name, URI, format, structure)

**Arguments**

- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
- `URI` — URI to a file or a bunch of files. Supports the following wildcards in readonly mode: `*`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `URI` — URI to a file or a bunch of files. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
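A hypothetical recursive read across a cluster (the cluster name, namenode address, and schema are placeholders):

```sql
SELECT count()
FROM hdfsCluster('my_cluster', 'hdfs://namenode:9000/data/**/*.parquet',
                 'Parquet', 'id UInt64, value String');
```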
@ -23,7 +23,7 @@ For GCS, substitute your HMAC key and HMAC secret where you see `aws_access_key_

**Arguments**

- `path` — Bucket url with path to file. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `path` — Bucket url with path to file. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path).

:::note GCS
The GCS path is in this format as the endpoint for the Google XML API is different than the JSON API:
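For illustration, matching compressed logs at any depth with `**` (the bucket and layout are hypothetical):

```sql
SELECT *
FROM s3('https://my-bucket.s3.amazonaws.com/logs/**/*.json.gz', 'JSONEachRow')
LIMIT 10;
```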
@ -16,7 +16,7 @@ s3Cluster(cluster_name, source, [,access_key_id, secret_access_key] [,format] [,

**Arguments**

- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
- `source` — URL to a file or a bunch of files. Supports the following wildcards in readonly mode: `*`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `source` — URL to a file or a bunch of files. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `access_key_id` and `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional.
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
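And the cluster variant, distributing the same kind of recursive listing (all names are placeholders):

```sql
SELECT count()
FROM s3Cluster('my_cluster',
               'https://my-bucket.s3.amazonaws.com/data/**/*.csv',
               'CSVWithNames', 'id UInt64, value String');
```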
@ -1,32 +0,0 @@
---
slug: /ru/getting-started/example-datasets/wikistat
sidebar_position: 17
sidebar_label: WikiStat
---

# WikiStat {#wikistat}

See: http://dumps.wikimedia.org/other/pagecounts-raw/

Creating the table:

``` sql
CREATE TABLE wikistat
(
    date Date,
    time DateTime,
    project String,
    subproject String,
    path String,
    hits UInt64,
    size UInt64
) ENGINE = MergeTree(date, (path, time), 8192);
```

Loading the data:

``` bash
$ for i in {2007..2016}; do for j in {01..12}; do echo $i-$j >&2; curl -sSL "http://dumps.wikimedia.org/other/pagecounts-raw/$i/$i-$j/" | grep -oE 'pagecounts-[0-9]+-[0-9]+\.gz'; done; done | sort | uniq | tee links.txt
$ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/pagecounts-raw/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1/')/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1-\2/')/$link; done
$ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done
```
1 docs/ru/getting-started/example-datasets/wikistat.md Symbolic link
@ -0,0 +1 @@
../../../en/getting-started/example-datasets/wikistat.md
@ -311,9 +311,18 @@ FORMAT Null;

**Details**

When inserting data, ClickHouse calculates the number of partitions in the inserted block. If the number of partitions is greater than `max_partitions_per_insert_block`, ClickHouse generates an exception with the following text:
When inserting data, ClickHouse checks the number of partitions in the inserted block. If the number of partitions exceeds `max_partitions_per_insert_block`, ClickHouse either logs a warning or throws an exception, depending on the value of `throw_on_max_partitions_per_insert_block`. The exception has the following text:

> «Too many partitions for single INSERT block (more than» + toString(max_parts) + «). The limit is controlled by ‘max_partitions_per_insert_block’ setting. Large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).»
> “Too many partitions for a single INSERT block (`partitions_count` partitions, limit is ” + toString(max_partitions) + “). The limit is controlled by the ‘max_partitions_per_insert_block’ setting. A large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).”

## throw_on_max_partitions_per_insert_block {#settings-throw_on_max_partition_per_insert_block}

Controls the behavior when `max_partitions_per_insert_block` is reached.

- `true` — An exception is thrown when the inserted block reaches `max_partitions_per_insert_block`.
- `false` — A warning is logged when `max_partitions_per_insert_block` is reached.

Default value: `true`
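For illustration, the check can be demoted to a per-query warning before running a wide INSERT; a minimal sketch (the table names are hypothetical):

```sql
SET throw_on_max_partitions_per_insert_block = 0;
INSERT INTO my_table SELECT * FROM source_table;
```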
## max_sessions_for_user {#max-sessions-per-user}
@ -1,32 +0,0 @@
---
slug: /zh/getting-started/example-datasets/wikistat
sidebar_position: 17
sidebar_label: WikiStat
---

# WikiStat {#wikistat}

See: http://dumps.wikimedia.org/other/pagecounts-raw/

Creating the table:

``` sql
CREATE TABLE wikistat
(
    date Date,
    time DateTime,
    project String,
    subproject String,
    path String,
    hits UInt64,
    size UInt64
) ENGINE = MergeTree(date, (path, time), 8192);
```

Loading the data:

``` bash
$ for i in {2007..2016}; do for j in {01..12}; do echo $i-$j >&2; curl -sSL "http://dumps.wikimedia.org/other/pagecounts-raw/$i/$i-$j/" | grep -oE 'pagecounts-[0-9]+-[0-9]+\.gz'; done; done | sort | uniq | tee links.txt
$ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/pagecounts-raw/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1/')/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1-\2/')/$link; done
$ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done
```
1 docs/zh/getting-started/example-datasets/wikistat.md Symbolic link
@ -0,0 +1 @@
../../../en/getting-started/example-datasets/wikistat.md
@ -1,4 +1,6 @@
#include "ICommand.h"
#include <iostream>


namespace DB
{
@ -1,5 +1,6 @@

#include "Commands.h"
#include <queue>
#include "KeeperClient.h"
@ -24,8 +25,18 @@ void LSCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
    else
        path = client->cwd;

    for (const auto & child : client->zookeeper->getChildren(path))
        std::cout << child << " ";
    auto children = client->zookeeper->getChildren(path);
    std::sort(children.begin(), children.end());

    bool need_space = false;
    for (const auto & child : children)
    {
        if (std::exchange(need_space, true))
            std::cout << " ";

        std::cout << child;
    }

    std::cout << "\n";
}
@ -115,6 +126,21 @@ void CreateCommand::execute(const ASTKeeperQuery * query, KeeperClient * client)
        static_cast<int>(query->args[2].safeGet<Int64>()));
}

bool TouchCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
    String arg;
    if (!parseKeeperPath(pos, expected, arg))
        return false;
    node->args.push_back(std::move(arg));

    return true;
}

void TouchCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
    client->zookeeper->createIfNotExists(client->getAbsolutePath(query->args[0].safeGet<String>()), "");
}

bool GetCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
    String arg;
@ -130,6 +156,173 @@ void GetCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
    std::cout << client->zookeeper->get(client->getAbsolutePath(query->args[0].safeGet<String>())) << "\n";
}

bool GetStatCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
    String arg;
    if (!parseKeeperPath(pos, expected, arg))
        return true;

    node->args.push_back(std::move(arg));
    return true;
}

void GetStatCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
    Coordination::Stat stat;
    String path;
    if (!query->args.empty())
        path = client->getAbsolutePath(query->args[0].safeGet<String>());
    else
        path = client->cwd;

    client->zookeeper->get(path, &stat);

    std::cout << "cZxid = " << stat.czxid << "\n";
    std::cout << "mZxid = " << stat.mzxid << "\n";
    std::cout << "pZxid = " << stat.pzxid << "\n";
    std::cout << "ctime = " << stat.ctime << "\n";
    std::cout << "mtime = " << stat.mtime << "\n";
    std::cout << "version = " << stat.version << "\n";
    std::cout << "cversion = " << stat.cversion << "\n";
    std::cout << "aversion = " << stat.aversion << "\n";
    std::cout << "ephemeralOwner = " << stat.ephemeralOwner << "\n";
    std::cout << "dataLength = " << stat.dataLength << "\n";
    std::cout << "numChildren = " << stat.numChildren << "\n";
}
bool FindSuperNodes::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
    ASTPtr threshold;
    if (!ParserUnsignedInteger{}.parse(pos, threshold, expected))
        return false;

    node->args.push_back(threshold->as<ASTLiteral &>().value);

    String path;
    if (!parseKeeperPath(pos, expected, path))
        path = ".";

    node->args.push_back(std::move(path));
    return true;
}

void FindSuperNodes::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
    auto threshold = query->args[0].safeGet<UInt64>();
    auto path = client->getAbsolutePath(query->args[1].safeGet<String>());

    Coordination::Stat stat;
    client->zookeeper->get(path, &stat);

    if (stat.numChildren >= static_cast<Int32>(threshold))
    {
        std::cout << static_cast<String>(path) << "\t" << stat.numChildren << "\n";
        return;
    }

    auto children = client->zookeeper->getChildren(path);
    std::sort(children.begin(), children.end());
    for (const auto & child : children)
    {
        auto next_query = *query;
        next_query.args[1] = DB::Field(path / child);
        execute(&next_query, client);
    }
}

bool DeleteStaleBackups::parse(IParser::Pos & /* pos */, std::shared_ptr<ASTKeeperQuery> & /* node */, Expected & /* expected */) const
{
    return true;
}

void DeleteStaleBackups::execute(const ASTKeeperQuery * /* query */, KeeperClient * client) const
{
    client->askConfirmation(
        "You are going to delete all inactive backups in /clickhouse/backups.",
        [client]
        {
            fs::path backup_root = "/clickhouse/backups";
            auto backups = client->zookeeper->getChildren(backup_root);
            std::sort(backups.begin(), backups.end());

            for (const auto & child : backups)
            {
                auto backup_path = backup_root / child;
                std::cout << "Found backup " << backup_path << ", checking if it's active\n";

                String stage_path = backup_path / "stage";
                auto stages = client->zookeeper->getChildren(stage_path);

                bool is_active = false;
                for (const auto & stage : stages)
                {
                    if (startsWith(stage, "alive"))
                    {
                        is_active = true;
                        break;
                    }
                }

                if (is_active)
                {
                    std::cout << "Backup " << backup_path << " is active, not going to delete\n";
                    continue;
                }

                std::cout << "Backup " << backup_path << " is not active, deleting it\n";
                client->zookeeper->removeRecursive(backup_path);
            }
        });
}
bool FindBigFamily::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
    String path;
    if (!parseKeeperPath(pos, expected, path))
        path = ".";

    node->args.push_back(std::move(path));

    ASTPtr count;
    if (ParserUnsignedInteger{}.parse(pos, count, expected))
        node->args.push_back(count->as<ASTLiteral &>().value);
    else
        node->args.push_back(UInt64(10));

    return true;
}

void FindBigFamily::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
    auto path = client->getAbsolutePath(query->args[0].safeGet<String>());
    auto n = query->args[1].safeGet<UInt64>();

    std::vector<std::tuple<Int32, String>> result;

    std::queue<fs::path> queue;
    queue.push(path);
    while (!queue.empty())
    {
        auto next_path = queue.front();
        queue.pop();

        auto children = client->zookeeper->getChildren(next_path);
        std::transform(children.cbegin(), children.cend(), children.begin(), [&](const String & child) { return next_path / child; });

        auto response = client->zookeeper->get(children);

        for (size_t i = 0; i < response.size(); ++i)
        {
            result.emplace_back(response[i].stat.numChildren, children[i]);
            queue.push(children[i]);
        }
    }

    std::sort(result.begin(), result.end(), std::greater());
    for (UInt64 i = 0; i < std::min(result.size(), static_cast<size_t>(n)); ++i)
        std::cout << std::get<1>(result[i]) << "\t" << std::get<0>(result[i]) << "\n";
}

bool RMCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
    String arg;
@ -170,7 +363,7 @@ bool HelpCommand::parse(IParser::Pos & /* pos */, std::shared_ptr<ASTKeeperQuery
void HelpCommand::execute(const ASTKeeperQuery * /* query */, KeeperClient * /* client */) const
{
    for (const auto & pair : KeeperClient::commands)
        std::cout << pair.second->getHelpMessage() << "\n";
        std::cout << pair.second->generateHelpString() << "\n";
}

bool FourLetterWordCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
@ -21,6 +21,12 @@ public:
    virtual String getName() const = 0;

    virtual ~IKeeperClientCommand() = default;

    String generateHelpString() const
    {
        return fmt::vformat(getHelpMessage(), fmt::make_format_args(getName()));
    }

};

using Command = std::shared_ptr<IKeeperClientCommand>;
@ -34,7 +40,7 @@ class LSCommand : public IKeeperClientCommand

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "ls [path] -- Lists the nodes for the given path (default: cwd)"; }
    String getHelpMessage() const override { return "{} [path] -- Lists the nodes for the given path (default: cwd)"; }
};

class CDCommand : public IKeeperClientCommand
@ -45,7 +51,7 @@ class CDCommand : public IKeeperClientCommand

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "cd [path] -- Change the working path (default `.`)"; }
    String getHelpMessage() const override { return "{} [path] -- Change the working path (default `.`)"; }
};

class SetCommand : public IKeeperClientCommand
@ -58,7 +64,7 @@ class SetCommand : public IKeeperClientCommand

    String getHelpMessage() const override
    {
        return "set <path> <value> [version] -- Updates the node's value. Only update if version matches (default: -1)";
        return "{} <path> <value> [version] -- Updates the node's value. Only update if version matches (default: -1)";
    }
};

@ -70,7 +76,18 @@ class CreateCommand : public IKeeperClientCommand

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "create <path> <value> -- Creates new node"; }
    String getHelpMessage() const override { return "{} <path> <value> [mode] -- Creates new node with the set value"; }
};

class TouchCommand : public IKeeperClientCommand
{
    String getName() const override { return "touch"; }

    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "{} <path> -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists"; }
};

class GetCommand : public IKeeperClientCommand
@ -81,9 +98,63 @@ class GetCommand : public IKeeperClientCommand

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "get <path> -- Returns the node's value"; }
    String getHelpMessage() const override { return "{} <path> -- Returns the node's value"; }
};

class GetStatCommand : public IKeeperClientCommand
{
    String getName() const override { return "get_stat"; }

    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "{} [path] -- Returns the node's stat (default `.`)"; }
};

class FindSuperNodes : public IKeeperClientCommand
{
    String getName() const override { return "find_super_nodes"; }

    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override
    {
        return "{} <threshold> [path] -- Finds nodes with number of children larger than some threshold for the given path (default `.`)";
    }
};

class DeleteStaleBackups : public IKeeperClientCommand
{
    String getName() const override { return "delete_stale_backups"; }

    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override
    {
        return "{} -- Deletes ClickHouse nodes used for backups that are now inactive";
    }
};

class FindBigFamily : public IKeeperClientCommand
{
    String getName() const override { return "find_big_family"; }

    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override
    {
        return "{} [path] [n] -- Returns the top n nodes with the biggest family in the subtree (default path = `.` and n = 10)";
    }
};


class RMCommand : public IKeeperClientCommand
{
    String getName() const override { return "rm"; }
@ -92,7 +163,7 @@ class RMCommand : public IKeeperClientCommand

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "remove <path> -- Remove the node"; }
    String getHelpMessage() const override { return "{} <path> -- Remove the node"; }
};

class RMRCommand : public IKeeperClientCommand
@ -103,7 +174,7 @@ class RMRCommand : public IKeeperClientCommand

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "rmr <path> -- Recursively deletes path. Confirmation required"; }
    String getHelpMessage() const override { return "{} <path> -- Recursively deletes path. Confirmation required"; }
};

class HelpCommand : public IKeeperClientCommand
@ -114,7 +185,7 @@ class HelpCommand : public IKeeperClientCommand

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "help -- Prints this message"; }
    String getHelpMessage() const override { return "{} -- Prints this message"; }
};

class FourLetterWordCommand : public IKeeperClientCommand
@ -125,7 +196,7 @@ class FourLetterWordCommand : public IKeeperClientCommand

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "flwc <command> -- Executes four-letter-word command"; }
    String getHelpMessage() const override { return "{} <command> -- Executes four-letter-word command"; }
};

}
@ -131,7 +131,7 @@ void KeeperClient::defineOptions(Poco::Util::OptionSet & options)
            .binding("host"));

    options.addOption(
        Poco::Util::Option("port", "p", "server port. default `2181`")
        Poco::Util::Option("port", "p", "server port. default `9181`")
            .argument("<port>")
            .binding("port"));
@ -176,7 +176,12 @@ void KeeperClient::initialize(Poco::Util::Application & /* self */)
        std::make_shared<CDCommand>(),
        std::make_shared<SetCommand>(),
        std::make_shared<CreateCommand>(),
        std::make_shared<TouchCommand>(),
        std::make_shared<GetCommand>(),
        std::make_shared<GetStatCommand>(),
        std::make_shared<FindSuperNodes>(),
        std::make_shared<DeleteStaleBackups>(),
        std::make_shared<FindBigFamily>(),
        std::make_shared<RMCommand>(),
        std::make_shared<RMRCommand>(),
        std::make_shared<HelpCommand>(),
@ -307,7 +312,7 @@ int KeeperClient::main(const std::vector<String> & /* args */)
    }

    auto host = config().getString("host", "localhost");
    auto port = config().getString("port", "2181");
    auto port = config().getString("port", "9181");
    zk_args.hosts = {host + ":" + port};
    zk_args.connection_timeout_ms = config().getInt("connection-timeout", 10) * 1000;
    zk_args.session_timeout_ms = config().getInt("session-timeout", 10) * 1000;
@ -57,6 +57,7 @@ if (BUILD_STANDALONE_KEEPER)

    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/IO/ReadBuffer.cpp

    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTPPathHints.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/KeeperTCPHandler.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/TCPServer.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/NotFoundHandler.cpp
@ -150,7 +150,7 @@ int Keeper::run()
    }
    if (config().hasOption("version"))
    {
        std::cout << DBMS_NAME << " keeper version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
        std::cout << VERSION_NAME << " keeper version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
        return 0;
    }
@ -389,7 +389,7 @@ int Server::run()
    }
    if (config().hasOption("version"))
    {
        std::cout << DBMS_NAME << " server version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
        std::cout << VERSION_NAME << " server version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
        return 0;
    }
    return Application::run(); // NOLINT
@ -317,7 +317,7 @@
    <concurrent_threads_soft_limit_ratio_to_cores>0</concurrent_threads_soft_limit_ratio_to_cores>

    <!-- Maximum number of concurrent queries. -->
    <max_concurrent_queries>100</max_concurrent_queries>
    <max_concurrent_queries>1000</max_concurrent_queries>

    <!-- Maximum memory usage (resident set size) for server process.
         Zero value or unset means default. Default is "max_server_memory_usage_to_ram_ratio" of available physical RAM.
@ -5,6 +5,7 @@ CXXFLAGS = "@RUST_CXXFLAGS@"
[build]
rustflags = @RUSTFLAGS@
rustdocflags = @RUSTFLAGS@
@RUSTCWRAPPER@

[unstable]
@RUST_CARGO_BUILD_STD@
@ -1,4 +0,0 @@
# Just in case ignore any cargo stuff (and just in case someone will run this
# docker build locally with build context using folder root):
target
vendor

4 rust/.gitignore vendored
@ -1,4 +0,0 @@
# This is for tar --exclude-vcs-ignores (and just in case someone will run
# docker build locally with build context created via tar):
target
vendor
@ -14,6 +14,13 @@ macro(configure_rustc)
    set(RUST_CFLAGS "${RUST_CFLAGS} --sysroot ${CMAKE_SYSROOT}")
endif()

if(CCACHE_EXECUTABLE MATCHES "/sccache$")
    message(STATUS "Using RUSTC_WRAPPER: ${CCACHE_EXECUTABLE}")
    set(RUSTCWRAPPER "rustc-wrapper = \"${CCACHE_EXECUTABLE}\"")
else()
    set(RUSTCWRAPPER "")
endif()

set(RUSTFLAGS "[]")
set(RUST_CARGO_BUILD_STD "")
# For more info: https://doc.rust-lang.org/beta/unstable-book/compiler-flags/sanitizer.html#memorysanitizer
@ -418,7 +418,7 @@ void AccessControl::addStoragesFromUserDirectoriesConfig(
    String type = key_in_user_directories;
    if (size_t bracket_pos = type.find('['); bracket_pos != String::npos)
        type.resize(bracket_pos);
    if ((type == "users_xml") || (type == "users_config"))
    if ((type == "users.xml") || (type == "users_config"))
        type = UsersConfigAccessStorage::STORAGE_TYPE;
    else if ((type == "local") || (type == "local_directory"))
        type = DiskAccessStorage::STORAGE_TYPE;
@ -528,12 +528,14 @@ scope_guard AccessControl::subscribeForChanges(const std::vector<UUID> & ids, co
    return changes_notifier->subscribeForChanges(ids, handler);
}

std::optional<UUID> AccessControl::insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
bool AccessControl::insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
{
    auto id = MultipleAccessStorage::insertImpl(entity, replace_if_exists, throw_if_exists);
    if (id)
    if (MultipleAccessStorage::insertImpl(id, entity, replace_if_exists, throw_if_exists))
    {
        changes_notifier->sendNotifications();
        return id;
        return true;
    }
    return false;
}

bool AccessControl::removeImpl(const UUID & id, bool throw_if_not_exists)
@ -232,7 +232,7 @@ private:
    class CustomSettingsPrefixes;
    class PasswordComplexityRules;

    std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
    bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
    bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
    bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists) override;
@ -498,20 +498,10 @@ std::optional<std::pair<String, AccessEntityType>> DiskAccessStorage::readNameWi
}


std::optional<UUID> DiskAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
    UUID id = generateRandomID();
    if (insertWithID(id, new_entity, replace_if_exists, throw_if_exists, /* write_on_disk= */ true))
        return id;

    return std::nullopt;
}


bool DiskAccessStorage::insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists, bool write_on_disk)
bool DiskAccessStorage::insertImpl(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
    std::lock_guard lock{mutex};
    return insertNoLock(id, new_entity, replace_if_exists, throw_if_exists, write_on_disk);
    return insertNoLock(id, new_entity, replace_if_exists, throw_if_exists, /* write_on_disk = */ true);
}


@ -745,7 +735,7 @@ void DiskAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
    restorer.addDataRestoreTask([this, my_entities = std::move(entities), replace_if_exists, throw_if_exists]
    {
        for (const auto & [id, entity] : my_entities)
            insertWithID(id, entity, replace_if_exists, throw_if_exists, /* write_on_disk= */ true);
            insert(id, entity, replace_if_exists, throw_if_exists);
    });
}
@ -13,7 +13,7 @@ class AccessChangesNotifier;
class DiskAccessStorage : public IAccessStorage
{
public:
    static constexpr char STORAGE_TYPE[] = "local directory";
    static constexpr char STORAGE_TYPE[] = "local_directory";

    DiskAccessStorage(const String & storage_name_, const String & directory_path_, AccessChangesNotifier & changes_notifier_, bool readonly_, bool allow_backup_);
    ~DiskAccessStorage() override;
@ -39,7 +39,7 @@ private:
    std::vector<UUID> findAllImpl(AccessEntityType type) const override;
    AccessEntityPtr readImpl(const UUID & id, bool throw_if_not_exists) const override;
    std::optional<std::pair<String, AccessEntityType>> readNameWithTypeImpl(const UUID & id, bool throw_if_not_exists) const override;
    std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
    bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
    bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
    bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists) override;

@ -53,7 +53,6 @@ private:
    void listsWritingThreadFunc() TSA_NO_THREAD_SAFETY_ANALYSIS;
    void stopListsWritingThread();

    bool insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists, bool write_on_disk);
    bool insertNoLock(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists, bool write_on_disk) TSA_REQUIRES(mutex);
    bool updateNoLock(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists, bool write_on_disk) TSA_REQUIRES(mutex);
    bool removeNoLock(const UUID & id, bool throw_if_not_exists, bool write_on_disk) TSA_REQUIRES(mutex);
@ -93,6 +93,17 @@ String IAccessStorage::readName(const UUID & id) const
}


bool IAccessStorage::exists(const std::vector<UUID> & ids) const
{
    for (const auto & id : ids)
    {
        if (!exists(id))
            return false;
    }

    return true;
}

std::optional<String> IAccessStorage::readName(const UUID & id, bool throw_if_not_exists) const
{
    if (auto name_and_type = readNameWithType(id, throw_if_not_exists))
@ -167,38 +178,69 @@ UUID IAccessStorage::insert(const AccessEntityPtr & entity)
    return *insert(entity, /* replace_if_exists = */ false, /* throw_if_exists = */ true);
}


std::optional<UUID> IAccessStorage::insert(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
{
    return insertImpl(entity, replace_if_exists, throw_if_exists);
    auto id = generateRandomID();

    if (insert(id, entity, replace_if_exists, throw_if_exists))
        return id;

    return std::nullopt;
}


bool IAccessStorage::insert(const DB::UUID & id, const DB::AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
{
    return insertImpl(id, entity, replace_if_exists, throw_if_exists);
}


std::vector<UUID> IAccessStorage::insert(const std::vector<AccessEntityPtr> & multiple_entities, bool replace_if_exists, bool throw_if_exists)
{
    return insert(multiple_entities, /* ids = */ {}, replace_if_exists, throw_if_exists);
}

std::vector<UUID> IAccessStorage::insert(const std::vector<AccessEntityPtr> & multiple_entities, const std::vector<UUID> & ids, bool replace_if_exists, bool throw_if_exists)
{
    assert(ids.empty() || (multiple_entities.size() == ids.size()));

    if (multiple_entities.empty())
        return {};

    if (multiple_entities.size() == 1)
    {
        if (auto id = insert(multiple_entities[0], replace_if_exists, throw_if_exists))
            return {*id};
        UUID id;
        if (!ids.empty())
            id = ids[0];
        else
            id = generateRandomID();

        if (insert(id, multiple_entities[0], replace_if_exists, throw_if_exists))
            return {id};
        return {};
    }

    std::vector<AccessEntityPtr> successfully_inserted;
    try
    {
        std::vector<UUID> ids;
        for (const auto & entity : multiple_entities)
        std::vector<UUID> new_ids;
        for (size_t i = 0; i < multiple_entities.size(); ++i)
        {
            if (auto id = insertImpl(entity, replace_if_exists, throw_if_exists))
            const auto & entity = multiple_entities[i];

            UUID id;
            if (!ids.empty())
                id = ids[i];
            else
                id = generateRandomID();

            if (insert(id, entity, replace_if_exists, throw_if_exists))
            {
                successfully_inserted.push_back(entity);
                ids.push_back(*id);
                new_ids.push_back(id);
            }
        }
        return ids;
        return new_ids;
    }
    catch (Exception & e)
    {
@ -244,7 +286,7 @@ std::vector<UUID> IAccessStorage::insertOrReplace(const std::vector<AccessEntity
}


std::optional<UUID> IAccessStorage::insertImpl(const AccessEntityPtr & entity, bool, bool)
bool IAccessStorage::insertImpl(const UUID &, const AccessEntityPtr & entity, bool, bool)
{
    if (isReadOnly())
        throwReadonlyCannotInsert(entity->getType(), entity->getName());
@ -3,6 +3,8 @@

#include <Access/IAccessEntity.h>
#include <Core/Types.h>
#include <Core/UUID.h>
#include <Parsers/IParser.h>
#include <Parsers/parseIdentifierOrStringLiteral.h>
#include <functional>
#include <optional>
#include <vector>
@ -92,6 +94,7 @@ public:

    /// Returns whether there is an entity with such identifier in the storage.
    virtual bool exists(const UUID & id) const = 0;
    bool exists(const std::vector<UUID> & ids) const;

    /// Reads an entity. Throws an exception if not found.
    template <typename EntityClassT = IAccessEntity>
@ -100,6 +103,9 @@ public:
    template <typename EntityClassT = IAccessEntity>
    std::shared_ptr<const EntityClassT> read(const String & name, bool throw_if_not_exists = true) const;

    template <typename EntityClassT = IAccessEntity>
    std::vector<AccessEntityPtr> read(const std::vector<UUID> & ids, bool throw_if_not_exists = true) const;

    /// Reads an entity. Returns nullptr if not found.
    template <typename EntityClassT = IAccessEntity>
    std::shared_ptr<const EntityClassT> tryRead(const UUID & id) const;
@ -128,7 +134,9 @@ public:
    /// Throws an exception if the specified name already exists.
    UUID insert(const AccessEntityPtr & entity);
    std::optional<UUID> insert(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists);
    bool insert(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists);
    std::vector<UUID> insert(const std::vector<AccessEntityPtr> & multiple_entities, bool replace_if_exists = false, bool throw_if_exists = true);
    std::vector<UUID> insert(const std::vector<AccessEntityPtr> & multiple_entities, const std::vector<UUID> & ids, bool replace_if_exists = false, bool throw_if_exists = true);

    /// Inserts an entity to the storage. Returns ID of a new entry in the storage.
    std::optional<UUID> tryInsert(const AccessEntityPtr & entity);
@ -179,7 +187,7 @@ protected:
    virtual std::vector<UUID> findAllImpl(AccessEntityType type) const = 0;
    virtual AccessEntityPtr readImpl(const UUID & id, bool throw_if_not_exists) const = 0;
    virtual std::optional<std::pair<String, AccessEntityType>> readNameWithTypeImpl(const UUID & id, bool throw_if_not_exists) const;
    virtual std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists);
    virtual bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists);
    virtual bool removeImpl(const UUID & id, bool throw_if_not_exists);
    virtual bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists);
    virtual std::optional<UUID> authenticateImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators, bool throw_if_user_not_exists, bool allow_no_password, bool allow_plaintext_password) const;
@ -240,6 +248,19 @@ std::shared_ptr<const EntityClassT> IAccessStorage::read(const String & name, bo
}


template <typename EntityClassT>
std::vector<AccessEntityPtr> IAccessStorage::read(const std::vector<UUID> & ids, bool throw_if_not_exists) const
{
    std::vector<AccessEntityPtr> result;
    result.reserve(ids.size());

    for (const auto & id : ids)
        result.push_back(read<EntityClassT>(id, throw_if_not_exists));

    return result;
}


template <typename EntityClassT>
std::shared_ptr<const EntityClassT> IAccessStorage::tryRead(const UUID & id) const
{
@ -265,4 +286,9 @@ std::vector<std::pair<UUID, std::shared_ptr<const EntityClassT>>> IAccessStorage
    return entities;
}

inline bool parseAccessStorageName(IParser::Pos & pos, Expected & expected, String & storage_name)
{
    return parseIdentifierOrStringLiteral(pos, expected, storage_name);
}

}
@ -18,7 +18,8 @@
namespace
{

template <typename T, typename = std::enable_if_t<std::is_fundamental_v<std::decay_t<T>>>>
template <typename T>
requires std::is_fundamental_v<std::decay_t<T>>
void updateHash(SipHash & hash, const T & value)
{
    hash.update(value);
@ -63,17 +63,7 @@ AccessEntityPtr MemoryAccessStorage::readImpl(const UUID & id, bool throw_if_not
}


std::optional<UUID> MemoryAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
    UUID id = generateRandomID();
    if (insertWithID(id, new_entity, replace_if_exists, throw_if_exists))
        return id;

    return std::nullopt;
}


bool MemoryAccessStorage::insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
bool MemoryAccessStorage::insertImpl(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
    std::lock_guard lock{mutex};
    return insertNoLock(id, new_entity, replace_if_exists, throw_if_exists);
@ -300,7 +290,7 @@ void MemoryAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
    restorer.addDataRestoreTask([this, my_entities = std::move(entities), replace_if_exists, throw_if_exists]
    {
        for (const auto & [id, entity] : my_entities)
            insertWithID(id, entity, replace_if_exists, throw_if_exists);
            insert(id, entity, replace_if_exists, throw_if_exists);
    });
}
@ -6,6 +6,7 @@
#include <memory>
#include <mutex>
#include <unordered_map>
#include <boost/container/flat_set.hpp>


namespace DB
@ -22,11 +23,6 @@ public:

    const char * getStorageType() const override { return STORAGE_TYPE; }

    /// Inserts an entity with a specified ID.
    /// If `replace_if_exists == true` it can replace an existing entry with such ID and also remove an existing entry
    /// with such name & type.
    bool insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists);

    /// Removes all entities except the specified list `ids_to_keep`.
    /// The function skips IDs not contained in the storage.
    void removeAllExcept(const std::vector<UUID> & ids_to_keep);
@ -44,7 +40,7 @@ private:
    std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
    std::vector<UUID> findAllImpl(AccessEntityType type) const override;
    AccessEntityPtr readImpl(const UUID & id, bool throw_if_not_exists) const override;
    std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
    bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
    bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
    bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists) override;
@ -16,6 +16,7 @@ namespace ErrorCodes
{
    extern const int ACCESS_ENTITY_ALREADY_EXISTS;
    extern const int ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND;
    extern const int ACCESS_ENTITY_NOT_FOUND;
}

using Storage = IAccessStorage;
@ -178,6 +179,91 @@ ConstStoragePtr MultipleAccessStorage::getStorage(const UUID & id) const
    return const_cast<MultipleAccessStorage *>(this)->getStorage(id);
}

StoragePtr MultipleAccessStorage::findStorageByName(const DB::String & storage_name)
{
    auto storages = getStoragesInternal();
    for (const auto & storage : *storages)
    {
        if (storage->getStorageName() == storage_name)
            return storage;
    }

    return nullptr;
}


ConstStoragePtr MultipleAccessStorage::findStorageByName(const DB::String & storage_name) const
{
    return const_cast<MultipleAccessStorage *>(this)->findStorageByName(storage_name);
}


StoragePtr MultipleAccessStorage::getStorageByName(const DB::String & storage_name)
{
    auto storage = findStorageByName(storage_name);
    if (storage)
        return storage;

    throw Exception(ErrorCodes::ACCESS_ENTITY_NOT_FOUND, "Access storage with name {} is not found", storage_name);
}


ConstStoragePtr MultipleAccessStorage::getStorageByName(const DB::String & storage_name) const
{
    return const_cast<MultipleAccessStorage *>(this)->getStorageByName(storage_name);
}

StoragePtr MultipleAccessStorage::findExcludingStorage(AccessEntityType type, const DB::String & name, DB::MultipleAccessStorage::StoragePtr exclude) const
{
    auto storages = getStoragesInternal();
    for (const auto & storage : *storages)
    {
        if (storage == exclude)
            continue;

        if (storage->find(type, name))
            return storage;
    }

    return nullptr;
}

void MultipleAccessStorage::moveAccessEntities(const std::vector<UUID> & ids, const String & source_storage_name, const String & destination_storage_name)
{
    auto source_storage = getStorageByName(source_storage_name);
    auto destination_storage = getStorageByName(destination_storage_name);

    auto to_move = source_storage->read(ids);
    bool need_rollback = false;

    try
    {
        source_storage->remove(ids);
        need_rollback = true;
        destination_storage->insert(to_move, ids);
    }
    catch (Exception & e)
    {
        String message;

        bool need_comma = false;
        for (const auto & entity : to_move)
        {
            if (std::exchange(need_comma, true))
                message += ", ";

            message += entity->formatTypeWithName();
        }

        e.addMessage("while moving {} from {} to {}", message, source_storage_name, destination_storage_name);

        if (need_rollback)
            source_storage->insert(to_move, ids);

        throw;
    }
}

AccessEntityPtr MultipleAccessStorage::readImpl(const UUID & id, bool throw_if_not_exists) const
{
    if (auto storage = findStorage(id))
@ -245,7 +331,7 @@ void MultipleAccessStorage::reload(ReloadMode reload_mode)
}


std::optional<UUID> MultipleAccessStorage::insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
bool MultipleAccessStorage::insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
{
    std::shared_ptr<IAccessStorage> storage_for_insertion;

@ -268,13 +354,14 @@ std::optional<UUID> MultipleAccessStorage::insertImpl(const AccessEntityPtr & en
            getStorageName());
    }

    auto id = storage_for_insertion->insert(entity, replace_if_exists, throw_if_exists);
    if (id)
    if (storage_for_insertion->insert(id, entity, replace_if_exists, throw_if_exists))
    {
        std::lock_guard lock{mutex};
        ids_cache.set(*id, storage_for_insertion);
        ids_cache.set(id, storage_for_insertion);
        return true;
    }
    return id;

    return false;
}
@ -41,6 +41,16 @@ public:
    ConstStoragePtr getStorage(const UUID & id) const;
    StoragePtr getStorage(const UUID & id);

    ConstStoragePtr findStorageByName(const String & storage_name) const;
    StoragePtr findStorageByName(const String & storage_name);
    ConstStoragePtr getStorageByName(const String & storage_name) const;
    StoragePtr getStorageByName(const String & storage_name);

    /// Search for an access entity storage, excluding one. Returns nullptr if not found.
    StoragePtr findExcludingStorage(AccessEntityType type, const String & name, StoragePtr exclude) const;

    void moveAccessEntities(const std::vector<UUID> & ids, const String & source_storage_name, const String & destination_storage_name);

    bool exists(const UUID & id) const override;

    bool isBackupAllowed() const override;
@ -53,7 +63,7 @@ protected:
    std::vector<UUID> findAllImpl(AccessEntityType type) const override;
    AccessEntityPtr readImpl(const UUID & id, bool throw_if_not_exists) const override;
    std::optional<std::pair<String, AccessEntityType>> readNameWithTypeImpl(const UUID & id, bool throw_if_not_exists) const override;
    std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
    bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
    bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
    bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists) override;
    std::optional<UUID> authenticateImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators, bool throw_if_user_not_exists, bool allow_no_password, bool allow_plaintext_password) const override;
@ -65,6 +75,8 @@ private:
    std::shared_ptr<const Storages> nested_storages TSA_GUARDED_BY(mutex);
    mutable CacheBase<UUID, Storage> ids_cache TSA_GUARDED_BY(mutex);
    mutable std::mutex mutex;

    mutable std::mutex move_mutex;
};

}
@ -108,17 +108,7 @@ static void retryOnZooKeeperUserError(size_t attempts, Func && function)
    }
}

std::optional<UUID> ReplicatedAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
    const UUID id = generateRandomID();
    if (insertWithID(id, new_entity, replace_if_exists, throw_if_exists))
        return id;

    return std::nullopt;
}


bool ReplicatedAccessStorage::insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
bool ReplicatedAccessStorage::insertImpl(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
    const AccessEntityTypeInfo type_info = AccessEntityTypeInfo::get(new_entity->getType());
    const String & name = new_entity->getName();
@ -619,7 +609,7 @@ AccessEntityPtr ReplicatedAccessStorage::tryReadEntityFromZooKeeper(const zkutil
void ReplicatedAccessStorage::setEntityNoLock(const UUID & id, const AccessEntityPtr & entity)
{
    LOG_DEBUG(getLogger(), "Setting id {} to entity named {}", toString(id), entity->getName());
    memory_storage.insertWithID(id, entity, /* replace_if_exists= */ true, /* throw_if_exists= */ false);
    memory_storage.insert(id, entity, /* replace_if_exists= */ true, /* throw_if_exists= */ false);
}


@ -711,7 +701,7 @@ void ReplicatedAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
    restorer.addDataRestoreTask([this, my_entities = std::move(entities), replace_if_exists, throw_if_exists]
    {
        for (const auto & [id, entity] : my_entities)
            insertWithID(id, entity, replace_if_exists, throw_if_exists);
            insert(id, entity, replace_if_exists, throw_if_exists);
    });
}
@ -46,11 +46,10 @@ private:
    std::unique_ptr<ThreadFromGlobalPool> watching_thread;
    std::shared_ptr<ConcurrentBoundedQueue<UUID>> watched_queue;

    std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
    bool insertImpl(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists) override;
    bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
    bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists) override;

    bool insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists);
    bool insertZooKeeper(const zkutil::ZooKeeperPtr & zookeeper, const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists);
    bool removeZooKeeper(const zkutil::ZooKeeperPtr & zookeeper, const UUID & id, bool throw_if_not_exists);
    bool updateZooKeeper(const zkutil::ZooKeeperPtr & zookeeper, const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists);
@ -20,7 +20,7 @@ class UsersConfigAccessStorage : public IAccessStorage
{
public:

    static constexpr char STORAGE_TYPE[] = "users.xml";
    static constexpr char STORAGE_TYPE[] = "users_xml";

    UsersConfigAccessStorage(const String & storage_name_, AccessControl & access_control_, bool allow_backup_);
    ~UsersConfigAccessStorage() override;
646 src/AggregateFunctions/AggregateFunctionFlameGraph.cpp Normal file
@ -0,0 +1,646 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <AggregateFunctions/FactoryHelpers.h>
#include <Common/HashTable/HashMap.h>
#include <Common/SymbolIndex.h>
#include <Common/ArenaAllocator.h>
#include <Core/Settings.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>
#include <filesystem>

namespace DB
{
namespace ErrorCodes
{
    extern const int FUNCTION_NOT_ALLOWED;
    extern const int NOT_IMPLEMENTED;
    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}

struct AggregateFunctionFlameGraphTree
{
    struct ListNode;

    struct TreeNode
    {
        TreeNode * parent = nullptr;
        ListNode * children = nullptr;
        UInt64 ptr = 0;
        size_t allocated = 0;
    };

    struct ListNode
    {
        ListNode * next = nullptr;
        TreeNode * child = nullptr;
    };

    TreeNode root;

    static ListNode * createChild(TreeNode * parent, UInt64 ptr, Arena * arena)
    {
        ListNode * list_node = reinterpret_cast<ListNode *>(arena->alloc(sizeof(ListNode)));
        TreeNode * tree_node = reinterpret_cast<TreeNode *>(arena->alloc(sizeof(TreeNode)));

        list_node->child = tree_node;
        list_node->next = nullptr;

        tree_node->parent = parent;
        tree_node->children = nullptr;
        tree_node->ptr = ptr;
        tree_node->allocated = 0;

        return list_node;
    }

    TreeNode * find(const UInt64 * stack, size_t stack_size, Arena * arena)
    {
        TreeNode * node = &root;
        for (size_t i = 0; i < stack_size; ++i)
        {
            UInt64 ptr = stack[i];
            if (ptr == 0)
                break;

            if (!node->children)
            {
                node->children = createChild(node, ptr, arena);
                node = node->children->child;
            }
            else
            {
                ListNode * list = node->children;
                while (list->child->ptr != ptr && list->next)
                    list = list->next;

                if (list->child->ptr != ptr)
                {
                    list->next = createChild(node, ptr, arena);
                    list = list->next;
                }

                node = list->child;
            }
        }

        return node;
    }

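    /// Illustration (hypothetical addresses): calling find() on the stack {0xA, 0xB} and then
    /// on {0xA, 0xC} creates one root child 0xA with two children 0xB and 0xC, so a shared
    /// prefix is stored once; the per-node byte counters are filled in later by track()/untrack().
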
    static void append(DB::PaddedPODArray<UInt64> & values, DB::PaddedPODArray<UInt64> & offsets, std::vector<UInt64> & frame)
    {
        UInt64 prev = offsets.empty() ? 0 : offsets.back();
        offsets.push_back(prev + frame.size());
        for (UInt64 val : frame)
            values.push_back(val);
    }

    struct Trace
    {
        using Frames = std::vector<UInt64>;

        Frames frames;

        /// The total number of bytes allocated for traces with the same prefix.
        size_t allocated_total = 0;
        /// This counter is relevant if we want to filter out traces with a small number of bytes.
        /// It shows the total number of bytes for *filtered* traces with the same prefix.
        /// This is the value which is used in the flamegraph.
        size_t allocated_self = 0;
    };

    using Traces = std::vector<Trace>;

    Traces dump(size_t max_depth, size_t min_bytes) const
    {
        Traces traces;
        Trace::Frames frames;
        std::vector<size_t> allocated_total;
        std::vector<size_t> allocated_self;
        std::vector<ListNode *> nodes;

        nodes.push_back(root.children);
        allocated_total.push_back(root.allocated);
        allocated_self.push_back(root.allocated);

        while (!nodes.empty())
        {
            if (nodes.back() == nullptr)
            {
                traces.push_back({frames, allocated_total.back(), allocated_self.back()});

                nodes.pop_back();
                allocated_total.pop_back();
                allocated_self.pop_back();

                /// We don't have the root's frame, so frames is empty at the end.
                if (!frames.empty())
                    frames.pop_back();

                continue;
            }

            TreeNode * current = nodes.back()->child;
            nodes.back() = nodes.back()->next;

            bool enough_bytes = current->allocated >= min_bytes;
            bool enough_depth = max_depth == 0 || nodes.size() < max_depth;

            if (enough_bytes)
            {
                frames.push_back(current->ptr);
                allocated_self.back() -= current->allocated;

                if (enough_depth)
                {
                    allocated_total.push_back(current->allocated);
                    allocated_self.push_back(current->allocated);
                    nodes.push_back(current->children);
                }
                else
                {
                    traces.push_back({frames, current->allocated, current->allocated});
                    frames.pop_back();
                }
            }
        }

        return traces;
    }
};

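/// A note on the layout the helpers below fill (illustrative values): ColumnString keeps all
/// rows in one chars buffer with a terminating zero byte per row, and offsets points one past
/// each terminator. Inserting "ab" and then "c" yields chars = ['a', 'b', 0, 'c', 0] and
/// offsets = [3, 5].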
static void insertData(DB::PaddedPODArray<UInt8> & chars, DB::PaddedPODArray<UInt64> & offsets, const char * pos, size_t length)
{
    const size_t old_size = chars.size();
    const size_t new_size = old_size + length + 1;

    chars.resize(new_size);
    if (length)
        memcpy(chars.data() + old_size, pos, length);
    chars[old_size + length] = 0;
    offsets.push_back(new_size);
}

/// Split str by line feed and write each line as a separate row to ColumnString.
static void fillColumn(DB::PaddedPODArray<UInt8> & chars, DB::PaddedPODArray<UInt64> & offsets, const std::string & str)
{
    size_t start = 0;
    size_t end = 0;
    size_t size = str.size();

    while (end < size)
    {
        if (str[end] == '\n')
        {
            insertData(chars, offsets, str.data() + start, end - start);
            start = end + 1;
        }

        ++end;
    }

    if (start < end)
        insertData(chars, offsets, str.data() + start, end - start);
}

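/// For example (hypothetical input), fillColumn(chars, offsets, "foo\nbar\n") inserts the two
/// rows "foo" and "bar"; a trailing line feed does not produce an empty row.
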
void dumpFlameGraph(
    const AggregateFunctionFlameGraphTree::Traces & traces,
    DB::PaddedPODArray<UInt8> & chars,
    DB::PaddedPODArray<UInt64> & offsets)
{
    DB::WriteBufferFromOwnString out;

    std::unordered_map<uintptr_t, size_t> mapping;

#if defined(__ELF__) && !defined(OS_FREEBSD)
    const DB::SymbolIndex & symbol_index = DB::SymbolIndex::instance();
#endif

    for (const auto & trace : traces)
    {
        if (trace.allocated_self == 0)
            continue;

        for (size_t i = 0; i < trace.frames.size(); ++i)
        {
            if (i)
                out << ";";

            const void * ptr = reinterpret_cast<const void *>(trace.frames[i]);

#if defined(__ELF__) && !defined(OS_FREEBSD)
            if (const auto * symbol = symbol_index.findSymbol(ptr))
                writeString(demangle(symbol->name), out);
            else
                DB::writePointerHex(ptr, out);
#else
            DB::writePointerHex(ptr, out);
#endif
        }

        out << ' ' << trace.allocated_self << "\n";
    }

    fillColumn(chars, offsets, out.str());
}

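/// Each row produced above has the form "frameA;frameB;frameC 1024" (names illustrative):
/// semicolon-separated frames followed by a space and the self-allocated byte count, which is
/// the collapsed-stack format that flamegraph.pl consumes.
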
struct AggregateFunctionFlameGraphData
{
    struct Entry
    {
        AggregateFunctionFlameGraphTree::TreeNode * trace;
        UInt64 size;
        Entry * next = nullptr;
    };

    struct Pair
    {
        Entry * allocation = nullptr;
        Entry * deallocation = nullptr;
    };

    using Entries = HashMap<UInt64, Pair>;

    AggregateFunctionFlameGraphTree tree;
    Entries entries;
    Entry * free_list = nullptr;

    Entry * alloc(Arena * arena)
    {
        if (free_list)
        {
            auto * res = free_list;
            free_list = free_list->next;
            return res;
        }

        return reinterpret_cast<Entry *>(arena->alloc(sizeof(Entry)));
    }

    void release(Entry * entry)
    {
        entry->next = free_list;
        free_list = entry;
    }

    static void track(Entry * allocation)
    {
        auto * node = allocation->trace;
        while (node)
        {
            node->allocated += allocation->size;
            node = node->parent;
        }
    }

    static void untrack(Entry * allocation)
    {
        auto * node = allocation->trace;
        while (node)
        {
            node->allocated -= allocation->size;
            node = node->parent;
        }
    }

    static Entry * tryFindMatchAndRemove(Entry *& list, UInt64 size)
    {
        if (!list)
            return nullptr;

        if (list->size == size)
        {
            Entry * entry = list;
            list = list->next;
            return entry;
        }
        else
        {
            Entry * parent = list;
            while (parent->next && parent->next->size != size)
                parent = parent->next;

            if (parent->next && parent->next->size == size)
            {
                Entry * entry = parent->next;
                parent->next = entry->next;
                return entry;
            }

            return nullptr;
        }
    }

    void add(UInt64 ptr, Int64 size, const UInt64 * stack, size_t stack_size, Arena * arena)
    {
        /// If the pointer argument is zero, only track allocations; they are never matched against deallocations.
        if (ptr == 0)
        {
            if (size > 0)
            {
                auto * node = tree.find(stack, stack_size, arena);
                Entry entry{.trace = node, .size = UInt64(size)};
                track(&entry);
            }

            return;
        }

        auto & place = entries[ptr];
        if (size > 0)
        {
            if (auto * deallocation = tryFindMatchAndRemove(place.deallocation, size))
            {
                release(deallocation);
            }
            else
            {
                auto * node = tree.find(stack, stack_size, arena);

                auto * allocation = alloc(arena);
                allocation->size = UInt64(size);
                allocation->trace = node;

                track(allocation);

                allocation->next = place.allocation;
                place.allocation = allocation;
            }
        }
        else if (size < 0)
        {
            UInt64 abs_size = -size;
            if (auto * allocation = tryFindMatchAndRemove(place.allocation, abs_size))
            {
                untrack(allocation);
                release(allocation);
            }
            else
            {
                auto * deallocation = alloc(arena);
                deallocation->size = abs_size;

                deallocation->next = place.deallocation;
                place.deallocation = deallocation;
            }
        }
    }

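    /// Illustration of the matching above (hypothetical values): add(0x1, +64, ...) records an
    /// allocation under entries[0x1]; a later add(0x1, -64, ...) finds and cancels it, so only
    /// never-freed allocations remain in the tree. If the deallocation arrives first (possible
    /// when merging partial states), it is parked in place.deallocation and cancels the
    /// matching allocation once that one shows up.
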
    void merge(const AggregateFunctionFlameGraphTree & other_tree, Arena * arena)
    {
        AggregateFunctionFlameGraphTree::Trace::Frames frames;
        std::vector<AggregateFunctionFlameGraphTree::ListNode *> nodes;

        nodes.push_back(other_tree.root.children);

        while (!nodes.empty())
        {
            if (nodes.back() == nullptr)
            {
                nodes.pop_back();

                /// We don't have the root's frame, so frames is empty at the end.
                if (!frames.empty())
                    frames.pop_back();

                continue;
            }

            AggregateFunctionFlameGraphTree::TreeNode * current = nodes.back()->child;
            nodes.back() = nodes.back()->next;

            frames.push_back(current->ptr);

            if (current->children)
                nodes.push_back(current->children);
            else
            {
                if (current->allocated)
                    add(0, current->allocated, frames.data(), frames.size(), arena);

                frames.pop_back();
            }
        }
    }

    void merge(const AggregateFunctionFlameGraphData & other, Arena * arena)
    {
        AggregateFunctionFlameGraphTree::Trace::Frames frames;
        for (const auto & entry : other.entries)
        {
            for (auto * allocation = entry.value.second.allocation; allocation; allocation = allocation->next)
            {
                frames.clear();
                const auto * node = allocation->trace;
                while (node->ptr)
                {
                    frames.push_back(node->ptr);
                    node = node->parent;
                }

                std::reverse(frames.begin(), frames.end());
                add(entry.value.first, allocation->size, frames.data(), frames.size(), arena);
                untrack(allocation);
            }

            for (auto * deallocation = entry.value.second.deallocation; deallocation; deallocation = deallocation->next)
            {
                add(entry.value.first, -Int64(deallocation->size), nullptr, 0, arena);
            }
        }

        merge(other.tree, arena);
    }

    void dumpFlameGraph(
        DB::PaddedPODArray<UInt8> & chars,
        DB::PaddedPODArray<UInt64> & offsets,
        size_t max_depth, size_t min_bytes) const
    {
        DB::dumpFlameGraph(tree.dump(max_depth, min_bytes), chars, offsets);
    }
};

/// An aggregate function which builds a flamegraph from a list of stacktraces.
/// The output is an array of strings which can be used by the flamegraph.pl util.
/// See https://github.com/brendangregg/FlameGraph
///
/// Syntax: flameGraph(traces, [size = 1], [ptr = 0])
/// - trace : Array(UInt64), a stacktrace
/// - size  : Int64, an allocation size (for memory profiling)
/// - ptr   : UInt64, an allocation address
/// If ptr != 0, flameGraph matches allocations (size > 0) and deallocations (size < 0) with the same size and ptr.
/// Only allocations which were not freed are shown. Unmatched deallocations are ignored.
///
/// Usage:
///
/// * Build a flamegraph based on the CPU query profiler
/// set query_profiler_cpu_time_period_ns=10000000;
/// SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
/// clickhouse client --allow_introspection_functions=1
///     -q "select arrayJoin(flameGraph(arrayReverse(trace))) from system.trace_log where trace_type = 'CPU' and query_id = 'xxx'"
///     | ~/dev/FlameGraph/flamegraph.pl > flame_cpu.svg
///
/// * Build a flamegraph based on the memory query profiler, showing all allocations
/// set memory_profiler_sample_probability=1, max_untracked_memory=1;
/// SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
/// clickhouse client --allow_introspection_functions=1
///     -q "select arrayJoin(flameGraph(trace, size)) from system.trace_log where trace_type = 'MemorySample' and query_id = 'xxx'"
///     | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem.svg
///
/// * Build a flamegraph based on the memory query profiler, showing allocations which were not deallocated in the query context
/// set memory_profiler_sample_probability=1, max_untracked_memory=1, use_uncompressed_cache=1, merge_tree_max_rows_to_use_cache=100000000000, merge_tree_max_bytes_to_use_cache=1000000000000;
/// SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
/// clickhouse client --allow_introspection_functions=1
///     -q "select arrayJoin(flameGraph(trace, size, ptr)) from system.trace_log where trace_type = 'MemorySample' and query_id = 'xxx'"
///     | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_untracked.svg
///
/// * Build a flamegraph based on the memory query profiler, showing active allocations at a fixed point in time
/// set memory_profiler_sample_probability=1, max_untracked_memory=1;
/// SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
/// 1. Memory usage per second:
/// select event_time, m, formatReadableSize(max(s) as m) from (select event_time, sum(size) over (order by event_time) as s from system.trace_log where query_id = 'xxx' and trace_type = 'MemorySample') group by event_time order by event_time;
/// 2. Find the time point with maximal memory usage:
/// select argMax(event_time, s), max(s) from (select event_time, sum(size) over (order by event_time) as s from system.trace_log where query_id = 'xxx' and trace_type = 'MemorySample');
/// 3. Show active allocations at that point in time:
/// clickhouse client --allow_introspection_functions=1
///     -q "select arrayJoin(flameGraph(trace, size, ptr)) from (select * from system.trace_log where trace_type = 'MemorySample' and query_id = 'xxx' and event_time <= 'yyy' order by event_time)"
///     | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_time_point_pos.svg
/// 4. Find deallocations after that point in time:
/// clickhouse client --allow_introspection_functions=1
///     -q "select arrayJoin(flameGraph(trace, -size, ptr)) from (select * from system.trace_log where trace_type = 'MemorySample' and query_id = 'xxx' and event_time > 'yyy' order by event_time desc)"
///     | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_time_point_neg.svg
class AggregateFunctionFlameGraph final : public IAggregateFunctionDataHelper<AggregateFunctionFlameGraphData, AggregateFunctionFlameGraph>
{
public:
    explicit AggregateFunctionFlameGraph(const DataTypes & argument_types_)
        : IAggregateFunctionDataHelper<AggregateFunctionFlameGraphData, AggregateFunctionFlameGraph>(argument_types_, {}, createResultType())
    {}

    String getName() const override { return "flameGraph"; }

    static DataTypePtr createResultType()
    {
        return std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>());
    }

    bool allocatesMemoryInArena() const override { return true; }

    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
    {
        const auto & trace = assert_cast<const ColumnArray &>(*columns[0]);

        const auto & trace_offsets = trace.getOffsets();
        const auto & trace_values = assert_cast<const ColumnUInt64 &>(trace.getData()).getData();
        UInt64 prev_offset = 0;
        if (row_num)
            prev_offset = trace_offsets[row_num - 1];
        UInt64 trace_size = trace_offsets[row_num] - prev_offset;

        Int64 allocated = 1;
        if (argument_types.size() >= 2)
        {
            const auto & sizes = assert_cast<const ColumnInt64 &>(*columns[1]).getData();
            allocated = sizes[row_num];
        }

        UInt64 ptr = 0;
        if (argument_types.size() >= 3)
        {
            const auto & ptrs = assert_cast<const ColumnUInt64 &>(*columns[2]).getData();
            ptr = ptrs[row_num];
        }

        this->data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
    }

    void addManyDefaults(
        AggregateDataPtr __restrict /*place*/,
        const IColumn ** /*columns*/,
        size_t /*length*/,
        Arena * /*arena*/) const override
    {
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
        this->data(place).merge(this->data(rhs), arena);
    }

    void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional<size_t> /* version */) const override
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Serialization for function flameGraph is not implemented.");
    }

    void deserialize(AggregateDataPtr __restrict, ReadBuffer &, std::optional<size_t> /* version */, Arena *) const override
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Deserialization for function flameGraph is not implemented.");
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
        auto & array = assert_cast<ColumnArray &>(to);
        auto & str = assert_cast<ColumnString &>(array.getData());

        this->data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);

        array.getOffsets().push_back(str.size());
    }
};

static void check(const std::string & name, const DataTypes & argument_types, const Array & params)
{
    assertNoParameters(name, params);

    if (argument_types.empty() || argument_types.size() > 3)
        throw Exception(
            ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
            "Aggregate function {} requires 1 to 3 arguments : trace, [size = 1], [ptr = 0]",
            name);

    auto ptr_type = std::make_shared<DataTypeUInt64>();
    auto trace_type = std::make_shared<DataTypeArray>(ptr_type);
    auto size_type = std::make_shared<DataTypeInt64>();

    if (!argument_types[0]->equals(*trace_type))
        throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
            "First argument (trace) for function {} must be Array(UInt64), but it has type {}",
            name, argument_types[0]->getName());

    if (argument_types.size() >= 2 && !argument_types[1]->equals(*size_type))
        throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
            "Second argument (size) for function {} must be Int64, but it has type {}",
            name, argument_types[1]->getName());

    if (argument_types.size() >= 3 && !argument_types[2]->equals(*ptr_type))
        throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
            "Third argument (ptr) for function {} must be UInt64, but it has type {}",
            name, argument_types[2]->getName());
}

AggregateFunctionPtr createAggregateFunctionFlameGraph(const std::string & name, const DataTypes & argument_types, const Array & params, const Settings * settings)
{
    if (!settings->allow_introspection_functions)
        throw Exception(ErrorCodes::FUNCTION_NOT_ALLOWED,
            "Introspection functions are disabled, because setting 'allow_introspection_functions' is set to 0");

    check(name, argument_types, params);
    return std::make_shared<AggregateFunctionFlameGraph>(argument_types);
}

void registerAggregateFunctionFlameGraph(AggregateFunctionFactory & factory)
{
    AggregateFunctionProperties properties = { .returns_default_when_only_null = true, .is_order_dependent = true };

    factory.registerFunction("flameGraph", { createAggregateFunctionFlameGraph, properties });
}

}
@@ -80,6 +80,7 @@ void registerAggregateFunctionExponentialMovingAverage(AggregateFunctionFactory
 void registerAggregateFunctionSparkbar(AggregateFunctionFactory &);
 void registerAggregateFunctionIntervalLengthSum(AggregateFunctionFactory &);
 void registerAggregateFunctionAnalysisOfVariance(AggregateFunctionFactory &);
+void registerAggregateFunctionFlameGraph(AggregateFunctionFactory &);
 void registerAggregateFunctionKolmogorovSmirnovTest(AggregateFunctionFactory & factory);
 
 class AggregateFunctionCombinatorFactory;
@@ -173,6 +174,7 @@ void registerAggregateFunctions()
     registerAggregateFunctionExponentialMovingAverage(factory);
     registerAggregateFunctionSparkbar(factory);
     registerAggregateFunctionAnalysisOfVariance(factory);
+    registerAggregateFunctionFlameGraph(factory);
     registerAggregateFunctionKolmogorovSmirnovTest(factory);
 
     registerWindowFunctions(factory);
@@ -2,7 +2,6 @@
 #include <IO/ReadBufferFromString.h>
 #include <Common/PODArray.h>
 #include <AggregateFunctions/StatCommon.h>
-#include <iostream>
 
 #include <gtest/gtest.h>
 
@@ -14,6 +14,9 @@
#include <Analyzer/FunctionNode.h>
#include <Analyzer/HashUtils.h>

#include <numeric>

namespace DB
{

@@ -70,10 +70,13 @@ Block createBlockFromCollection(const Collection & collection, const DataTypes &
 {
     if (columns_size == 1)
     {
-        auto field = convertFieldToType(value, *block_types[0]);
+        auto field = convertFieldToTypeStrict(value, *block_types[0]);
+        if (!field)
+            continue;
+
         bool need_insert_null = transform_null_in && block_types[0]->isNullable();
-        if (!field.isNull() || need_insert_null)
-            columns[0]->insert(std::move(field));
+        if (!field->isNull() || need_insert_null)
+            columns[0]->insert(*field);
 
         continue;
     }
@@ -98,7 +101,11 @@ Block createBlockFromCollection(const Collection & collection, const DataTypes &
         size_t i = 0;
         for (; i < tuple_size; ++i)
         {
-            tuple_values[i] = convertFieldToType(tuple[i], *block_types[i]);
+            auto converted_field = convertFieldToTypeStrict(tuple[i], *block_types[i]);
+            if (!converted_field)
+                break;
+            tuple_values[i] = std::move(*converted_field);
 
             bool need_insert_null = transform_null_in && block_types[i]->isNullable();
             if (tuple_values[i].isNull() && !need_insert_null)
                 break;
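
In the two hunks above, convertFieldToTypeStrict returns std::nullopt when a literal cannot be
represented exactly in the target type, so such set elements are now skipped instead of being
inserted after a lossy conversion. A sketch with illustrative literals, assuming an integer
left-hand side:

SELECT 1 IN (1, 1.5); -- 1.5 is not exactly representable in the integer set type, so it is dropped from the set; the result is 1
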
@@ -1,4 +1,3 @@
-#include <iostream>
 
 int main(int argc, char ** argv)
 {
@@ -8,6 +8,7 @@
 #include <boost/algorithm/string/predicate.hpp>
 #include <Common/FieldVisitorConvertToNumber.h>
 #include <Backups/SettingsFieldOptionalUUID.h>
+#include <Backups/SettingsFieldOptionalString.h>
 
 
 namespace DB
@@ -164,6 +165,7 @@ namespace
     M(Bool, allow_s3_native_copy) \
     M(Bool, internal) \
     M(String, host_id) \
+    M(OptionalString, storage_policy) \
     M(OptionalUUID, restore_uuid)
 
 
@@ -117,6 +117,9 @@ struct RestoreSettings
     /// The current host's ID in the format 'escaped_host_name:port'.
     String host_id;
 
+    /// Alternative storage policy that may be specified in the SETTINGS clause of RESTORE queries.
+    std::optional<String> storage_policy;
+
     /// Internal, should not be specified by user.
     /// Cluster's hosts' IDs in the format 'escaped_host_name:port' for all shards and replicas in a cluster specified in BACKUP ON CLUSTER.
     std::vector<Strings> cluster_host_ids;
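
The new setting is supplied in the SETTINGS clause of a RESTORE statement; a sketch with a
hypothetical backup name and policy:

RESTORE TABLE db.table FROM Disk('backups', 'backup_1.zip')
SETTINGS storage_policy = 'local_ssd';
-- storage_policy = '' instead erases the policy stored in the backup's CREATE query.
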
@@ -322,6 +322,7 @@ void RestorerFromBackup::findTableInBackup(const QualifiedTableName & table_name
     read_buffer.reset();
     ParserCreateQuery create_parser;
     ASTPtr create_table_query = parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
+    applyCustomStoragePolicy(create_table_query);
     renameDatabaseAndTableNameInCreateQuery(create_table_query, renaming_map, context->getGlobalContext());
 
     QualifiedTableName table_name = renaming_map.getNewTableName(table_name_in_backup);
@@ -625,6 +626,24 @@ void RestorerFromBackup::checkDatabase(const String & database_name)
     }
 }
 
+void RestorerFromBackup::applyCustomStoragePolicy(ASTPtr query_ptr)
+{
+    constexpr auto setting_name = "storage_policy";
+    if (query_ptr && restore_settings.storage_policy.has_value())
+    {
+        ASTStorage * storage = query_ptr->as<ASTCreateQuery &>().storage;
+        if (storage && storage->settings)
+        {
+            if (restore_settings.storage_policy.value().empty())
+                /// It has been set to "" deliberately, so the source storage policy is erased.
+                storage->settings->changes.removeSetting(setting_name);
+            else
+                /// It has been set to a custom value, so it either overwrites the existing value or is added as a new one.
+                storage->settings->changes.setSetting(setting_name, restore_settings.storage_policy.value());
+        }
+    }
+}
+
 void RestorerFromBackup::removeUnresolvedDependencies()
 {
     auto need_exclude_dependency = [this](const StorageID & table_id)
@@ -95,6 +95,8 @@ private:
     void createDatabase(const String & database_name) const;
     void checkDatabase(const String & database_name);
 
+    void applyCustomStoragePolicy(ASTPtr query_ptr);
+
     void removeUnresolvedDependencies();
     void createTables();
     void createTable(const QualifiedTableName & table_name);
src/Backups/SettingsFieldOptionalString.cpp (new file, 29 lines)
@@ -0,0 +1,29 @@
#include <Backups/SettingsFieldOptionalString.h>
#include <Common/ErrorCodes.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int CANNOT_PARSE_BACKUP_SETTINGS;
}

SettingFieldOptionalString::SettingFieldOptionalString(const Field & field)
{
    if (field.getType() == Field::Types::Null)
    {
        value = std::nullopt;
        return;
    }

    if (field.getType() == Field::Types::String)
    {
        value = field.get<const String &>();
        return;
    }

    throw Exception(ErrorCodes::CANNOT_PARSE_BACKUP_SETTINGS, "Cannot get string from {}", field);
}

}
src/Backups/SettingsFieldOptionalString.h (new file, 20 lines)
@@ -0,0 +1,20 @@
#pragma once

#include <optional>
#include <Core/SettingsFields.h>

namespace DB
{

struct SettingFieldOptionalString
{
    std::optional<String> value;

    explicit SettingFieldOptionalString(const std::optional<String> & value_) : value(value_) {}

    explicit SettingFieldOptionalString(const Field & field);

    explicit operator Field() const { return Field(value ? toString(*value) : ""); }
};

}
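
For illustration, how the new field reacts to the three possible inputs (a sketch, not part of
the diff):

    SettingFieldOptionalString a{Field{}};            /// Null -> value == std::nullopt (setting absent)
    SettingFieldOptionalString b{Field{"policy_1"}};  /// String -> value == "policy_1"
    SettingFieldOptionalString c{Field{UInt64(1)}};   /// any other Field type throws CANNOT_PARSE_BACKUP_SETTINGS
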
@@ -19,6 +19,7 @@ message (STATUS "Will build ${VERSION_FULL} revision ${VERSION_REVISION} ${VERSI
 include (configure_config.cmake)
 configure_file (Common/config.h.in ${CONFIG_INCLUDE_PATH}/config.h)
 configure_file (Common/config_version.h.in ${CONFIG_INCLUDE_PATH}/config_version.h)
+configure_file (Common/config_version.cpp.in ${CONFIG_INCLUDE_PATH}/config_version.cpp)
 
 if (USE_DEBUG_HELPERS)
     get_target_property(MAGIC_ENUM_INCLUDE_DIR ch_contrib::magic_enum INTERFACE_INCLUDE_DIRECTORIES)
@@ -150,7 +151,7 @@ else()
     message(STATUS "StorageFileLog is only supported on Linux")
 endif ()
 
-list (APPEND clickhouse_common_io_sources ${CONFIG_BUILD})
+list (APPEND clickhouse_common_io_sources ${CONFIG_INCLUDE_PATH}/config_version.cpp)
 
 list (APPEND dbms_sources Functions/IFunction.cpp Functions/FunctionFactory.cpp Functions/FunctionHelpers.cpp Functions/extractTimeZoneFromFunctionArguments.cpp Functions/FunctionsLogical.cpp Functions/indexHint.cpp)
 list (APPEND dbms_headers Functions/IFunction.h Functions/FunctionFactory.h Functions/FunctionHelpers.h Functions/extractTimeZoneFromFunctionArguments.h Functions/FunctionsLogical.h Functions/indexHint.h)
@@ -2504,7 +2504,7 @@ void ClientBase::clearTerminal()
 
 void ClientBase::showClientVersion()
 {
-    std::cout << DBMS_NAME << " " + getName() + " version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
+    std::cout << VERSION_NAME << " " + getName() + " version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
 }
 
 namespace
@@ -280,9 +280,9 @@ void Connection::sendHello()
         "Parameters 'default_database', 'user' and 'password' must not contain ASCII control characters");
 
     writeVarUInt(Protocol::Client::Hello, *out);
-    writeStringBinary((DBMS_NAME " ") + client_name, *out);
-    writeVarUInt(DBMS_VERSION_MAJOR, *out);
-    writeVarUInt(DBMS_VERSION_MINOR, *out);
+    writeStringBinary((VERSION_NAME " ") + client_name, *out);
+    writeVarUInt(VERSION_MAJOR, *out);
+    writeVarUInt(VERSION_MINOR, *out);
     // NOTE For backward compatibility of the protocol, client cannot send its version_patch.
     writeVarUInt(DBMS_TCP_PROTOCOL_VERSION, *out);
     writeStringBinary(default_database, *out);
@@ -1,6 +1,5 @@
 #include "ConnectionParameters.h"
 #include <fstream>
-#include <iostream>
 #include <Core/Defines.h>
 #include <Core/Protocol.h>
 #include <Core/Types.h>
@@ -6,7 +6,6 @@
 #include <Poco/URI.h>
 
 #include <array>
-#include <iostream>
 #include <string>
 #include <unordered_map>
 
@@ -2,17 +2,17 @@
#include <Columns/ColumnObject.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnSparse.h>
#include <DataTypes/ObjectUtils.h>
#include <DataTypes/getLeastSupertype.h>
#include <DataTypes/DataTypeNothing.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/NestedUtils.h>
#include <Interpreters/castColumn.h>
#include <Interpreters/convertFieldToType.h>
#include <Common/HashTable/HashSet.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <numeric>

namespace DB
{
Some files were not shown because too many files have changed in this diff.