Merge remote-tracking branch 'origin/master' into fs-cache-better-resize-of-file-segments

kssenii 2024-11-14 12:55:54 +01:00
commit 25be6bb68f
348 changed files with 7270 additions and 2006 deletions


@ -12,7 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
- Critical Bug Fix (crash, data loss, RBAC)
- Bug Fix (user-visible misbehavior in an official stable release)
- CI Fix or Improvement (changelog entry is not required)
- Not for changelog (changelog entry is not required)


@ -42,16 +42,17 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else
Upcoming meetups
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
* [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3
* [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9
* [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9
* [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12
Recently completed meetups
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1


@ -145,6 +145,7 @@
#define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
#define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
#define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability
#define TSA_RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__))) /// to return capabilities in functions
/// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
/// They use a lambda function to apply function attribute to a single statement. This enable us to suppress warnings locally instead of

contrib/SimSIMD vendored

@ -1 +1 @@
Subproject commit ee3c9c9c00b51645f62a1a9e99611b78c0052a21
Subproject commit fa60f1b8e3582c50978f0ae86c2ebb6c9af957f3


@ -1,21 +1,31 @@
#!/bin/bash
set +x
set -eo pipefail
shopt -s nullglob
DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated
# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as
# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
fi
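A hedged sketch of the two alternatives the warning recommends (the keeper image name is assumed here, not taken from this diff):
```bash
# Preferred: set the runtime user via Docker instead of CLICKHOUSE_UID/CLICKHOUSE_GID
docker run --user "$(id -u):$(id -g)" clickhouse/clickhouse-keeper

# Or explicitly opt in to running the process as root:root
docker run -e CLICKHOUSE_RUN_AS_ROOT=1 clickhouse/clickhouse-keeper
```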
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# support `docker run --user=xxx:xxxx`
if [[ "$(id -u)" = "0" ]]; then
if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
USER=0
GROUP=0
else
USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
fi
if command -v gosu &> /dev/null; then
gosu="gosu $USER:$GROUP"
elif command -v su-exec &> /dev/null; then
@ -82,11 +92,11 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
# There is a config file. It is already tested with gosu (if it is readable by the keeper user)
if [ -f "$KEEPER_CONFIG" ]; then
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
exec $gosu clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
fi
# There is no config file. Will use embedded one
exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
exec $gosu clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image


@ -88,34 +88,34 @@ RUN if [ -n "${single_binary_location_url}" ]; then \
#docker-official-library:on
# A fallback to installation from ClickHouse repository
RUN if ! clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \
apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& for package in ${PACKAGES}; do \
packages="${packages} ${package}=${VERSION}" \
; done \
&& apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get autoremove --purge -yq libksba8 \
&& apt-get autoremove -yq \
; fi
# It works unless the clickhouse binary already exists
RUN clickhouse local -q 'SELECT 1' >/dev/null 2>&1 && exit 0 || : \
; apt-get update \
&& apt-get install --yes --no-install-recommends \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& for package in ${PACKAGES}; do \
packages="${packages} ${package}=${VERSION}" \
; done \
&& apt-get install --yes --no-install-recommends ${packages} || exit 1 \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get autoremove --purge -yq dirmngr gnupg2 \
&& chmod ugo+Xrw -R /etc/clickhouse-server /etc/clickhouse-client
# The last chmod is here to make the next one a no-op in the docker official library Dockerfile
# post install
# we need to allow "others" access to clickhouse folder, because docker container
@ -126,8 +126,6 @@ RUN clickhouse-local -q 'SELECT * FROM system.build_options' \
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV TZ UTC
RUN mkdir /docker-entrypoint-initdb.d


@ -1,3 +1,11 @@
<!---
The README.md is generated by README.sh from the following sources:
- README.src/content.md
- README.src/license.md
If you want to change it, edit these files
-->
# ClickHouse Server Docker Image
## What is ClickHouse?
@ -8,6 +16,7 @@ ClickHouse works 100-1000x faster than traditional database management systems,
For more information and documentation see https://clickhouse.com/.
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions
- The `latest` tag points to the latest release of the latest stable branch.
@ -16,11 +25,12 @@ For more information and documentation see https://clickhouse.com/.
- The tag `head` is built from the latest commit to the default branch.
- Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`.
<!-- REMOVE UNTIL HERE -->
### Compatibility
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. It requires Docker version >= `20.10.10`, which contains [this patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run [--privileged | --security-opt seccomp=unconfined]` instead, however that has security implications.
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. It requires Docker version >= `20.10.10`, which contains [this patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run --security-opt seccomp=unconfined` instead, however that has security implications.
## How to use this image
@ -30,7 +40,7 @@ For more information and documentation see https://clickhouse.com/.
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking).
By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.
By default, the server instance started above will run as the `default` user without a password.
@ -47,7 +57,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in
### connect to it using curl
```bash
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server curlimages/curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```
More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).
@ -70,7 +80,7 @@ echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
`22.6.3.35`
or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
```bash
docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
@ -88,8 +98,8 @@ Typically you may want to mount the following folders inside your container to a
```bash
docker run -d \
-v $(realpath ./ch_data):/var/lib/clickhouse/ \
-v $(realpath ./ch_logs):/var/log/clickhouse-server/ \
-v "$PWD/ch_data:/var/lib/clickhouse/" \
-v "$PWD/ch_logs:/var/log/clickhouse-server/" \
--name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
@ -111,6 +121,8 @@ docker run -d \
--name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).
## Configuration
The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).
@ -126,8 +138,8 @@ docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /pa
### Start server as custom user
```bash
# $(pwd)/data/clickhouse should exist and be owned by current user
docker run --rm --user ${UID}:${GID} --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
# $PWD/data/clickhouse should exist and be owned by the current user
docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```
When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.
@ -135,7 +147,7 @@ When you use the image with local directories mounted, you probably want to spec
### Start server from root (useful in case of enabled user namespace)
```bash
docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server-userns -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```
### How to create default database and user on starting

docker/server/README.sh Executable file

@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -ueo pipefail
# A script to generate README.md, close to how it is done in https://github.com/docker-library/docs
WORKDIR=$(dirname "$0")
SCRIPT_NAME=$(basename "$0")
CONTENT=README.src/content.md
LICENSE=README.src/license.md
cd "$WORKDIR"
R=README.md
cat > "$R" <<EOD
<!---
The $R is generated by $SCRIPT_NAME from the following sources:
- $CONTENT
- $LICENSE
If you want to change it, edit these files
-->
EOD
cat "$CONTENT" >> "$R"
cat >> "$R" <<EOD
## License
$(cat $LICENSE)
EOD
# Remove %%LOGO%% from the file with one line below
sed -i '/^%%LOGO%%/,+1d' "$R"
# Replace each %%IMAGE%% with our `clickhouse/clickhouse-server`
sed -i '/%%IMAGE%%/s:%%IMAGE%%:clickhouse/clickhouse-server:g' "$R"
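A hypothetical way to use the generator from a repository checkout:
```bash
# Regenerate docker/server/README.md from README.src/* and review what changed
docker/server/README.sh
git diff -- docker/server/README.md
```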


@ -0,0 +1 @@
ClickHouse is the fastest and most resource-efficient OSS database for real-time apps and analytics.


@ -0,0 +1,170 @@
# ClickHouse Server Docker Image
## What is ClickHouse?
%%LOGO%%
ClickHouse is an open-source column-oriented DBMS (columnar database management system) for online analytical processing (OLAP) that allows users to generate analytical reports using SQL queries in real-time.
ClickHouse works 100-1000x faster than traditional database management systems, and processes hundreds of millions to over a billion rows and tens of gigabytes of data per server per second. With a widespread user base around the globe, the technology has received praise for its reliability, ease of use, and fault tolerance.
For more information and documentation see https://clickhouse.com/.
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions
- The `latest` tag points to the latest release of the latest stable branch.
- Branch tags like `22.2` point to the latest release of the corresponding branch.
- Full version tags like `22.2.3.5` point to the corresponding release.
- The tag `head` is built from the latest commit to the default branch.
- Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`.
<!-- REMOVE UNTIL HERE -->
### Compatibility
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. It requires Docker version >= `20.10.10`, which contains [this patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run --security-opt seccomp=unconfined` instead, however that has security implications.
## How to use this image
### start server instance
```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.
By default, the server instance started above will run as the `default` user without a password.
### connect to it from a native client
```bash
docker run -it --rm --link some-clickhouse-server:clickhouse-server --entrypoint clickhouse-client %%IMAGE%% --host clickhouse-server
# OR
docker exec -it some-clickhouse-server clickhouse-client
```
More information about the [ClickHouse client](https://clickhouse.com/docs/en/interfaces/cli/).
### connect to it using curl
```bash
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```
More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).
### stopping / removing the container
```bash
docker stop some-clickhouse-server
docker rm some-clickhouse-server
```
### networking
You can expose ClickHouse running in Docker by [mapping a particular port](https://docs.docker.com/config/containers/container-networking/) from inside the container to host ports:
```bash
docker run -d -p 18123:8123 -p 19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
```
`22.6.3.35`
Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
```bash
docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
echo 'SELECT version()' | curl 'http://localhost:8123/' --data-binary @-
```
`22.6.3.35`
### Volumes
Typically you may want to mount the following folders inside your container to achieve persistence:
- `/var/lib/clickhouse/` - main folder where ClickHouse stores the data
- `/var/log/clickhouse-server/` - logs
```bash
docker run -d \
-v "$PWD/ch_data:/var/lib/clickhouse/" \
-v "$PWD/ch_logs:/var/log/clickhouse-server/" \
--name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
You may also want to mount:
- `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
- `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
- `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).
### Linux capabilities
ClickHouse has some advanced functionality, which requires enabling several [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html).
They are optional and can be enabled using the following [docker command-line arguments](https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities):
```bash
docker run -d \
--cap-add=SYS_NICE --cap-add=NET_ADMIN --cap-add=IPC_LOCK \
--name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).
## Configuration
The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).
ClickHouse configuration is represented with a file "config.xml" ([documentation](https://clickhouse.com/docs/en/operations/configuration_files/))
### Start server instance with custom configuration
```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /path/to/your/config.xml:/etc/clickhouse-server/config.xml %%IMAGE%%
```
### Start server as custom user
```bash
# $PWD/data/clickhouse should exist and be owned by the current user
docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%%
```
When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.
### Start server from root (useful in case of enabled user namespace)
```bash
docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%%
```
### How to create default database and user on starting
Sometimes you may want to create a user (a user named `default` is used by default) and a database on container start. You can do it using environment variables `CLICKHOUSE_DB`, `CLICKHOUSE_USER`, `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT` and `CLICKHOUSE_PASSWORD`:
```bash
docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 -e CLICKHOUSE_PASSWORD=password -p 9000:9000/tcp %%IMAGE%%
```
## How to extend this image
To perform additional initialization in an image derived from this one, add one or more `*.sql`, `*.sql.gz`, or `*.sh` scripts under `/docker-entrypoint-initdb.d`. After the entrypoint calls `initdb`, it will run any `*.sql` files, run any executable `*.sh` scripts, and source any non-executable `*.sh` scripts found in that directory to do further initialization before starting the service.
Also, you can provide environment variables `CLICKHOUSE_USER` & `CLICKHOUSE_PASSWORD` that will be used for clickhouse-client during initialization.
For example, to add an additional user and database, add the following to `/docker-entrypoint-initdb.d/init-db.sh`:
```bash
#!/bin/bash
set -e
clickhouse client -n <<-EOSQL
CREATE DATABASE docker;
CREATE TABLE docker.docker (x Int32) ENGINE = Log;
EOSQL
```
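A hedged example of putting the pieces together — the host path and credentials below are placeholders:
```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 \
  -e CLICKHOUSE_USER=username -e CLICKHOUSE_PASSWORD=password \
  -v "$PWD/init-db.sh:/docker-entrypoint-initdb.d/init-db.sh" \
  %%IMAGE%%
```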


@ -0,0 +1 @@
https://github.com/ClickHouse/ClickHouse


@ -0,0 +1 @@
View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.


@ -0,0 +1,43 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 616 616">
<defs>
<style>
.cls-1 {
clip-path: url(#clippath);
}
.cls-2 {
fill: none;
}
.cls-2, .cls-3, .cls-4 {
stroke-width: 0px;
}
.cls-3 {
fill: #1e1e1e;
}
.cls-4 {
fill: #faff69;
}
</style>
<clipPath id="clippath">
<rect class="cls-2" x="83.23" y="71.73" width="472.55" height="472.55"/>
</clipPath>
</defs>
<g id="Layer_2" data-name="Layer 2">
<rect class="cls-4" width="616" height="616"/>
</g>
<g id="Layer_1" data-name="Layer 1">
<g class="cls-1">
<g>
<path class="cls-3" d="m120.14,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m208.75,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m297.35,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m385.94,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m474.56,268.36c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.65,2.09,4.65,4.66v79.28c0,2.57-2.09,4.66-4.65,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66v-79.28Z"/>
</g>
</g>
</g>
</svg>



@ -0,0 +1 @@
[ClickHouse Inc.](%%GITHUB-REPO%%)


@ -0,0 +1,7 @@
{
"hub": {
"categories": [
"databases-and-storage"
]
}
}


@ -4,17 +4,28 @@ set -eo pipefail
shopt -s nullglob
DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated
# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as
# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
fi
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# support `docker run --user=xxx:xxxx`
if [[ "$(id -u)" = "0" ]]; then
if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
USER=0
GROUP=0
else
USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
fi
else
USER="$(id -u)"
GROUP="$(id -g)"
@ -55,14 +66,14 @@ function create_directory_and_do_chown() {
[ -z "$dir" ] && return
# ensure directories exist
if [ "$DO_CHOWN" = "1" ]; then
mkdir="mkdir"
mkdir=( mkdir )
else
# if DO_CHOWN=0 it means that the system does not map root user to "admin" permissions
# it mainly happens on NFS mounts where root==nobody for security reasons
# thus mkdir MUST run with user id/gid and not from nobody that has zero permissions
mkdir="/usr/bin/clickhouse su "${USER}:${GROUP}" mkdir"
mkdir=( clickhouse su "${USER}:${GROUP}" mkdir )
fi
if ! $mkdir -p "$dir"; then
if ! "${mkdir[@]}" -p "$dir"; then
echo "Couldn't create necessary directory: $dir"
exit 1
fi
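The switch from a string to a bash array avoids word-splitting bugs; a minimal sketch of the difference (variable and path here are illustrative):
```bash
# With a plain string, the expansion is word-split on whitespace, so an
# element that itself contains a space cannot be represented safely:
mkdir_cmd="clickhouse su ${USER}:${GROUP} mkdir"
$mkdir_cmd -p "$dir"          # breaks if ${USER}:${GROUP} ever contains spaces

# With an array, every element stays exactly one argument:
mkdir_cmd=( clickhouse su "${USER}:${GROUP}" mkdir )
"${mkdir_cmd[@]}" -p "$dir"   # safe regardless of spaces in any element
```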
@ -143,7 +154,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
fi
# Listen only on localhost until the initialization is done
/usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
pid="$!"
# check if clickhouse is ready to accept connections
@ -151,7 +162,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
tries=${CLICKHOUSE_INIT_TIMEOUT:-1000}
while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do
if [ "$tries" -le "0" ]; then
echo >&2 'ClickHouse init process failed.'
echo >&2 'ClickHouse init process timeout.'
exit 1
fi
tries=$(( tries-1 ))
@ -203,18 +214,8 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0}
export CLICKHOUSE_WATCHDOG_ENABLE
# An option for easy restarting and replacing clickhouse-server in a container, especially in Kubernetes.
# For example, you can replace the clickhouse-server binary to another and restart it while keeping the container running.
if [[ "${CLICKHOUSE_DOCKER_RESTART_ON_EXIT:-0}" -eq "1" ]]; then
while true; do
# This runs the server as a child process of the shell script:
/usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" ||:
echo >&2 'ClickHouse Server exited, and the environment variable CLICKHOUSE_DOCKER_RESTART_ON_EXIT is set to 1. Restarting the server.'
done
else
# This replaces the shell script with the server:
exec /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
fi
# This replaces the shell script with the server:
exec clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image


@ -33,6 +33,21 @@ Then, generate the data. Parameter `-s` specifies the scale factor. For example,
./dbgen -s 100
```
Detailed table sizes with scale factor 100:
| Table    | Size (in rows) | Size (compressed in ClickHouse) |
|----------|----------------|---------------------------------|
| nation   | 25             | 2 kB                            |
| region   | 5              | 1 kB                            |
| part     | 20.000.000     | 895 MB                          |
| supplier | 1.000.000      | 75 MB                           |
| partsupp | 80.000.000     | 4.37 GB                         |
| customer | 15.000.000     | 1.19 GB                         |
| orders   | 150.000.000    | 6.15 GB                         |
| lineitem | 600.000.000    | 26.69 GB                        |
(Compressed sizes in ClickHouse are taken from `system.tables.total_bytes` and are based on the table definitions below.)
Now create tables in ClickHouse.
We stick as closely as possible to the rules of the TPC-H specification:
@ -151,10 +166,37 @@ clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO orders FORMAT
clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO lineitem FORMAT CSV" < lineitem.tbl
```
The queries are generated by `./qgen -s <scaling_factor>`. Example queries for `s = 100`:
:::note
Instead of using tpch-kit and generating the tables yourself, you can alternatively import the data from a public S3 bucket. Make sure
to create the empty tables first using the `CREATE` statements above.
```sql
-- Scaling factor 1
INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/nation.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/region.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/part.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/supplier.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/partsupp.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/customer.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/orders.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/lineitem.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
-- Scaling factor 100
INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
```
:::
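As a sanity check after such an import, one could compare row counts against the table above (a hypothetical check; the expected count is for scale factor 1):
```bash
# lineitem has roughly 6 million rows at scale factor 1
clickhouse-client --query "SELECT count() FROM lineitem"
```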
## Queries
The queries are generated by `./qgen -s <scaling_factor>`. Example queries for `s = 100`:
**Correctness**
The result of the queries agrees with the official results unless mentioned otherwise. To verify, generate a TPC-H database with scale


@ -597,6 +597,30 @@ If number of tables is greater than this value, server will throw an exception.
<max_table_num_to_throw>400</max_table_num_to_throw>
```
## max\_replicated\_table\_num\_to\_throw {#max-replicated-table-num-to-throw}
If the number of replicated tables is greater than this value, the server will throw an exception. 0 means no limitation. Only tables in the Atomic, Ordinary, Replicated, and Lazy database engines are counted.
**Example**
```xml
<max_replicated_table_num_to_throw>400</max_replicated_table_num_to_throw>
```
## max\_dictionary\_num\_to\_throw {#max-dictionary-num-to-throw}
If the number of dictionaries is greater than this value, the server will throw an exception. 0 means no limitation. Only dictionaries in the Atomic, Ordinary, Replicated, and Lazy database engines are counted.
**Example**
```xml
<max_dictionary_num_to_throw>400</max_dictionary_num_to_throw>
```
## max\_view\_num\_to\_throw {#max-view-num-to-throw}
If the number of views is greater than this value, the server will throw an exception. 0 means no limitation. Only views in the Atomic, Ordinary, Replicated, and Lazy database engines are counted.
**Example**
```xml
<max_view_num_to_throw>400</max_view_num_to_throw>
```
## max\_database\_num\_to\_throw {#max-database-num-to-throw}
If the number of databases is greater than this value, the server will throw an exception. 0 means no limitation.
Default value: 0


@ -7,119 +7,4 @@ toc_hidden: true
# List of Aggregate Functions
Standard aggregate functions:
- [count](../reference/count.md)
- [min](../reference/min.md)
- [max](../reference/max.md)
- [sum](../reference/sum.md)
- [avg](../reference/avg.md)
- [any](../reference/any.md)
- [stddevPop](../reference/stddevpop.md)
- [stddevPopStable](../reference/stddevpopstable.md)
- [stddevSamp](../reference/stddevsamp.md)
- [stddevSampStable](../reference/stddevsampstable.md)
- [varPop](../reference/varpop.md)
- [varSamp](../reference/varsamp.md)
- [corr](../reference/corr.md)
- [corrStable](../reference/corrstable.md)
- [corrMatrix](../reference/corrmatrix.md)
- [covarPop](../reference/covarpop.md)
- [covarPopStable](../reference/covarpopstable.md)
- [covarPopMatrix](../reference/covarpopmatrix.md)
- [covarSamp](../reference/covarsamp.md)
- [covarSampStable](../reference/covarsampstable.md)
- [covarSampMatrix](../reference/covarsampmatrix.md)
- [entropy](../reference/entropy.md)
- [exponentialMovingAverage](../reference/exponentialmovingaverage.md)
- [intervalLengthSum](../reference/intervalLengthSum.md)
- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md)
- [mannwhitneyutest](../reference/mannwhitneyutest.md)
- [median](../reference/median.md)
- [rankCorr](../reference/rankCorr.md)
- [sumKahan](../reference/sumkahan.md)
- [studentTTest](../reference/studentttest.md)
- [welchTTest](../reference/welchttest.md)
ClickHouse-specific aggregate functions:
- [aggThrow](../reference/aggthrow.md)
- [analysisOfVariance](../reference/analysis_of_variance.md)
- [any](../reference/any.md)
- [anyHeavy](../reference/anyheavy.md)
- [anyLast](../reference/anylast.md)
- [boundingRatio](../reference/boundrat.md)
- [first_value](../reference/first_value.md)
- [last_value](../reference/last_value.md)
- [argMin](../reference/argmin.md)
- [argMax](../reference/argmax.md)
- [avgWeighted](../reference/avgweighted.md)
- [topK](../reference/topk.md)
- [topKWeighted](../reference/topkweighted.md)
- [deltaSum](../reference/deltasum.md)
- [deltaSumTimestamp](../reference/deltasumtimestamp.md)
- [flameGraph](../reference/flame_graph.md)
- [groupArray](../reference/grouparray.md)
- [groupArrayLast](../reference/grouparraylast.md)
- [groupUniqArray](../reference/groupuniqarray.md)
- [groupArrayInsertAt](../reference/grouparrayinsertat.md)
- [groupArrayMovingAvg](../reference/grouparraymovingavg.md)
- [groupArrayMovingSum](../reference/grouparraymovingsum.md)
- [groupArraySample](../reference/grouparraysample.md)
- [groupArraySorted](../reference/grouparraysorted.md)
- [groupArrayIntersect](../reference/grouparrayintersect.md)
- [groupBitAnd](../reference/groupbitand.md)
- [groupBitOr](../reference/groupbitor.md)
- [groupBitXor](../reference/groupbitxor.md)
- [groupBitmap](../reference/groupbitmap.md)
- [groupBitmapAnd](../reference/groupbitmapand.md)
- [groupBitmapOr](../reference/groupbitmapor.md)
- [groupBitmapXor](../reference/groupbitmapxor.md)
- [sumWithOverflow](../reference/sumwithoverflow.md)
- [sumMap](../reference/summap.md)
- [sumMapWithOverflow](../reference/summapwithoverflow.md)
- [sumMapFiltered](../parametric-functions.md/#summapfiltered)
- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow)
- [minMap](../reference/minmap.md)
- [maxMap](../reference/maxmap.md)
- [skewSamp](../reference/skewsamp.md)
- [skewPop](../reference/skewpop.md)
- [kurtSamp](../reference/kurtsamp.md)
- [kurtPop](../reference/kurtpop.md)
- [uniq](../reference/uniq.md)
- [uniqExact](../reference/uniqexact.md)
- [uniqCombined](../reference/uniqcombined.md)
- [uniqCombined64](../reference/uniqcombined64.md)
- [uniqHLL12](../reference/uniqhll12.md)
- [uniqTheta](../reference/uniqthetasketch.md)
- [quantile](../reference/quantile.md)
- [quantiles](../reference/quantiles.md)
- [quantileExact](../reference/quantileexact.md)
- [quantileExactLow](../reference/quantileexact.md#quantileexactlow)
- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh)
- [quantileExactWeighted](../reference/quantileexactweighted.md)
- [quantileTiming](../reference/quantiletiming.md)
- [quantileTimingWeighted](../reference/quantiletimingweighted.md)
- [quantileDeterministic](../reference/quantiledeterministic.md)
- [quantileTDigest](../reference/quantiletdigest.md)
- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md)
- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16)
- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted)
- [quantileDD](../reference/quantileddsketch.md#quantileddsketch)
- [simpleLinearRegression](../reference/simplelinearregression.md)
- [singleValueOrNull](../reference/singlevalueornull.md)
- [stochasticLinearRegression](../reference/stochasticlinearregression.md)
- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md)
- [categoricalInformationValue](../reference/categoricalinformationvalue.md)
- [contingency](../reference/contingency.md)
- [cramersV](../reference/cramersv.md)
- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md)
- [theilsU](../reference/theilsu.md)
- [maxIntersections](../reference/maxintersections.md)
- [maxIntersectionsPosition](../reference/maxintersectionsposition.md)
- [meanZTest](../reference/meanztest.md)
- [quantileGK](../reference/quantileGK.md)
- [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md)
- [sparkBar](../reference/sparkbar.md)
- [sumCount](../reference/sumcount.md)
- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md)
ClickHouse supports all standard SQL aggregate functions ([sum](../reference/sum.md), [avg](../reference/avg.md), [min](../reference/min.md), [max](../reference/max.md), [count](../reference/count.md)), as well as a wide range of other aggregate functions.


@ -6,7 +6,9 @@ sidebar_label: AggregateFunction
# AggregateFunction
Aggregate functions can have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.
Aggregate functions have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md).
The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix.
To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.
`AggregateFunction(name, types_of_arguments...)` — parametric data type.


@ -6,29 +6,8 @@ sidebar_position: 1
# Data Types in ClickHouse
ClickHouse can store various kinds of data in table cells. This section describes the supported data types and special considerations for using and/or implementing them if any.
This section describes the data types supported by ClickHouse, for example [integers](int-uint.md), [floats](float.md) and [strings](string.md).
:::note
You can check whether a data type name is case-sensitive in the [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) table.
:::
ClickHouse data types include:
- **Integer types**: [signed and unsigned integers](./int-uint.md) (`UInt8`, `UInt16`, `UInt32`, `UInt64`, `UInt128`, `UInt256`, `Int8`, `Int16`, `Int32`, `Int64`, `Int128`, `Int256`)
- **Floating-point numbers**: [floats](./float.md)(`Float32` and `Float64`) and [`Decimal` values](./decimal.md)
- **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md)
- **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md)
- **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time
- **Object**: the [`Object`](./json.md) stores a JSON document in a single column (deprecated)
- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column
- **UUID**: a performant option for storing [`UUID` values](./uuid.md)
- **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column
- **Arrays**: any column can be defined as an [`Array` of values](./array.md)
- **Maps**: use [`Map`](./map.md) for storing key/value pairs
- **Aggregation function types**: use [`SimpleAggregateFunction`](./simpleaggregatefunction.md) and [`AggregateFunction`](./aggregatefunction.md) for storing the intermediate status of aggregate function results
- **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell
- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type.
- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column settings its default value for the data type)
- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses
- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md)
System table [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) provides an
overview of all available data types.
It also shows whether a data type is an alias of another data type, and whether its name is case-sensitive (e.g. `bool` vs. `BOOL`).


@ -7,7 +7,7 @@ keywords: [object, data type]
# Object Data Type (deprecated)
**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
**This feature is not production-ready and deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
<hr />


@ -5,7 +5,9 @@ sidebar_label: SimpleAggregateFunction
---
# SimpleAggregateFunction
`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data.
`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value (intermediate state) of the aggregate function, but not its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does.
This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`.
This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data.
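For a concrete instance of this property (a worked example added here, not from the original page): `max` satisfies it, while `avg` does not, which is why `avg` needs the full `AggregateFunction` state.

$$
\max(\{1,2\} \cup \{3,4,5\}) = 5 = \max(\{\max\{1,2\},\ \max\{3,4,5\}\}),
\qquad
\operatorname{avg}(\{1,2\} \cup \{3,4,5\}) = 3 \neq 2.75 = \operatorname{avg}(\{\operatorname{avg}\{1,2\},\ \operatorname{avg}\{3,4,5\}\})
$$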
The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix.


@ -4773,7 +4773,7 @@ Result:
## toUTCTimestamp
Convert DateTime/DateTime64 type value from other time zone to UTC timezone timestamp
Converts a DateTime/DateTime64 value from another time zone to a UTC timestamp. This function is mainly included for compatibility with Apache Spark and similar frameworks.
**Syntax**
@ -4799,14 +4799,14 @@ SELECT toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai');
Result:
``` text
┌─toUTCTimestamp(toDateTime('2023-03-16'),'Asia/Shanghai')┐
┌─toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai')┐
│ 2023-03-15 16:00:00 │
└─────────────────────────────────────────────────────────┘
```
## fromUTCTimestamp
Convert DateTime/DateTime64 type value from UTC timezone to other time zone timestamp
Converts a DateTime/DateTime64 value from the UTC time zone to a timestamp in another time zone. This function is mainly included for compatibility with Apache Spark and similar frameworks.
**Syntax**
@ -4832,7 +4832,7 @@ SELECT fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00', 3), 'Asia/Shanghai')
Result:
``` text
┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3),'Asia/Shanghai')─┐
┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3), 'Asia/Shanghai')─┐
│ 2023-03-16 18:00:00.000 │
└─────────────────────────────────────────────────────────────────────────┘
```


@ -5,70 +5,4 @@ sidebar_position: 62
title: "Geo Functions"
---
## Geographical Coordinates Functions
- [greatCircleDistance](./coordinates.md#greatcircledistance)
- [geoDistance](./coordinates.md#geodistance)
- [greatCircleAngle](./coordinates.md#greatcircleangle)
- [pointInEllipses](./coordinates.md#pointinellipses)
- [pointInPolygon](./coordinates.md#pointinpolygon)
## Geohash Functions
- [geohashEncode](./geohash.md#geohashencode)
- [geohashDecode](./geohash.md#geohashdecode)
- [geohashesInBox](./geohash.md#geohashesinbox)
## H3 Indexes Functions
- [h3IsValid](./h3.md#h3isvalid)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3EdgeAngle](./h3.md#h3edgeangle)
- [h3EdgeLengthM](./h3.md#h3edgelengthm)
- [h3EdgeLengthKm](./h3.md#h3edgelengthkm)
- [geoToH3](./h3.md#geotoh3)
- [h3ToGeo](./h3.md#h3togeo)
- [h3ToGeoBoundary](./h3.md#h3togeoboundary)
- [h3kRing](./h3.md#h3kring)
- [h3GetBaseCell](./h3.md#h3getbasecell)
- [h3HexAreaM2](./h3.md#h3hexaream2)
- [h3HexAreaKm2](./h3.md#h3hexareakm2)
- [h3IndexesAreNeighbors](./h3.md#h3indexesareneighbors)
- [h3ToChildren](./h3.md#h3tochildren)
- [h3ToParent](./h3.md#h3toparent)
- [h3ToString](./h3.md#h3tostring)
- [stringToH3](./h3.md#stringtoh3)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3IsResClassIII](./h3.md#h3isresclassiii)
- [h3IsPentagon](./h3.md#h3ispentagon)
- [h3GetFaces](./h3.md#h3getfaces)
- [h3CellAreaM2](./h3.md#h3cellaream2)
- [h3CellAreaRads2](./h3.md#h3cellarearads2)
- [h3ToCenterChild](./h3.md#h3tocenterchild)
- [h3ExactEdgeLengthM](./h3.md#h3exactedgelengthm)
- [h3ExactEdgeLengthKm](./h3.md#h3exactedgelengthkm)
- [h3ExactEdgeLengthRads](./h3.md#h3exactedgelengthrads)
- [h3NumHexagons](./h3.md#h3numhexagons)
- [h3Line](./h3.md#h3line)
- [h3Distance](./h3.md#h3distance)
- [h3HexRing](./h3.md#h3hexring)
- [h3GetUnidirectionalEdge](./h3.md#h3getunidirectionaledge)
- [h3UnidirectionalEdgeIsValid](./h3.md#h3unidirectionaledgeisvalid)
- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3getoriginindexfromunidirectionaledge)
- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3getdestinationindexfromunidirectionaledge)
- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3getindexesfromunidirectionaledge)
- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3getunidirectionaledgesfromhexagon)
- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3getunidirectionaledgeboundary)
## S2 Index Functions
- [geoToS2](./s2.md#geotos2)
- [s2ToGeo](./s2.md#s2togeo)
- [s2GetNeighbors](./s2.md#s2getneighbors)
- [s2CellsIntersect](./s2.md#s2cellsintersect)
- [s2CapContains](./s2.md#s2capcontains)
- [s2CapUnion](./s2.md#s2capunion)
- [s2RectAdd](./s2.md#s2rectadd)
- [s2RectContains](./s2.md#s2rectcontains)
- [s2RectUnion](./s2.md#s2rectunion)
- [s2RectIntersection](./s2.md#s2rectintersection)
Functions for working with geometric objects, for example [to calculate distances between points on a sphere](./coordinates.md), [compute geohashes](./geohash.md), and work with [h3 indexes](./h3.md).


@ -24,7 +24,7 @@ All expressions in a query that have the same AST (the same record or same resul
## Types of Results
All functions return a single return as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.
All functions return a single value as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.
## Constants


@ -279,7 +279,7 @@ For columns with a new or updated `MATERIALIZED` value expression, all existing
For columns with a new or updated `DEFAULT` value expression, the behavior depends on the ClickHouse version:
- In ClickHouse < v24.2, all existing rows are rewritten.
- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression.
- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression.
Syntax:


@ -6,16 +6,4 @@ sidebar_label: CREATE
# CREATE Queries
Create queries make a new entity of one of the following kinds:
- [DATABASE](/docs/en/sql-reference/statements/create/database.md)
- [TABLE](/docs/en/sql-reference/statements/create/table.md)
- [VIEW](/docs/en/sql-reference/statements/create/view.md)
- [DICTIONARY](/docs/en/sql-reference/statements/create/dictionary.md)
- [FUNCTION](/docs/en/sql-reference/statements/create/function.md)
- [USER](/docs/en/sql-reference/statements/create/user.md)
- [ROLE](/docs/en/sql-reference/statements/create/role.md)
- [ROW POLICY](/docs/en/sql-reference/statements/create/row-policy.md)
- [QUOTA](/docs/en/sql-reference/statements/create/quota.md)
- [SETTINGS PROFILE](/docs/en/sql-reference/statements/create/settings-profile.md)
- [NAMED COLLECTION](/docs/en/sql-reference/statements/create/named-collection.md)
CREATE queries create (for example) new [databases](/docs/en/sql-reference/statements/create/database.md), [tables](/docs/en/sql-reference/statements/create/table.md) and [views](/docs/en/sql-reference/statements/create/view.md).


@ -6,27 +6,4 @@ sidebar_label: List of statements
# ClickHouse SQL Statements
Statements represent various kinds of action you can perform using SQL queries. Each kind of statement has its own syntax and usage details that are described separately:
- [SELECT](/docs/en/sql-reference/statements/select/index.md)
- [INSERT INTO](/docs/en/sql-reference/statements/insert-into.md)
- [CREATE](/docs/en/sql-reference/statements/create/index.md)
- [ALTER](/docs/en/sql-reference/statements/alter/index.md)
- [SYSTEM](/docs/en/sql-reference/statements/system.md)
- [SHOW](/docs/en/sql-reference/statements/show.md)
- [GRANT](/docs/en/sql-reference/statements/grant.md)
- [REVOKE](/docs/en/sql-reference/statements/revoke.md)
- [ATTACH](/docs/en/sql-reference/statements/attach.md)
- [CHECK TABLE](/docs/en/sql-reference/statements/check-table.md)
- [DESCRIBE TABLE](/docs/en/sql-reference/statements/describe-table.md)
- [DETACH](/docs/en/sql-reference/statements/detach.md)
- [DROP](/docs/en/sql-reference/statements/drop.md)
- [EXISTS](/docs/en/sql-reference/statements/exists.md)
- [KILL](/docs/en/sql-reference/statements/kill.md)
- [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md)
- [RENAME](/docs/en/sql-reference/statements/rename.md)
- [SET](/docs/en/sql-reference/statements/set.md)
- [SET ROLE](/docs/en/sql-reference/statements/set-role.md)
- [TRUNCATE](/docs/en/sql-reference/statements/truncate.md)
- [USE](/docs/en/sql-reference/statements/use.md)
- [EXPLAIN](/docs/en/sql-reference/statements/explain.md)
Users interact with ClickHouse using SQL statements. ClickHouse supports common SQL statements like [SELECT](select/index.md) and [CREATE](create/index.md), but it also provides specialized statements like [KILL](kill.md) and [OPTIMIZE](optimize.md).

View File

@ -431,7 +431,7 @@ catch (const Exception & e)
bool need_print_stack_trace = config().getBool("stacktrace", false) && e.code() != ErrorCodes::NETWORK_ERROR;
std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl << std::endl;
/// If exception code isn't zero, we should return non-zero return code anyway.
return e.code() ? e.code() : -1;
return static_cast<UInt8>(e.code()) ? e.code() : -1;
}
catch (...)
{
@ -1390,7 +1390,8 @@ int mainEntryClickHouseClient(int argc, char ** argv)
catch (const DB::Exception & e)
{
std::cerr << DB::getExceptionMessage(e, false) << std::endl;
return 1;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
catch (const boost::program_options::error & e)
{
@ -1399,7 +1400,8 @@ int mainEntryClickHouseClient(int argc, char ** argv)
}
catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
return 1;
std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
}
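
The `static_cast<UInt8>(...)` pattern repeated across these entry points guards against exit-status truncation: the shell only sees the low 8 bits of a process exit code, so an error code that is a multiple of 256 would otherwise be reported as 0 (success). A minimal self-contained sketch of the failure mode (the error-code values are arbitrary examples; plain `unsigned char` stands in for ClickHouse's UInt8 alias):

#include <cstdio>

int main()
{
    int codes[] = {394, 512}; /// 512 & 0xFF == 0: looks like success after truncation
    for (int code : codes)
    {
        int naive = code;                                           /// return code;
        int guarded = static_cast<unsigned char>(code) ? code : 1;  /// the patched pattern
        std::printf("code=%d shell_sees_naive=%d shell_sees_guarded=%d\n",
                    code, naive & 0xFF, guarded & 0xFF);
    }
}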

View File

@ -9,14 +9,20 @@
#include <IO/WriteBufferFromFile.h>
#include <IO/ReadBufferFromFile.h>
#include <Compression/CompressedWriteBuffer.h>
#include <Compression/ParallelCompressedWriteBuffer.h>
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedReadBufferFromFile.h>
#include <Compression/getCompressionCodecForFile.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <IO/copyData.h>
#include <Parsers/parseQuery.h>
#include <Parsers/queryToString.h>
#include <Parsers/ExpressionElementParsers.h>
#include <Compression/CompressionFactory.h>
#include <Common/TerminalSize.h>
#include <Common/ThreadPool.h>
#include <Common/CurrentMetrics.h>
#include <Core/Defines.h>
@ -29,33 +35,35 @@ namespace DB
}
}
namespace CurrentMetrics
{
extern const Metric LocalThread;
extern const Metric LocalThreadActive;
extern const Metric LocalThreadScheduled;
}
namespace
{
/// Outputs sizes of uncompressed and compressed blocks for compressed file.
/// Outputs the codec method and the sizes of uncompressed and compressed blocks for a compressed file.
void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out)
{
while (!in.eof())
{
in.ignore(16); /// checksum
char header[COMPRESSED_BLOCK_HEADER_SIZE];
in.readStrict(header, COMPRESSED_BLOCK_HEADER_SIZE);
UInt32 size_compressed = unalignedLoad<UInt32>(&header[1]);
UInt32 size_compressed;
UInt32 size_decompressed;
auto codec = DB::getCompressionCodecForFile(in, size_compressed, size_decompressed, true /* skip_to_next_block */);
if (size_compressed > DBMS_MAX_COMPRESSED_SIZE)
throw DB::Exception(DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED, "Too large size_compressed. Most likely corrupted data.");
UInt32 size_decompressed = unalignedLoad<UInt32>(&header[5]);
DB::writeText(queryToString(codec->getFullCodecDesc()), out);
DB::writeChar('\t', out);
DB::writeText(size_decompressed, out);
DB::writeChar('\t', out);
DB::writeText(size_compressed, out);
DB::writeChar('\n', out);
in.ignore(size_compressed - COMPRESSED_BLOCK_HEADER_SIZE);
}
}
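
For reference, the header parsed here has a fixed layout that the removed unalignedLoad calls relied on: a 16-byte checksum, then a 9-byte block header (COMPRESSED_BLOCK_HEADER_SIZE) consisting of one codec-method byte followed by two little-endian UInt32 values, the compressed size (which counts the header itself, hence the `size_compressed - COMPRESSED_BLOCK_HEADER_SIZE` skip) and the decompressed size. A sketch of that manual parse, assuming a little-endian host and a complete header already in memory (0x82 is LZ4's method byte in ClickHouse's scheme):

#include <cstdint>
#include <cstdio>
#include <cstring>

struct BlockHeader { uint8_t method; uint32_t size_compressed; uint32_t size_decompressed; };

/// Parses the 9-byte header that follows a block's 16-byte checksum.
static BlockHeader parseBlockHeader(const char * data)
{
    BlockHeader h;
    h.method = static_cast<uint8_t>(data[0]);
    std::memcpy(&h.size_compressed, data + 1, 4);   /// unaligned load, offset 1
    std::memcpy(&h.size_decompressed, data + 5, 4); /// unaligned load, offset 5
    return h;
}

int main()
{
    /// Hand-built header: LZ4, compressed size 100, decompressed size 300 (0x12C).
    char raw[9] = {static_cast<char>(0x82), 100, 0, 0, 0, 44, 1, 0, 0};
    BlockHeader h = parseBlockHeader(raw);
    std::printf("method=0x%02x compressed=%u decompressed=%u\n",
                static_cast<unsigned>(h.method), h.size_compressed, h.size_decompressed);
}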
@ -77,11 +85,12 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
("decompress,d", "decompress")
("offset-in-compressed-file", po::value<size_t>()->default_value(0ULL), "offset to the compressed block (i.e. physical file offset)")
("offset-in-decompressed-block", po::value<size_t>()->default_value(0ULL), "offset to the decompressed block (i.e. virtual offset)")
("block-size,b", po::value<unsigned>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
("block-size,b", po::value<size_t>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
("hc", "use LZ4HC instead of LZ4")
("zstd", "use ZSTD instead of LZ4")
("codec", po::value<std::vector<std::string>>()->multitoken(), "use codecs combination instead of LZ4")
("level", po::value<int>(), "compression level for codecs specified via flags")
("threads", po::value<size_t>()->default_value(1), "number of threads for parallel compression")
("none", "use no compression instead of LZ4")
("stat", "print block statistics of compressed data")
("stacktrace", "print stacktrace of exception")
@ -109,7 +118,8 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
bool stat_mode = options.count("stat");
bool use_none = options.count("none");
print_stacktrace = options.count("stacktrace");
unsigned block_size = options["block-size"].as<unsigned>();
size_t block_size = options["block-size"].as<size_t>();
size_t num_threads = options["threads"].as<size_t>();
std::vector<std::string> codecs;
if (options.count("codec"))
codecs = options["codec"].as<std::vector<std::string>>();
@ -117,6 +127,12 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
if ((use_lz4hc || use_zstd || use_none) && !codecs.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, codec flags like --zstd and --codec options are mutually exclusive");
if (num_threads < 1)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid value of `threads` parameter");
if (num_threads > 1 && decompress)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parallel mode is only implemented for compression (not for decompression)");
if (!codecs.empty() && options.count("level"))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, --level is not compatible with --codec list");
@ -145,7 +161,6 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
else
codec = CompressionCodecFactory::instance().get(method_family, level);
std::unique_ptr<ReadBufferFromFileBase> rb;
std::unique_ptr<WriteBufferFromFileBase> wb;
@ -186,9 +201,20 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
else
{
/// Compression
CompressedWriteBuffer to(*wb, codec, block_size);
copyData(*rb, to);
to.finalize();
if (num_threads == 1)
{
CompressedWriteBuffer to(*wb, codec, block_size);
copyData(*rb, to);
to.finalize();
}
else
{
ThreadPool pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads);
ParallelCompressedWriteBuffer to(*wb, codec, block_size, num_threads, pool);
copyData(*rb, to);
to.finalize();
}
}
}
catch (...)
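
The new branch offloads block compression to a thread pool while leaving the single-threaded path untouched. A simplified, self-contained illustration of the idea, not the ParallelCompressedWriteBuffer implementation (an identity "codec" stands in for a real one): split the input into fixed-size blocks, compress them concurrently, and emit results in submission order so the output layout matches the sequential path block-for-block.

#include <algorithm>
#include <cstdio>
#include <future>
#include <string>
#include <vector>

/// Stand-in codec for the sketch; a real codec call goes here.
static std::string compressBlock(std::string block)
{
    return block;
}

int main()
{
    std::string input(1 << 20, 'x');      /// pretend this is the file contents
    const size_t block_size = 64 * 1024;  /// mirrors the --block-size option

    std::vector<std::future<std::string>> blocks;
    for (size_t offset = 0; offset < input.size(); offset += block_size)
    {
        size_t len = std::min(block_size, input.size() - offset);
        blocks.push_back(std::async(std::launch::async, compressBlock,
                                    input.substr(offset, len)));
    }

    /// Draining futures in submission order keeps the block order of the
    /// single-threaded path, so readers see an identical stream layout.
    for (auto & f : blocks)
    {
        std::string compressed = f.get();
        std::fwrite(compressed.data(), 1, compressed.size(), stdout);
    }
}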

View File

@ -546,16 +546,18 @@ int mainEntryClickHouseDisks(int argc, char ** argv)
catch (const DB::Exception & e)
{
std::cerr << DB::getExceptionMessage(e, false) << std::endl;
return 0;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
catch (const boost::program_options::error & e)
{
std::cerr << "Bad arguments: " << e.what() << std::endl;
return 0;
return DB::ErrorCodes::BAD_ARGUMENTS;
}
catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
return 0;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -448,7 +448,8 @@ int mainEntryClickHouseKeeperClient(int argc, char ** argv)
catch (const DB::Exception & e)
{
std::cerr << DB::getExceptionMessage(e, false) << std::endl;
return 1;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
catch (const boost::program_options::error & e)
{
@ -458,6 +459,7 @@ int mainEntryClickHouseKeeperClient(int argc, char ** argv)
catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
return 1;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -81,7 +81,7 @@ int mainEntryClickHouseKeeper(int argc, char ** argv)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
}
@ -672,7 +672,7 @@ catch (...)
/// Poco does not provide stacktrace.
tryLogCurrentException("Application");
auto code = getCurrentExceptionCode();
return code ? code : -1;
return static_cast<UInt8>(code) ? code : -1;
}

View File

@ -13,7 +13,7 @@ int mainEntryClickHouseLibraryBridge(int argc, char ** argv)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -14,6 +14,7 @@
#include <Databases/registerDatabases.h>
#include <Databases/DatabaseFilesystem.h>
#include <Databases/DatabaseMemory.h>
#include <Databases/DatabaseAtomic.h>
#include <Databases/DatabasesOverlay.h>
#include <Storages/System/attachSystemTables.h>
#include <Storages/System/attachInformationSchemaTables.h>
@ -22,7 +23,6 @@
#include <Interpreters/ProcessList.h>
#include <Interpreters/loadMetadata.h>
#include <Interpreters/registerInterpreters.h>
#include <base/getFQDNOrHostName.h>
#include <Access/AccessControl.h>
#include <Common/PoolId.h>
#include <Common/Exception.h>
@ -31,7 +31,6 @@
#include <Common/ThreadStatus.h>
#include <Common/TLDListsHolder.h>
#include <Common/quoteString.h>
#include <Common/randomSeed.h>
#include <Common/ThreadPool.h>
#include <Common/CurrentMetrics.h>
#include <Loggers/OwnFormattingChannel.h>
@ -50,7 +49,6 @@
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Formats/registerFormats.h>
#include <boost/algorithm/string/replace.hpp>
#include <boost/program_options/options_description.hpp>
#include <base/argsToConfig.h>
#include <filesystem>
@ -71,9 +69,11 @@ namespace CurrentMetrics
namespace DB
{
namespace Setting
{
extern const SettingsBool allow_introspection_functions;
extern const SettingsBool implicit_select;
extern const SettingsLocalFSReadMethod storage_file_read_method;
}
@ -126,6 +126,7 @@ void applySettingsOverridesForLocal(ContextMutablePtr context)
settings[Setting::allow_introspection_functions] = true;
settings[Setting::storage_file_read_method] = LocalFSReadMethod::mmap;
settings[Setting::implicit_select] = true;
context->setSettings(settings);
}
@ -257,12 +258,12 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
return system_database;
}
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context)
{
auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
return databaseCombiner;
auto overlay = std::make_shared<DatabasesOverlay>(name_, context);
overlay->registerNextDatabase(std::make_shared<DatabaseAtomic>(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context));
overlay->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context));
return overlay;
}
/// If path is specified and not empty, will try to setup server environment and load existing metadata
@ -615,12 +616,14 @@ catch (const DB::Exception & e)
{
bool need_print_stack_trace = getClientConfiguration().getBool("stacktrace", false);
std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl;
return e.code() ? e.code() : -1;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
catch (...)
{
std::cerr << getCurrentExceptionMessage(false) << std::endl;
return getCurrentExceptionCode();
std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
void LocalServer::updateLoggerLevel(const String & logs_level)
@ -809,7 +812,12 @@ void LocalServer::processConfig()
DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase();
std::string default_database = server_settings[ServerSetting::default_database];
DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
{
DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context);
if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil)
DatabaseCatalog::instance().addUUIDMapping(uuid);
DatabaseCatalog::instance().attachDatabase(default_database, database);
}
global_context->setCurrentDatabase(default_database);
if (getClientConfiguration().has("path"))
@ -1029,7 +1037,7 @@ int mainEntryClickHouseLocal(int argc, char ** argv)
{
std::cerr << DB::getExceptionMessage(e, false) << std::endl;
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
catch (const boost::program_options::error & e)
{
@ -1040,6 +1048,6 @@ int mainEntryClickHouseLocal(int argc, char ** argv)
{
std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -1,27 +1,22 @@
#include <unistd.h>
#include <fcntl.h>
#include <base/phdr_cache.h>
#include <Common/EnvironmentChecks.h>
#include <Common/StringUtils.h>
#include <Common/getHashOfLoadedBinary.h>
#include <new>
#include <iostream>
#include <vector>
#include <string>
#include <string_view>
#include <utility> /// pair
#include <fmt/format.h>
#if defined(SANITIZE_COVERAGE)
# include <Common/Coverage.h>
#endif
#include "config.h"
#include "config_tools.h"
#include <Common/EnvironmentChecks.h>
#include <Common/Coverage.h>
#include <Common/StringUtils.h>
#include <Common/getHashOfLoadedBinary.h>
#include <Common/IO.h>
#include <base/phdr_cache.h>
#include <base/coverage.h>
#include <filesystem>
#include <iostream>
#include <new>
#include <string>
#include <string_view>
#include <utility> /// pair
#include <vector>
/// Universal executable for various clickhouse applications
int mainEntryClickHouseServer(int argc, char ** argv);
@ -238,9 +233,12 @@ int main(int argc_, char ** argv_)
/// clickhouse # spawn local
/// clickhouse local # spawn local
/// clickhouse "select ..." # spawn local
/// clickhouse /tmp/repro --enable-analyzer
///
if (main_func == printHelp && !argv.empty() && (argv.size() == 1 || argv[1][0] == '-'
|| std::string_view(argv[1]).contains(' ')))
std::error_code ec;
if (main_func == printHelp && !argv.empty()
&& (argv.size() == 1 || argv[1][0] == '-' || std::string_view(argv[1]).contains(' ')
|| std::filesystem::is_regular_file(std::filesystem::path{argv[1]}, ec)))
{
main_func = mainEntryClickHouseLocal;
}
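
The added condition uses the non-throwing overload of std::filesystem::is_regular_file: with an std::error_code argument, a failed stat (permission denied, dangling symlink) yields false instead of an exception, so probing argv[1] can never abort startup. A tiny sketch of the same check:

#include <cstdio>
#include <filesystem>

int main(int argc, char ** argv)
{
    std::error_code ec;
    bool looks_like_file = argc > 1
        && std::filesystem::is_regular_file(std::filesystem::path{argv[1]}, ec);
    /// ec is set (and the result is false) when the path cannot be examined.
    std::puts(looks_like_file ? "dispatch to clickhouse-local" : "print help");
}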

View File

@ -1480,5 +1480,5 @@ catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}

View File

@ -13,7 +13,7 @@ int mainEntryClickHouseODBCBridge(int argc, char ** argv)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -343,7 +343,7 @@ int mainEntryClickHouseServer(int argc, char ** argv)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
}
@ -2537,7 +2537,7 @@ catch (...)
/// Poco does not provide stacktrace.
tryLogCurrentException("Application");
auto code = getCurrentExceptionCode();
return code ? code : -1;
return static_cast<UInt8>(code) ? code : -1;
}
std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(

View File

@ -59,7 +59,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
{
if (0 != getgrgid_r(gid, &entry, buf.get(), buf_size, &result))
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrgid_r' to obtain group name from group id ({})", arg_gid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
}
gid = entry.gr_gid;
}
@ -84,7 +90,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
{
if (0 != getpwuid_r(uid, &entry, buf.get(), buf_size, &result))
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwuid_r' to obtain user name from user id ({})", uid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
}
uid = entry.pw_uid;
}
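
Both added blocks follow the standard reentrant reverse-lookup pattern: when the argument parses as a number, getgrgid_r/getpwuid_r confirm that the id actually exists before it is used. A self-contained sketch of that pattern for groups; the sysconf-based buffer sizing is the usual idiom, an assumption rather than this file's exact code:

#include <grp.h>
#include <unistd.h>
#include <cstdio>
#include <vector>

/// Returns true if a group with the given numeric id exists on the system.
bool groupExists(gid_t gid)
{
    long buf_size = sysconf(_SC_GETGR_R_SIZE_MAX);
    if (buf_size <= 0)
        buf_size = 16384; /// fallback when the limit is indeterminate
    std::vector<char> buf(buf_size);
    group entry{};
    group * result = nullptr;
    if (0 != getgrgid_r(gid, &entry, buf.data(), buf.size(), &result))
        return false; /// system error (a full solution would retry on ERANGE)
    return result != nullptr; /// zero return with null result means "not found"
}

int main()
{
    std::printf("group 0 exists: %d\n", groupExists(0) ? 1 : 0);
}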

View File

@ -22,6 +22,13 @@ namespace ErrorCodes
namespace
{
/** Due to a lack of proper code review, this code was contributed with a multiplication of template instantiations
* over all pairs of data types, and we deeply regret that.
*
* We cannot remove all combinations, because the binary representation of serialized data has to remain the same,
* but we can partially heal the wound by treating unsigned and signed data types in the same way.
*/
template <typename ValueType, typename TimestampType>
struct AggregationFunctionDeltaSumTimestampData
{
@ -37,23 +44,22 @@ template <typename ValueType, typename TimestampType>
class AggregationFunctionDeltaSumTimestamp final
: public IAggregateFunctionDataHelper<
AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
>
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>
{
public:
AggregationFunctionDeltaSumTimestamp(const DataTypes & arguments, const Array & params)
: IAggregateFunctionDataHelper<
AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
>{arguments, params, createResultType()}
{}
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{arguments, params, createResultType()}
{
}
AggregationFunctionDeltaSumTimestamp()
: IAggregateFunctionDataHelper<
AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
>{}
{}
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{}
{
}
bool allocatesMemoryInArena() const override { return false; }
@ -63,8 +69,8 @@ public:
void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
{
auto value = assert_cast<const ColumnVector<ValueType> &>(*columns[0]).getData()[row_num];
auto ts = assert_cast<const ColumnVector<TimestampType> &>(*columns[1]).getData()[row_num];
auto value = unalignedLoad<ValueType>(columns[0]->getRawData().data() + row_num * sizeof(ValueType));
auto ts = unalignedLoad<TimestampType>(columns[1]->getRawData().data() + row_num * sizeof(TimestampType));
auto & data = this->data(place);
@ -172,10 +178,48 @@ public:
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{
assert_cast<ColumnVector<ValueType> &>(to).getData().push_back(this->data(place).sum);
static_cast<ColumnFixedSizeHelper &>(to).template insertRawData<sizeof(ValueType)>(
reinterpret_cast<const char *>(&this->data(place).sum));
}
};
template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
IAggregateFunction * createWithTwoTypesSecond(const IDataType & second_type, TArgs && ... args)
{
WhichDataType which(second_type);
if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate<FirstType, UInt64>(args...);
if (which.idx == TypeIndex::Int32) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
if (which.idx == TypeIndex::Int64) return new AggregateFunctionTemplate<FirstType, UInt64>(args...);
if (which.idx == TypeIndex::Float32) return new AggregateFunctionTemplate<FirstType, Float32>(args...);
if (which.idx == TypeIndex::Float64) return new AggregateFunctionTemplate<FirstType, Float64>(args...);
if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(args...);
if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
return nullptr;
}
template <template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
IAggregateFunction * createWithTwoTypes(const IDataType & first_type, const IDataType & second_type, TArgs && ... args)
{
WhichDataType which(first_type);
if (which.idx == TypeIndex::UInt8) return createWithTwoTypesSecond<UInt8, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::UInt16) return createWithTwoTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::UInt32) return createWithTwoTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::UInt64) return createWithTwoTypesSecond<UInt64, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::Int8) return createWithTwoTypesSecond<UInt8, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::Int16) return createWithTwoTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::Int32) return createWithTwoTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::Int64) return createWithTwoTypesSecond<UInt64, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::Float32) return createWithTwoTypesSecond<Float32, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::Float64) return createWithTwoTypesSecond<Float64, AggregateFunctionTemplate>(second_type, args...);
return nullptr;
}
AggregateFunctionPtr createAggregateFunctionDeltaSumTimestamp(
const String & name,
const DataTypes & arguments,
@ -193,7 +237,7 @@ AggregateFunctionPtr createAggregateFunctionDeltaSumTimestamp(
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}, "
"must be Int, Float, Date, DateTime", arguments[1]->getName(), name);
return AggregateFunctionPtr(createWithTwoNumericOrDateTypes<AggregationFunctionDeltaSumTimestamp>(
return AggregateFunctionPtr(createWithTwoTypes<AggregationFunctionDeltaSumTimestamp>(
*arguments[0], *arguments[1], arguments, params));
}
}
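
The switch from assert_cast on a concrete ColumnVector to unalignedLoad over raw column bytes is what allows the dispatch above to fold signed and unsigned cases together: an Int32 column and a UInt32 column carry identical bytes, so one template instantiation can serve both, cutting the instantiation count the comment complains about. In spirit, unalignedLoad is a memcpy-based read, roughly along these lines (a sketch in the style of base/unaligned.h, not a verbatim copy):

#include <cstdint>
#include <cstdio>
#include <cstring>

/// Reads a T from a possibly misaligned address; memcpy keeps it UB-free
/// and compiles to a plain load on common targets.
template <typename T>
T unalignedLoad(const void * address)
{
    T res;
    std::memcpy(&res, address, sizeof(res));
    return res;
}

int main()
{
    /// Same bytes read as unsigned and as signed: two's complement makes the
    /// representations interchangeable, which is what the dispatch relies on.
    unsigned char bytes[] = {0xFF, 0xFF, 0xFF, 0xFF};
    std::printf("%u %d\n", unalignedLoad<uint32_t>(bytes), unalignedLoad<int32_t>(bytes));
}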

View File

@ -184,36 +184,8 @@ static IAggregateFunction * createWithDecimalType(const IDataType & argument_typ
}
/** For template with two arguments.
* This is extremely dangerous for code bloat - do not use.
*/
template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithTwoNumericTypesSecond(const IDataType & second_type, TArgs && ... args)
{
WhichDataType which(second_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<FirstType, Int8>(args...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<FirstType, Int16>(args...);
return nullptr;
}
template <template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithTwoNumericTypes(const IDataType & first_type, const IDataType & second_type, TArgs && ... args)
{
WhichDataType which(first_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return createWithTwoNumericTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8)
return createWithTwoNumericTypesSecond<Int8, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::Enum16)
return createWithTwoNumericTypesSecond<Int16, AggregateFunctionTemplate>(second_type, args...);
return nullptr;
}
template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithTwoBasicNumericTypesSecond(const IDataType & second_type, TArgs && ... args)
{
@ -237,46 +209,6 @@ static IAggregateFunction * createWithTwoBasicNumericTypes(const IDataType & fir
return nullptr;
}
template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithTwoNumericOrDateTypesSecond(const IDataType & second_type, TArgs && ... args)
{
WhichDataType which(second_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<FirstType, Int8>(args...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<FirstType, Int16>(args...);
/// expects that DataTypeDate based on UInt16, DataTypeDateTime based on UInt32
if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(args...);
if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
return nullptr;
}
template <template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithTwoNumericOrDateTypes(const IDataType & first_type, const IDataType & second_type, TArgs && ... args)
{
WhichDataType which(first_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return createWithTwoNumericOrDateTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8)
return createWithTwoNumericOrDateTypesSecond<Int8, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::Enum16)
return createWithTwoNumericOrDateTypesSecond<Int16, AggregateFunctionTemplate>(second_type, args...);
/// expects that DataTypeDate based on UInt16, DataTypeDateTime based on UInt32
if (which.idx == TypeIndex::Date)
return createWithTwoNumericOrDateTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::DateTime)
return createWithTwoNumericOrDateTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, args...);
return nullptr;
}
template <template <typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithStringType(const IDataType & argument_type, TArgs && ... args)
{

View File

@ -88,6 +88,7 @@ void FunctionNode::resolveAsFunction(FunctionBasePtr function_value)
function_name = function_value->getName();
function = std::move(function_value);
kind = FunctionKind::ORDINARY;
nulls_action = NullsAction::EMPTY;
}
void FunctionNode::resolveAsAggregateFunction(AggregateFunctionPtr aggregate_function_value)
@ -95,6 +96,12 @@ void FunctionNode::resolveAsAggregateFunction(AggregateFunctionPtr aggregate_fun
function_name = aggregate_function_value->getName();
function = std::move(aggregate_function_value);
kind = FunctionKind::AGGREGATE;
/** When the function is resolved, we do not need the nulls action anymore.
* The only thing that the nulls action does is map from one function to another.
* Thus, the nulls action is encoded in the function name and does not make sense anymore.
* Keeping the nulls action may lead to incorrect comparison of functions, e.g., count() and count() IGNORE NULLS are the same function.
*/
nulls_action = NullsAction::EMPTY;
}
void FunctionNode::resolveAsWindowFunction(AggregateFunctionPtr window_function_value)

View File

@ -48,9 +48,15 @@ ASTPtr JoinNode::toASTTableJoin() const
auto join_expression_ast = children[join_expression_child_index]->toAST();
if (is_using_join_expression)
join_ast->using_expression_list = std::move(join_expression_ast);
{
join_ast->using_expression_list = join_expression_ast;
join_ast->children.push_back(join_ast->using_expression_list);
}
else
join_ast->on_expression = std::move(join_expression_ast);
{
join_ast->on_expression = join_expression_ast;
join_ast->children.push_back(join_ast->on_expression);
}
}
return join_ast;
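
The fix restores an AST invariant: nodes keep convenience pointers such as using_expression_list, but generic machinery (formatting, cloning, visitors) walks only the children vector, so an expression that is not also pushed into children is invisible to traversal. A minimal sketch of the invariant with a hypothetical Node type (not ClickHouse's IAST):

#include <cstdio>
#include <memory>
#include <string>
#include <vector>

struct Node
{
    std::string name;
    std::shared_ptr<Node> on_expression;         /// convenience pointer
    std::vector<std::shared_ptr<Node>> children; /// what traversals walk

    void visit(int depth = 0) const
    {
        std::printf("%*s%s\n", depth * 2, "", name.c_str());
        for (const auto & child : children)
            child->visit(depth + 1);
    }
};

int main()
{
    auto join = std::make_shared<Node>();
    join->name = "table_join";

    auto expr = std::make_shared<Node>();
    expr->name = "on_expression";

    join->on_expression = expr;     /// member alone: invisible to visit()
    join->children.push_back(expr); /// the added push_back makes it reachable
    join->visit();
}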

View File

@ -85,10 +85,9 @@ QueryTreeNodePtr createResolvedFunction(const ContextPtr & context, const String
}
FunctionNodePtr createResolvedAggregateFunction(
const String & name, const QueryTreeNodePtr & argument, const Array & parameters = {}, NullsAction action = NullsAction::EMPTY)
const String & name, const QueryTreeNodePtr & argument, const Array & parameters = {})
{
auto function_node = std::make_shared<FunctionNode>(name);
function_node->setNullsAction(action);
if (!parameters.empty())
{
@ -100,7 +99,7 @@ FunctionNodePtr createResolvedAggregateFunction(
function_node->getArguments().getNodes() = { argument };
AggregateFunctionProperties properties;
auto aggregate_function = AggregateFunctionFactory::instance().get(name, action, {argument->getResultType()}, parameters, properties);
auto aggregate_function = AggregateFunctionFactory::instance().get(name, NullsAction::EMPTY, {argument->getResultType()}, parameters, properties);
function_node->resolveAsAggregateFunction(std::move(aggregate_function));
return function_node;

View File

@ -3,7 +3,6 @@
#include <memory>
#include <Common/Exception.h>
#include "Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.h"
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>
@ -16,39 +15,39 @@
#include <Analyzer/ColumnNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/Utils.h>
#include <Analyzer/Passes/AggregateFunctionOfGroupByKeysPass.h>
#include <Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.h>
#include <Analyzer/Passes/ArrayExistsToHasPass.h>
#include <Analyzer/Passes/AutoFinalOnQueryPass.h>
#include <Analyzer/Passes/ComparisonTupleEliminationPass.h>
#include <Analyzer/Passes/ConvertOrLikeChainPass.h>
#include <Analyzer/Passes/ConvertQueryToCNFPass.h>
#include <Analyzer/Passes/CountDistinctPass.h>
#include <Analyzer/Passes/CrossToInnerJoinPass.h>
#include <Analyzer/Passes/FunctionToSubcolumnsPass.h>
#include <Analyzer/Passes/FuseFunctionsPass.h>
#include <Analyzer/Passes/GroupingFunctionsResolvePass.h>
#include <Analyzer/Passes/IfChainToMultiIfPass.h>
#include <Analyzer/Passes/IfConstantConditionPass.h>
#include <Analyzer/Passes/IfTransformStringsToEnumPass.h>
#include <Analyzer/Passes/LogicalExpressionOptimizerPass.h>
#include <Analyzer/Passes/MultiIfToIfPass.h>
#include <Analyzer/Passes/NormalizeCountVariantsPass.h>
#include <Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.h>
#include <Analyzer/Passes/OptimizeGroupByFunctionKeysPass.h>
#include <Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.h>
#include <Analyzer/Passes/OptimizeRedundantFunctionsInOrderByPass.h>
#include <Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.h>
#include <Analyzer/Passes/OrderByTupleEliminationPass.h>
#include <Analyzer/Passes/QueryAnalysisPass.h>
#include <Analyzer/Passes/RemoveUnusedProjectionColumnsPass.h>
#include <Analyzer/Passes/RewriteSumFunctionWithSumAndCountPass.h>
#include <Analyzer/Passes/CountDistinctPass.h>
#include <Analyzer/Passes/UniqToCountPass.h>
#include <Analyzer/Passes/FunctionToSubcolumnsPass.h>
#include <Analyzer/Passes/RewriteAggregateFunctionWithIfPass.h>
#include <Analyzer/Passes/SumIfToCountIfPass.h>
#include <Analyzer/Passes/MultiIfToIfPass.h>
#include <Analyzer/Passes/IfConstantConditionPass.h>
#include <Analyzer/Passes/IfChainToMultiIfPass.h>
#include <Analyzer/Passes/OrderByTupleEliminationPass.h>
#include <Analyzer/Passes/NormalizeCountVariantsPass.h>
#include <Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.h>
#include <Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.h>
#include <Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.h>
#include <Analyzer/Passes/FuseFunctionsPass.h>
#include <Analyzer/Passes/OptimizeGroupByFunctionKeysPass.h>
#include <Analyzer/Passes/IfTransformStringsToEnumPass.h>
#include <Analyzer/Passes/ConvertOrLikeChainPass.h>
#include <Analyzer/Passes/OptimizeRedundantFunctionsInOrderByPass.h>
#include <Analyzer/Passes/GroupingFunctionsResolvePass.h>
#include <Analyzer/Passes/AutoFinalOnQueryPass.h>
#include <Analyzer/Passes/ArrayExistsToHasPass.h>
#include <Analyzer/Passes/ComparisonTupleEliminationPass.h>
#include <Analyzer/Passes/LogicalExpressionOptimizerPass.h>
#include <Analyzer/Passes/CrossToInnerJoinPass.h>
#include <Analyzer/Passes/RewriteSumFunctionWithSumAndCountPass.h>
#include <Analyzer/Passes/ShardNumColumnToFunctionPass.h>
#include <Analyzer/Passes/ConvertQueryToCNFPass.h>
#include <Analyzer/Passes/AggregateFunctionOfGroupByKeysPass.h>
#include <Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.h>
#include <Analyzer/Passes/SumIfToCountIfPass.h>
#include <Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.h>
#include <Analyzer/Passes/UniqToCountPass.h>
#include <Analyzer/Utils.h>
namespace DB
{

View File

@ -1,3 +1,4 @@
#include <Interpreters/ProcessorsProfileLog.h>
#include <Common/FieldVisitorToString.h>
#include <DataTypes/DataTypesNumber.h>
@ -51,7 +52,6 @@
#include <Analyzer/ArrayJoinNode.h>
#include <Analyzer/JoinNode.h>
#include <Analyzer/UnionNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryTreeBuilder.h>
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/Identifier.h>
@ -677,6 +677,8 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
"tuple"});
}
}
logProcessorProfile(context, io.pipeline.getProcessors());
}
scalars_cache.emplace(node_with_hash, scalar_block);
@ -3023,9 +3025,10 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
argument_column.name = arguments_projection_names[function_argument_index];
/** If function argument is lambda, save lambda argument index and initialize argument type as DataTypeFunction
* where function argument types are initialized with empty array of lambda arguments size.
* where the function argument types are initialized with an array of nulls sized to the number of lambda arguments.
*/
if (const auto * lambda_node = function_argument->as<const LambdaNode>())
const auto * lambda_node = function_argument->as<const LambdaNode>();
if (lambda_node)
{
size_t lambda_arguments_size = lambda_node->getArguments().getNodes().size();
argument_column.type = std::make_shared<DataTypeFunction>(DataTypes(lambda_arguments_size, nullptr), nullptr);
@ -3497,15 +3500,11 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
else
function_base = function->build(argument_columns);
/// Do not constant fold get scalar functions
// bool disable_constant_folding = function_name == "__getScalar" || function_name == "shardNum" ||
// function_name == "shardCount" || function_name == "hostName" || function_name == "tcpPort";
/** If function is suitable for constant folding try to convert it to constant.
* Example: SELECT plus(1, 1);
* Result: SELECT 2;
*/
if (function_base->isSuitableForConstantFolding()) // && !disable_constant_folding)
if (function_base->isSuitableForConstantFolding())
{
auto result_type = function_base->getResultType();
auto executable_function = function_base->prepare(argument_columns);
@ -3514,7 +3513,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
if (all_arguments_constants)
{
size_t num_rows = function_arguments.empty() ? 0 : argument_columns.front().column->size();
size_t num_rows = 0;
if (!argument_columns.empty())
num_rows = argument_columns.front().column->size();
column = executable_function->execute(argument_columns, result_type, num_rows, true);
}
else

View File

@ -14,12 +14,12 @@ namespace ErrorCodes
BackupConcurrencyCheck::BackupConcurrencyCheck(
const UUID & backup_or_restore_uuid_,
bool is_restore_,
bool on_cluster_,
const String & zookeeper_path_,
bool allow_concurrency_,
BackupConcurrencyCounters & counters_)
: is_restore(is_restore_), backup_or_restore_uuid(backup_or_restore_uuid_), on_cluster(on_cluster_), counters(counters_)
: is_restore(is_restore_), on_cluster(on_cluster_), zookeeper_path(zookeeper_path_), counters(counters_)
{
std::lock_guard lock{counters.mutex};
@ -32,7 +32,7 @@ BackupConcurrencyCheck::BackupConcurrencyCheck(
size_t num_on_cluster_restores = counters.on_cluster_restores.size();
if (on_cluster)
{
if (!counters.on_cluster_restores.contains(backup_or_restore_uuid))
if (!counters.on_cluster_restores.contains(zookeeper_path))
++num_on_cluster_restores;
}
else
@ -47,7 +47,7 @@ BackupConcurrencyCheck::BackupConcurrencyCheck(
size_t num_on_cluster_backups = counters.on_cluster_backups.size();
if (on_cluster)
{
if (!counters.on_cluster_backups.contains(backup_or_restore_uuid))
if (!counters.on_cluster_backups.contains(zookeeper_path))
++num_on_cluster_backups;
}
else
@ -64,9 +64,9 @@ BackupConcurrencyCheck::BackupConcurrencyCheck(
if (on_cluster)
{
if (is_restore)
++counters.on_cluster_restores[backup_or_restore_uuid];
++counters.on_cluster_restores[zookeeper_path];
else
++counters.on_cluster_backups[backup_or_restore_uuid];
++counters.on_cluster_backups[zookeeper_path];
}
else
{
@ -86,7 +86,7 @@ BackupConcurrencyCheck::~BackupConcurrencyCheck()
{
if (is_restore)
{
auto it = counters.on_cluster_restores.find(backup_or_restore_uuid);
auto it = counters.on_cluster_restores.find(zookeeper_path);
if (it != counters.on_cluster_restores.end())
{
if (!--it->second)
@ -95,7 +95,7 @@ BackupConcurrencyCheck::~BackupConcurrencyCheck()
}
else
{
auto it = counters.on_cluster_backups.find(backup_or_restore_uuid);
auto it = counters.on_cluster_backups.find(zookeeper_path);
if (it != counters.on_cluster_backups.end())
{
if (!--it->second)

View File

@ -1,7 +1,8 @@
#pragma once
#include <Core/UUID.h>
#include <base/defines.h>
#include <base/scope_guard.h>
#include <base/types.h>
#include <mutex>
#include <unordered_map>
@ -19,9 +20,9 @@ public:
/// Checks concurrency of a BACKUP operation or a RESTORE operation.
/// Keep a constructed instance of BackupConcurrencyCheck until the operation is done.
BackupConcurrencyCheck(
const UUID & backup_or_restore_uuid_,
bool is_restore_,
bool on_cluster_,
const String & zookeeper_path_,
bool allow_concurrency_,
BackupConcurrencyCounters & counters_);
@ -31,8 +32,8 @@ public:
private:
const bool is_restore;
const UUID backup_or_restore_uuid;
const bool on_cluster;
const String zookeeper_path;
BackupConcurrencyCounters & counters;
};
@ -47,8 +48,8 @@ private:
friend class BackupConcurrencyCheck;
size_t local_backups TSA_GUARDED_BY(mutex) = 0;
size_t local_restores TSA_GUARDED_BY(mutex) = 0;
std::unordered_map<UUID /* backup_uuid */, size_t /* num_refs */> on_cluster_backups TSA_GUARDED_BY(mutex);
std::unordered_map<UUID /* restore_uuid */, size_t /* num_refs */> on_cluster_restores TSA_GUARDED_BY(mutex);
std::unordered_map<String /* zookeeper_path */, size_t /* num_refs */> on_cluster_backups TSA_GUARDED_BY(mutex);
std::unordered_map<String /* zookeeper_path */, size_t /* num_refs */> on_cluster_restores TSA_GUARDED_BY(mutex);
std::mutex mutex;
};
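
Keying the maps by zookeeper_path instead of UUID makes the coordination path the identity of an ON CLUSTER operation: registrations that share a path bump a refcount rather than counting as a second concurrent backup or restore. A compact sketch of that scoped refcounting pattern with hypothetical names (not the real BackupConcurrencyCheck):

#include <mutex>
#include <stdexcept>
#include <string>
#include <unordered_map>

struct Counters
{
    std::mutex mutex;
    std::unordered_map<std::string, size_t> on_cluster_ops; /// path -> num refs
};

class ScopedCheck
{
public:
    ScopedCheck(Counters & counters_, std::string path_, bool allow_concurrency)
        : counters(counters_), path(std::move(path_))
    {
        std::lock_guard lock{counters.mutex};
        /// Another registration under the same path is the same ON CLUSTER
        /// operation, so it never counts as "concurrent".
        if (!allow_concurrency && !counters.on_cluster_ops.contains(path)
            && !counters.on_cluster_ops.empty())
            throw std::runtime_error("Concurrent operations are not allowed");
        ++counters.on_cluster_ops[path];
    }

    ~ScopedCheck()
    {
        std::lock_guard lock{counters.mutex};
        auto it = counters.on_cluster_ops.find(path);
        if (it != counters.on_cluster_ops.end() && !--it->second)
            counters.on_cluster_ops.erase(it);
    }

private:
    Counters & counters;
    std::string path;
};

int main()
{
    Counters counters;
    ScopedCheck a(counters, "/clickhouse/backups/backup-1", /* allow_concurrency = */ false);
    ScopedCheck b(counters, "/clickhouse/backups/backup-1", false); /// same path: allowed
    /// ScopedCheck c(counters, "/clickhouse/backups/backup-2", false); /// would throw
}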

View File

@ -4,31 +4,29 @@
namespace DB
{
BackupCoordinationCleaner::BackupCoordinationCleaner(const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_)
: zookeeper_path(zookeeper_path_), with_retries(with_retries_), log(log_)
BackupCoordinationCleaner::BackupCoordinationCleaner(bool is_restore_, const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_)
: is_restore(is_restore_), zookeeper_path(zookeeper_path_), with_retries(with_retries_), log(log_)
{
}
void BackupCoordinationCleaner::cleanup()
bool BackupCoordinationCleaner::cleanup(bool throw_if_error)
{
tryRemoveAllNodes(/* throw_if_error = */ true, /* retries_kind = */ WithRetries::kNormal);
WithRetries::Kind retries_kind = throw_if_error ? WithRetries::kNormal : WithRetries::kErrorHandling;
return cleanupImpl(throw_if_error, retries_kind);
}
bool BackupCoordinationCleaner::tryCleanupAfterError() noexcept
{
return tryRemoveAllNodes(/* throw_if_error = */ false, /* retries_kind = */ WithRetries::kNormal);
}
bool BackupCoordinationCleaner::tryRemoveAllNodes(bool throw_if_error, WithRetries::Kind retries_kind)
bool BackupCoordinationCleaner::cleanupImpl(bool throw_if_error, WithRetries::Kind retries_kind)
{
{
std::lock_guard lock{mutex};
if (cleanup_result.succeeded)
return true;
if (cleanup_result.exception)
if (succeeded)
{
if (throw_if_error)
std::rethrow_exception(cleanup_result.exception);
LOG_TRACE(log, "Nodes from ZooKeeper are already removed");
return true;
}
if (tried)
{
LOG_INFO(log, "Skipped removing nodes from ZooKeeper because because earlier we failed to do that");
return false;
}
}
@ -44,16 +42,18 @@ bool BackupCoordinationCleaner::tryRemoveAllNodes(bool throw_if_error, WithRetri
});
std::lock_guard lock{mutex};
cleanup_result.succeeded = true;
tried = true;
succeeded = true;
return true;
}
catch (...)
{
LOG_TRACE(log, "Caught exception while removing nodes from ZooKeeper for this restore: {}",
LOG_TRACE(log, "Caught exception while removing nodes from ZooKeeper for this {}: {}",
is_restore ? "restore" : "backup",
getCurrentExceptionMessage(/* with_stacktrace= */ false, /* check_embedded_stacktrace= */ true));
std::lock_guard lock{mutex};
cleanup_result.exception = std::current_exception();
tried = true;
if (throw_if_error)
throw;

View File

@ -12,14 +12,14 @@ namespace DB
class BackupCoordinationCleaner
{
public:
BackupCoordinationCleaner(const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_);
BackupCoordinationCleaner(bool is_restore_, const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_);
void cleanup();
bool tryCleanupAfterError() noexcept;
bool cleanup(bool throw_if_error);
private:
bool tryRemoveAllNodes(bool throw_if_error, WithRetries::Kind retries_kind);
bool cleanupImpl(bool throw_if_error, WithRetries::Kind retries_kind);
const bool is_restore;
const String zookeeper_path;
/// A reference to a field of the parent object which is either BackupCoordinationOnCluster or RestoreCoordinationOnCluster.
@ -27,13 +27,8 @@ private:
const LoggerPtr log;
struct CleanupResult
{
bool succeeded = false;
std::exception_ptr exception;
};
CleanupResult cleanup_result TSA_GUARDED_BY(mutex);
bool tried TSA_GUARDED_BY(mutex) = false;
bool succeeded TSA_GUARDED_BY(mutex) = false;
std::mutex mutex;
};

View File

@ -11,12 +11,11 @@ namespace DB
{
BackupCoordinationLocal::BackupCoordinationLocal(
const UUID & backup_uuid_,
bool is_plain_backup_,
bool allow_concurrent_backup_,
BackupConcurrencyCounters & concurrency_counters_)
: log(getLogger("BackupCoordinationLocal"))
, concurrency_check(backup_uuid_, /* is_restore = */ false, /* on_cluster = */ false, allow_concurrent_backup_, concurrency_counters_)
, concurrency_check(/* is_restore = */ false, /* on_cluster = */ false, /* zookeeper_path = */ "", allow_concurrent_backup_, concurrency_counters_)
, file_infos(is_plain_backup_)
{
}

View File

@ -23,20 +23,19 @@ class BackupCoordinationLocal : public IBackupCoordination
{
public:
explicit BackupCoordinationLocal(
const UUID & backup_uuid_,
bool is_plain_backup_,
bool allow_concurrent_backup_,
BackupConcurrencyCounters & concurrency_counters_);
~BackupCoordinationLocal() override;
void setBackupQueryIsSentToOtherHosts() override {}
bool isBackupQuerySentToOtherHosts() const override { return false; }
Strings setStage(const String &, const String &, bool) override { return {}; }
void setBackupQueryWasSentToOtherHosts() override {}
bool trySetError(std::exception_ptr) override { return true; }
void finish() override {}
bool tryFinishAfterError() noexcept override { return true; }
void waitForOtherHostsToFinish() override {}
bool tryWaitForOtherHostsToFinishAfterError() noexcept override { return true; }
bool setError(std::exception_ptr, bool) override { return true; }
bool waitOtherHostsFinish(bool) const override { return true; }
bool finish(bool) override { return true; }
bool cleanup(bool) override { return true; }
void addReplicatedPartNames(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name,
const std::vector<PartNameAndChecksum> & part_names_and_checksums) override;

View File

@ -184,17 +184,21 @@ BackupCoordinationOnCluster::BackupCoordinationOnCluster(
, plain_backup(is_plain_backup_)
, log(getLogger("BackupCoordinationOnCluster"))
, with_retries(log, get_zookeeper_, keeper_settings, process_list_element_, [root_zookeeper_path_](Coordination::ZooKeeperWithFaultInjection::Ptr zk) { zk->sync(root_zookeeper_path_); })
, concurrency_check(backup_uuid_, /* is_restore = */ false, /* on_cluster = */ true, allow_concurrent_backup_, concurrency_counters_)
, stage_sync(/* is_restore = */ false, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_backup_, with_retries, schedule_, process_list_element_, log)
, cleaner(zookeeper_path, with_retries, log)
, cleaner(/* is_restore = */ false, zookeeper_path, with_retries, log)
, stage_sync(/* is_restore = */ false, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_backup_, concurrency_counters_, with_retries, schedule_, process_list_element_, log)
{
createRootNodes();
try
{
createRootNodes();
}
catch (...)
{
stage_sync.setError(std::current_exception(), /* throw_if_error = */ false);
throw;
}
}
BackupCoordinationOnCluster::~BackupCoordinationOnCluster()
{
tryFinishImpl();
}
BackupCoordinationOnCluster::~BackupCoordinationOnCluster() = default;
void BackupCoordinationOnCluster::createRootNodes()
{
@ -217,69 +221,52 @@ void BackupCoordinationOnCluster::createRootNodes()
});
}
void BackupCoordinationOnCluster::setBackupQueryIsSentToOtherHosts()
{
stage_sync.setQueryIsSentToOtherHosts();
}
bool BackupCoordinationOnCluster::isBackupQuerySentToOtherHosts() const
{
return stage_sync.isQuerySentToOtherHosts();
}
Strings BackupCoordinationOnCluster::setStage(const String & new_stage, const String & message, bool sync)
{
stage_sync.setStage(new_stage, message);
if (!sync)
return {};
return stage_sync.waitForHostsToReachStage(new_stage, all_hosts_without_initiator);
if (sync)
return stage_sync.waitHostsReachStage(all_hosts_without_initiator, new_stage);
return {};
}
void BackupCoordinationOnCluster::setBackupQueryWasSentToOtherHosts()
bool BackupCoordinationOnCluster::setError(std::exception_ptr exception, bool throw_if_error)
{
backup_query_was_sent_to_other_hosts = true;
return stage_sync.setError(exception, throw_if_error);
}
bool BackupCoordinationOnCluster::trySetError(std::exception_ptr exception)
bool BackupCoordinationOnCluster::waitOtherHostsFinish(bool throw_if_error) const
{
return stage_sync.trySetError(exception);
return stage_sync.waitOtherHostsFinish(throw_if_error);
}
void BackupCoordinationOnCluster::finish()
bool BackupCoordinationOnCluster::finish(bool throw_if_error)
{
bool other_hosts_also_finished = false;
stage_sync.finish(other_hosts_also_finished);
if ((current_host == kInitiator) && (other_hosts_also_finished || !backup_query_was_sent_to_other_hosts))
cleaner.cleanup();
return stage_sync.finish(throw_if_error);
}
bool BackupCoordinationOnCluster::tryFinishAfterError() noexcept
bool BackupCoordinationOnCluster::cleanup(bool throw_if_error)
{
return tryFinishImpl();
}
bool BackupCoordinationOnCluster::tryFinishImpl() noexcept
{
bool other_hosts_also_finished = false;
if (!stage_sync.tryFinishAfterError(other_hosts_also_finished))
return false;
if ((current_host == kInitiator) && (other_hosts_also_finished || !backup_query_was_sent_to_other_hosts))
/// All the hosts must finish before we remove the coordination nodes.
bool expect_other_hosts_finished = stage_sync.isQuerySentToOtherHosts() || !stage_sync.isErrorSet();
bool all_hosts_finished = stage_sync.finished() && (stage_sync.otherHostsFinished() || !expect_other_hosts_finished);
if (!all_hosts_finished)
{
if (!cleaner.tryCleanupAfterError())
return false;
}
return true;
}
void BackupCoordinationOnCluster::waitForOtherHostsToFinish()
{
if ((current_host != kInitiator) || !backup_query_was_sent_to_other_hosts)
return;
stage_sync.waitForOtherHostsToFinish();
}
bool BackupCoordinationOnCluster::tryWaitForOtherHostsToFinishAfterError() noexcept
{
if (current_host != kInitiator)
auto unfinished_hosts = expect_other_hosts_finished ? stage_sync.getUnfinishedHosts() : Strings{current_host};
LOG_INFO(log, "Skipping removing nodes from ZooKeeper because hosts {} didn't finish",
BackupCoordinationStageSync::getHostsDesc(unfinished_hosts));
return false;
if (!backup_query_was_sent_to_other_hosts)
return true;
return stage_sync.tryWaitForOtherHostsToFinishAfterError();
}
return cleaner.cleanup(throw_if_error);
}
ZooKeeperRetriesInfo BackupCoordinationOnCluster::getOnClusterInitializationKeeperRetriesInfo() const

View File

@ -1,7 +1,6 @@
#pragma once
#include <Backups/IBackupCoordination.h>
#include <Backups/BackupConcurrencyCheck.h>
#include <Backups/BackupCoordinationCleaner.h>
#include <Backups/BackupCoordinationFileInfos.h>
#include <Backups/BackupCoordinationReplicatedAccess.h>
@ -20,7 +19,7 @@ class BackupCoordinationOnCluster : public IBackupCoordination
{
public:
/// Empty string as the current host is used to mark the initiator of a BACKUP ON CLUSTER query.
static const constexpr std::string_view kInitiator;
static const constexpr std::string_view kInitiator = BackupCoordinationStageSync::kInitiator;
BackupCoordinationOnCluster(
const UUID & backup_uuid_,
@ -37,13 +36,13 @@ public:
~BackupCoordinationOnCluster() override;
void setBackupQueryIsSentToOtherHosts() override;
bool isBackupQuerySentToOtherHosts() const override;
Strings setStage(const String & new_stage, const String & message, bool sync) override;
void setBackupQueryWasSentToOtherHosts() override;
bool trySetError(std::exception_ptr exception) override;
void finish() override;
bool tryFinishAfterError() noexcept override;
void waitForOtherHostsToFinish() override;
bool tryWaitForOtherHostsToFinishAfterError() noexcept override;
bool setError(std::exception_ptr exception, bool throw_if_error) override;
bool waitOtherHostsFinish(bool throw_if_error) const override;
bool finish(bool throw_if_error) override;
bool cleanup(bool throw_if_error) override;
void addReplicatedPartNames(
const String & table_zk_path,
@ -110,11 +109,10 @@ private:
const bool plain_backup;
LoggerPtr const log;
/// The order is important: `stage_sync` must be initialized after `with_retries` and `cleaner`.
const WithRetries with_retries;
BackupConcurrencyCheck concurrency_check;
BackupCoordinationStageSync stage_sync;
BackupCoordinationCleaner cleaner;
std::atomic<bool> backup_query_was_sent_to_other_hosts = false;
BackupCoordinationStageSync stage_sync;
mutable std::optional<BackupCoordinationReplicatedTables> replicated_tables TSA_GUARDED_BY(replicated_tables_mutex);
mutable std::optional<BackupCoordinationReplicatedAccess> replicated_access TSA_GUARDED_BY(replicated_access_mutex);

File diff suppressed because it is too large

View File

@ -1,7 +1,9 @@
#pragma once
#include <Backups/BackupConcurrencyCheck.h>
#include <Backups/WithRetries.h>
namespace DB
{
@ -9,12 +11,16 @@ namespace DB
class BackupCoordinationStageSync
{
public:
/// Empty string as the current host is used to mark the initiator of a BACKUP ON CLUSTER or RESTORE ON CLUSTER query.
static const constexpr std::string_view kInitiator;
BackupCoordinationStageSync(
bool is_restore_, /// true if this is a RESTORE ON CLUSTER command, false if this is a BACKUP ON CLUSTER command
const String & zookeeper_path_, /// path to the "stage" folder in ZooKeeper
const String & current_host_, /// the current host, or an empty string if it's the initiator of the BACKUP/RESTORE ON CLUSTER command
const Strings & all_hosts_, /// all the hosts (including the initiator and the current host) performing the BACKUP/RESTORE ON CLUSTER command
bool allow_concurrency_, /// whether it's allowed to have concurrent backups or restores.
BackupConcurrencyCounters & concurrency_counters_,
const WithRetries & with_retries_,
ThreadPoolCallbackRunnerUnsafe<void> schedule_,
QueryStatusPtr process_list_element_,
@ -22,30 +28,37 @@ public:
~BackupCoordinationStageSync();
/// Sets that the BACKUP or RESTORE query was sent to other hosts.
void setQueryIsSentToOtherHosts();
bool isQuerySentToOtherHosts() const;
/// Sets the stage of the current host and signal other hosts if there were other hosts waiting for that.
void setStage(const String & stage, const String & stage_result = {});
/// Waits until all the specified hosts come to the specified stage.
/// The function returns the results which specified hosts set when they came to the required stage.
/// If it doesn't happen before the timeout then the function will stop waiting and throw an exception.
Strings waitForHostsToReachStage(const String & stage_to_wait, const Strings & hosts, std::optional<std::chrono::milliseconds> timeout = {}) const;
/// Waits until all the other hosts finish their work.
/// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled.
void waitForOtherHostsToFinish() const;
/// Lets other host know that the current host has finished its work.
void finish(bool & other_hosts_also_finished);
/// Waits until specified hosts come to the specified stage.
/// The function returns the results which the specified hosts set when they came to the required stage.
Strings waitHostsReachStage(const Strings & hosts, const String & stage_to_wait) const;
/// Lets other hosts know that the current host has encountered an error.
bool trySetError(std::exception_ptr exception) noexcept;
/// The function returns true if it successfully created the error node or if the error node was found already exist.
bool setError(std::exception_ptr exception, bool throw_if_error);
bool isErrorSet() const;
/// Waits until all the other hosts finish their work (as a part of error-handling process).
/// Doesn't stops waiting if some host encounters an error or gets cancelled.
bool tryWaitForOtherHostsToFinishAfterError() const noexcept;
/// Waits until the hosts other than the current host finish their work. Must be called before finish().
/// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled.
bool waitOtherHostsFinish(bool throw_if_error) const;
bool otherHostsFinished() const;
/// Lets other host know that the current host has finished its work (as a part of error-handling process).
bool tryFinishAfterError(bool & other_hosts_also_finished) noexcept;
/// Lets other hosts know that the current host has finished its work.
bool finish(bool throw_if_error);
bool finished() const;
/// Returns true if all the hosts have finished.
bool allHostsFinished() const { return finished() && otherHostsFinished(); }
/// Returns a list of the hosts which haven't finished yet.
Strings getUnfinishedHosts() const;
Strings getUnfinishedOtherHosts() const;
/// Returns a printable name of a specific host. For empty host the function returns "initiator".
static String getHostDesc(const String & host);
@ -78,14 +91,17 @@ private:
/// Reads the current state from ZooKeeper without throwing exceptions.
void readCurrentState(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper);
/// Creates a stage node to let other hosts know we've reached the specified stage.
void createStageNode(const String & stage, const String & stage_result, Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper);
String getStageNodePath(const String & stage) const;
/// Lets other hosts know that the current host has encountered an error.
bool trySetError(const Exception & exception);
void setError(const Exception & exception);
bool setError(const Exception & exception, bool throw_if_error);
void createErrorNode(const Exception & exception, Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper);
/// Deserializes an error stored in the error node.
static std::pair<std::exception_ptr, String> parseErrorNode(const String & error_node_contents);
std::pair<std::exception_ptr, String> parseErrorNode(const String & error_node_contents) const;
/// Resets the `connected` flag for each host.
void resetConnectedFlag();
@ -102,19 +118,27 @@ private:
void cancelQueryIfDisconnectedTooLong();
/// Used by waitForHostsToReachStage() to check if everything is ready to return.
bool checkIfHostsReachStage(const Strings & hosts, const String & stage_to_wait, bool time_is_out, std::optional<std::chrono::milliseconds> timeout, Strings & results) const TSA_REQUIRES(mutex);
bool checkIfHostsReachStage(const Strings & hosts, const String & stage_to_wait, Strings & results) const TSA_REQUIRES(mutex);
/// Creates the 'finish' node.
bool tryFinishImpl();
bool tryFinishImpl(bool & other_hosts_also_finished, bool throw_if_error, WithRetries::Kind retries_kind);
void createFinishNodeAndRemoveAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper);
bool finishImpl(bool throw_if_error, WithRetries::Kind retries_kind);
void createFinishNodeAndRemoveAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper, bool throw_if_error);
/// Returns the version used by the initiator.
int getInitiatorVersion() const;
/// Waits until all the other hosts finish their work.
bool tryWaitForOtherHostsToFinishImpl(const String & reason, bool throw_if_error, std::optional<std::chrono::seconds> timeout) const;
bool checkIfOtherHostsFinish(const String & reason, bool throw_if_error, bool time_is_out, std::optional<std::chrono::milliseconds> timeout) const TSA_REQUIRES(mutex);
bool waitOtherHostsFinishImpl(const String & reason, std::optional<std::chrono::seconds> timeout, bool throw_if_error) const;
bool checkIfOtherHostsFinish(const String & reason, std::optional<std::chrono::milliseconds> timeout, bool time_is_out, bool & result, bool throw_if_error) const TSA_REQUIRES(mutex);
/// Returns true if all the hosts have finished.
bool allHostsFinishedNoLock() const TSA_REQUIRES(mutex);
bool finishedNoLock() const TSA_REQUIRES(mutex);
bool otherHostsFinishedNoLock() const TSA_REQUIRES(mutex);
/// Returns a list of the hosts which haven't finished yet.
Strings getUnfinishedHostsNoLock() const TSA_REQUIRES(mutex);
Strings getUnfinishedOtherHostsNoLock() const TSA_REQUIRES(mutex);
const bool is_restore;
const String operation_name;
@ -138,15 +162,16 @@ private:
/// Paths in ZooKeeper.
const std::filesystem::path zookeeper_path;
const String root_zookeeper_path;
const String operation_node_path;
const String operation_zookeeper_path;
const String operation_node_name;
const String stage_node_path;
const String start_node_path;
const String finish_node_path;
const String num_hosts_node_path;
const String error_node_path;
const String alive_node_path;
const String alive_tracker_node_path;
const String error_node_path;
std::optional<BackupConcurrencyCheck> concurrency_check;
std::shared_ptr<Poco::Event> zk_nodes_changed;
@ -176,25 +201,21 @@ private:
{
std::map<String /* host */, HostInfo> hosts; /// std::map because we need to compare states
std::optional<String> host_with_error;
bool cancelled = false;
bool operator ==(const State & other) const;
bool operator !=(const State & other) const;
void merge(const State & other);
};
State state TSA_GUARDED_BY(mutex);
mutable std::condition_variable state_changed;
std::future<void> watching_thread_future;
std::atomic<bool> should_stop_watching_thread = false;
bool should_stop_watching_thread TSA_GUARDED_BY(mutex) = false;
struct FinishResult
{
bool succeeded = false;
std::exception_ptr exception;
bool other_hosts_also_finished = false;
};
FinishResult finish_result TSA_GUARDED_BY(mutex);
bool query_is_sent_to_other_hosts TSA_GUARDED_BY(mutex) = false;
bool tried_to_finish TSA_GUARDED_BY(mutex) = false;
bool tried_to_set_error TSA_GUARDED_BY(mutex) = false;
mutable std::mutex mutex;
};

View File

@ -329,6 +329,7 @@ std::pair<OperationID, BackupStatus> BackupsWorker::start(const ASTPtr & backup_
struct BackupsWorker::BackupStarter
{
BackupsWorker & backups_worker;
LoggerPtr log;
std::shared_ptr<ASTBackupQuery> backup_query;
ContextPtr query_context; /// We have to keep `query_context` until the end of the operation because a pointer to it is stored inside the ThreadGroup we're using.
ContextMutablePtr backup_context;
@ -345,6 +346,7 @@ struct BackupsWorker::BackupStarter
BackupStarter(BackupsWorker & backups_worker_, const ASTPtr & query_, const ContextPtr & context_)
: backups_worker(backups_worker_)
, log(backups_worker.log)
, backup_query(std::static_pointer_cast<ASTBackupQuery>(query_->clone()))
, query_context(context_)
, backup_context(Context::createCopy(query_context))
@ -399,9 +401,20 @@ struct BackupsWorker::BackupStarter
chassert(!backup);
backup = backups_worker.openBackupForWriting(backup_info, backup_settings, backup_coordination, backup_context);
backups_worker.doBackup(
backup, backup_query, backup_id, backup_name_for_logging, backup_settings, backup_coordination, backup_context,
on_cluster, cluster);
backups_worker.doBackup(backup, backup_query, backup_id, backup_settings, backup_coordination, backup_context,
on_cluster, cluster);
backup_coordination->finish(/* throw_if_error = */ true);
backup.reset();
/// The backup coordination is not needed anymore.
if (!is_internal_backup)
backup_coordination->cleanup(/* throw_if_error = */ true);
backup_coordination.reset();
/// NOTE: setStatus is called after setNumFilesAndSize in order to have actual information in a backup log record
LOG_INFO(log, "{} {} was created successfully", (is_internal_backup ? "Internal backup" : "Backup"), backup_name_for_logging);
backups_worker.setStatus(backup_id, BackupStatus::BACKUP_CREATED);
}
void onException()
@ -416,16 +429,29 @@ struct BackupsWorker::BackupStarter
if (backup && !backup->setIsCorrupted())
should_remove_files_in_backup = false;
if (backup_coordination && backup_coordination->trySetError(std::current_exception()))
bool all_hosts_finished = false;
if (backup_coordination && backup_coordination->setError(std::current_exception(), /* throw_if_error = */ false))
{
bool other_hosts_finished = backup_coordination->tryWaitForOtherHostsToFinishAfterError();
bool other_hosts_finished = !is_internal_backup
&& (!backup_coordination->isBackupQuerySentToOtherHosts() || backup_coordination->waitOtherHostsFinish(/* throw_if_error = */ false));
if (should_remove_files_in_backup && other_hosts_finished)
backup->tryRemoveAllFiles();
backup_coordination->tryFinishAfterError();
all_hosts_finished = backup_coordination->finish(/* throw_if_error = */ false) && other_hosts_finished;
}
if (!all_hosts_finished)
should_remove_files_in_backup = false;
if (backup && should_remove_files_in_backup)
backup->tryRemoveAllFiles();
backup.reset();
if (backup_coordination && all_hosts_finished)
backup_coordination->cleanup(/* throw_if_error = */ false);
backup_coordination.reset();
backups_worker.setStatusSafe(backup_id, getBackupStatusFromCurrentException());
}
};
@ -497,7 +523,6 @@ void BackupsWorker::doBackup(
BackupMutablePtr backup,
const std::shared_ptr<ASTBackupQuery> & backup_query,
const OperationID & backup_id,
const String & backup_name_for_logging,
const BackupSettings & backup_settings,
std::shared_ptr<IBackupCoordination> backup_coordination,
ContextMutablePtr context,
@ -521,10 +546,10 @@ void BackupsWorker::doBackup(
backup_settings.copySettingsToQuery(*backup_query);
sendQueryToOtherHosts(*backup_query, cluster, backup_settings.shard_num, backup_settings.replica_num,
context, required_access, backup_coordination->getOnClusterInitializationKeeperRetriesInfo());
backup_coordination->setBackupQueryWasSentToOtherHosts();
backup_coordination->setBackupQueryIsSentToOtherHosts();
/// Wait until all the hosts have written their backup entries.
backup_coordination->waitForOtherHostsToFinish();
backup_coordination->waitOtherHostsFinish(/* throw_if_error = */ true);
}
else
{
@ -569,18 +594,8 @@ void BackupsWorker::doBackup(
compressed_size = backup->getCompressedSize();
}
/// Close the backup.
backup.reset();
/// The backup coordination is not needed anymore.
backup_coordination->finish();
/// NOTE: we need to update metadata again after backup->finalizeWriting(), because backup metadata is written there.
setNumFilesAndSize(backup_id, num_files, total_size, num_entries, uncompressed_size, compressed_size, 0, 0);
/// NOTE: setStatus is called after setNumFilesAndSize in order to have actual information in a backup log record
LOG_INFO(log, "{} {} was created successfully", (is_internal_backup ? "Internal backup" : "Backup"), backup_name_for_logging);
setStatus(backup_id, BackupStatus::BACKUP_CREATED);
}
@ -687,6 +702,7 @@ void BackupsWorker::writeBackupEntries(
struct BackupsWorker::RestoreStarter
{
BackupsWorker & backups_worker;
LoggerPtr log;
std::shared_ptr<ASTBackupQuery> restore_query;
ContextPtr query_context; /// We have to keep `query_context` until the end of the operation because a pointer to it is stored inside the ThreadGroup we're using.
ContextMutablePtr restore_context;
@ -702,6 +718,7 @@ struct BackupsWorker::RestoreStarter
RestoreStarter(BackupsWorker & backups_worker_, const ASTPtr & query_, const ContextPtr & context_)
: backups_worker(backups_worker_)
, log(backups_worker.log)
, restore_query(std::static_pointer_cast<ASTBackupQuery>(query_->clone()))
, query_context(context_)
, restore_context(Context::createCopy(query_context))
@ -753,16 +770,17 @@ struct BackupsWorker::RestoreStarter
}
restore_coordination = backups_worker.makeRestoreCoordination(on_cluster, restore_settings, restore_context);
backups_worker.doRestore(
restore_query,
restore_id,
backup_name_for_logging,
backup_info,
restore_settings,
restore_coordination,
restore_context,
on_cluster,
cluster);
backups_worker.doRestore(restore_query, restore_id, backup_info, restore_settings, restore_coordination, restore_context,
on_cluster, cluster);
/// The restore coordination is not needed anymore.
restore_coordination->finish(/* throw_if_error = */ true);
if (!is_internal_restore)
restore_coordination->cleanup(/* throw_if_error = */ true);
restore_coordination.reset();
LOG_INFO(log, "Restored from {} {} successfully", (is_internal_restore ? "internal backup" : "backup"), backup_name_for_logging);
backups_worker.setStatus(restore_id, BackupStatus::RESTORED);
}
void onException()
@ -770,12 +788,16 @@ struct BackupsWorker::RestoreStarter
/// Something bad happened, some data were not restored.
tryLogCurrentException(backups_worker.log, fmt::format("Failed to restore from {} {}", (is_internal_restore ? "internal backup" : "backup"), backup_name_for_logging));
if (restore_coordination && restore_coordination->trySetError(std::current_exception()))
if (restore_coordination && restore_coordination->setError(std::current_exception(), /* throw_if_error = */ false))
{
restore_coordination->tryWaitForOtherHostsToFinishAfterError();
restore_coordination->tryFinishAfterError();
bool other_hosts_finished = !is_internal_restore
&& (!restore_coordination->isRestoreQuerySentToOtherHosts() || restore_coordination->waitOtherHostsFinish(/* throw_if_error = */ false));
if (restore_coordination->finish(/* throw_if_error = */ false) && other_hosts_finished)
restore_coordination->cleanup(/* throw_if_error = */ false);
}
restore_coordination.reset();
backups_worker.setStatusSafe(restore_id, getRestoreStatusFromCurrentException());
}
};
@ -838,7 +860,6 @@ BackupPtr BackupsWorker::openBackupForReading(const BackupInfo & backup_info, co
void BackupsWorker::doRestore(
const std::shared_ptr<ASTBackupQuery> & restore_query,
const OperationID & restore_id,
const String & backup_name_for_logging,
const BackupInfo & backup_info,
RestoreSettings restore_settings,
std::shared_ptr<IRestoreCoordination> restore_coordination,
@ -882,10 +903,10 @@ void BackupsWorker::doRestore(
restore_settings.copySettingsToQuery(*restore_query);
sendQueryToOtherHosts(*restore_query, cluster, restore_settings.shard_num, restore_settings.replica_num,
context, {}, restore_coordination->getOnClusterInitializationKeeperRetriesInfo());
restore_coordination->setRestoreQueryWasSentToOtherHosts();
restore_coordination->setRestoreQueryIsSentToOtherHosts();
/// Wait until all the hosts have done with their restoring work.
restore_coordination->waitForOtherHostsToFinish();
restore_coordination->waitOtherHostsFinish(/* throw_if_error = */ true);
}
else
{
@ -905,12 +926,6 @@ void BackupsWorker::doRestore(
backup, context, getThreadPool(ThreadPoolId::RESTORE), after_task_callback};
restorer.run(RestorerFromBackup::RESTORE);
}
/// The restore coordination is not needed anymore.
restore_coordination->finish();
LOG_INFO(log, "Restored from {} {} successfully", (is_internal_restore ? "internal backup" : "backup"), backup_name_for_logging);
setStatus(restore_id, BackupStatus::RESTORED);
}
@ -943,7 +958,7 @@ BackupsWorker::makeBackupCoordination(bool on_cluster, const BackupSettings & ba
if (!on_cluster)
{
return std::make_shared<BackupCoordinationLocal>(
*backup_settings.backup_uuid, !backup_settings.deduplicate_files, allow_concurrent_backups, *concurrency_counters);
!backup_settings.deduplicate_files, allow_concurrent_backups, *concurrency_counters);
}
bool is_internal_backup = backup_settings.internal;
@ -981,8 +996,7 @@ BackupsWorker::makeRestoreCoordination(bool on_cluster, const RestoreSettings &
{
if (!on_cluster)
{
return std::make_shared<RestoreCoordinationLocal>(
*restore_settings.restore_uuid, allow_concurrent_restores, *concurrency_counters);
return std::make_shared<RestoreCoordinationLocal>(allow_concurrent_restores, *concurrency_counters);
}
bool is_internal_restore = restore_settings.internal;

View File

@ -81,7 +81,6 @@ private:
BackupMutablePtr backup,
const std::shared_ptr<ASTBackupQuery> & backup_query,
const BackupOperationID & backup_id,
const String & backup_name_for_logging,
const BackupSettings & backup_settings,
std::shared_ptr<IBackupCoordination> backup_coordination,
ContextMutablePtr context,
@ -102,7 +101,6 @@ private:
void doRestore(
const std::shared_ptr<ASTBackupQuery> & restore_query,
const BackupOperationID & restore_id,
const String & backup_name_for_logging,
const BackupInfo & backup_info,
RestoreSettings restore_settings,
std::shared_ptr<IRestoreCoordination> restore_coordination,

View File

@ -20,29 +20,27 @@ class IBackupCoordination
public:
virtual ~IBackupCoordination() = default;
/// Sets that the backup query was sent to other hosts.
/// Function waitOtherHostsFinish() will check that to find out if it should really wait or not.
virtual void setBackupQueryIsSentToOtherHosts() = 0;
virtual bool isBackupQuerySentToOtherHosts() const = 0;
/// Sets the current stage and waits for other hosts to come to this stage too.
virtual Strings setStage(const String & new_stage, const String & message, bool sync) = 0;
/// Sets that the backup query was sent to other hosts.
/// Function waitForOtherHostsToFinish() will check that to find out if it should really wait or not.
virtual void setBackupQueryWasSentToOtherHosts() = 0;
/// Lets other hosts know that the current host has encountered an error.
virtual bool trySetError(std::exception_ptr exception) = 0;
/// Lets other hosts know that the current host has finished its work.
virtual void finish() = 0;
/// Lets other hosts know that the current host has finished its work (as a part of error-handling process).
virtual bool tryFinishAfterError() noexcept = 0;
/// Returns true if the information is successfully passed so other hosts can read it.
virtual bool setError(std::exception_ptr exception, bool throw_if_error) = 0;
/// Waits until all the other hosts finish their work.
/// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled.
virtual void waitForOtherHostsToFinish() = 0;
virtual bool waitOtherHostsFinish(bool throw_if_error) const = 0;
/// Waits until all the other hosts finish their work (as a part of error-handling process).
/// Doesn't stop waiting if some host encounters an error or gets cancelled.
virtual bool tryWaitForOtherHostsToFinishAfterError() noexcept = 0;
/// Lets other hosts know that the current host has finished its work.
virtual bool finish(bool throw_if_error) = 0;
/// Removes temporary nodes in ZooKeeper.
virtual bool cleanup(bool throw_if_error) = 0;
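The success and error paths now go through the same methods, steered by `throw_if_error`. A hedged sketch of the two call sites, mirroring the BackupStarter code above; `coordination` is an assumed `std::shared_ptr<IBackupCoordination>`:

/// Success path: any coordination failure should surface as an exception.
coordination->finish(/* throw_if_error = */ true);
coordination->cleanup(/* throw_if_error = */ true);

/// Error path: already inside a catch block, so never throw again.
if (coordination->setError(std::current_exception(), /* throw_if_error = */ false))
{
    coordination->waitOtherHostsFinish(/* throw_if_error = */ false);
    if (coordination->finish(/* throw_if_error = */ false))
        coordination->cleanup(/* throw_if_error = */ false);
}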
struct PartNameAndChecksum
{

View File

@ -18,29 +18,27 @@ class IRestoreCoordination
public:
virtual ~IRestoreCoordination() = default;
/// Sets that the restore query was sent to other hosts.
/// Function waitOtherHostsFinish() will check that to find out if it should really wait or not.
virtual void setRestoreQueryIsSentToOtherHosts() = 0;
virtual bool isRestoreQuerySentToOtherHosts() const = 0;
/// Sets the current stage and waits for other hosts to come to this stage too.
virtual Strings setStage(const String & new_stage, const String & message, bool sync) = 0;
/// Sets that the restore query was sent to other hosts.
/// Function waitForOtherHostsToFinish() will check that to find out if it should really wait or not.
virtual void setRestoreQueryWasSentToOtherHosts() = 0;
/// Lets other hosts know that the current host has encountered an error.
virtual bool trySetError(std::exception_ptr exception) = 0;
/// Lets other hosts know that the current host has finished its work.
virtual void finish() = 0;
/// Lets other hosts know that the current host has finished its work (as a part of error-handling process).
virtual bool tryFinishAfterError() noexcept = 0;
/// Returns true if the information is successfully passed so other hosts can read it.
virtual bool setError(std::exception_ptr exception, bool throw_if_error) = 0;
/// Waits until all the other hosts finish their work.
/// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled.
virtual void waitForOtherHostsToFinish() = 0;
virtual bool waitOtherHostsFinish(bool throw_if_error) const = 0;
/// Waits until all the other hosts finish their work (as a part of error-handling process).
/// Doesn't stop waiting if some host encounters an error or gets cancelled.
virtual bool tryWaitForOtherHostsToFinishAfterError() noexcept = 0;
/// Lets other hosts know that the current host has finished its work.
virtual bool finish(bool throw_if_error) = 0;
/// Removes temporary nodes in ZooKeeper.
virtual bool cleanup(bool throw_if_error) = 0;
/// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
virtual bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) = 0;

View File

@ -10,9 +10,9 @@ namespace DB
{
RestoreCoordinationLocal::RestoreCoordinationLocal(
const UUID & restore_uuid, bool allow_concurrent_restore_, BackupConcurrencyCounters & concurrency_counters_)
bool allow_concurrent_restore_, BackupConcurrencyCounters & concurrency_counters_)
: log(getLogger("RestoreCoordinationLocal"))
, concurrency_check(restore_uuid, /* is_restore = */ true, /* on_cluster = */ false, allow_concurrent_restore_, concurrency_counters_)
, concurrency_check(/* is_restore = */ true, /* on_cluster = */ false, /* zookeeper_path = */ "", allow_concurrent_restore_, concurrency_counters_)
{
}

View File

@ -17,16 +17,16 @@ class ASTCreateQuery;
class RestoreCoordinationLocal : public IRestoreCoordination
{
public:
RestoreCoordinationLocal(const UUID & restore_uuid_, bool allow_concurrent_restore_, BackupConcurrencyCounters & concurrency_counters_);
RestoreCoordinationLocal(bool allow_concurrent_restore_, BackupConcurrencyCounters & concurrency_counters_);
~RestoreCoordinationLocal() override;
void setRestoreQueryIsSentToOtherHosts() override {}
bool isRestoreQuerySentToOtherHosts() const override { return false; }
Strings setStage(const String &, const String &, bool) override { return {}; }
void setRestoreQueryWasSentToOtherHosts() override {}
bool trySetError(std::exception_ptr) override { return true; }
void finish() override {}
bool tryFinishAfterError() noexcept override { return true; }
void waitForOtherHostsToFinish() override {}
bool tryWaitForOtherHostsToFinishAfterError() noexcept override { return true; }
bool setError(std::exception_ptr, bool) override { return true; }
bool waitOtherHostsFinish(bool) const override { return true; }
bool finish(bool) override { return true; }
bool cleanup(bool) override { return true; }
/// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) override;

View File

@ -35,17 +35,21 @@ RestoreCoordinationOnCluster::RestoreCoordinationOnCluster(
, current_host_index(BackupCoordinationOnCluster::findCurrentHostIndex(current_host, all_hosts))
, log(getLogger("RestoreCoordinationOnCluster"))
, with_retries(log, get_zookeeper_, keeper_settings, process_list_element_, [root_zookeeper_path_](Coordination::ZooKeeperWithFaultInjection::Ptr zk) { zk->sync(root_zookeeper_path_); })
, concurrency_check(restore_uuid_, /* is_restore = */ true, /* on_cluster = */ true, allow_concurrent_restore_, concurrency_counters_)
, stage_sync(/* is_restore = */ true, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_restore_, with_retries, schedule_, process_list_element_, log)
, cleaner(zookeeper_path, with_retries, log)
, cleaner(/* is_restore = */ true, zookeeper_path, with_retries, log)
, stage_sync(/* is_restore = */ true, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_restore_, concurrency_counters_, with_retries, schedule_, process_list_element_, log)
{
createRootNodes();
try
{
createRootNodes();
}
catch (...)
{
stage_sync.setError(std::current_exception(), /* throw_if_error = */ false);
throw;
}
}
RestoreCoordinationOnCluster::~RestoreCoordinationOnCluster()
{
tryFinishImpl();
}
RestoreCoordinationOnCluster::~RestoreCoordinationOnCluster() = default;
void RestoreCoordinationOnCluster::createRootNodes()
{
@ -66,69 +70,52 @@ void RestoreCoordinationOnCluster::createRootNodes()
});
}
void RestoreCoordinationOnCluster::setRestoreQueryIsSentToOtherHosts()
{
stage_sync.setQueryIsSentToOtherHosts();
}
bool RestoreCoordinationOnCluster::isRestoreQuerySentToOtherHosts() const
{
return stage_sync.isQuerySentToOtherHosts();
}
Strings RestoreCoordinationOnCluster::setStage(const String & new_stage, const String & message, bool sync)
{
stage_sync.setStage(new_stage, message);
if (!sync)
return {};
return stage_sync.waitForHostsToReachStage(new_stage, all_hosts_without_initiator);
if (sync)
return stage_sync.waitHostsReachStage(all_hosts_without_initiator, new_stage);
return {};
}
void RestoreCoordinationOnCluster::setRestoreQueryWasSentToOtherHosts()
bool RestoreCoordinationOnCluster::setError(std::exception_ptr exception, bool throw_if_error)
{
restore_query_was_sent_to_other_hosts = true;
return stage_sync.setError(exception, throw_if_error);
}
bool RestoreCoordinationOnCluster::trySetError(std::exception_ptr exception)
bool RestoreCoordinationOnCluster::waitOtherHostsFinish(bool throw_if_error) const
{
return stage_sync.trySetError(exception);
return stage_sync.waitOtherHostsFinish(throw_if_error);
}
void RestoreCoordinationOnCluster::finish()
bool RestoreCoordinationOnCluster::finish(bool throw_if_error)
{
bool other_hosts_also_finished = false;
stage_sync.finish(other_hosts_also_finished);
if ((current_host == kInitiator) && (other_hosts_also_finished || !restore_query_was_sent_to_other_hosts))
cleaner.cleanup();
return stage_sync.finish(throw_if_error);
}
bool RestoreCoordinationOnCluster::tryFinishAfterError() noexcept
bool RestoreCoordinationOnCluster::cleanup(bool throw_if_error)
{
return tryFinishImpl();
}
bool RestoreCoordinationOnCluster::tryFinishImpl() noexcept
{
bool other_hosts_also_finished = false;
if (!stage_sync.tryFinishAfterError(other_hosts_also_finished))
return false;
if ((current_host == kInitiator) && (other_hosts_also_finished || !restore_query_was_sent_to_other_hosts))
/// All the hosts must finish before we remove the coordination nodes.
bool expect_other_hosts_finished = stage_sync.isQuerySentToOtherHosts() || !stage_sync.isErrorSet();
bool all_hosts_finished = stage_sync.finished() && (stage_sync.otherHostsFinished() || !expect_other_hosts_finished);
if (!all_hosts_finished)
{
if (!cleaner.tryCleanupAfterError())
return false;
}
return true;
}
void RestoreCoordinationOnCluster::waitForOtherHostsToFinish()
{
if ((current_host != kInitiator) || !restore_query_was_sent_to_other_hosts)
return;
stage_sync.waitForOtherHostsToFinish();
}
bool RestoreCoordinationOnCluster::tryWaitForOtherHostsToFinishAfterError() noexcept
{
if (current_host != kInitiator)
auto unfinished_hosts = expect_other_hosts_finished ? stage_sync.getUnfinishedHosts() : Strings{current_host};
LOG_INFO(log, "Skipping removing nodes from ZooKeeper because hosts {} didn't finish",
BackupCoordinationStageSync::getHostsDesc(unfinished_hosts));
return false;
if (!restore_query_was_sent_to_other_hosts)
return true;
return stage_sync.tryWaitForOtherHostsToFinishAfterError();
}
return cleaner.cleanup(throw_if_error);
}
ZooKeeperRetriesInfo RestoreCoordinationOnCluster::getOnClusterInitializationKeeperRetriesInfo() const

View File

@ -1,7 +1,6 @@
#pragma once
#include <Backups/IRestoreCoordination.h>
#include <Backups/BackupConcurrencyCheck.h>
#include <Backups/BackupCoordinationCleaner.h>
#include <Backups/BackupCoordinationStageSync.h>
#include <Backups/WithRetries.h>
@ -15,7 +14,7 @@ class RestoreCoordinationOnCluster : public IRestoreCoordination
{
public:
/// Empty string as the current host is used to mark the initiator of a RESTORE ON CLUSTER query.
static const constexpr std::string_view kInitiator;
static const constexpr std::string_view kInitiator = BackupCoordinationStageSync::kInitiator;
RestoreCoordinationOnCluster(
const UUID & restore_uuid_,
@ -31,13 +30,13 @@ public:
~RestoreCoordinationOnCluster() override;
void setRestoreQueryIsSentToOtherHosts() override;
bool isRestoreQuerySentToOtherHosts() const override;
Strings setStage(const String & new_stage, const String & message, bool sync) override;
void setRestoreQueryWasSentToOtherHosts() override;
bool trySetError(std::exception_ptr exception) override;
void finish() override;
bool tryFinishAfterError() noexcept override;
void waitForOtherHostsToFinish() override;
bool tryWaitForOtherHostsToFinishAfterError() noexcept override;
bool setError(std::exception_ptr exception, bool throw_if_error) override;
bool waitOtherHostsFinish(bool throw_if_error) const override;
bool finish(bool throw_if_error) override;
bool cleanup(bool throw_if_error) override;
/// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) override;
@ -78,11 +77,10 @@ private:
const size_t current_host_index;
LoggerPtr const log;
/// The order is important: `stage_sync` must be initialized after `with_retries` and `cleaner`.
const WithRetries with_retries;
BackupConcurrencyCheck concurrency_check;
BackupCoordinationStageSync stage_sync;
BackupCoordinationCleaner cleaner;
std::atomic<bool> restore_query_was_sent_to_other_hosts = false;
BackupCoordinationStageSync stage_sync;
};
}

View File

@ -1650,6 +1650,11 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
if (!parsed_insert_query)
return;
/// If it's clickhouse-local, and the input data reading is already baked into the query pipeline,
/// don't read the data again here. This happens in some cases (e.g. input() table function) but not others (e.g. INFILE).
if (!connection->isSendDataNeeded())
return;
bool have_data_in_stdin = !is_interactive && !stdin_is_a_tty && isStdinNotEmptyAndValid(std_in);
if (need_render_progress)
@ -2674,7 +2679,10 @@ void ClientBase::runInteractive()
#if USE_REPLXX
replxx::Replxx::highlighter_callback_t highlight_callback{};
if (getClientConfiguration().getBool("highlight", true))
highlight_callback = highlight;
highlight_callback = [this](const String & query, std::vector<replxx::Replxx::Color> & colors)
{
highlight(query, colors, *client_context);
};
ReplxxLineReader lr(
*suggest,

View File

@ -5,6 +5,8 @@
#include <Parsers/ParserQuery.h>
#include <Parsers/parseQuery.h>
#include <Common/UTF8Helpers.h>
#include <Core/Settings.h>
#include <Interpreters/Context.h>
#include <iostream>
@ -12,6 +14,11 @@
namespace DB
{
namespace Setting
{
extern const SettingsBool implicit_select;
}
/// Should we celebrate a bit?
bool isNewYearMode()
{
@ -95,7 +102,7 @@ bool isChineseNewYearMode(const String & local_tz)
}
#if USE_REPLXX
void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors)
void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors, const Context & context)
{
using namespace replxx;
@ -135,13 +142,27 @@ void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors
/// Currently we highlight only the first query in the multi-query mode.
ParserQuery parser(end);
ParserQuery parser(end, false, context.getSettingsRef()[Setting::implicit_select]);
ASTPtr ast;
bool parse_res = false;
try
{
parse_res = parser.parse(token_iterator, ast, expected);
while (!token_iterator->isEnd())
{
parse_res = parser.parse(token_iterator, ast, expected);
if (!parse_res)
break;
if (!token_iterator->isEnd() && token_iterator->type != TokenType::Semicolon)
{
parse_res = false;
break;
}
while (token_iterator->type == TokenType::Semicolon)
++token_iterator;
}
}
catch (...)
{
@ -175,7 +196,7 @@ void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors
/// Highlight the last error in red. If the parser failed or the lexer found an invalid token,
/// or if it didn't parse all the data (except the data for an INSERT query, which is legitimately left unparsed)
if ((!parse_res || last_token.isError() || (!token_iterator->isEnd() && token_iterator->type != TokenType::Semicolon))
if ((!parse_res || last_token.isError())
&& !(insert_data && expected.max_parsed_pos >= insert_data)
&& expected.max_parsed_pos >= prev)
{

View File

@ -11,13 +11,15 @@
namespace DB
{
class Context;
/// Should we celebrate a bit?
bool isNewYearMode();
bool isChineseNewYearMode(const String & local_tz);
#if USE_REPLXX
void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors);
void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors, const Context & context);
#endif
}

View File

@ -1,5 +1,7 @@
#include <Client/ClientApplicationBase.h>
#include <filesystem>
namespace DB
{
@ -108,6 +110,7 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de
{
/// Two special cases for better usability:
/// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1"
/// - if the option is a filesystem file, then it's likely a queries file (clickhouse repro.sql)
/// These are relevant for interactive usage - user-friendly, but questionable in general.
/// In case of ambiguity or for scripts, prefer using proper options.
@ -115,8 +118,11 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de
po::variable_value value(boost::any(op.value), false);
const char * option;
std::error_code ec;
if (token.contains(' '))
option = "query";
else if (std::filesystem::is_regular_file(std::filesystem::path{token}, ec))
option = "queries-file";
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token);

View File

@ -109,6 +109,10 @@ public:
/// Send block of data; if name is specified, server will write it to external (temporary) table of that name.
virtual void sendData(const Block & block, const String & name, bool scalar) = 0;
/// Whether the client needs to read and send the data for the INSERT.
/// False if the server will read the data through other means (in particular if clickhouse-local added an input reading step directly into the query pipeline).
virtual bool isSendDataNeeded() const { return true; }
/// Send all contents of external (temporary) tables.
virtual void sendExternalTablesData(ExternalTablesData & data) = 0;

View File

@ -328,6 +328,11 @@ void LocalConnection::sendData(const Block & block, const String &, bool)
sendProfileEvents();
}
bool LocalConnection::isSendDataNeeded() const
{
return !state || state->input_pipeline == nullptr;
}
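A hedged note on when the new hook fires, following the comment in ClientBase::sendData above; the SQL statements are illustrative:

/// Hedged illustration: in clickhouse-local,
///     INSERT INTO t SELECT * FROM input('x UInt64')   -- reading is baked into the query
///                                                     -- pipeline, so isSendDataNeeded()
///                                                     -- returns false here
///     INSERT INTO t FROM INFILE 'data.native'         -- no input pipeline; the client
///                                                     -- still reads and sends the data
/// The check is simply whether state->input_pipeline was populated for the query.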
void LocalConnection::sendCancel()
{
state->is_cancelled = true;

View File

@ -120,6 +120,8 @@ public:
void sendData(const Block & block, const String & name/* = "" */, bool scalar/* = false */) override;
bool isSendDataNeeded() const override;
void sendExternalTablesData(ExternalTablesData &) override;
void sendMergeTreeReadTaskResponse(const ParallelReadResponse & response) override;

View File

@ -72,6 +72,26 @@ ColumnPtr ColumnFunction::cut(size_t start, size_t length) const
return ColumnFunction::create(length, function, capture, is_short_circuit_argument, is_function_compiled);
}
Field ColumnFunction::operator[](size_t n) const
{
Field res;
get(n, res);
return res;
}
void ColumnFunction::get(size_t n, Field & res) const
{
const size_t tuple_size = captured_columns.size();
res = Tuple();
Tuple & res_tuple = res.safeGet<Tuple &>();
res_tuple.reserve(tuple_size);
for (size_t i = 0; i < tuple_size; ++i)
res_tuple.push_back((*captured_columns[i].column)[n]);
}
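A hedged illustration of the new behavior; `column_function`, `x`, `y`, and `n` are assumed names:

/// Hedged illustration: with two captured columns `x` and `y`, element n of a
/// ColumnFunction now reads back as a Tuple instead of throwing NOT_IMPLEMENTED.
DB::Field f = (*column_function)[n];
const DB::Tuple & t = f.safeGet<DB::Tuple>();
chassert(t[0] == (*x)[n] && t[1] == (*y)[n]);  /// one element per captured column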
#if !defined(DEBUG_OR_SANITIZER_BUILD)
void ColumnFunction::insertFrom(const IColumn & src, size_t n)
#else

View File

@ -60,15 +60,9 @@ public:
void appendArguments(const ColumnsWithTypeAndName & columns);
ColumnWithTypeAndName reduce() const;
Field operator[](size_t) const override
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot get value from {}", getName());
}
Field operator[](size_t n) const override;
void get(size_t, Field &) const override
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot get value from {}", getName());
}
void get(size_t n, Field & res) const override;
StringRef getDataAt(size_t) const override
{

View File

@ -331,7 +331,7 @@ AsynchronousMetrics::~AsynchronousMetrics()
AsynchronousMetricValues AsynchronousMetrics::getValues() const
{
std::lock_guard lock(data_mutex);
SharedLockGuard lock(values_mutex);
return values;
}
@ -1807,7 +1807,10 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
first_run = false;
// Finally, update the current metrics.
values = new_values;
{
std::lock_guard values_lock(values_mutex);
values.swap(new_values);
}
}
}
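The split into `data_mutex` (serializes updates) and `values_mutex` (guards the published snapshot) means readers only contend on the short swap. A hedged, self-contained sketch of the pattern; the struct and member names are illustrative:

#include <map>
#include <mutex>
#include <string>
#include <Common/SharedMutex.h>
#include <Common/SharedLockGuard.h>

/// Hedged sketch of the locking split used above: updates are prepared without
/// holding values_mutex and published with a short exclusive swap, while
/// readers only take a shared lock for the copy.
struct MetricsSnapshot
{
    mutable DB::SharedMutex values_mutex;
    std::map<std::string, double> values;  /// stands in for AsynchronousMetricValues

    std::map<std::string, double> get() const
    {
        DB::SharedLockGuard lock(values_mutex);  /// shared: readers don't block each other
        return values;
    }

    void publish(std::map<std::string, double> new_values)
    {
        std::lock_guard lock(values_mutex);  /// exclusive: only for the cheap swap
        values.swap(new_values);
    }
};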

View File

@ -4,6 +4,7 @@
#include <Common/MemoryStatisticsOS.h>
#include <Common/ThreadPool.h>
#include <Common/Stopwatch.h>
#include <Common/SharedMutex.h>
#include <IO/ReadBufferFromFile.h>
#include <condition_variable>
@ -100,6 +101,7 @@ private:
std::condition_variable wait_cond;
bool quit TSA_GUARDED_BY(thread_mutex) = false;
/// Protects all raw data and serializes multiple updates.
mutable std::mutex data_mutex;
/// Some values are incremental and we have to calculate the difference.
@ -107,7 +109,15 @@ private:
bool first_run TSA_GUARDED_BY(data_mutex) = true;
TimePoint previous_update_time TSA_GUARDED_BY(data_mutex);
AsynchronousMetricValues values TSA_GUARDED_BY(data_mutex);
/// Protects saved values.
mutable SharedMutex values_mutex;
/// `values` stores the result of the last update, prepared for reading.
#ifdef OS_LINUX
AsynchronousMetricValues values TSA_GUARDED_BY(values_mutex);
#else
/// When SharedMutex == std::shared_mutex it may not be annotated with the 'capability'.
AsynchronousMetricValues values;
#endif
#if defined(OS_LINUX) || defined(OS_FREEBSD)
MemoryStatisticsOS memory_stat TSA_GUARDED_BY(data_mutex);

View File

@ -41,6 +41,10 @@
M(PostgreSQLConnection, "Number of client connections using PostgreSQL protocol") \
M(OpenFileForRead, "Number of files open for reading") \
M(OpenFileForWrite, "Number of files open for writing") \
M(Compressing, "Number of compress operations using internal compression codecs") \
M(Decompressing, "Number of decompress operations using internal compression codecs") \
M(ParallelCompressedWriteBufferThreads, "Number of threads in all instances of ParallelCompressedWriteBuffer - these threads are doing parallel compression and writing") \
M(ParallelCompressedWriteBufferWait, "Number of threads in all instances of ParallelCompressedWriteBuffer that are currently waiting for buffer to become available for writing") \
M(TotalTemporaryFiles, "Number of temporary files created") \
M(TemporaryFilesForSort, "Number of temporary files created for external sorting") \
M(TemporaryFilesForAggregation, "Number of temporary files created for external aggregation") \
@ -99,6 +103,9 @@
M(IOThreads, "Number of threads in the IO thread pool.") \
M(IOThreadsActive, "Number of threads in the IO thread pool running a task.") \
M(IOThreadsScheduled, "Number of queued or active jobs in the IO thread pool.") \
M(CompressionThread, "Number of threads in compression thread pools.") \
M(CompressionThreadActive, "Number of threads in compression thread pools running a task.") \
M(CompressionThreadScheduled, "Number of queued or active jobs in compression thread pools.") \
M(ThreadPoolRemoteFSReaderThreads, "Number of threads in the thread pool for remote_filesystem_read_method=threadpool.") \
M(ThreadPoolRemoteFSReaderThreadsActive, "Number of threads in the thread pool for remote_filesystem_read_method=threadpool running a task.") \
M(ThreadPoolRemoteFSReaderThreadsScheduled, "Number of queued or active jobs in the thread pool for remote_filesystem_read_method=threadpool.") \
@ -248,6 +255,7 @@
M(PartsActive, "Active data part, used by current and upcoming SELECTs.") \
M(AttachedDatabase, "Active databases.") \
M(AttachedTable, "Active tables.") \
M(AttachedReplicatedTable, "Active replicated tables.") \
M(AttachedView, "Active views.") \
M(AttachedDictionary, "Active dictionaries.") \
M(PartsOutdated, "Not active data part, but could be used by only current SELECTs, could be deleted after SELECTs finishes.") \

View File

@ -1,23 +1,47 @@
#pragma once
#include <Common/OvercommitTracker.h>
#include <base/defines.h>
#include <Common/Exception.h>
#include <Common/OvercommitTracker.h>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
};
/** LockGuard provides an RAII-style locking mechanism for a mutex.
** It's intended to be used like std::unique_ptr but with TSA annotations
** It's intended to be used like std::unique_lock but with TSA annotations
*/
template <typename Mutex>
class TSA_SCOPED_LOCKABLE LockGuard
{
public:
explicit LockGuard(Mutex & mutex_) TSA_ACQUIRE(mutex_) : mutex(mutex_) { mutex.lock(); }
~LockGuard() TSA_RELEASE() { mutex.unlock(); }
explicit LockGuard(Mutex & mutex_) TSA_ACQUIRE(mutex_) : mutex(mutex_) { lock(); }
~LockGuard() TSA_RELEASE() { if (locked) unlock(); }
void lock() TSA_ACQUIRE()
{
/// Don't allow recursive_mutex for now.
if (locked)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't lock twice the same mutex");
mutex.lock();
locked = true;
}
void unlock() TSA_RELEASE()
{
if (!locked)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't unlock the mutex without locking it first");
mutex.unlock();
locked = false;
}
private:
Mutex & mutex;
bool locked = false;
};
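A hedged usage sketch of the extended guard; `mutex` and `counter` are assumed names:

std::mutex mutex;
int counter TSA_GUARDED_BY(mutex) = 0;

/// Hedged sketch: unlike the previous version, the guard can now be released
/// before the end of the scope, and Thread Safety Analysis still tracks it.
void incrementAndRelease()
{
    DB::LockGuard guard(mutex);  /// acquires `mutex` (TSA_ACQUIRE)
    ++counter;                   /// ok: TSA knows the capability is held
    guard.unlock();              /// early release (TSA_RELEASE); a second unlock() throws LOGICAL_ERROR
    /// the destructor sees `locked == false` and does nothing
}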
template <template<typename> typename TLockGuard, typename Mutex>

View File

@ -122,7 +122,7 @@ public:
void scheduleOrThrowOnError(Job job, Priority priority = {});
/// Similar to scheduleOrThrowOnError(...). Wait for specified amount of time and schedule a job or return false.
bool trySchedule(Job job, Priority priority = {}, uint64_t wait_microseconds = 0) noexcept;
[[nodiscard]] bool trySchedule(Job job, Priority priority = {}, uint64_t wait_microseconds = 0) noexcept;
/// Similar to scheduleOrThrowOnError(...). Wait for specified amount of time and schedule a job or throw an exception.
void scheduleOrThrow(Job job, Priority priority = {}, uint64_t wait_microseconds = 0, bool propagate_opentelemetry_tracing_context = true);
@ -142,7 +142,7 @@ public:
/// Returns true if the pool already terminated
/// (and any further scheduling will produce CANNOT_SCHEDULE_TASK exception)
bool finished() const;
[[nodiscard]] bool finished() const;
void setMaxThreads(size_t value);
void setMaxFreeThreads(size_t value);
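With [[nodiscard]], silently dropping the result of trySchedule() now draws a compiler warning. A hedged sketch of the intended call pattern; `pool` and `job` are assumed to exist:

/// Hedged sketch: the return value must now be checked (or explicitly discarded).
if (!pool.trySchedule(job, Priority{}, /* wait_microseconds = */ 100000))
{
    /// One possible fallback: run the job synchronously in the current thread.
    job();
}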

View File

@ -2,7 +2,6 @@
#include <cstring>
#include <base/types.h>
#include <base/unaligned.h>
#include <base/defines.h>
#include <IO/WriteHelpers.h>

View File

@ -5,11 +5,18 @@
#include <Parsers/ASTFunction.h>
#include <base/unaligned.h>
#include <Common/Exception.h>
#include <Common/CurrentMetrics.h>
#include <Parsers/queryToString.h>
#include <Parsers/ASTIdentifier.h>
#include <Compression/CompressionCodecMultiple.h>
namespace CurrentMetrics
{
extern const Metric Compressing;
extern const Metric Decompressing;
}
namespace DB
{
@ -80,6 +87,8 @@ UInt32 ICompressionCodec::compress(const char * source, UInt32 source_size, char
{
assert(source != nullptr && dest != nullptr);
CurrentMetrics::Increment metric_increment(CurrentMetrics::Compressing);
dest[0] = getMethodByte();
UInt8 header_size = getHeaderSize();
/// Write data from header_size
@ -93,6 +102,8 @@ UInt32 ICompressionCodec::decompress(const char * source, UInt32 source_size, ch
{
assert(source != nullptr && dest != nullptr);
CurrentMetrics::Increment metric_increment(CurrentMetrics::Decompressing);
UInt8 header_size = getHeaderSize();
if (source_size < header_size)
throw Exception(decompression_error_code,

View File

@ -0,0 +1,166 @@
#include <city.h>
#include <base/types.h>
#include <base/defines.h>
#include <IO/WriteHelpers.h>
#include <Common/setThreadName.h>
#include <Common/scope_guard_safe.h>
#include <Common/CurrentThread.h>
#include <Common/CurrentMetrics.h>
#include <Compression/ParallelCompressedWriteBuffer.h>
namespace CurrentMetrics
{
extern const Metric ParallelCompressedWriteBufferThreads;
extern const Metric ParallelCompressedWriteBufferWait;
}
namespace DB
{
ParallelCompressedWriteBuffer::ParallelCompressedWriteBuffer(
WriteBuffer & out_,
CompressionCodecPtr codec_,
size_t buf_size_,
size_t num_threads_,
ThreadPool & pool_)
: WriteBuffer(nullptr, 0), out(out_), codec(codec_), buf_size(buf_size_), num_threads(num_threads_), pool(pool_)
{
buffers.emplace_back(buf_size);
current_buffer = buffers.begin();
BufferBase::set(current_buffer->uncompressed.data(), buf_size, 0);
}
void ParallelCompressedWriteBuffer::nextImpl()
{
if (!offset())
return;
std::unique_lock lock(mutex);
/// The buffer will be compressed and processed in the thread.
current_buffer->busy = true;
current_buffer->sequence_num = current_sequence_num;
++current_sequence_num;
current_buffer->uncompressed_size = offset();
pool.scheduleOrThrowOnError([this, my_current_buffer = current_buffer, thread_group = CurrentThread::getGroup()]
{
SCOPE_EXIT_SAFE(
if (thread_group)
CurrentThread::detachFromGroupIfNotDetached();
);
if (thread_group)
CurrentThread::attachToGroupIfDetached(thread_group);
setThreadName("ParallelCompres");
compress(my_current_buffer);
});
BufferPair * previous_buffer = &*current_buffer;
++current_buffer;
if (current_buffer == buffers.end())
{
if (buffers.size() < num_threads)
{
/// If we didn't use all num_threads buffers yet, create a new one.
current_buffer = buffers.emplace(current_buffer, buf_size);
}
else
{
/// Otherwise, wrap around to the first buffer in the list.
current_buffer = buffers.begin();
}
}
/// Wait until the buffer is no longer busy
if (current_buffer->busy)
{
CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferWait);
cond.wait(lock, [&]{ return !current_buffer->busy; });
}
/// Now this buffer can be used.
current_buffer->previous = previous_buffer;
BufferBase::set(current_buffer->uncompressed.data(), buf_size, 0);
}
void ParallelCompressedWriteBuffer::finalizeImpl()
{
next();
pool.wait();
}
void ParallelCompressedWriteBuffer::compress(Iterator buffer)
{
CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferThreads);
chassert(buffer->uncompressed_size <= INT_MAX);
UInt32 uncompressed_size = static_cast<UInt32>(buffer->uncompressed_size);
UInt32 compressed_reserve_size = codec->getCompressedReserveSize(uncompressed_size);
/// If all previous buffers have been written,
/// and if the output buffer has the required capacity,
/// we can compress data directly into the output buffer.
size_t required_out_capacity = compressed_reserve_size + sizeof(CityHash_v1_0_2::uint128);
bool can_write_directly = false;
if (!buffer->previous)
{
can_write_directly = out.available() >= required_out_capacity;
}
else
{
std::unique_lock lock(mutex);
can_write_directly = (!buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num)
&& out.available() >= required_out_capacity;
}
if (can_write_directly)
{
char * out_compressed_ptr = out.position() + sizeof(CityHash_v1_0_2::uint128);
UInt32 compressed_size = codec->compress(buffer->uncompressed.data(), uncompressed_size, out_compressed_ptr);
CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(out_compressed_ptr, compressed_size);
writeBinaryLittleEndian(checksum.low64, out);
writeBinaryLittleEndian(checksum.high64, out);
out.position() += compressed_size;
}
else
{
buffer->compressed.resize(compressed_reserve_size);
UInt32 compressed_size = codec->compress(buffer->uncompressed.data(), uncompressed_size, buffer->compressed.data());
CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(buffer->compressed.data(), compressed_size);
/// Wait until all previous buffers have been written.
if (buffer->previous)
{
CurrentMetrics::Increment metric_wait_increment(CurrentMetrics::ParallelCompressedWriteBufferWait);
std::unique_lock lock(mutex);
cond.wait(lock, [&]{ return !buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num; });
}
writeBinaryLittleEndian(checksum.low64, out);
writeBinaryLittleEndian(checksum.high64, out);
out.write(buffer->compressed.data(), compressed_size);
}
std::unique_lock lock(mutex);
buffer->busy = false;
cond.notify_all();
}
ParallelCompressedWriteBuffer::~ParallelCompressedWriteBuffer()
{
if (!canceled)
finalize();
}
}

View File

@ -0,0 +1,70 @@
#pragma once
#include <list>
#include <memory>
#include <Common/PODArray.h>
#include <IO/WriteBuffer.h>
#include <IO/BufferWithOwnMemory.h>
#include <Compression/ICompressionCodec.h>
#include <Compression/CompressionFactory.h>
#include <Common/ThreadPool.h>
namespace DB
{
/** Uses multi-buffering for parallel compression.
* When the buffer is filled, it will be compressed in the background,
* and a new buffer is created for the next input data.
*/
class ParallelCompressedWriteBuffer final : public WriteBuffer
{
public:
explicit ParallelCompressedWriteBuffer(
WriteBuffer & out_,
CompressionCodecPtr codec_,
size_t buf_size_,
size_t num_threads_,
ThreadPool & pool_);
~ParallelCompressedWriteBuffer() override;
private:
void nextImpl() override;
void finalizeImpl() override;
WriteBuffer & out;
CompressionCodecPtr codec;
size_t buf_size;
size_t num_threads;
ThreadPool & pool;
struct BufferPair
{
explicit BufferPair(size_t input_size)
: uncompressed(input_size)
{
}
Memory<> uncompressed;
size_t uncompressed_size = 0;
PODArray<char> compressed;
BufferPair * previous = nullptr;
size_t sequence_num = 0;
bool busy = false;
};
std::mutex mutex;
std::condition_variable cond;
std::list<BufferPair> buffers;
using Iterator = std::list<BufferPair>::iterator;
Iterator current_buffer;
size_t current_sequence_num = 0;
void compress(Iterator buffer);
};
}
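A hedged usage sketch of the new buffer; the codec name, file name, buffer sizes, and placeholder metrics are illustrative, not prescribed by this commit:

#include <Compression/CompressionFactory.h>
#include <Compression/ParallelCompressedWriteBuffer.h>
#include <IO/WriteBufferFromFile.h>
#include <Common/CurrentMetrics.h>
#include <Common/ThreadPool.h>
#include <Core/Defines.h>

/// Hedged sketch: compress into a file with 4 background threads.
void writeCompressed(const char * data, size_t size)
{
    DB::WriteBufferFromFile out("data.bin");
    auto codec = DB::CompressionCodecFactory::instance().get("LZ4", {});
    ThreadPool pool(CurrentMetrics::end(), CurrentMetrics::end(), CurrentMetrics::end(), /* max_threads = */ 4);
    DB::ParallelCompressedWriteBuffer buf(out, codec, DBMS_DEFAULT_BUFFER_SIZE, /* num_threads = */ 4, pool);
    buf.write(data, size);
    buf.finalize();  /// flushes the last buffer and waits for the compression pool
    out.finalize();
}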

View File

@ -10,33 +10,50 @@
namespace DB
{
using Checksum = CityHash_v1_0_2::uint128;
CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path)
CompressionCodecPtr
getCompressionCodecForFile(ReadBuffer & read_buffer, UInt32 & size_compressed, UInt32 & size_decompressed, bool skip_to_next_block)
{
auto read_buffer = data_part_storage.readFile(relative_path, {}, std::nullopt, std::nullopt);
read_buffer->ignore(sizeof(Checksum));
read_buffer.ignore(sizeof(Checksum));
UInt8 header_size = ICompressionCodec::getHeaderSize();
size_t starting_bytes = read_buffer.count();
PODArray<char> compressed_buffer;
compressed_buffer.resize(header_size);
read_buffer->readStrict(compressed_buffer.data(), header_size);
read_buffer.readStrict(compressed_buffer.data(), header_size);
uint8_t method = ICompressionCodec::readMethod(compressed_buffer.data());
size_compressed = unalignedLoad<UInt32>(&compressed_buffer[1]);
size_decompressed = unalignedLoad<UInt32>(&compressed_buffer[5]);
if (method == static_cast<uint8_t>(CompressionMethodByte::Multiple))
{
compressed_buffer.resize(1);
read_buffer->readStrict(compressed_buffer.data(), 1);
read_buffer.readStrict(compressed_buffer.data(), 1);
compressed_buffer.resize(1 + compressed_buffer[0]);
read_buffer->readStrict(compressed_buffer.data() + 1, compressed_buffer[0]);
read_buffer.readStrict(compressed_buffer.data() + 1, compressed_buffer[0]);
auto codecs_bytes = CompressionCodecMultiple::getCodecsBytesFromData(compressed_buffer.data());
Codecs codecs;
for (auto byte : codecs_bytes)
codecs.push_back(CompressionCodecFactory::instance().get(byte));
if (skip_to_next_block)
read_buffer.ignore(size_compressed - (read_buffer.count() - starting_bytes));
return std::make_shared<CompressionCodecMultiple>(codecs);
}
if (skip_to_next_block)
read_buffer.ignore(size_compressed - (read_buffer.count() - starting_bytes));
return CompressionCodecFactory::instance().get(method);
}
CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path)
{
auto read_buffer = data_part_storage.readFile(relative_path, {}, std::nullopt, std::nullopt);
UInt32 size_compressed;
UInt32 size_decompressed;
return getCompressionCodecForFile(*read_buffer, size_compressed, size_decompressed, false);
}
}
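A hedged sketch of the clickhouse-compressor use case this overload enables; the function name and header path are assumed:

#include <iostream>
#include <Compression/getCompressionCodecForFile.h>
#include <IO/ReadBuffer.h>

/// Hedged sketch: with skip_to_next_block = true each call leaves the buffer at
/// the start of the next compressed frame, so per-block statistics can be
/// printed in a loop over the whole stream.
void printBlockStats(DB::ReadBuffer & in)
{
    while (!in.eof())
    {
        UInt32 size_compressed = 0;
        UInt32 size_decompressed = 0;
        auto codec = DB::getCompressionCodecForFile(in, size_compressed, size_decompressed, /* skip_to_next_block = */ true);
        std::cout << size_compressed << '\t' << size_decompressed << '\n';
    }
}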

View File

@ -13,4 +13,8 @@ namespace DB
/// from metadata.
CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path);
/// Same as above, used by clickhouse-compressor to print compression statistics for each data block.
CompressionCodecPtr
getCompressionCodecForFile(ReadBuffer & read_buffer, UInt32 & size_compressed, UInt32 & size_decompressed, bool skip_to_next_block);
}

View File

@ -330,7 +330,7 @@ TYPED_TEST(CoordinationTest, TestSummingRaft1)
this->setLogDirectory("./logs");
this->setStateFileDirectory(".");
SummingRaftServer s1(1, "localhost", 44444, this->keeper_context);
SummingRaftServer s1(1, "localhost", 0, this->keeper_context);
SCOPE_EXIT(if (std::filesystem::exists("./state")) std::filesystem::remove("./state"););
/// Single node is leader

View File

@ -131,6 +131,9 @@ namespace DB
DECLARE(UInt64, max_database_num_to_warn, 1000lu, "If the number of databases is greater than this value, the server will create a warning that will be displayed to the user.", 0) \
DECLARE(UInt64, max_part_num_to_warn, 100000lu, "If the number of parts is greater than this value, the server will create a warning that will be displayed to the user.", 0) \
DECLARE(UInt64, max_table_num_to_throw, 0lu, "If the number of tables is greater than this value, the server will throw an exception. 0 means no limitation. Views, remote tables, dictionaries and system tables are not counted. Only tables in Atomic/Ordinary/Replicated/Lazy database engines are counted.", 0) \
DECLARE(UInt64, max_replicated_table_num_to_throw, 0lu, "If the number of replicated tables is greater than this value, the server will throw an exception. 0 means no limitation. Only tables in Atomic/Ordinary/Replicated/Lazy database engines are counted.", 0) \
DECLARE(UInt64, max_dictionary_num_to_throw, 0lu, "If the number of dictionaries is greater than this value, the server will throw an exception. 0 means no limitation. Only dictionaries in Atomic/Ordinary/Replicated/Lazy database engines are counted.", 0) \
DECLARE(UInt64, max_view_num_to_throw, 0lu, "If the number of views is greater than this value, the server will throw an exception. 0 means no limitation. Only views in Atomic/Ordinary/Replicated/Lazy database engines are counted.", 0) \
DECLARE(UInt64, max_database_num_to_throw, 0lu, "If the number of databases is greater than this value, the server will throw an exception. 0 means no limitation.", 0) \
DECLARE(UInt64, max_authentication_methods_per_user, 100, "The maximum number of authentication methods a user can be created with or altered to have. Changing this setting does not affect existing users. Zero means unlimited", 0) \
DECLARE(UInt64, concurrent_threads_soft_limit_num, 0, "Sets how many concurrent threads can be allocated before applying CPU pressure. Zero means unlimited.", 0) \

View File

@ -2869,7 +2869,7 @@ Limit on size of multipart/form-data content. This setting cannot be parsed from
DECLARE(Bool, calculate_text_stack_trace, true, R"(
Calculate text stack trace in case of exceptions during query execution. This is the default. It requires symbol lookups that may slow down fuzzing tests when a huge number of incorrect queries are executed. In normal cases, you should not disable this option.
)", 0) \
DECLARE(Bool, enable_job_stack_trace, false, R"(
DECLARE(Bool, enable_job_stack_trace, true, R"(
Output the stack trace of the job creator when a job results in an exception
)", 0) \
DECLARE(Bool, allow_ddl, true, R"(
@ -2892,6 +2892,9 @@ Possible values:
**See Also**
- [ORDER BY Clause](../../sql-reference/statements/select/order-by.md/#optimize_read_in_order)
)", 0) \
DECLARE(Bool, read_in_order_use_virtual_row, false, R"(
Use a virtual row while reading in order of the primary key or a monotonic function of it. It is useful when searching over multiple parts, as only the relevant ones are touched.
)", 0) \
DECLARE(Bool, optimize_read_in_window_order, true, R"(
Enable ORDER BY optimization in window clause for reading data in corresponding order in MergeTree tables.
@ -3666,6 +3669,11 @@ Given that, for example, dictionaries, can be out of sync across nodes, mutation
</profiles>
```
)", 0) \
DECLARE(Bool, validate_mutation_query, true, R"(
Validate mutation queries before accepting them. Mutations are executed in the background, and running an invalid query will cause mutations to get stuck, requiring manual intervention.
Only change this setting if you encounter a backward-incompatible bug.
)", 0) \
DECLARE(Seconds, lock_acquire_timeout, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, R"(
Defines how many seconds a locking request waits before failing.
@ -4557,7 +4565,7 @@ Possible values:
- 0 - Disable
- 1 - Enable
)", 0) \
DECLARE(Bool, query_plan_merge_filters, false, R"(
DECLARE(Bool, query_plan_merge_filters, true, R"(
Allow to merge filters in the query plan
)", 0) \
DECLARE(Bool, query_plan_filter_push_down, true, R"(
@ -4858,9 +4866,9 @@ Allows to record the filesystem caching log for each query
DECLARE(Bool, read_from_filesystem_cache_if_exists_otherwise_bypass_cache, false, R"(
Allow to use the filesystem cache in passive mode - benefit from the existing cache entries, but don't put more entries into the cache. If you set this setting for heavy ad-hoc queries and leave it disabled for short real-time queries, this will allow to avoid cache thrashing by too heavy queries and to improve the overall system efficiency.
)", 0) \
DECLARE(Bool, skip_download_if_exceeds_query_cache, true, R"(
DECLARE(Bool, filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit, true, R"(
Skip download from remote filesystem if it exceeds the query cache size
)", 0) \
)", 0) ALIAS(skip_download_if_exceeds_query_cache) \
DECLARE(UInt64, filesystem_cache_max_download_size, (128UL * 1024 * 1024 * 1024), R"(
Max remote filesystem cache size that can be downloaded by a single query
)", 0) \
@ -4872,6 +4880,9 @@ Limit on size of a single batch of file segments that a read buffer can request
)", 0) \
DECLARE(UInt64, filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, 1000, R"(
Wait time to lock cache for space reservation in filesystem cache
)", 0) \
DECLARE(Bool, filesystem_cache_prefer_bigger_buffer_size, true, R"(
Prefer a bigger buffer size if the filesystem cache is enabled, to avoid writing small file segments which deteriorate cache performance. On the other hand, enabling this setting might increase memory usage.
)", 0) \
DECLARE(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"(
Wait time to lock cache for space reservation for temporary data in filesystem cache
@ -5708,6 +5719,8 @@ If enabled, MongoDB tables will return an error when a MongoDB query cannot be b
)", 0) \
DECLARE(Bool, implicit_select, false, R"(
Allow writing simple SELECT queries without the leading SELECT keyword, which makes it convenient for calculator-style usage, e.g. `1 + 2` becomes a valid query.
In `clickhouse-local` it is enabled by default and can be explicitly disabled.
)", 0) \
\
\

View File

@ -64,6 +64,8 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
},
{"24.11",
{
{"validate_mutation_query", false, true, "New setting to validate mutation queries by default."},
{"enable_job_stack_trace", false, true, "Enable by default collecting stack traces from job's scheduling."},
{"allow_suspicious_types_in_group_by", true, false, "Don't allow Variant/Dynamic types in GROUP BY by default"},
{"allow_suspicious_types_in_order_by", true, false, "Don't allow Variant/Dynamic types in ORDER BY by default"},
{"distributed_cache_discard_connection_if_unread_data", true, true, "New setting"},
@ -75,7 +77,11 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"backup_restore_keeper_max_retries_while_initializing", 0, 20, "New setting."},
{"backup_restore_keeper_max_retries_while_handling_error", 0, 20, "New setting."},
{"backup_restore_finish_timeout_after_error_sec", 0, 180, "New setting."},
{"query_plan_merge_filters", false, true, "Allow to merge filters in the query plan. This is required to properly support filter-push-down with a new analyzer."},
{"parallel_replicas_local_plan", false, true, "Use local plan for local replica in a query with parallel replicas"},
{"filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit", 1, 1, "Rename of setting skip_download_if_exceeds_query_cache_limit"},
{"filesystem_cache_prefer_bigger_buffer_size", true, true, "New setting"},
{"read_in_order_use_virtual_row", false, false, "Use virtual row while reading in order of primary key or its monotonic function fashion. It is useful when searching over multiple parts as only relevant ones are touched."},
}
},
{"24.10",

View File

@ -6,7 +6,6 @@
#include <Databases/DatabaseReplicated.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Interpreters/Context.h>
#include <Interpreters/DDLTask.h>
#include <Interpreters/DatabaseCatalog.h>
@ -19,6 +18,7 @@
#include <Common/filesystemHelpers.h>
#include <Core/Settings.h>
namespace fs = std::filesystem;
namespace DB
@ -60,9 +60,6 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, c
, db_uuid(uuid)
{
assert(db_uuid != UUIDHelpers::Nil);
fs::create_directories(fs::path(getContext()->getPath()) / "metadata");
fs::create_directories(path_to_table_symlinks);
tryCreateMetadataSymlink();
}
DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, ContextPtr context_)
@ -70,6 +67,20 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, C
{
}
void DatabaseAtomic::createDirectories()
{
std::lock_guard lock(mutex);
createDirectoriesUnlocked();
}
void DatabaseAtomic::createDirectoriesUnlocked()
{
DatabaseOnDisk::createDirectoriesUnlocked();
fs::create_directories(fs::path(getContext()->getPath()) / "metadata");
fs::create_directories(path_to_table_symlinks);
tryCreateMetadataSymlink();
}
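
This locked/unlocked split is the usual pattern for thread-safety-analysis-checked code: the public method takes the mutex, and the *Unlocked helper, annotated TSA_REQUIRES(mutex), may only run with the lock held. A generic sketch of the shape, outside ClickHouse's helpers:

#include <mutex>

class Directories
{
public:
    void create()
    {
        std::lock_guard lock(mutex);  // public entry point takes the lock
        createUnlocked();
    }

private:
    // In ClickHouse this would carry TSA_REQUIRES(mutex), so the compiler
    // rejects any call made without the mutex held.
    void createUnlocked()
    {
        /* fs::create_directories(...), symlinks, etc. */
    }

    std::mutex mutex;
};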
String DatabaseAtomic::getTableDataPath(const String & table_name) const
{
std::lock_guard lock(mutex);
@ -108,6 +119,7 @@ void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name,
assert(relative_table_path != data_path && !relative_table_path.empty());
DetachedTables not_in_use;
std::lock_guard lock(mutex);
createDirectoriesUnlocked();
not_in_use = cleanupDetachedTables();
auto table_id = table->getStorageID();
assertDetachedTableNotInUse(table_id.uuid);
@ -208,11 +220,15 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_
if (exchange && !supportsAtomicRename(&message))
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported because exchanging files is not supported by the OS ({})", message);
createDirectories();
waitDatabaseStarted();
auto & other_db = dynamic_cast<DatabaseAtomic &>(to_database);
bool inside_database = this == &other_db;
if (!inside_database)
other_db.createDirectories();
String old_metadata_path = getObjectMetadataPath(table_name);
String new_metadata_path = to_database.getObjectMetadataPath(to_table_name);
@ -333,6 +349,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora
const String & table_metadata_tmp_path, const String & table_metadata_path,
ContextPtr query_context)
{
createDirectories();
DetachedTables not_in_use;
auto table_data_path = getTableDataPath(query);
try
@ -469,6 +486,9 @@ void DatabaseAtomic::beforeLoadingMetadata(ContextMutablePtr /*context*/, Loadin
if (mode < LoadingStrictnessLevel::FORCE_RESTORE)
return;
if (!fs::exists(path_to_table_symlinks))
return;
/// Recreate symlinks to table data dirs in case of force restore, because some of them may be broken
for (const auto & table_path : fs::directory_iterator(path_to_table_symlinks))
{
@ -611,6 +631,7 @@ void DatabaseAtomic::renameDatabase(ContextPtr query_context, const String & new
{
/// CREATE, ATTACH, DROP, DETACH and RENAME DATABASE must hold DDLGuard
createDirectories();
waitDatabaseStarted();
bool check_ref_deps = query_context->getSettingsRef()[Setting::check_referential_table_dependencies];
@ -702,4 +723,5 @@ void registerDatabaseAtomic(DatabaseFactory & factory)
};
factory.registerDatabase("Atomic", create_fn);
}
}

View File

@ -76,6 +76,9 @@ protected:
using DetachedTables = std::unordered_map<UUID, StoragePtr>;
[[nodiscard]] DetachedTables cleanupDetachedTables() TSA_REQUIRES(mutex);
void createDirectories();
void createDirectoriesUnlocked() TSA_REQUIRES(mutex);
void tryCreateMetadataSymlink();
virtual bool allowMoveTableToOtherDatabaseEngine(IDatabase & /*to_database*/) const { return false; }

View File

@ -47,6 +47,7 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_,
: DatabaseOnDisk(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseLazy (" + name_ + ")", context_)
, expiration_time(expiration_time_)
{
createDirectories();
}

View File

@ -180,7 +180,18 @@ DatabaseOnDisk::DatabaseOnDisk(
, metadata_path(metadata_path_)
, data_path(data_path_)
{
fs::create_directories(local_context->getPath() + data_path);
}
void DatabaseOnDisk::createDirectories()
{
std::lock_guard lock(mutex);
createDirectoriesUnlocked();
}
void DatabaseOnDisk::createDirectoriesUnlocked()
{
fs::create_directories(std::filesystem::path(getContext()->getPath()) / data_path);
fs::create_directories(metadata_path);
}
@ -198,6 +209,8 @@ void DatabaseOnDisk::createTable(
const StoragePtr & table,
const ASTPtr & query)
{
createDirectories();
const auto & settings = local_context->getSettingsRef();
const auto & create = query->as<ASTCreateQuery &>();
assert(table_name == create.getTable());
@ -265,7 +278,6 @@ void DatabaseOnDisk::createTable(
}
commitCreateTable(create, table, table_metadata_tmp_path, table_metadata_path, local_context);
removeDetachedPermanentlyFlag(local_context, table_name, table_metadata_path, false);
}
@ -293,6 +305,8 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora
{
try
{
createDirectories();
/// Add a table to the map of known tables.
attachTable(query_context, query.getTable(), table, getTableDataPath(query));
@ -426,6 +440,7 @@ void DatabaseOnDisk::renameTable(
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Moving tables between databases of different engines is not supported");
}
createDirectories();
waitDatabaseStarted();
auto table_data_relative_path = getTableDataPath(table_name);
@ -621,6 +636,9 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n
void DatabaseOnDisk::iterateMetadataFiles(const IteratingFunction & process_metadata_file) const
{
if (!fs::exists(metadata_path))
return;
auto process_tmp_drop_metadata_file = [&](const String & file_name)
{
assert(getUUID() == UUIDHelpers::Nil);
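
Because directories are now created lazily, readers have to tolerate their absence, as in the guard above; a self-contained sketch of the same idea:

#include <filesystem>
#include <functional>

namespace fs = std::filesystem;

// With lazily created directories, a missing metadata directory simply
// means "no tables yet" and must not be treated as an error.
void forEachMetadataFile(const fs::path & dir,
                         const std::function<void(const fs::path &)> & process)
{
    if (!fs::exists(dir))
        return;
    for (const auto & entry : fs::directory_iterator(dir))
        process(entry.path());
}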

View File

@ -99,6 +99,9 @@ protected:
virtual void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach);
virtual void setDetachedTableNotInUseForce(const UUID & /*uuid*/) {}
void createDirectories();
void createDirectoriesUnlocked() TSA_REQUIRES(mutex);
const String metadata_path;
const String data_path;
};

View File

@ -382,7 +382,8 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n
if (!table_storage->isSystemStorage() && !DatabaseCatalog::isPredefinedDatabase(database_name))
{
LOG_TEST(log, "Counting detached table {} to database {}", table_name, database_name);
CurrentMetrics::sub(getAttachedCounterForStorage(table_storage));
for (auto metric : getAttachedCountersForStorage(table_storage))
CurrentMetrics::sub(metric);
}
auto table_id = table_storage->getStorageID();
@ -430,7 +431,8 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c
if (!table->isSystemStorage() && !DatabaseCatalog::isPredefinedDatabase(database_name))
{
LOG_TEST(log, "Counting attached table {} to database {}", table_name, database_name);
CurrentMetrics::add(getAttachedCounterForStorage(table));
for (auto metric : getAttachedCountersForStorage(table))
CurrentMetrics::add(metric);
}
}
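
A storage can now contribute to several gauges (for example a grand total plus a per-kind counter), so attach and detach walk the same list to stay balanced. A minimal sketch with hypothetical metric names:

#include <atomic>
#include <vector>

enum Metric { AttachedTables, AttachedViews, MetricCount };
std::atomic<int> metrics[MetricCount];

// Attach and detach iterate over the same set of metrics for a storage,
// so every increment has a matching decrement.
void onAttach(const std::vector<Metric> & ms) { for (auto m : ms) ++metrics[m]; }
void onDetach(const std::vector<Metric> & ms) { for (auto m : ms) --metrics[m]; }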

View File

@ -416,6 +416,7 @@ public:
std::lock_guard lock{mutex};
return database_name;
}
/// Get UUID of database.
virtual UUID getUUID() const { return UUIDHelpers::Nil; }

View File

@ -62,6 +62,7 @@ DatabaseMaterializedMySQL::DatabaseMaterializedMySQL(
, settings(std::move(settings_))
, materialize_thread(context_, database_name_, mysql_database_name_, std::move(pool_), std::move(client_), binlog_client_, settings.get())
{
createDirectories();
}
DatabaseMaterializedMySQL::~DatabaseMaterializedMySQL() = default;

View File

@ -334,22 +334,26 @@ HashedDictionary<dictionary_key_type, sparse, sharded>::~HashedDictionary()
if (container.empty())
return;
pool.trySchedule([&container, thread_group = CurrentThread::getGroup()]
{
SCOPE_EXIT_SAFE(
if (!pool.trySchedule([&container, thread_group = CurrentThread::getGroup()]
{
SCOPE_EXIT_SAFE(
if (thread_group)
CurrentThread::detachFromGroupIfNotDetached();
);
/// Do not account memory that was occupied by the dictionaries for the query/user context.
MemoryTrackerBlockerInThread memory_blocker;
if (thread_group)
CurrentThread::detachFromGroupIfNotDetached();
);
CurrentThread::attachToGroupIfDetached(thread_group);
setThreadName("HashedDictDtor");
/// Do not account memory that was occupied by the dictionaries for the query/user context.
clearContainer(container);
}))
{
MemoryTrackerBlockerInThread memory_blocker;
if (thread_group)
CurrentThread::attachToGroupIfDetached(thread_group);
setThreadName("HashedDictDtor");
clearContainer(container);
});
}
++hash_tables_count;
};
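
The fix handles a saturated or shutting-down pool: when trySchedule returns false, the cleanup runs inline in the caller instead of being silently dropped. A generic sketch of the pattern, assuming only that the pool has a bool-returning trySchedule:

#include <functional>

// If the pool cannot accept the job, do the work synchronously in the
// caller; the cleanup must happen either way.
template <typename Pool>
void scheduleOrRunInline(Pool & pool, std::function<void()> cleanup)
{
    if (!pool.trySchedule(cleanup))
        cleanup();
}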

View File

@ -277,19 +277,6 @@ void AzureObjectStorage::removeObjectImpl(const StoredObject & object, const Sha
}
}
/// Remove file. Throws exception if file doesn't exist or it's a directory.
void AzureObjectStorage::removeObject(const StoredObject & object)
{
removeObjectImpl(object, client.get(), false);
}
void AzureObjectStorage::removeObjects(const StoredObjects & objects)
{
auto client_ptr = client.get();
for (const auto & object : objects)
removeObjectImpl(object, client_ptr, false);
}
void AzureObjectStorage::removeObjectIfExists(const StoredObject & object)
{
removeObjectImpl(object, client.get(), true);
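
What remains routes removal through a single implementation whose boolean selects if-exists semantics; a self-contained sketch of that consolidation (illustration only, not the Azure client):

#include <filesystem>
#include <stdexcept>
#include <system_error>

// One removal path with an `if_exists` knob replaces separate throwing
// and non-throwing entry points.
void removeImpl(const std::filesystem::path & p, bool if_exists)
{
    std::error_code ec;
    const bool removed = std::filesystem::remove(p, ec);
    if (ec)
        throw std::filesystem::filesystem_error("remove failed", p, ec);
    if (!removed && !if_exists)
        throw std::runtime_error("object does not exist: " + p.string());
}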

Some files were not shown because too many files have changed in this diff.