diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 3dcce68ab46..976c69d3c34 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -12,7 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py - Backward Incompatible Change - Build/Testing/Packaging Improvement - Documentation (changelog entry is not required) -- Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +- Critical Bug Fix (crash, data loss, RBAC) - Bug Fix (user-visible misbehavior in an official stable release) - CI Fix or Improvement (changelog entry is not required) - Not for changelog (changelog entry is not required) diff --git a/README.md b/README.md index dcaeda13acd..3270cd19671 100644 --- a/README.md +++ b/README.md @@ -42,16 +42,17 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else Upcoming meetups -* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12 * [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19 * [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21 * [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26 * [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3 +* [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9 * [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9 * [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12 Recently completed meetups +* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12 * [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22 * [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3 * [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1 diff --git a/base/base/defines.h b/base/base/defines.h index 5685a6d9833..a0c3c0d1de5 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -145,6 +145,7 @@ #define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure #define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability #define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability +#define TSA_RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__))) /// to return capabilities in functions /// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function) /// They use a lambda function to apply function attribute to a single statement. This enables us to suppress warnings locally instead of suppressing them for the whole function.
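(Editor's note, not part of the diff: a minimal sketch of how the new `TSA_RETURN_CAPABILITY` macro is typically used. The class and member names are hypothetical, and it assumes the macros from `base/base/defines.h` are in scope; the pattern is an accessor that hands out a guarded mutex.)

```cpp
#include <mutex>

/// lock_returned tells clang's thread-safety analysis that the reference
/// returned by the accessor *is* the named capability, so a lock taken
/// through getMutex() is attributed to `mutex`.
class Counter
{
public:
    std::mutex & getMutex() TSA_RETURN_CAPABILITY(mutex) { return mutex; }

    void increment()
    {
        std::lock_guard lock(getMutex());
        ++value; /// no -Wthread-safety warning: the analysis knows `mutex` is held
    }

private:
    std::mutex mutex;
    int value TSA_GUARDED_BY(mutex) = 0;
};
```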
diff --git a/contrib/SimSIMD b/contrib/SimSIMD index ee3c9c9c00b..fa60f1b8e35 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit ee3c9c9c00b51645f62a1a9e99611b78c0052a21 +Subproject commit fa60f1b8e3582c50978f0ae86c2ebb6c9af957f3 diff --git a/docker/keeper/entrypoint.sh b/docker/keeper/entrypoint.sh index 68bd0ef9d87..934605b0b6f 100644 --- a/docker/keeper/entrypoint.sh +++ b/docker/keeper/entrypoint.sh @@ -1,21 +1,31 @@ #!/bin/bash -set +x set -eo pipefail shopt -s nullglob DO_CHOWN=1 -if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then +if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then DO_CHOWN=0 fi -CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" -CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" +# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated +# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as +# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3 +if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then + echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2 + echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2 + echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2 +fi -# support --user -if [ "$(id -u)" = "0" ]; then - USER=$CLICKHOUSE_UID - GROUP=$CLICKHOUSE_GID +# support `docker run --user=xxx:xxxx` +if [[ "$(id -u)" = "0" ]]; then + if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then + USER=0 + GROUP=0 + else + USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" + GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" + fi if command -v gosu &> /dev/null; then gosu="gosu $USER:$GROUP" elif command -v su-exec &> /dev/null; then @@ -82,11 +92,11 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then # There is a config file. It is already tested with gosu (if it is readable by keeper user) if [ -f "$KEEPER_CONFIG" ]; then - exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@" + exec $gosu clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@" fi # There is no config file. Will use embedded one - exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@" + exec $gosu clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@" fi # Otherwise, we assume the user wants to run his own process, for example a `bash` shell to explore this image
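(Editor's note, not part of the diff: the replacements that the deprecation warning in the entrypoint above asks for — image name and values are illustrative.)

```bash
# Instead of CLICKHOUSE_UID/CLICKHOUSE_GID, pick the user explicitly:
docker run --user "$(id -u):$(id -g)" clickhouse/clickhouse-keeper

# Or explicitly opt in to running as root:root:
docker run -e CLICKHOUSE_RUN_AS_ROOT=1 clickhouse/clickhouse-keeper
```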
diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 0d5c983f5e6..e6bde845c4e 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -88,34 +88,34 @@ RUN if [ -n "${single_binary_location_url}" ]; then \ #docker-official-library:on # A fallback to installation from ClickHouse repository -RUN if ! clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \ - apt-get update \ - && apt-get install --yes --no-install-recommends \ - apt-transport-https \ - dirmngr \ - gnupg2 \ - && mkdir -p /etc/apt/sources.list.d \ - && GNUPGHOME=$(mktemp -d) \ - && GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \ - --keyring /usr/share/keyrings/clickhouse-keyring.gpg \ - --keyserver hkp://keyserver.ubuntu.com:80 \ - --recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \ - && rm -rf "$GNUPGHOME" \ - && chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \ - && echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \ - && echo "installing from repository: ${REPOSITORY}" \ - && apt-get update \ - && for package in ${PACKAGES}; do \ - packages="${packages} ${package}=${VERSION}" \ - ; done \ - && apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \ - && rm -rf \ - /var/lib/apt/lists/* \ - /var/cache/debconf \ - /tmp/* \ - && apt-get autoremove --purge -yq libksba8 \ - && apt-get autoremove -yq \ - ; fi +# It works unless the clickhouse binary already exists +RUN clickhouse local -q 'SELECT 1' >/dev/null 2>&1 && exit 0 || : \ + ; apt-get update \ + && apt-get install --yes --no-install-recommends \ + dirmngr \ + gnupg2 \ + && mkdir -p /etc/apt/sources.list.d \ + && GNUPGHOME=$(mktemp -d) \ + && GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \ + --keyring /usr/share/keyrings/clickhouse-keyring.gpg \ + --keyserver hkp://keyserver.ubuntu.com:80 \ + --recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \ + && rm -rf "$GNUPGHOME" \ + && chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \ + && echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \ + && echo "installing from repository: ${REPOSITORY}" \ + && apt-get update \ + && for package in ${PACKAGES}; do \ + packages="${packages} ${package}=${VERSION}" \ + ; done \ + && apt-get install --yes --no-install-recommends ${packages} || exit 1 \ + && rm -rf \ + /var/lib/apt/lists/* \ + /var/cache/debconf \ + /tmp/* \ + && apt-get autoremove --purge -yq dirmngr gnupg2 \ + && chmod ugo+Xrw -R /etc/clickhouse-server /etc/clickhouse-client +# The last chmod is here to make the next one a no-op in the docker official library Dockerfile # post install # we need to allow "others" access to clickhouse folder, because docker container @@ -126,8 +126,6 @@ RUN clickhouse-local -q 'SELECT * FROM system.build_options' \ RUN locale-gen en_US.UTF-8 ENV LANG en_US.UTF-8 -ENV LANGUAGE en_US:en -ENV LC_ALL en_US.UTF-8 ENV TZ UTC RUN mkdir /docker-entrypoint-initdb.d diff --git a/docker/server/README.md b/docker/server/README.md index 1dc636414ac..5f6144d0633 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -1,3 +1,11 @@ + + # ClickHouse Server Docker Image ## What is ClickHouse? @@ -8,6 +16,7 @@ ClickHouse works 100-1000x faster than traditional database management systems, For more information and documentation see https://clickhouse.com/. + ## Versions - The `latest` tag points to the latest release of the latest stable branch. @@ -16,11 +25,12 @@ For more information and documentation see https://clickhouse.com/. - The tag `head` is built from the latest commit to the default branch. - Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`. + ### Compatibility - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A). -- Since the Clickhouse 24.11 Ubuntu images started using `ubuntu:22.04` as its base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run [--privileged | --security-opt seccomp=unconfined]` instead, however that has security implications. +- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run --security-opt seccomp=unconfined` instead, however that has security implications. ## How to use this image @@ -30,7 +40,7 @@ For more information and documentation see https://clickhouse.com/. docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server ``` -By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking). +By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below. By default, the server instance above runs as the `default` user without a password. @@ -47,7 +57,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in ### connect to it using curl ```bash -echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server curlimages/curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- +echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- ``` More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).
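(Editor's note, not part of the diff: the HTTP interface also accepts a `FORMAT` clause inside the query itself, which pairs nicely with curl — an illustrative variation of the command above.)

```bash
echo "SELECT version() FORMAT JSONEachRow" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```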
@@ -70,7 +80,7 @@ echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @- `22.6.3.35` -or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance): +Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance): ```bash docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server @@ -88,8 +98,8 @@ Typically you may want to mount the following folders inside your container to a ```bash docker run -d \ - -v $(realpath ./ch_data):/var/lib/clickhouse/ \ - -v $(realpath ./ch_logs):/var/log/clickhouse-server/ \ + -v "$PWD/ch_data:/var/lib/clickhouse/" \ + -v "$PWD/ch_logs:/var/log/clickhouse-server/" \ --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server ``` @@ -111,6 +121,8 @@ docker run -d \ --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server ``` +Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker). + ## Configuration The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/). @@ -126,8 +138,8 @@ docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /pa ### Start server as custom user ```bash -# $(pwd)/data/clickhouse should exist and be owned by current user -docker run --rm --user ${UID}:${GID} --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server +# $PWD/data/clickhouse should exist and be owned by current user +docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server ``` When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start. 
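(Editor's note, not part of the diff: a small sketch of the workflow this paragraph describes — pre-create the host directories with the right owner, then start the container with `--user`.)

```bash
# Create the mounted directories as the invoking user, then run as that user
mkdir -p "$PWD/data/clickhouse" "$PWD/logs/clickhouse"
docker run --rm --user "$(id -u):$(id -g)" --name some-clickhouse-server --ulimit nofile=262144:262144 \
    -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" \
    -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```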
@@ -135,7 +147,7 @@ When you use the image with local directories mounted, you probably want to spec ### Start server from root (useful in case of enabled user namespace) ```bash -docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server-userns -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server +docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server ``` ### How to create default database and user on starting diff --git a/docker/server/README.sh b/docker/server/README.sh new file mode 100755 index 00000000000..42fa72404d1 --- /dev/null +++ b/docker/server/README.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +set -ueo pipefail + +# A script to generate README.md, close to how it is done in https://github.com/docker-library/docs + +WORKDIR=$(dirname "$0") +SCRIPT_NAME=$(basename "$0") +CONTENT=README.src/content.md +LICENSE=README.src/license.md +cd "$WORKDIR" + +R=README.md + +cat > "$R" <<EOD + +EOD + +cat "$CONTENT" >> "$R" + +cat >> "$R" <<EOD +## Versions + +- The `latest` tag points to the latest release of the latest stable branch. +- Branch tags like `22.2` point to the latest release of the corresponding branch. +- Full version tags like `22.2.3.5` point to the corresponding release. +- The tag `head` is built from the latest commit to the default branch. +- Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`. + + +### Compatibility + +- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. +- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A). +- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run --security-opt seccomp=unconfined` instead, however that has security implications. + +## How to use this image + +### start server instance + +```bash +docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%% +``` + +By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below. + +By default, the server instance above runs as the `default` user without a password. + +### connect to it from a native client + +```bash +docker run -it --rm --link some-clickhouse-server:clickhouse-server --entrypoint clickhouse-client %%IMAGE%% --host clickhouse-server +# OR +docker exec -it some-clickhouse-server clickhouse-client +``` + +More information about the [ClickHouse client](https://clickhouse.com/docs/en/interfaces/cli/).
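(Editor's note, not part of the diff: you can also run a one-off query through the same container — an illustrative variation of the commands above.)

```bash
docker exec -it some-clickhouse-server clickhouse-client --query "SELECT version()"
```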
+ +### connect to it using curl + +```bash +echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- +``` + +More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/). + +### stopping / removing the container + +```bash +docker stop some-clickhouse-server +docker rm some-clickhouse-server +``` + +### networking + +You can expose your ClickHouse running in docker by [mapping a particular port](https://docs.docker.com/config/containers/container-networking/) from inside the container using host ports: + +```bash +docker run -d -p 18123:8123 -p19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%% +echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @- +``` + +`22.6.3.35` + +Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance): + +```bash +docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%% +echo 'SELECT version()' | curl 'http://localhost:8123/' --data-binary @- +``` + +`22.6.3.35` + +### Volumes + +Typically you may want to mount the following folders inside your container to achieve persistence: + +- `/var/lib/clickhouse/` - main folder where ClickHouse stores the data +- `/var/log/clickhouse-server/` - logs + +```bash +docker run -d \ + -v "$PWD/ch_data:/var/lib/clickhouse/" \ + -v "$PWD/ch_logs:/var/log/clickhouse-server/" \ + --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%% +``` + +You may also want to mount: + +- `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments +- `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments +- `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below). + +### Linux capabilities + +ClickHouse has some advanced functionality, which requires enabling several [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html). + +They are optional and can be enabled using the following [docker command-line arguments](https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities): + +```bash +docker run -d \ + --cap-add=SYS_NICE --cap-add=NET_ADMIN --cap-add=IPC_LOCK \ + --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%% +``` + +Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker). + +## Configuration + +The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).
+ +ClickHouse configuration is represented by the file "config.xml" ([documentation](https://clickhouse.com/docs/en/operations/configuration_files/)) + +### Start server instance with custom configuration + +```bash +docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /path/to/your/config.xml:/etc/clickhouse-server/config.xml %%IMAGE%% +``` + +### Start server as custom user + +```bash +# $PWD/data/clickhouse should exist and be owned by current user +docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%% +``` + +When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start. + +### Start server from root (useful in case of enabled user namespace) + +```bash +docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%% +``` + +### How to create default database and user on starting + +Sometimes you may want to create a user (user named `default` is used by default) and a database on container start. You can do it using environment variables `CLICKHOUSE_DB`, `CLICKHOUSE_USER`, `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT` and `CLICKHOUSE_PASSWORD`: + +```bash +docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 -e CLICKHOUSE_PASSWORD=password -p 9000:9000/tcp %%IMAGE%% +``` + +## How to extend this image + +To perform additional initialization in an image derived from this one, add one or more `*.sql`, `*.sql.gz`, or `*.sh` scripts under `/docker-entrypoint-initdb.d`. After the entrypoint calls `initdb`, it will run any `*.sql` files, run any executable `*.sh` scripts, and source any non-executable `*.sh` scripts found in that directory to do further initialization before starting the service. +Also, you can provide environment variables `CLICKHOUSE_USER` & `CLICKHOUSE_PASSWORD` that will be used for clickhouse-client during initialization. + +For example, to add an additional user and database, add the following to `/docker-entrypoint-initdb.d/init-db.sh`: + +```bash +#!/bin/bash +set -e + +clickhouse client -n <<-EOSQL + CREATE DATABASE docker; + CREATE TABLE docker.docker (x Int32) ENGINE = Log; +EOSQL +``` diff --git a/docker/server/README.src/github-repo b/docker/server/README.src/github-repo new file mode 100644 index 00000000000..70a009ec958 --- /dev/null +++ b/docker/server/README.src/github-repo @@ -0,0 +1 @@ +https://github.com/ClickHouse/ClickHouse diff --git a/docker/server/README.src/license.md b/docker/server/README.src/license.md new file mode 100644 index 00000000000..6be024edcde --- /dev/null +++ b/docker/server/README.src/license.md @@ -0,0 +1 @@ +View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.
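(Editor's note, not part of the diff: complementing the `/docker-entrypoint-initdb.d` example above — plain `.sql` scripts placed there are executed through the client on first start. The file name, user, and password below are hypothetical.)

```sql
-- /docker-entrypoint-initdb.d/02-create-user.sql
CREATE USER IF NOT EXISTS app IDENTIFIED WITH sha256_password BY 'change_me';
GRANT SELECT ON docker.* TO app;
```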
diff --git a/docker/server/README.src/logo.svg b/docker/server/README.src/logo.svg new file mode 100644 index 00000000000..a50dd81a164 --- /dev/null +++ b/docker/server/README.src/logo.svg @@ -0,0 +1,43 @@ + [43 lines of SVG markup omitted] \ No newline at end of file diff --git a/docker/server/README.src/maintainer.md b/docker/server/README.src/maintainer.md new file mode 100644 index 00000000000..26c7db1a293 --- /dev/null +++ b/docker/server/README.src/maintainer.md @@ -0,0 +1 @@ +[ClickHouse Inc.](%%GITHUB-REPO%%) diff --git a/docker/server/README.src/metadata.json b/docker/server/README.src/metadata.json new file mode 100644 index 00000000000..3d3937b21fb --- /dev/null +++ b/docker/server/README.src/metadata.json @@ -0,0 +1,7 @@ +{ + "hub": { + "categories": [ + "databases-and-storage" + ] + } +} diff --git a/docker/server/entrypoint.sh b/docker/server/entrypoint.sh index 3102ab8297c..947244dd97f 100755 --- a/docker/server/entrypoint.sh +++ b/docker/server/entrypoint.sh @@ -4,17 +4,28 @@ set -eo pipefail shopt -s nullglob DO_CHOWN=1 -if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then +if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then DO_CHOWN=0 fi -CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" -CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" +# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated +# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as +# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3 +if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then + echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2 + echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2 + echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2 +fi -# support --user -if [ "$(id -u)" = "0" ]; then - USER=$CLICKHOUSE_UID - GROUP=$CLICKHOUSE_GID +# support `docker run --user=xxx:xxxx` +if [[ "$(id -u)" = "0" ]]; then + if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then + USER=0 + GROUP=0 + else + USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" + GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" + fi else USER="$(id -u)" GROUP="$(id -g)" @@ -55,14 +66,14 @@ function create_directory_and_do_chown() { [ -z "$dir" ] && return # ensure directories exist if [ "$DO_CHOWN" = "1" ]; then - mkdir="mkdir" + mkdir=( mkdir ) else # if DO_CHOWN=0 it means that the system does not map root user to "admin" permissions # it mainly happens on NFS mounts where root==nobody for security reasons # thus mkdir MUST run with user id/gid and not from nobody that has zero permissions - mkdir="/usr/bin/clickhouse su "${USER}:${GROUP}" mkdir" + mkdir=( clickhouse su "${USER}:${GROUP}" mkdir ) fi - if ! $mkdir -p "$dir"; then + if ! "${mkdir[@]}" -p "$dir"; then echo "Couldn't create necessary directory: $dir" exit 1 fi @@ -143,7 +154,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then fi # Listen only on localhost until the initialization is done - /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 & + clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 & pid="$!"
# check if clickhouse is ready to accept connections @@ -151,7 +162,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then tries=${CLICKHOUSE_INIT_TIMEOUT:-1000} while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do if [ "$tries" -le "0" ]; then - echo >&2 'ClickHouse init process failed.' + echo >&2 'ClickHouse init process timed out.' exit 1 fi tries=$(( tries-1 )) @@ -203,18 +214,8 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0} export CLICKHOUSE_WATCHDOG_ENABLE - # An option for easy restarting and replacing clickhouse-server in a container, especially in Kubernetes. - # For example, you can replace the clickhouse-server binary to another and restart it while keeping the container running. - if [[ "${CLICKHOUSE_DOCKER_RESTART_ON_EXIT:-0}" -eq "1" ]]; then - while true; do - # This runs the server as a child process of the shell script: - /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" ||: - echo >&2 'ClickHouse Server exited, and the environment variable CLICKHOUSE_DOCKER_RESTART_ON_EXIT is set to 1. Restarting the server.' - done - else - # This replaces the shell script with the server: - exec /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" - fi + # This replaces the shell script with the server: + exec clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" fi # Otherwise, we assume the user wants to run his own process, for example a `bash` shell to explore this image diff --git a/docs/en/getting-started/example-datasets/tpch.md b/docs/en/getting-started/example-datasets/tpch.md index 5fa0d779ecd..3ea4bffec38 100644 --- a/docs/en/getting-started/example-datasets/tpch.md +++ b/docs/en/getting-started/example-datasets/tpch.md @@ -33,6 +33,21 @@ Then, generate the data. Parameter `-s` specifies the scale factor. For example, ./dbgen -s 100 ``` +Detailed table sizes with scale factor 100: + +| Table | size (in rows) | size (compressed in ClickHouse) | +|----------|----------------|---------------------------------| +| nation | 25 | 2 kB | +| region | 5 | 1 kB | +| part | 20.000.000 | 895 MB | +| supplier | 1.000.000 | 75 MB | +| partsupp | 80.000.000 | 4.37 GB | +| customer | 15.000.000 | 1.19 GB | +| orders | 150.000.000 | 6.15 GB | +| lineitem | 600.000.000 | 26.69 GB | + +(Compressed sizes in ClickHouse are taken from `system.tables.total_bytes` and are based on the table definitions below.) + Now create tables in ClickHouse. We stick as closely as possible to the rules of the TPC-H specification: @@ -151,10 +166,37 @@ clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO orders FORMAT clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO lineitem FORMAT CSV" < lineitem.tbl ``` -The queries are generated by `./qgen -s <scaling_factor>`. Example queries for `s = 100`: +:::note +Instead of using tpch-kit and generating the tables yourself, you can alternatively import the data from a public S3 bucket. Make sure +to create empty tables first using the above `CREATE` statements.
+ +```sql +-- Scaling factor 1 +INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/nation.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/region.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/part.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/supplier.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/partsupp.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/customer.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/orders.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/lineitem.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; + +-- Scaling factor 100 +INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', 
input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +``` +::: ## Queries +The queries are generated by `./qgen -s <scaling_factor>`. Example queries for `s = 100`: + **Correctness** The result of the queries agrees with the official results unless mentioned otherwise. To verify, generate a TPC-H database with scale diff --git a/docs/en/operations/server-configuration-parameters/settings.md index c5f92ccdf68..ca4938b1a47 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -597,6 +597,30 @@ If number of tables is greater than this value, server will throw an exception. <max_table_num_to_throw>400</max_table_num_to_throw> ``` +## max\_replicated\_table\_num\_to\_throw {#max-replicated-table-num-to-throw} +If the number of replicated tables is greater than this value, the server will throw an exception. 0 means no limitation. Only tables in the Atomic, Ordinary, Replicated, and Lazy database engines are counted. + +**Example** +```xml +<max_replicated_table_num_to_throw>400</max_replicated_table_num_to_throw> +``` + +## max\_dictionary\_num\_to\_throw {#max-dictionary-num-to-throw} +If the number of dictionaries is greater than this value, the server will throw an exception. 0 means no limitation. Only dictionaries in the Atomic, Ordinary, Replicated, and Lazy database engines are counted. + +**Example** +```xml +<max_dictionary_num_to_throw>400</max_dictionary_num_to_throw> +``` + +## max\_view\_num\_to\_throw {#max-view-num-to-throw} +If the number of views is greater than this value, the server will throw an exception. 0 means no limitation. Only views in the Atomic, Ordinary, Replicated, and Lazy database engines are counted. + +**Example** +```xml +<max_view_num_to_throw>400</max_view_num_to_throw> +``` + ## max\_database\_num\_to\_throw {#max-table-num-to-throw} If the number of databases is greater than this value, the server will throw an exception. 0 means no limitation.
Default value: 0 diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md index 2dce0afe2e1..ee8f0d5882e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/index.md +++ b/docs/en/sql-reference/aggregate-functions/reference/index.md @@ -7,119 +7,4 @@ toc_hidden: true # List of Aggregate Functions -Standard aggregate functions: - -- [count](../reference/count.md) -- [min](../reference/min.md) -- [max](../reference/max.md) -- [sum](../reference/sum.md) -- [avg](../reference/avg.md) -- [any](../reference/any.md) -- [stddevPop](../reference/stddevpop.md) -- [stddevPopStable](../reference/stddevpopstable.md) -- [stddevSamp](../reference/stddevsamp.md) -- [stddevSampStable](../reference/stddevsampstable.md) -- [varPop](../reference/varpop.md) -- [varSamp](../reference/varsamp.md) -- [corr](../reference/corr.md) -- [corr](../reference/corrstable.md) -- [corrMatrix](../reference/corrmatrix.md) -- [covarPop](../reference/covarpop.md) -- [covarStable](../reference/covarpopstable.md) -- [covarPopMatrix](../reference/covarpopmatrix.md) -- [covarSamp](../reference/covarsamp.md) -- [covarSampStable](../reference/covarsampstable.md) -- [covarSampMatrix](../reference/covarsampmatrix.md) -- [entropy](../reference/entropy.md) -- [exponentialMovingAverage](../reference/exponentialmovingaverage.md) -- [intervalLengthSum](../reference/intervalLengthSum.md) -- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md) -- [mannwhitneyutest](../reference/mannwhitneyutest.md) -- [median](../reference/median.md) -- [rankCorr](../reference/rankCorr.md) -- [sumKahan](../reference/sumkahan.md) -- [studentTTest](../reference/studentttest.md) -- [welchTTest](../reference/welchttest.md) - -ClickHouse-specific aggregate functions: - -- [aggThrow](../reference/aggthrow.md) -- [analysisOfVariance](../reference/analysis_of_variance.md) -- [any](../reference/any.md) -- [anyHeavy](../reference/anyheavy.md) -- [anyLast](../reference/anylast.md) -- [boundingRatio](../reference/boundrat.md) -- [first_value](../reference/first_value.md) -- [last_value](../reference/last_value.md) -- [argMin](../reference/argmin.md) -- [argMax](../reference/argmax.md) -- [avgWeighted](../reference/avgweighted.md) -- [topK](../reference/topk.md) -- [topKWeighted](../reference/topkweighted.md) -- [deltaSum](../reference/deltasum.md) -- [deltaSumTimestamp](../reference/deltasumtimestamp.md) -- [flameGraph](../reference/flame_graph.md) -- [groupArray](../reference/grouparray.md) -- [groupArrayLast](../reference/grouparraylast.md) -- [groupUniqArray](../reference/groupuniqarray.md) -- [groupArrayInsertAt](../reference/grouparrayinsertat.md) -- [groupArrayMovingAvg](../reference/grouparraymovingavg.md) -- [groupArrayMovingSum](../reference/grouparraymovingsum.md) -- [groupArraySample](../reference/grouparraysample.md) -- [groupArraySorted](../reference/grouparraysorted.md) -- [groupArrayIntersect](../reference/grouparrayintersect.md) -- [groupBitAnd](../reference/groupbitand.md) -- [groupBitOr](../reference/groupbitor.md) -- [groupBitXor](../reference/groupbitxor.md) -- [groupBitmap](../reference/groupbitmap.md) -- [groupBitmapAnd](../reference/groupbitmapand.md) -- [groupBitmapOr](../reference/groupbitmapor.md) -- [groupBitmapXor](../reference/groupbitmapxor.md) -- [sumWithOverflow](../reference/sumwithoverflow.md) -- [sumMap](../reference/summap.md) -- [sumMapWithOverflow](../reference/summapwithoverflow.md) -- 
[sumMapFiltered](../parametric-functions.md/#summapfiltered) -- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow) -- [minMap](../reference/minmap.md) -- [maxMap](../reference/maxmap.md) -- [skewSamp](../reference/skewsamp.md) -- [skewPop](../reference/skewpop.md) -- [kurtSamp](../reference/kurtsamp.md) -- [kurtPop](../reference/kurtpop.md) -- [uniq](../reference/uniq.md) -- [uniqExact](../reference/uniqexact.md) -- [uniqCombined](../reference/uniqcombined.md) -- [uniqCombined64](../reference/uniqcombined64.md) -- [uniqHLL12](../reference/uniqhll12.md) -- [uniqTheta](../reference/uniqthetasketch.md) -- [quantile](../reference/quantile.md) -- [quantiles](../reference/quantiles.md) -- [quantileExact](../reference/quantileexact.md) -- [quantileExactLow](../reference/quantileexact.md#quantileexactlow) -- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh) -- [quantileExactWeighted](../reference/quantileexactweighted.md) -- [quantileTiming](../reference/quantiletiming.md) -- [quantileTimingWeighted](../reference/quantiletimingweighted.md) -- [quantileDeterministic](../reference/quantiledeterministic.md) -- [quantileTDigest](../reference/quantiletdigest.md) -- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md) -- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16) -- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted) -- [quantileDD](../reference/quantileddsketch.md#quantileddsketch) -- [simpleLinearRegression](../reference/simplelinearregression.md) -- [singleValueOrNull](../reference/singlevalueornull.md) -- [stochasticLinearRegression](../reference/stochasticlinearregression.md) -- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md) -- [categoricalInformationValue](../reference/categoricalinformationvalue.md) -- [contingency](../reference/contingency.md) -- [cramersV](../reference/cramersv.md) -- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md) -- [theilsU](../reference/theilsu.md) -- [maxIntersections](../reference/maxintersections.md) -- [maxIntersectionsPosition](../reference/maxintersectionsposition.md) -- [meanZTest](../reference/meanztest.md) -- [quantileGK](../reference/quantileGK.md) -- [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md) -- [sparkBar](../reference/sparkbar.md) -- [sumCount](../reference/sumcount.md) -- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md) +ClickHouse supports all standard SQL aggregate functions ([sum](../reference/sum.md), [avg](../reference/avg.md), [min](../reference/min.md), [max](../reference/max.md), [count](../reference/count.md)), as well as a wide range of other aggregate functions. diff --git a/docs/en/sql-reference/data-types/aggregatefunction.md b/docs/en/sql-reference/data-types/aggregatefunction.md index 37f0d0e50ae..4cad27db68b 100644 --- a/docs/en/sql-reference/data-types/aggregatefunction.md +++ b/docs/en/sql-reference/data-types/aggregatefunction.md @@ -6,7 +6,9 @@ sidebar_label: AggregateFunction # AggregateFunction -Aggregate functions can have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. 
To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge`suffix. +Aggregate functions have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). +The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. +To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix. `AggregateFunction(name, types_of_arguments...)` — parametric data type.
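(Editor's note, not part of the diff: a minimal sketch of the `-State`/`-Merge` round trip described above; the table and column names are hypothetical.)

```sql
-- Store the intermediate state of uniq() in an AggregateFunction column.
CREATE TABLE visit_states
(
    day Date,
    visitors AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY day;

-- uniqState() produces the serialized intermediate state instead of a final value.
INSERT INTO visit_states
SELECT toDate('2024-01-01') AS day, uniqState(number)
FROM numbers(1000)
GROUP BY day;

-- uniqMerge() combines the stored states and finalizes the result.
SELECT day, uniqMerge(visitors) AS unique_visitors
FROM visit_states
GROUP BY day;
```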
diff --git a/docs/en/sql-reference/data-types/index.md b/docs/en/sql-reference/data-types/index.md index 2b89dd145e6..134678f71bb 100644 --- a/docs/en/sql-reference/data-types/index.md +++ b/docs/en/sql-reference/data-types/index.md @@ -6,29 +6,8 @@ sidebar_position: 1 # Data Types in ClickHouse -ClickHouse can store various kinds of data in table cells. This section describes the supported data types and special considerations for using and/or implementing them if any. +This section describes the data types supported by ClickHouse, for example [integers](int-uint.md), [floats](float.md) and [strings](string.md). -:::note -You can check whether a data type name is case-sensitive in the [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) table. -::: - -ClickHouse data types include: - -- **Integer types**: [signed and unsigned integers](./int-uint.md) (`UInt8`, `UInt16`, `UInt32`, `UInt64`, `UInt128`, `UInt256`, `Int8`, `Int16`, `Int32`, `Int64`, `Int128`, `Int256`) -- **Floating-point numbers**: [floats](./float.md)(`Float32` and `Float64`) and [`Decimal` values](./decimal.md) -- **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md) -- **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md) -- **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time -- **Object**: the [`Object`](./json.md) stores a JSON document in a single column (deprecated) -- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column -- **UUID**: a performant option for storing [`UUID` values](./uuid.md) -- **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column -- **Arrays**: any column can be defined as an [`Array` of values](./array.md) -- **Maps**: use [`Map`](./map.md) for storing key/value pairs -- **Aggregation function types**: use [`SimpleAggregateFunction`](./simpleaggregatefunction.md) and [`AggregateFunction`](./aggregatefunction.md) for storing the intermediate status of aggregate function results -- **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell -- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type. -- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column settings its default value for the data type) -- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses -- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon` -- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md) +System table [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) provides an +overview of all available data types. +It also shows whether a data type is an alias to another data type and whether its name is case-sensitive (e.g. `bool` vs. `BOOL`).
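(Editor's note, not part of the diff: an illustrative query against that system table.)

```sql
-- Which data type names are aliases of Float64, and are they case-insensitive?
SELECT name, alias_to, case_insensitive
FROM system.data_type_families
WHERE alias_to = 'Float64';
```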
diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index e48b308a620..ce69f15f0fa 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -7,7 +7,7 @@ keywords: [object, data type] # Object Data Type (deprecated) -**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864). +**This feature is not production-ready and is deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON objects is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864). diff --git a/docs/en/sql-reference/data-types/simpleaggregatefunction.md b/docs/en/sql-reference/data-types/simpleaggregatefunction.md index 4fb74ac30e4..8edd8b5b8ff 100644 --- a/docs/en/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/en/sql-reference/data-types/simpleaggregatefunction.md @@ -5,7 +5,9 @@ sidebar_label: SimpleAggregateFunction --- # SimpleAggregateFunction -`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data. +`SimpleAggregateFunction(name, types_of_arguments...)` data type stores the current value (intermediate state) of the aggregate function, but not its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. +This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. +This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data. The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix. diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 2357b5b2fdd..34dc6e996ee 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -4773,7 +4773,7 @@ Result: ## toUTCTimestamp -Convert DateTime/DateTime64 type value from other time zone to UTC timezone timestamp +Converts a DateTime/DateTime64 value from another time zone to a timestamp in the UTC time zone. This function is mainly included for compatibility with Apache Spark and similar frameworks. **Syntax** @@ -4799,14 +4799,14 @@ SELECT toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai'); Result: ``` text -┌─toUTCTimestamp(toDateTime('2023-03-16'),'Asia/Shanghai')┐ +┌─toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai')┐ │ 2023-03-15 16:00:00 │ └─────────────────────────────────────────────────────────┘ ``` ## fromUTCTimestamp -Convert DateTime/DateTime64 type value from UTC timezone to other time zone timestamp +Converts a DateTime/DateTime64 value from the UTC time zone to a timestamp in another time zone. This function is mainly included for compatibility with Apache Spark and similar frameworks.
**Syntax** @@ -4832,7 +4832,7 @@ SELECT fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00', 3), 'Asia/Shanghai') Result: ``` text -┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3),'Asia/Shanghai')─┐ +┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3), 'Asia/Shanghai')─┐ │ 2023-03-16 18:00:00.000 │ └─────────────────────────────────────────────────────────────────────────┘ ``` diff --git a/docs/en/sql-reference/functions/geo/index.md b/docs/en/sql-reference/functions/geo/index.md index d46e60281e2..51b6868611a 100644 --- a/docs/en/sql-reference/functions/geo/index.md +++ b/docs/en/sql-reference/functions/geo/index.md @@ -5,70 +5,4 @@ sidebar_position: 62 title: "Geo Functions" --- - -## Geographical Coordinates Functions - -- [greatCircleDistance](./coordinates.md#greatcircledistance) -- [geoDistance](./coordinates.md#geodistance) -- [greatCircleAngle](./coordinates.md#greatcircleangle) -- [pointInEllipses](./coordinates.md#pointinellipses) -- [pointInPolygon](./coordinates.md#pointinpolygon) - -## Geohash Functions -- [geohashEncode](./geohash.md#geohashencode) -- [geohashDecode](./geohash.md#geohashdecode) -- [geohashesInBox](./geohash.md#geohashesinbox) - -## H3 Indexes Functions - -- [h3IsValid](./h3.md#h3isvalid) -- [h3GetResolution](./h3.md#h3getresolution) -- [h3EdgeAngle](./h3.md#h3edgeangle) -- [h3EdgeLengthM](./h3.md#h3edgelengthm) -- [h3EdgeLengthKm](./h3.md#h3edgelengthkm) -- [geoToH3](./h3.md#geotoh3) -- [h3ToGeo](./h3.md#h3togeo) -- [h3ToGeoBoundary](./h3.md#h3togeoboundary) -- [h3kRing](./h3.md#h3kring) -- [h3GetBaseCell](./h3.md#h3getbasecell) -- [h3HexAreaM2](./h3.md#h3hexaream2) -- [h3HexAreaKm2](./h3.md#h3hexareakm2) -- [h3IndexesAreNeighbors](./h3.md#h3indexesareneighbors) -- [h3ToChildren](./h3.md#h3tochildren) -- [h3ToParent](./h3.md#h3toparent) -- [h3ToString](./h3.md#h3tostring) -- [stringToH3](./h3.md#stringtoh3) -- [h3GetResolution](./h3.md#h3getresolution) -- [h3IsResClassIII](./h3.md#h3isresclassiii) -- [h3IsPentagon](./h3.md#h3ispentagon) -- [h3GetFaces](./h3.md#h3getfaces) -- [h3CellAreaM2](./h3.md#h3cellaream2) -- [h3CellAreaRads2](./h3.md#h3cellarearads2) -- [h3ToCenterChild](./h3.md#h3tocenterchild) -- [h3ExactEdgeLengthM](./h3.md#h3exactedgelengthm) -- [h3ExactEdgeLengthKm](./h3.md#h3exactedgelengthkm) -- [h3ExactEdgeLengthRads](./h3.md#h3exactedgelengthrads) -- [h3NumHexagons](./h3.md#h3numhexagons) -- [h3Line](./h3.md#h3line) -- [h3Distance](./h3.md#h3distance) -- [h3HexRing](./h3.md#h3hexring) -- [h3GetUnidirectionalEdge](./h3.md#h3getunidirectionaledge) -- [h3UnidirectionalEdgeIsValid](./h3.md#h3unidirectionaledgeisvalid) -- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3getoriginindexfromunidirectionaledge) -- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3getdestinationindexfromunidirectionaledge) -- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3getindexesfromunidirectionaledge) -- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3getunidirectionaledgesfromhexagon) -- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3getunidirectionaledgeboundary) - -## S2 Index Functions - -- [geoToS2](./s2.md#geotos2) -- [s2ToGeo](./s2.md#s2togeo) -- [s2GetNeighbors](./s2.md#s2getneighbors) -- [s2CellsIntersect](./s2.md#s2cellsintersect) -- [s2CapContains](./s2.md#s2capcontains) -- [s2CapUnion](./s2.md#s2capunion) -- [s2RectAdd](./s2.md#s2rectadd) -- [s2RectContains](./s2.md#s2rectcontains) -- [s2RectUnion](./s2.md#s2rectunion) -- [s2RectIntersection](./s2.md#s2rectintersection) +Functions for working with geometric objects, for example [to 
calculate distances between points on a sphere](./coordinates.md), [compute geohashes](./geohash.md), and work with [H3 indexes](./h3.md). diff --git a/docs/en/sql-reference/functions/index.md b/docs/en/sql-reference/functions/index.md index c0256ba4735..04a87c369ab 100644 --- a/docs/en/sql-reference/functions/index.md +++ b/docs/en/sql-reference/functions/index.md @@ -24,7 +24,7 @@ All expressions in a query that have the same AST (the same record or same resul ## Types of Results -All functions return a single return as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function. +All functions return a single value as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function. ## Constants diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index 29df041ccc6..fb16dacb7c8 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -279,7 +279,7 @@ For columns with a new or updated `MATERIALIZED` value expression, all existing For columns with a new or updated `DEFAULT` value expression, the behavior depends on the ClickHouse version: - In ClickHouse < v24.2, all existing rows are rewritten. -- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression. +- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was calculated, ClickHouse changes it to the new or updated `DEFAULT` value expression.
 Syntax:

diff --git a/docs/en/sql-reference/statements/create/index.md b/docs/en/sql-reference/statements/create/index.md
index fa39526a53e..5854d7cf9d2 100644
--- a/docs/en/sql-reference/statements/create/index.md
+++ b/docs/en/sql-reference/statements/create/index.md
@@ -6,16 +6,4 @@ sidebar_label: CREATE

 # CREATE Queries

-Create queries make a new entity of one of the following kinds:
-
-- [DATABASE](/docs/en/sql-reference/statements/create/database.md)
-- [TABLE](/docs/en/sql-reference/statements/create/table.md)
-- [VIEW](/docs/en/sql-reference/statements/create/view.md)
-- [DICTIONARY](/docs/en/sql-reference/statements/create/dictionary.md)
-- [FUNCTION](/docs/en/sql-reference/statements/create/function.md)
-- [USER](/docs/en/sql-reference/statements/create/user.md)
-- [ROLE](/docs/en/sql-reference/statements/create/role.md)
-- [ROW POLICY](/docs/en/sql-reference/statements/create/row-policy.md)
-- [QUOTA](/docs/en/sql-reference/statements/create/quota.md)
-- [SETTINGS PROFILE](/docs/en/sql-reference/statements/create/settings-profile.md)
-- [NAMED COLLECTION](/docs/en/sql-reference/statements/create/named-collection.md)
+CREATE queries create (for example) new [databases](/docs/en/sql-reference/statements/create/database.md), [tables](/docs/en/sql-reference/statements/create/table.md) and [views](/docs/en/sql-reference/statements/create/view.md).
diff --git a/docs/en/sql-reference/statements/index.md b/docs/en/sql-reference/statements/index.md
index 5aa61cf8d21..f288b30b27b 100644
--- a/docs/en/sql-reference/statements/index.md
+++ b/docs/en/sql-reference/statements/index.md
@@ -6,27 +6,4 @@ sidebar_label: List of statements

 # ClickHouse SQL Statements

-Statements represent various kinds of action you can perform using SQL queries. Each kind of statement has it’s own syntax and usage details that are described separately:
-
-- [SELECT](/docs/en/sql-reference/statements/select/index.md)
-- [INSERT INTO](/docs/en/sql-reference/statements/insert-into.md)
-- [CREATE](/docs/en/sql-reference/statements/create/index.md)
-- [ALTER](/docs/en/sql-reference/statements/alter/index.md)
-- [SYSTEM](/docs/en/sql-reference/statements/system.md)
-- [SHOW](/docs/en/sql-reference/statements/show.md)
-- [GRANT](/docs/en/sql-reference/statements/grant.md)
-- [REVOKE](/docs/en/sql-reference/statements/revoke.md)
-- [ATTACH](/docs/en/sql-reference/statements/attach.md)
-- [CHECK TABLE](/docs/en/sql-reference/statements/check-table.md)
-- [DESCRIBE TABLE](/docs/en/sql-reference/statements/describe-table.md)
-- [DETACH](/docs/en/sql-reference/statements/detach.md)
-- [DROP](/docs/en/sql-reference/statements/drop.md)
-- [EXISTS](/docs/en/sql-reference/statements/exists.md)
-- [KILL](/docs/en/sql-reference/statements/kill.md)
-- [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md)
-- [RENAME](/docs/en/sql-reference/statements/rename.md)
-- [SET](/docs/en/sql-reference/statements/set.md)
-- [SET ROLE](/docs/en/sql-reference/statements/set-role.md)
-- [TRUNCATE](/docs/en/sql-reference/statements/truncate.md)
-- [USE](/docs/en/sql-reference/statements/use.md)
-- [EXPLAIN](/docs/en/sql-reference/statements/explain.md)
+Users interact with ClickHouse using SQL statements. ClickHouse supports common SQL statements like [SELECT](select/index.md) and [CREATE](create/index.md), but it also provides specialized statements like [KILL](kill.md) and [OPTIMIZE](optimize.md).
diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp
index d7190444f0b..05e1e61be7b 100644
--- a/programs/client/Client.cpp
+++ b/programs/client/Client.cpp
@@ -431,7 +431,7 @@ catch (const Exception & e)
     bool need_print_stack_trace = config().getBool("stacktrace", false) && e.code() != ErrorCodes::NETWORK_ERROR;
     std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl << std::endl;
     /// If exception code isn't zero, we should return non-zero return code anyway.
-    return e.code() ? e.code() : -1;
+    return static_cast<UInt8>(e.code()) ? e.code() : -1;
 }
 catch (...)
 {
@@ -1390,7 +1390,8 @@ int mainEntryClickHouseClient(int argc, char ** argv)
 catch (const DB::Exception & e)
 {
     std::cerr << DB::getExceptionMessage(e, false) << std::endl;
-    return 1;
+    auto code = DB::getCurrentExceptionCode();
+    return static_cast<UInt8>(code) ? code : 1;
 }
 catch (const boost::program_options::error & e)
 {
@@ -1399,7 +1400,8 @@ int mainEntryClickHouseClient(int argc, char ** argv)
 }
 catch (...)
 {
-    std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
-    return 1;
+    std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
+    auto code = DB::getCurrentExceptionCode();
+    return static_cast<UInt8>(code) ? code : 1;
 }
 }
diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp
index 819f16cfd64..e73f61dde83 100644
--- a/programs/compressor/Compressor.cpp
+++ b/programs/compressor/Compressor.cpp
@@ -9,14 +9,20 @@
 #include
 #include
 #include
+#include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include
 #include

@@ -29,33 +35,35 @@ namespace DB
 }
 }

+namespace CurrentMetrics
+{
+    extern const Metric LocalThread;
+    extern const Metric LocalThreadActive;
+    extern const Metric LocalThreadScheduled;
+}
+
 namespace
 {

-/// Outputs sizes of uncompressed and compressed blocks for compressed file.
+/// Outputs method, sizes of uncompressed and compressed blocks for compressed file.
 void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out)
 {
     while (!in.eof())
     {
-        in.ignore(16);    /// checksum
-
-        char header[COMPRESSED_BLOCK_HEADER_SIZE];
-        in.readStrict(header, COMPRESSED_BLOCK_HEADER_SIZE);
-
-        UInt32 size_compressed = unalignedLoad<UInt32>(&header[1]);
+        UInt32 size_compressed;
+        UInt32 size_decompressed;
+        auto codec = DB::getCompressionCodecForFile(in, size_compressed, size_decompressed, true /* skip_to_next_block */);

         if (size_compressed > DBMS_MAX_COMPRESSED_SIZE)
             throw DB::Exception(DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED, "Too large size_compressed. Most likely corrupted data.");

-        UInt32 size_decompressed = unalignedLoad<UInt32>(&header[5]);
-
+        DB::writeText(queryToString(codec->getFullCodecDesc()), out);
+        DB::writeChar('\t', out);
         DB::writeText(size_decompressed, out);
         DB::writeChar('\t', out);
         DB::writeText(size_compressed, out);
         DB::writeChar('\n', out);
-
-        in.ignore(size_compressed - COMPRESSED_BLOCK_HEADER_SIZE);
     }
 }

@@ -77,11 +85,12 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
         ("decompress,d", "decompress")
         ("offset-in-compressed-file", po::value<size_t>()->default_value(0ULL), "offset to the compressed block (i.e. physical file offset)")
         ("offset-in-decompressed-block", po::value<size_t>()->default_value(0ULL), "offset to the decompressed block (i.e. virtual offset)")
-        ("block-size,b", po::value<unsigned>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
+        ("block-size,b", po::value<size_t>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
         ("hc", "use LZ4HC instead of LZ4")
         ("zstd", "use ZSTD instead of LZ4")
         ("codec", po::value<std::vector<std::string>>()->multitoken(), "use codecs combination instead of LZ4")
         ("level", po::value<int>(), "compression level for codecs specified via flags")
+        ("threads", po::value<size_t>()->default_value(1), "number of threads for parallel compression")
         ("none", "use no compression instead of LZ4")
         ("stat", "print block statistics of compressed data")
         ("stacktrace", "print stacktrace of exception")
@@ -109,7 +118,8 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
         bool stat_mode = options.count("stat");
         bool use_none = options.count("none");
         print_stacktrace = options.count("stacktrace");
-        unsigned block_size = options["block-size"].as<unsigned>();
+        size_t block_size = options["block-size"].as<size_t>();
+        size_t num_threads = options["threads"].as<size_t>();
         std::vector<std::string> codecs;
         if (options.count("codec"))
             codecs = options["codec"].as<std::vector<std::string>>();
@@ -117,6 +127,12 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
         if ((use_lz4hc || use_zstd || use_none) && !codecs.empty())
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, codec flags like --zstd and --codec options are mutually exclusive");

+        if (num_threads < 1)
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid value of `threads` parameter");
+
+        if (num_threads > 1 && decompress)
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parallel mode is only implemented for compression (not for decompression)");
+
         if (!codecs.empty() && options.count("level"))
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, --level is not compatible with --codec list");

@@ -145,7 +161,6 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
         else
             codec = CompressionCodecFactory::instance().get(method_family, level);

-
         std::unique_ptr<ReadBufferFromFileDescriptor> rb;
         std::unique_ptr<WriteBufferFromFileDescriptor> wb;

@@ -186,9 +201,20 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
         else
         {
             /// Compression
-            CompressedWriteBuffer to(*wb, codec, block_size);
-            copyData(*rb, to);
-            to.finalize();
+
+            if (num_threads == 1)
+            {
+                CompressedWriteBuffer to(*wb, codec, block_size);
+                copyData(*rb, to);
+                to.finalize();
+            }
+            else
+            {
+                ThreadPool pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads);
+                ParallelCompressedWriteBuffer to(*wb, codec, block_size, num_threads, pool);
+                copyData(*rb, to);
+                to.finalize();
+            }
         }
     }
     catch (...)
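Editorial note: the parallel path above hands whole blocks to a `ThreadPool` via `ParallelCompressedWriteBuffer`, which is why it is restricted to compression, since blocks can be compressed independently but must be emitted in order. Below is a minimal sketch of that idea, independent of ClickHouse internals; `compress_block` is a hypothetical stand-in for a real codec, and a production version would bound concurrency with a pool rather than bare `std::async`.

```cpp
#include <cstddef>
#include <future>
#include <string>
#include <vector>

/// Hypothetical stand-in for a real codec call (LZ4, ZSTD, ...).
std::string compress_block(std::string block);

/// Compress the input in independent fixed-size blocks, one task per block,
/// then concatenate results in submission order so the output layout is
/// deterministic regardless of which block finishes first.
std::string parallel_compress(const std::string & input, size_t block_size)
{
    std::vector<std::future<std::string>> futures;
    for (size_t offset = 0; offset < input.size(); offset += block_size)
        futures.push_back(std::async(std::launch::async, compress_block, input.substr(offset, block_size)));

    std::string out;
    for (auto & f : futures)
        out += f.get(); /// appended in input order, not completion order
    return out;
}
```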
diff --git a/programs/disks/DisksApp.cpp b/programs/disks/DisksApp.cpp
index 610d8eaa638..d6541e99288 100644
--- a/programs/disks/DisksApp.cpp
+++ b/programs/disks/DisksApp.cpp
@@ -546,16 +546,18 @@ int mainEntryClickHouseDisks(int argc, char ** argv)
 catch (const DB::Exception & e)
 {
     std::cerr << DB::getExceptionMessage(e, false) << std::endl;
-    return 0;
+    auto code = DB::getCurrentExceptionCode();
+    return static_cast<UInt8>(code) ? code : 1;
 }
 catch (const boost::program_options::error & e)
 {
     std::cerr << "Bad arguments: " << e.what() << std::endl;
-    return 0;
+    return DB::ErrorCodes::BAD_ARGUMENTS;
 }
 catch (...)
 {
     std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
-    return 0;
+    auto code = DB::getCurrentExceptionCode();
+    return static_cast<UInt8>(code) ? code : 1;
 }
 }
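Editorial note on the recurring `static_cast<UInt8>(code) ? code : 1` pattern in these hunks: the operating system reports only the low 8 bits of a process exit status, so a nonzero exception code that happens to be a multiple of 256 would previously surface to the shell as 0, i.e. success. Casting to `UInt8` before the truthiness check catches exactly that case and substitutes a fallback. A self-contained demonstration (the helper name is ours, not from the patch):

```cpp
#include <cstdio>

/// Pick an exit status whose low byte is guaranteed nonzero when an error occurred.
int normalize_exit_code(int code, int fallback)
{
    /// The parent process sees only `status & 0xFF`, so a code like 512 (0x200)
    /// would otherwise be observed as 0 ("success").
    return static_cast<unsigned char>(code) ? code : fallback;
}

int main()
{
    std::printf("%d\n", normalize_exit_code(394, 1)); /// 394: low byte nonzero, kept as-is
    std::printf("%d\n", normalize_exit_code(512, 1)); /// 512: low byte is 0, mapped to 1
    std::printf("%d\n", normalize_exit_code(0, 1));   /// no code at all: mapped to 1
}
```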
diff --git a/programs/keeper-client/KeeperClient.cpp b/programs/keeper-client/KeeperClient.cpp
index 2a426fad7ac..4bdddaec59c 100644
--- a/programs/keeper-client/KeeperClient.cpp
+++ b/programs/keeper-client/KeeperClient.cpp
@@ -448,7 +448,8 @@ int mainEntryClickHouseKeeperClient(int argc, char ** argv)
 catch (const DB::Exception & e)
 {
     std::cerr << DB::getExceptionMessage(e, false) << std::endl;
-    return 1;
+    auto code = DB::getCurrentExceptionCode();
+    return static_cast<UInt8>(code) ? code : 1;
 }
 catch (const boost::program_options::error & e)
 {
@@ -458,6 +459,7 @@ int mainEntryClickHouseKeeperClient(int argc, char ** argv)
 catch (...)
 {
     std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
-    return 1;
+    auto code = DB::getCurrentExceptionCode();
+    return static_cast<UInt8>(code) ? code : 1;
 }
 }
diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp
index 74af9950e13..936ce15f4c9 100644
--- a/programs/keeper/Keeper.cpp
+++ b/programs/keeper/Keeper.cpp
@@ -81,7 +81,7 @@ int mainEntryClickHouseKeeper(int argc, char ** argv)
 {
     std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
     auto code = DB::getCurrentExceptionCode();
-    return code ? code : 1;
+    return static_cast<UInt8>(code) ? code : 1;
 }
 }

@@ -672,7 +672,7 @@ catch (...)
     /// Poco does not provide stacktrace.
     tryLogCurrentException("Application");
     auto code = getCurrentExceptionCode();
-    return code ? code : -1;
+    return static_cast<UInt8>(code) ? code : -1;
 }
diff --git a/programs/library-bridge/LibraryBridge.cpp b/programs/library-bridge/LibraryBridge.cpp
index 261484ac744..62dbd12aaf0 100644
--- a/programs/library-bridge/LibraryBridge.cpp
+++ b/programs/library-bridge/LibraryBridge.cpp
@@ -13,7 +13,7 @@ int mainEntryClickHouseLibraryBridge(int argc, char ** argv)
 {
     std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
     auto code = DB::getCurrentExceptionCode();
-    return code ? code : 1;
+    return static_cast<UInt8>(code) ? code : 1;
 }
 }
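A design observation: the normalization is re-implemented inline in every entry point (client, disks, keeper, bridges, and so on). A hypothetical refactoring, not part of this patch, would centralize it in one shared helper so a future entry point cannot forget the `UInt8` guard:

```cpp
#include <iostream>
#include <stdexcept>

/// Hypothetical shared helper; the patch itself repeats the expression inline.
int exceptionExitStatus(int code, int fallback)
{
    return static_cast<unsigned char>(code) ? code : fallback;
}

int mainEntryExample(int /*argc*/, char ** /*argv*/)
{
    try
    {
        throw std::runtime_error("something failed"); /// stand-in for real work
    }
    catch (...)
    {
        std::cerr << "error: unhandled exception\n";
        return exceptionExitStatus(/*code=*/0, /*fallback=*/1); /// unknown code maps to 1
    }
}

int main(int argc, char ** argv)
{
    return mainEntryExample(argc, argv);
}
```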
diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp
index 1dcef5eb25e..e6f8ecef097 100644
--- a/programs/local/LocalServer.cpp
+++ b/programs/local/LocalServer.cpp
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -22,7 +23,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -31,7 +31,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -50,7 +49,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -71,9 +69,11 @@ namespace CurrentMetrics

 namespace DB
 {
+
 namespace Setting
 {
     extern const SettingsBool allow_introspection_functions;
+    extern const SettingsBool implicit_select;
     extern const SettingsLocalFSReadMethod storage_file_read_method;
 }

@@ -126,6 +126,7 @@ void applySettingsOverridesForLocal(ContextMutablePtr context)

     settings[Setting::allow_introspection_functions] = true;
     settings[Setting::storage_file_read_method] = LocalFSReadMethod::mmap;
+    settings[Setting::implicit_select] = true;

     context->setSettings(settings);
 }
@@ -257,12 +258,12 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
     return system_database;
 }

-static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
+static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context)
 {
-    auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
-    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
-    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
-    return databaseCombiner;
+    auto overlay = std::make_shared<DatabasesOverlay>(name_, context);
+    overlay->registerNextDatabase(std::make_shared<DatabaseAtomic>(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context));
+    overlay->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context));
+    return overlay;
 }

 /// If path is specified and not empty, will try to setup server environment and load existing metadata
@@ -615,12 +616,14 @@ catch (const DB::Exception & e)
 {
     bool need_print_stack_trace = getClientConfiguration().getBool("stacktrace", false);
     std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl;
-    return e.code() ? e.code() : -1;
+    auto code = DB::getCurrentExceptionCode();
+    return static_cast<UInt8>(code) ? code : 1;
 }
 catch (...)
 {
-    std::cerr << getCurrentExceptionMessage(false) << std::endl;
-    return getCurrentExceptionCode();
+    std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
+    auto code = DB::getCurrentExceptionCode();
+    return static_cast<UInt8>(code) ? code : 1;
 }

 void LocalServer::updateLoggerLevel(const String & logs_level)
@@ -809,7 +812,12 @@ void LocalServer::processConfig()
     DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase();

     std::string default_database = server_settings[ServerSetting::default_database];
-    DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
+    {
+        DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context);
+        if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil)
+            DatabaseCatalog::instance().addUUIDMapping(uuid);
+        DatabaseCatalog::instance().attachDatabase(default_database, database);
+    }
     global_context->setCurrentDatabase(default_database);

     if (getClientConfiguration().has("path"))
@@ -1029,7 +1037,7 @@ int mainEntryClickHouseLocal(int argc, char ** argv)
 {
     std::cerr << DB::getExceptionMessage(e, false) << std::endl;
     auto code = DB::getCurrentExceptionCode();
-    return code ? code : 1;
+    return static_cast<UInt8>(code) ? code : 1;
 }
 catch (const boost::program_options::error & e)
 {
@@ -1040,6 +1048,6 @@ int mainEntryClickHouseLocal(int argc, char ** argv)
 {
     std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
     auto code = DB::getCurrentExceptionCode();
-    return code ? code : 1;
+    return static_cast<UInt8>(code) ? code : 1;
 }
 }
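Editorial note: an overlay database resolves names by asking each registered database in order, which is why the patch registers the disk-backed database (with a real UUID) before the filesystem fallback, and why `processConfig` now records that UUID in the catalog before attaching. A minimal sketch of the lookup-order idea, with hypothetical types rather than the real `DatabasesOverlay` interface:

```cpp
#include <memory>
#include <optional>
#include <string>
#include <vector>

/// Hypothetical backend interface, standing in for IDatabase.
struct DatabaseBackend
{
    virtual ~DatabaseBackend() = default;
    virtual std::optional<std::string> tryGetTable(const std::string & name) const = 0;
};

class Overlay
{
public:
    void registerNextDatabase(std::shared_ptr<DatabaseBackend> db) { backends.push_back(std::move(db)); }

    std::optional<std::string> tryGetTable(const std::string & name) const
    {
        /// Registration order defines priority: the first backend that
        /// resolves the name wins, later ones act as fallbacks.
        for (const auto & db : backends)
            if (auto table = db->tryGetTable(name))
                return table;
        return std::nullopt;
    }

private:
    std::vector<std::shared_ptr<DatabaseBackend>> backends;
};
```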
diff --git a/programs/main.cpp b/programs/main.cpp
index 02ea1471108..d15c20867d1 100644
--- a/programs/main.cpp
+++ b/programs/main.cpp
@@ -1,27 +1,22 @@
-#include
-#include
+#include
+#include
+#include
+#include
-#include
-#include
-#include
-#include
-#include
-#include /// pair
-
-#include
+#if defined(SANITIZE_COVERAGE)
+#    include
+#endif

 #include "config.h"
 #include "config_tools.h"

-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-
+#include
+#include
+#include
+#include
+#include
+#include /// pair
+#include

 /// Universal executable for various clickhouse applications
 int mainEntryClickHouseServer(int argc, char ** argv);
@@ -238,9 +233,12 @@ int main(int argc_, char ** argv_)
     /// clickhouse             # spawn local
     /// clickhouse local       # spawn local
     /// clickhouse "select ..."  # spawn local
+    /// clickhouse /tmp/repro --enable-analyzer
     ///
-    if (main_func == printHelp && !argv.empty() && (argv.size() == 1 || argv[1][0] == '-'
-        || std::string_view(argv[1]).contains(' ')))
+    std::error_code ec;
+    if (main_func == printHelp && !argv.empty()
+        && (argv.size() == 1 || argv[1][0] == '-' || std::string_view(argv[1]).contains(' ')
+            || std::filesystem::is_regular_file(std::filesystem::path{argv[1]}, ec)))
     {
         main_func = mainEntryClickHouseLocal;
     }
diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp
index 324a4573b24..6bd3865b591 100644
--- a/programs/obfuscator/Obfuscator.cpp
+++ b/programs/obfuscator/Obfuscator.cpp
@@ -1480,5 +1480,5 @@ catch (...)
 {
     std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
     auto code = DB::getCurrentExceptionCode();
-    return code ? code : 1;
+    return static_cast<UInt8>(code) ? code : 1;
 }
diff --git a/programs/odbc-bridge/ODBCBridge.cpp b/programs/odbc-bridge/ODBCBridge.cpp
index 096d1b2dcca..e5ae3272d40 100644
--- a/programs/odbc-bridge/ODBCBridge.cpp
+++ b/programs/odbc-bridge/ODBCBridge.cpp
@@ -13,7 +13,7 @@ int mainEntryClickHouseODBCBridge(int argc, char ** argv)
 {
     std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
     auto code = DB::getCurrentExceptionCode();
-    return code ? code : 1;
+    return static_cast<UInt8>(code) ? code : 1;
 }
 }
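Editorial note on the widened dispatch in `main.cpp`: an invocation like `clickhouse /tmp/repro --enable-analyzer` is now handed to clickhouse-local whenever the first argument names an existing regular file. Note the `(path, ec)` overload of `std::filesystem::is_regular_file`, which reports failures through the `std::error_code` instead of throwing, a sensible choice when sniffing arbitrary argv strings. A standalone approximation of the test:

```cpp
#include <filesystem>
#include <string_view>
#include <system_error>

/// Approximation of the patched condition: should the first CLI argument
/// cause the universal binary to act as clickhouse-local?
bool spawns_local(std::string_view first_arg)
{
    if (first_arg.empty())
        return true; /// no arguments at all: plain `clickhouse` also spawns local

    std::error_code ec; /// the (path, ec) overload never throws; ec absorbs errors
    return first_arg.front() == '-'                                               /// looks like an option
        || first_arg.find(' ') != std::string_view::npos                          /// looks like an inline query
        || std::filesystem::is_regular_file(std::filesystem::path{first_arg}, ec); /// an existing script file
}
```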
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index 5159f95419e..68f262079ff 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -343,7 +343,7 @@ int mainEntryClickHouseServer(int argc, char ** argv)
 {
     std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
     auto code = DB::getCurrentExceptionCode();
-    return code ? code : 1;
+    return static_cast<UInt8>(code) ? code : 1;
 }
 }

@@ -2537,7 +2537,7 @@ catch (...)
     /// Poco does not provide stacktrace.
     tryLogCurrentException("Application");
     auto code = getCurrentExceptionCode();
-    return code ? code : -1;
+    return static_cast<UInt8>(code) ? code : -1;
 }

 std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
diff --git a/programs/su/su.cpp b/programs/su/su.cpp
index 33d929898f4..40242d0687f 100644
--- a/programs/su/su.cpp
+++ b/programs/su/su.cpp
@@ -59,7 +59,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
             throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid);

         if (!result)
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
+        {
+            if (0 != getgrgid_r(gid, &entry, buf.get(), buf_size, &result))
+                throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrgid_r' to obtain the group from gid ({})", arg_gid);
+
+            if (!result)
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
+        }

         gid = entry.gr_gid;
     }
@@ -84,7 +90,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
             throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid);

         if (!result)
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
+        {
+            if (0 != getpwuid_r(uid, &entry, buf.get(), buf_size, &result))
+                throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwuid_r' to obtain the user from uid ({})", uid);
+
+            if (!result)
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
+        }

         uid = entry.pw_uid;
     }
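Editorial note: the `clickhouse su` change adds a second-chance lookup. If `getgrnam_r`/`getpwnam_r` fail to resolve the argument as a name, the code retries it as a numeric gid/uid via `getgrgid_r`/`getpwuid_r` before giving up. A simplified sketch of that fallback shape (fixed-size buffer, no errno handling, hypothetical helper name):

```cpp
#include <pwd.h>
#include <sys/types.h>
#include <cstdlib>
#include <stdexcept>
#include <string>
#include <vector>

/// Resolve a user given either a name ("clickhouse") or a numeric id ("101").
uid_t resolve_user(const std::string & arg)
{
    std::vector<char> buf(16384);
    passwd entry{};
    passwd * result = nullptr;

    getpwnam_r(arg.c_str(), &entry, buf.data(), buf.size(), &result);
    if (!result)
    {
        /// Name lookup failed: retry, interpreting the argument as a numeric uid.
        auto uid = static_cast<uid_t>(std::strtoul(arg.c_str(), nullptr, 10));
        getpwuid_r(uid, &entry, buf.data(), buf.size(), &result);
        if (!result)
            throw std::runtime_error("User " + arg + " is not found in the system");
    }
    return entry.pw_uid;
}
```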
diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp
index 5819c533fd9..ad1fecac784 100644
--- a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp
+++ b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp
@@ -22,6 +22,13 @@ namespace ErrorCodes
 namespace
 {

+/** Due to a lack of proper code review, this code was contributed with a multiplication of template instantiations
+  * over all pairs of data types, and we deeply regret that.
+  *
+  * We cannot remove all combinations, because the binary representation of serialized data has to remain the same,
+  * but we can partially heal the wound by treating unsigned and signed data types in the same way.
+  */
+
 template <typename ValueType, typename TimestampType>
 struct AggregationFunctionDeltaSumTimestampData
 {
@@ -37,23 +44,22 @@ template <typename ValueType, typename TimestampType>
 class AggregationFunctionDeltaSumTimestamp final : public IAggregateFunctionDataHelper<
         AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
-        AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
-    >
+        AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>
 {
 public:
     AggregationFunctionDeltaSumTimestamp(const DataTypes & arguments, const Array & params)
         : IAggregateFunctionDataHelper<
             AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
-            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
-        >{arguments, params, createResultType()}
-    {}
+            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{arguments, params, createResultType()}
+    {
+    }

     AggregationFunctionDeltaSumTimestamp()
         : IAggregateFunctionDataHelper<
             AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
-            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
-        >{}
-    {}
+            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{}
+    {
+    }

     bool allocatesMemoryInArena() const override { return false; }

@@ -63,8 +69,8 @@ public:
     void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
     {
-        auto value = assert_cast<const ColumnVector<ValueType> &>(*columns[0]).getData()[row_num];
-        auto ts = assert_cast<const ColumnVector<TimestampType> &>(*columns[1]).getData()[row_num];
+        auto value = unalignedLoad<ValueType>(columns[0]->getRawData().data() + row_num * sizeof(ValueType));
+        auto ts = unalignedLoad<TimestampType>(columns[1]->getRawData().data() + row_num * sizeof(TimestampType));

         auto & data = this->data(place);
@@ -172,10 +178,48 @@ public:

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
     {
-        assert_cast<ColumnVector<ValueType> &>(to).getData().push_back(this->data(place).sum);
+        static_cast<ColumnFixedSizeHelper &>(to).template insertRawData<sizeof(ValueType)>(
+            reinterpret_cast<const char *>(&this->data(place).sum));
     }
 };

+
+template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
+IAggregateFunction * createWithTwoTypesSecond(const IDataType & second_type, TArgs && ... args)
+{
+    WhichDataType which(second_type);
+
+    if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
+    if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate<FirstType, UInt64>(args...);
+    if (which.idx == TypeIndex::Int32) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
+    if (which.idx == TypeIndex::Int64) return new AggregateFunctionTemplate<FirstType, UInt64>(args...);
+    if (which.idx == TypeIndex::Float32) return new AggregateFunctionTemplate<FirstType, Float32>(args...);
+    if (which.idx == TypeIndex::Float64) return new AggregateFunctionTemplate<FirstType, Float64>(args...);
+    if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(args...);
+    if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
+
+    return nullptr;
+}
+
+template
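The hunk is cut off above (the trailing `+template` begins the next helper), but the dispatch idea is already visible: each second-argument type is mapped to a canonical type with the same in-memory representation, so signed integers reuse the unsigned instantiations and date types reuse raw integers, and far fewer templates are compiled. The same idea expressed at compile time, as an illustrative sketch rather than the patch's runtime `TypeIndex` dispatch:

```cpp
#include <cstdint>
#include <type_traits>

/// Map a concrete type to a canonical type with the identical binary layout:
/// 4-byte integers (signed or unsigned) share uint32_t, 8-byte ones uint64_t.
template <typename T>
using canonical_t = std::conditional_t<
    std::is_integral_v<T> && sizeof(T) == 4, uint32_t,
    std::conditional_t<std::is_integral_v<T> && sizeof(T) == 8, uint64_t, T>>;

/// Any template instantiated only over canonical types is compiled once per
/// representation instead of once per (signed, unsigned, date, ...) variant.
template <typename T>
struct Accumulator
{
    T sum{};
    void add(T value) { sum += value; }
};

/// Int32 and UInt32 columns share a single Accumulator<uint32_t> instantiation:
static_assert(std::is_same_v<canonical_t<int32_t>, canonical_t<uint32_t>>);
static_assert(std::is_same_v<Accumulator<canonical_t<int64_t>>, Accumulator<uint64_t>>);
```

This trick is only sound while nothing type-specific (overflow checks, formatting) leaks into the shared instantiation, and, as the comment in the patch stresses, while the serialized byte representation stays identical.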