Merge branch '24.3' into backport/24.3/64517

Nikolai Kochetov 2024-11-13 15:48:07 +01:00
commit d1e0feccb1
382 changed files with 6039 additions and 2351 deletions


@@ -4,6 +4,5 @@ self-hosted-runner:
- func-tester
- func-tester-aarch64
- fuzzer-unit-tester
- stress-tester
- style-checker
- style-checker-aarch64

.github/actions/debug/action.yml vendored Normal file

@@ -0,0 +1,34 @@
name: DebugInfo
description: Prints workflow debug info
runs:
using: "composite"
steps:
- name: Envs, event.json and contexts
shell: bash
run: |
echo '::group::Environment variables'
env | sort
echo '::endgroup::'
echo '::group::event.json'
python3 -m json.tool "$GITHUB_EVENT_PATH"
echo '::endgroup::'
cat << 'EOF'
::group::github context
${{ toJSON(github) }}
::endgroup::
::group::env context
${{ toJSON(env) }}
::endgroup::
::group::runner context
${{ toJSON(runner) }}
::endgroup::
::group::job context
${{ toJSON(job) }}
::endgroup::
EOF
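The quoted heredoc (`<< 'EOF'`) is what makes the last step safe: `${{ toJSON(...) }}` is expanded by the Actions runner before the shell runs, and the quoting keeps the shell from evaluating anything inside the resulting JSON. A rough local sketch of the `::group::` workflow commands the action relies on (illustrative only, not part of the action):

```bash
#!/usr/bin/env bash
# Between ::group:: and ::endgroup::, GitHub Actions folds log lines into a
# collapsible section; outside of Actions the markers just print as text.
echo '::group::Environment variables'
env | sort
echo '::endgroup::'
```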


@@ -21,6 +21,8 @@ jobs:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Debug Info
uses: ./.github/actions/debug
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -234,18 +236,26 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsRelease:
needs: [RunConfig, BuilderDebRelease]
IntegrationTestsAsanOldAnalyzer:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
test_name: Integration tests (asan, old analyzer)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsTsan:
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !failure() && !cancelled() }}
@@ -255,7 +265,8 @@ jobs:
- FunctionalStatelessTestAsan
- FunctionalStatefulTestDebug
- StressTestTsan
- IntegrationTestsRelease
- IntegrationTestsTsan
- IntegrationTestsAsanOldAnalyzer
- CompatibilityCheckX86
- CompatibilityCheckAarch64
runs-on: [self-hosted, style-checker]


@@ -33,6 +33,8 @@ jobs:
clear-repository: true
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
- name: Debug Info
uses: ./.github/actions/debug
- name: Cherry pick
run: |
cd "$GITHUB_WORKSPACE/tests/ci"


@@ -15,14 +15,14 @@ jobs:
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Debug Info
uses: ./.github/actions/debug
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"


@@ -24,6 +24,8 @@ jobs:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Debug Info
uses: ./.github/actions/debug
- name: PrepareRunConfig
id: runconfig
run: |


@@ -21,14 +21,14 @@ jobs:
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Debug Info
uses: ./.github/actions/debug
- name: Labels check
if: ${{ github.event_name != 'merge_group' }}
run: |


@@ -24,6 +24,8 @@ jobs:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Debug Info
uses: ./.github/actions/debug
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -134,6 +136,7 @@ jobs:
with:
build_name: package_debug
data: ${{ needs.RunConfig.outputs.data }}
force: true
BuilderBinDarwin:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
@@ -386,7 +389,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (asan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestTsan:
needs: [RunConfig, BuilderDebTsan]
@@ -394,7 +397,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestMsan:
needs: [RunConfig, BuilderDebMsan]
@@ -402,7 +405,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (msan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestUBsan:
needs: [RunConfig, BuilderDebUBsan]
@@ -410,7 +413,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (ubsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestDebug:
needs: [RunConfig, BuilderDebDebug]
@@ -418,7 +421,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (debug)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
@@ -429,7 +432,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsAnalyzerAsan:
needs: [RunConfig, BuilderDebAsan]
@@ -437,7 +440,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan, old analyzer)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsTsan:
needs: [RunConfig, BuilderDebTsan]
@@ -445,7 +448,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsRelease:
needs: [RunConfig, BuilderDebRelease]
@@ -453,7 +456,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !failure() && !cancelled() }}


@@ -11,6 +11,7 @@ name: Build docker images
required: false
type: boolean
default: false
jobs:
DockerBuildAarch64:
runs-on: [self-hosted, style-checker-aarch64]


@@ -62,8 +62,6 @@ jobs:
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
@@ -72,6 +70,8 @@ jobs:
submodules: ${{inputs.submodules}}
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
- name: Debug Info
uses: ./.github/actions/debug
- name: Set build envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'

.gitmodules vendored

@@ -369,3 +369,6 @@
[submodule "contrib/idna"]
path = contrib/idna
url = https://github.com/ada-url/idna.git
[submodule "contrib/rust_vendor"]
path = contrib/rust_vendor
url = https://github.com/ClickHouse/rust_vendor.git


@@ -367,11 +367,15 @@ namespace PackedZeroTraits
{
template <typename Second, template <typename, typename> class PackedPairNoInit>
inline bool check(const PackedPairNoInit<StringRef, Second> p)
{ return 0 == p.key.size; }
{
return 0 == p.key.size;
}
template <typename Second, template <typename, typename> class PackedPairNoInit>
inline void set(PackedPairNoInit<StringRef, Second> & p)
{ p.key.size = 0; }
{
p.key.size = 0;
}
}


@@ -2,11 +2,11 @@
# NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54484)
SET(VERSION_REVISION 54492)
SET(VERSION_MAJOR 24)
SET(VERSION_MINOR 3)
SET(VERSION_PATCH 6)
SET(VERSION_GITHASH fe54cead6b6eaa09f22cee3ceb9f607eecadc859)
SET(VERSION_DESCRIBE v24.3.6.1-lts)
SET(VERSION_STRING 24.3.6.1)
SET(VERSION_PATCH 14)
SET(VERSION_GITHASH 7acabd773896fb8c2ab383cd005e3e77ebb556c3)
SET(VERSION_DESCRIBE v24.3.14.1-lts)
SET(VERSION_STRING 24.3.14.1)
# end of autochange

contrib/corrosion vendored

@@ -1 +1 @@
Subproject commit d9dfdefaa3d9ec4ba1245c7070727359c65c7869
Subproject commit d5bdbfacb4d2c013f7bebabc6c95a118dc1e9fe1

contrib/rust_vendor vendored Submodule

@@ -0,0 +1 @@
Subproject commit 08e82ca6543683abe4770305ad811a942186a520

contrib/sysroot vendored

@@ -1 +1 @@
Subproject commit b5fcabb24d28fc33024291b2c6c1abd807c7dba8
Subproject commit 39c4713334f9f156dbf508f548d510d9129a657c


@@ -1,21 +1,31 @@
#!/bin/bash
set +x
set -eo pipefail
shopt -s nullglob
DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated
# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as the desired user
# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
fi
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# support `docker run --user=xxx:xxxx`
if [[ "$(id -u)" = "0" ]]; then
if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
USER=0
GROUP=0
else
USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
fi
if command -v gosu &> /dev/null; then
gosu="gosu $USER:$GROUP"
elif command -v su-exec &> /dev/null; then
@@ -84,11 +94,11 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
# There is a config file. It is already tested with gosu (if it is readable by the keeper user)
if [ -f "$KEEPER_CONFIG" ]; then
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
exec $gosu clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
fi
# There is no config file. Will use embedded one
exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
exec $gosu clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image


@@ -13,8 +13,8 @@ ENV CARGO_HOME=/rust/cargo
ENV PATH="/rust/cargo/bin:${PATH}"
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
chmod 777 -R /rust && \
rustup toolchain install nightly-2023-07-04 && \
rustup default nightly-2023-07-04 && \
rustup toolchain install nightly-2024-04-01 && \
rustup default nightly-2024-04-01 && \
rustup toolchain remove stable && \
rustup component add rust-src && \
rustup target add x86_64-unknown-linux-gnu && \


@@ -88,34 +88,34 @@ RUN if [ -n "${single_binary_location_url}" ]; then \
#docker-official-library:on
# A fallback to installation from ClickHouse repository
RUN if ! clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \
apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& for package in ${PACKAGES}; do \
packages="${packages} ${package}=${VERSION}" \
; done \
&& apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get autoremove --purge -yq libksba8 \
&& apt-get autoremove -yq \
; fi
# It works unless the clickhouse binary already exists
RUN clickhouse local -q 'SELECT 1' >/dev/null 2>&1 && exit 0 || : \
; apt-get update \
&& apt-get install --yes --no-install-recommends \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& for package in ${PACKAGES}; do \
packages="${packages} ${package}=${VERSION}" \
; done \
&& apt-get install --yes --no-install-recommends ${packages} || exit 1 \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get autoremove --purge -yq dirmngr gnupg2 \
&& chmod ugo+Xrw -R /etc/clickhouse-server /etc/clickhouse-client
# The last chmod is here to make the next one a no-op in the docker official library Dockerfile
# post install
# we need to allow "others" access to clickhouse folder, because docker container
@@ -126,8 +126,6 @@ RUN clickhouse-local -q 'SELECT * FROM system.build_options' \
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV TZ UTC
RUN mkdir /docker-entrypoint-initdb.d


@@ -1,3 +1,11 @@
<!---
The README.md is generated by README.sh from the following sources:
- README.src/content.md
- README.src/license.md
If you want to change it, edit these files
-->
# ClickHouse Server Docker Image
## What is ClickHouse?
@@ -8,6 +16,7 @@ ClickHouse works 100-1000x faster than traditional database management systems,
For more information and documentation see https://clickhouse.com/.
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions
- The `latest` tag points to the latest release of the latest stable branch.
@@ -16,10 +25,12 @@ For more information and documentation see https://clickhouse.com/.
- The tag `head` is built from the latest commit to the default branch.
- Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`.
<!-- REMOVE UNTIL HERE -->
### Compatibility
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. This requires a docker version >= `20.10.10` that contains this [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run --security-opt seccomp=unconfined` instead; however, that has security implications.
## How to use this image
@@ -29,7 +40,7 @@ For more information and documentation see https://clickhouse.com/.
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking).
By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.
By default, the server instance started above runs as the `default` user without a password.
@@ -46,7 +57,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in
### connect to it using curl
```bash
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server curlimages/curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```
More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).
@@ -69,7 +80,7 @@ echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
`22.6.3.35`
or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
```bash
docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
@@ -87,8 +98,8 @@ Typically you may want to mount the following folders inside your container to a
```bash
docker run -d \
-v $(realpath ./ch_data):/var/lib/clickhouse/ \
-v $(realpath ./ch_logs):/var/log/clickhouse-server/ \
-v "$PWD/ch_data:/var/lib/clickhouse/" \
-v "$PWD/ch_logs:/var/log/clickhouse-server/" \
--name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
@@ -110,6 +121,8 @@ docker run -d \
--name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).
## Configuration
The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).
@@ -125,8 +138,8 @@ docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /pa
### Start server as custom user
```bash
# $(pwd)/data/clickhouse should exist and be owned by current user
docker run --rm --user ${UID}:${GID} --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
# $PWD/data/clickhouse should exist and be owned by current user
docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```
When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.
@@ -134,7 +147,7 @@ When you use the image with local directories mounted, you probably want to spec
### Start server from root (useful in case of enabled user namespace)
```bash
docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server-userns -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```
### How to create default database and user on starting

docker/server/README.sh Executable file

@@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -ueo pipefail
# A script to generate README.md, close to how it is done in https://github.com/docker-library/docs
WORKDIR=$(dirname "$0")
SCRIPT_NAME=$(basename "$0")
CONTENT=README.src/content.md
LICENSE=README.src/license.md
cd "$WORKDIR"
R=README.md
cat > "$R" <<EOD
<!---
The $R is generated by $SCRIPT_NAME from the following sources:
- $CONTENT
- $LICENSE
If you want to change it, edit these files
-->
EOD
cat "$CONTENT" >> "$R"
cat >> "$R" <<EOD
## License
$(cat $LICENSE)
EOD
# Remove the %%LOGO%% line and the line below it
sed -i '/^%%LOGO%%/,+1d' "$R"
# Replace each %%IMAGE%% with our `clickhouse/clickhouse-server`
sed -i '/%%IMAGE%%/s:%%IMAGE%%:clickhouse/clickhouse-server:g' "$R"
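A quick sanity check of what the two `sed` expressions do, assuming GNU sed for the `,+1` address form (the sample input is made up):

```bash
# /^%%LOGO%%/,+1d deletes the %%LOGO%% line plus the line after it;
# s:%%IMAGE%%:...:g then rewrites the image placeholder.
printf '%s\n' 'intro' '%%LOGO%%' 'logo caption' 'docker pull %%IMAGE%%' \
  | sed '/^%%LOGO%%/,+1d' \
  | sed 's:%%IMAGE%%:clickhouse/clickhouse-server:g'
# Output:
#   intro
#   docker pull clickhouse/clickhouse-server
```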


@@ -0,0 +1 @@
ClickHouse is the fastest and most resource efficient OSS database for real-time apps and analytics.


@@ -0,0 +1,170 @@
# ClickHouse Server Docker Image
## What is ClickHouse?
%%LOGO%%
ClickHouse is an open-source column-oriented DBMS (columnar database management system) for online analytical processing (OLAP) that allows users to generate analytical reports using SQL queries in real-time.
ClickHouse works 100-1000x faster than traditional database management systems, and processes hundreds of millions to over a billion rows and tens of gigabytes of data per server per second. With a widespread user base around the globe, the technology has received praise for its reliability, ease of use, and fault tolerance.
For more information and documentation see https://clickhouse.com/.
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions
- The `latest` tag points to the latest release of the latest stable branch.
- Branch tags like `22.2` point to the latest release of the corresponding branch.
- Full version tags like `22.2.3.5` point to the corresponding release.
- The tag `head` is built from the latest commit to the default branch.
- Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`.
<!-- REMOVE UNTIL HERE -->
### Compatibility
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. This requires a docker version >= `20.10.10` that contains this [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run --security-opt seccomp=unconfined` instead; however, that has security implications.
## How to use this image
### start server instance
```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.
By default, the server instance started above runs as the `default` user without a password.
### connect to it from a native client
```bash
docker run -it --rm --link some-clickhouse-server:clickhouse-server --entrypoint clickhouse-client %%IMAGE%% --host clickhouse-server
# OR
docker exec -it some-clickhouse-server clickhouse-client
```
More information about the [ClickHouse client](https://clickhouse.com/docs/en/interfaces/cli/).
### connect to it using curl
```bash
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```
More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).
### stopping / removing the container
```bash
docker stop some-clickhouse-server
docker rm some-clickhouse-server
```
### networking
You can expose your ClickHouse running in docker by [mapping a particular port](https://docs.docker.com/config/containers/container-networking/) from inside the container using host ports:
```bash
docker run -d -p 18123:8123 -p19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
```
`22.6.3.35`
Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
```bash
docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
echo 'SELECT version()' | curl 'http://localhost:8123/' --data-binary @-
```
`22.6.3.35`
### Volumes
Typically you may want to mount the following folders inside your container to achieve persistence:
- `/var/lib/clickhouse/` - main folder where ClickHouse stores the data
- `/var/log/clickhouse-server/` - logs
```bash
docker run -d \
-v "$PWD/ch_data:/var/lib/clickhouse/" \
-v "$PWD/ch_logs:/var/log/clickhouse-server/" \
--name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
You may also want to mount:
- `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
- `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
- `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).
### Linux capabilities
ClickHouse has some advanced functionality, which requires enabling several [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html).
They are optional and can be enabled using the following [docker command-line arguments](https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities):
```bash
docker run -d \
--cap-add=SYS_NICE --cap-add=NET_ADMIN --cap-add=IPC_LOCK \
--name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).
## Configuration
The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).
ClickHouse configuration is represented with a file "config.xml" ([documentation](https://clickhouse.com/docs/en/operations/configuration_files/))
### Start server instance with custom configuration
```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /path/to/your/config.xml:/etc/clickhouse-server/config.xml %%IMAGE%%
```
### Start server as custom user
```bash
# $PWD/data/clickhouse should exist and be owned by current user
docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%%
```
When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.
### Start server from root (useful in case of enabled user namespace)
```bash
docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%%
```
### How to create default database and user on starting
Sometimes you may want to create a user (user named `default` is used by default) and database on a container start. You can do it using environment variables `CLICKHOUSE_DB`, `CLICKHOUSE_USER`, `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT` and `CLICKHOUSE_PASSWORD`:
```bash
docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 -e CLICKHOUSE_PASSWORD=password -p 9000:9000/tcp %%IMAGE%%
```
## How to extend this image
To perform additional initialization in an image derived from this one, add one or more `*.sql`, `*.sql.gz`, or `*.sh` scripts under `/docker-entrypoint-initdb.d`. After the entrypoint calls `initdb`, it will run any `*.sql` files, run any executable `*.sh` scripts, and source any non-executable `*.sh` scripts found in that directory to do further initialization before starting the service.
Also, you can provide environment variables `CLICKHOUSE_USER` & `CLICKHOUSE_PASSWORD` that will be used for clickhouse-client during initialization.
For example, to add an additional user and database, add the following to `/docker-entrypoint-initdb.d/init-db.sh`:
```bash
#!/bin/bash
set -e
clickhouse client -n <<-EOSQL
CREATE DATABASE docker;
CREATE TABLE docker.docker (x Int32) ENGINE = Log;
EOSQL
```


@@ -0,0 +1 @@
https://github.com/ClickHouse/ClickHouse


@@ -0,0 +1 @@
View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.


@@ -0,0 +1,43 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 616 616">
<defs>
<style>
.cls-1 {
clip-path: url(#clippath);
}
.cls-2 {
fill: none;
}
.cls-2, .cls-3, .cls-4 {
stroke-width: 0px;
}
.cls-3 {
fill: #1e1e1e;
}
.cls-4 {
fill: #faff69;
}
</style>
<clipPath id="clippath">
<rect class="cls-2" x="83.23" y="71.73" width="472.55" height="472.55"/>
</clipPath>
</defs>
<g id="Layer_2" data-name="Layer 2">
<rect class="cls-4" width="616" height="616"/>
</g>
<g id="Layer_1" data-name="Layer 1">
<g class="cls-1">
<g>
<path class="cls-3" d="m120.14,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m208.75,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m297.35,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m385.94,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m474.56,268.36c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.65,2.09,4.65,4.66v79.28c0,2.57-2.09,4.66-4.65,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66v-79.28Z"/>
</g>
</g>
</g>
</svg>



@@ -0,0 +1 @@
[ClickHouse Inc.](%%GITHUB-REPO%%)


@@ -0,0 +1,7 @@
{
"hub": {
"categories": [
"databases-and-storage"
]
}
}


@@ -4,17 +4,28 @@ set -eo pipefail
shopt -s nullglob
DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated
# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as the desired user
# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
fi
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# support `docker run --user=xxx:xxxx`
if [[ "$(id -u)" = "0" ]]; then
if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
USER=0
GROUP=0
else
USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
fi
else
USER="$(id -u)"
GROUP="$(id -g)"
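Taken together, the entrypoint now distinguishes three ways of choosing the server user; a sketch of the corresponding invocations (container name is illustrative), matching the README examples elsewhere in this commit:

```bash
# 1. Default: the container starts as root and drops to the clickhouse user.
docker run -d --name some-clickhouse-server clickhouse/clickhouse-server

# 2. Explicit user: the whole container runs under the given uid:gid.
docker run -d --user "$(id -u):$(id -g)" --name some-clickhouse-server clickhouse/clickhouse-server

# 3. Opt-in root (e.g. with user namespaces enabled): no privilege drop.
docker run -d -e CLICKHOUSE_RUN_AS_ROOT=1 --name some-clickhouse-server clickhouse/clickhouse-server
```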
@@ -55,14 +66,14 @@ function create_directory_and_do_chown() {
[ -z "$dir" ] && return
# ensure directories exist
if [ "$DO_CHOWN" = "1" ]; then
mkdir="mkdir"
mkdir=( mkdir )
else
# if DO_CHOWN=0 it means that the system does not map root user to "admin" permissions
# it mainly happens on NFS mounts where root==nobody for security reasons
# thus mkdir MUST run with user id/gid and not from nobody that has zero permissions
mkdir="/usr/bin/clickhouse su "${USER}:${GROUP}" mkdir"
mkdir=( clickhouse su "${USER}:${GROUP}" mkdir )
fi
if ! $mkdir -p "$dir"; then
if ! "${mkdir[@]}" -p "$dir"; then
echo "Couldn't create necessary directory: $dir"
exit 1
fi
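Storing the command in an array instead of a string is the standard bash fix here: an unquoted `$mkdir` is re-split on whitespace and the embedded quotes around `${USER}:${GROUP}` are not honored, while `"${mkdir[@]}"` expands to exactly one word per element. A minimal illustration (the uid:gid value is made up):

```bash
# Array expansion preserves each element as one word, no re-splitting.
mkdir=( clickhouse su "101:101" mkdir )
printf '<%s> ' "${mkdir[@]}"; echo    # <clickhouse> <su> <101:101> <mkdir>
# "${mkdir[@]}" -p /some/dir would then run that four-word command with its args.
```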
@@ -109,7 +120,7 @@ if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CL
<networks>
<ip>::/0</ip>
</networks>
<password>${CLICKHOUSE_PASSWORD}</password>
<password><![CDATA[${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}]]></password>
<quota>default</quota>
<access_management>${CLICKHOUSE_ACCESS_MANAGEMENT}</access_management>
</${CLICKHOUSE_USER}>
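The new `<password>` line packs a real escaping rule into one bash substitution: `]]>` is the only sequence that can terminate a CDATA section, so every occurrence of it in the password is replaced by closing the section and opening a new one. A standalone sketch:

```bash
# Hypothetical password containing the CDATA terminator ']]>'
CLICKHOUSE_PASSWORD='pa]]>ss'
echo "<password><![CDATA[${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}]]></password>"
# -> <password><![CDATA[pa]]]]><![CDATA[>ss]]></password>
# Two well-formed CDATA sections; an XML parser reads the value back as 'pa]]>ss'.
```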
@@ -143,7 +154,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
fi
# Listen only on localhost until the initialization is done
/usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
pid="$!"
# check if clickhouse is ready to accept connections
@@ -203,18 +214,8 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0}
export CLICKHOUSE_WATCHDOG_ENABLE
# An option for easy restarting and replacing clickhouse-server in a container, especially in Kubernetes.
# For example, you can replace the clickhouse-server binary to another and restart it while keeping the container running.
if [[ "${CLICKHOUSE_DOCKER_RESTART_ON_EXIT:-0}" -eq "1" ]]; then
while true; do
# This runs the server as a child process of the shell script:
/usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" ||:
echo >&2 'ClickHouse Server exited, and the environment variable CLICKHOUSE_DOCKER_RESTART_ON_EXIT is set to 1. Restarting the server.'
done
else
# This replaces the shell script with the server:
exec /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
fi
# This replaces the shell script with the server:
exec clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image


@@ -58,6 +58,23 @@ export CC=clang-18
export CXX=clang++-18
```
### Install Rust compiler
First follow the steps in the official [rust documentation](https://www.rust-lang.org/tools/install) to install `rustup`.
As with C++ dependencies, ClickHouse uses vendoring to control exactly what's installed and to avoid depending on third-party services (like the `crates.io` registry).
Although in release mode any modern rustup toolchain version should work with these dependencies, if you plan to enable sanitizers you must use a version that matches the exact same `std` as the one used in CI (for which we vendor the crates):
```bash
rustup toolchain install nightly-2024-04-01
rustup default nightly-2024-04-01
rustup component add rust-src
```
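A quick way to confirm the toolchain is set up as described (a hypothetical check, not part of the official instructions):

```bash
# The pinned nightly must be active and rust-src installed, otherwise
# sanitizer builds will link against a mismatched std.
rustup show active-toolchain
rustup component list --toolchain nightly-2024-04-01 | grep rust-src
```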
### Checkout ClickHouse Sources {#checkout-clickhouse-sources}
``` bash
@@ -97,6 +114,7 @@ The build requires the following components:
- Ninja
- Yasm
- Gawk
- rustc
If all the components are installed, you may build it in the same way as the steps above.


@@ -32,20 +32,21 @@ WHERE name LIKE '%thread_pool%'
```
``` text
┌─name────────────────────────────────────────┬─value─┬─default─┬─changed─┬─description─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─type───┬─changeable_without_restart─┬─is_obsolete─┐
│ max_thread_pool_size │ 10000 │ 10000 │ 0 │ The maximum number of threads that could be allocated from the OS and used for query execution and background operations. │ UInt64 │ No │ 0 │
│ max_thread_pool_free_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that will always stay in a global thread pool once allocated and remain idle in case of insufficient number of tasks. │ UInt64 │ No │ 0 │
│ thread_pool_queue_size │ 10000 │ 10000 │ 0 │ The maximum number of tasks that will be placed in a queue and wait for execution. │ UInt64 │ No │ 0 │
│ max_io_thread_pool_size │ 100 │ 100 │ 0 │ The maximum number of threads that would be used for IO operations │ UInt64 │ No │ 0 │
│ max_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for IO thread pool. │ UInt64 │ No │ 0 │
│ io_thread_pool_queue_size │ 10000 │ 10000 │ 0 │ Queue size for IO thread pool. │ UInt64 │ No │ 0 │
│ max_active_parts_loading_thread_pool_size │ 64 │ 64 │ 0 │ The number of threads to load active set of data parts (Active ones) at startup. │ UInt64 │ No │ 0 │
│ max_outdated_parts_loading_thread_pool_size │ 32 │ 32 │ 0 │ The number of threads to load inactive set of data parts (Outdated ones) at startup. │ UInt64 │ No │ 0 │
│ max_parts_cleaning_thread_pool_size │ 128 │ 128 │ 0 │ The number of threads for concurrent removal of inactive data parts. │ UInt64 │ No │ 0 │
│ max_backups_io_thread_pool_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that would be used for IO operations for BACKUP queries │ UInt64 │ No │ 0 │
│ max_backups_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for backups IO thread pool. │ UInt64 │ No │ 0 │
│ backups_io_thread_pool_queue_size │ 0 │ 0 │ 0 │ Queue size for backups IO thread pool. │ UInt64 │ No │ 0 │
└─────────────────────────────────────────────┴───────┴─────────┴─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────┴────────────────────────────┴─────────────┘
┌─name──────────────────────────────────────────┬─value─┬─default─┬─changed─┬─description─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─type───┬─changeable_without_restart─┬─is_obsolete─┐
│ max_thread_pool_size │ 10000 │ 10000 │ 0 │ The maximum number of threads that could be allocated from the OS and used for query execution and background operations. │ UInt64 │ No │ 0 │
│ max_thread_pool_free_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that will always stay in a global thread pool once allocated and remain idle in case of insufficient number of tasks. │ UInt64 │ No │ 0 │
│ thread_pool_queue_size │ 10000 │ 10000 │ 0 │ The maximum number of tasks that will be placed in a queue and wait for execution. │ UInt64 │ No │ 0 │
│ max_io_thread_pool_size │ 100 │ 100 │ 0 │ The maximum number of threads that would be used for IO operations │ UInt64 │ No │ 0 │
│ max_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for IO thread pool. │ UInt64 │ No │ 0 │
│ io_thread_pool_queue_size │ 10000 │ 10000 │ 0 │ Queue size for IO thread pool. │ UInt64 │ No │ 0 │
│ max_active_parts_loading_thread_pool_size │ 64 │ 64 │ 0 │ The number of threads to load active set of data parts (Active ones) at startup. │ UInt64 │ No │ 0 │
│ max_outdated_parts_loading_thread_pool_size │ 32 │ 32 │ 0 │ The number of threads to load inactive set of data parts (Outdated ones) at startup. │ UInt64 │ No │ 0 │
│ max_unexpected_parts_loading_thread_pool_size │ 32 │ 32 │ 0 │ The number of threads to load inactive set of data parts (Unexpected ones) at startup. │ UInt64 │ No │ 0 │
│ max_parts_cleaning_thread_pool_size │ 128 │ 128 │ 0 │ The number of threads for concurrent removal of inactive data parts. │ UInt64 │ No │ 0 │
│ max_backups_io_thread_pool_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that would be used for IO operations for BACKUP queries │ UInt64 │ No │ 0 │
│ max_backups_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for backups IO thread pool. │ UInt64 │ No │ 0 │
│ backups_io_thread_pool_queue_size │ 0 │ 0 │ 0 │ Queue size for backups IO thread pool. │ UInt64 │ No │ 0 │
└───────────────────────────────────────────────┴───────┴─────────┴─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────┴────────────────────────────┴─────────────┘
```


@@ -2968,7 +2968,7 @@ Result:
## fromModifiedJulianDay
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day number from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day number from `-678941` to `2973483` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
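The new bound is easy to cross-check: MJD 0 is 1858-11-17, so the Unix epoch (1970-01-01) is MJD 40587; a hypothetical verification with GNU date:

```bash
# Days from the Unix epoch to 9999-12-31, shifted by the epoch's own MJD.
echo $(( $(date -ud '9999-12-31' +%s) / 86400 + 40587 ))
# -> 2973483
```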
**Syntax**


@@ -1156,7 +1156,7 @@ SELECT toModifiedJulianDayOrNull('2020-01-01');
## fromModifiedJulianDay {#frommodifiedjulianday}
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day numbers from `-678941` to `2973119` (representing 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day numbers from `-678941` to `2973483` (representing 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
**Syntax**


@@ -161,6 +161,14 @@ void LocalServer::initialize(Poco::Util::Application & self)
getOutdatedPartsLoadingThreadPool().setMaxTurboThreads(active_parts_loading_threads);
const size_t unexpected_parts_loading_threads = config().getUInt("max_unexpected_parts_loading_thread_pool_size", 32);
getUnexpectedPartsLoadingThreadPool().initialize(
unexpected_parts_loading_threads,
0, // We don't need any threads once all the parts are loaded
unexpected_parts_loading_threads);
getUnexpectedPartsLoadingThreadPool().setMaxTurboThreads(active_parts_loading_threads);
const size_t cleanup_threads = config().getUInt("max_parts_cleaning_thread_pool_size", 128);
getPartsCleaningThreadPool().initialize(
cleanup_threads,


@@ -713,10 +713,11 @@ try
const size_t physical_server_memory = getMemoryAmount();
LOG_INFO(log, "Available RAM: {}; physical cores: {}; logical cores: {}.",
LOG_INFO(log, "Available RAM: {}; logical cores: {}; used cores: {}.",
formatReadableSizeWithBinarySuffix(physical_server_memory),
getNumberOfPhysicalCPUCores(), // on ARM processors it can show only enabled at current moment cores
std::thread::hardware_concurrency());
std::thread::hardware_concurrency(),
getNumberOfPhysicalCPUCores() // on ARM processors it can show only enabled at current moment cores
);
#if defined(__x86_64__)
String cpu_info;
@@ -842,6 +843,16 @@ try
server_settings.max_active_parts_loading_thread_pool_size
);
getUnexpectedPartsLoadingThreadPool().initialize(
server_settings.max_unexpected_parts_loading_thread_pool_size,
0, // We don't need any threads once all the parts are loaded
server_settings.max_unexpected_parts_loading_thread_pool_size);
/// It could grow if we need to synchronously wait until all the data parts are loaded.
getUnexpectedPartsLoadingThreadPool().setMaxTurboThreads(
server_settings.max_active_parts_loading_thread_pool_size
);
getPartsCleaningThreadPool().initialize(
server_settings.max_parts_cleaning_thread_pool_size,
0, // We don't need any threads once all the parts are deleted
@@ -1431,7 +1442,7 @@ try
concurrent_threads_soft_limit = new_server_settings.concurrent_threads_soft_limit_num;
if (new_server_settings.concurrent_threads_soft_limit_ratio_to_cores > 0)
{
auto value = new_server_settings.concurrent_threads_soft_limit_ratio_to_cores * std::thread::hardware_concurrency();
auto value = new_server_settings.concurrent_threads_soft_limit_ratio_to_cores * getNumberOfPhysicalCPUCores();
if (value > 0 && value < concurrent_threads_soft_limit)
concurrent_threads_soft_limit = value;
}


@@ -59,7 +59,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
{
if (0 != getgrgid_r(gid, &entry, buf.get(), buf_size, &result))
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
}
gid = entry.gr_gid;
}
@@ -84,7 +90,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
{
if (0 != getpwuid_r(uid, &entry, buf.get(), buf_size, &result))
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwuid_r' to obtain uid from user name ({})", uid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
}
uid = entry.pw_uid;
}


@@ -1,11 +0,0 @@
[env]
CFLAGS = "@RUST_CFLAGS@"
CXXFLAGS = "@RUST_CXXFLAGS@"
[build]
rustflags = @RUSTFLAGS@
rustdocflags = @RUSTFLAGS@
@RUSTCWRAPPER@
[unstable]
@RUST_CARGO_BUILD_STD@


@@ -1,3 +1,21 @@
if (OS_FREEBSD)
# Recent nix/libc requires fspacectl, and it has been added only since FreeBSD 14.
# And since the sysroot has older libraries, you will get undefined references for the clickhouse binary.
#
# But likely everything should work without this syscall; however, it is not
# possible right now to gently override library versions for dependencies,
# and forking rust modules is a little bit too much for this thing.
#
# You can take a look at the details in the following issue [1].
#
# [1]: https://github.com/rust-lang/cargo/issues/5640
#
# Update 2024-04: Now prql also requires getrandom() via std::sys::pal::unix::rand::imp::getrandom_fill_bytes
message(STATUS "Rust build is disabled for FreeBSD because we use old sysroot files")
return()
endif()
# NOTE: should be macro to export RUST_CXXFLAGS/RUST_CFLAGS for subfolders
macro(configure_rustc)
# NOTE: this can also be done by overriding rustc, but it not trivial with rustup.
@@ -52,66 +70,19 @@ macro(configure_rustc)
message(STATUS "RUSTFLAGS: ${RUSTFLAGS}")
message(STATUS "RUST_CARGO_BUILD_STD: ${RUST_CARGO_BUILD_STD}")
# NOTE: requires RW access for the source dir
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/.cargo/config.toml.in" "${CMAKE_CURRENT_SOURCE_DIR}/.cargo/config.toml" @ONLY)
set(RUST_VENDOR_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../contrib/rust_vendor")
endmacro()
configure_rustc()
function(clickhouse_import_crate)
# This is a workaround for Corrosion case sensitive build type matching in
# _generator_add_cargo_targets(), that leads to different paths in
# IMPORTED_LOCATION and real path of the library.
#
# It uses CMAKE_CONFIGURATION_TYPES and $<CONFIG>, so here we preserve the
# case of ${CMAKE_BUILD_TYPE} in ${CMAKE_CONFIGURATION_TYPES}.
if ("${CMAKE_BUILD_TYPE_UC}" STREQUAL "DEBUG")
set(CMAKE_CONFIGURATION_TYPES "${CMAKE_BUILD_TYPE};release")
else()
set(CMAKE_CONFIGURATION_TYPES "${CMAKE_BUILD_TYPE};debug")
endif()
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
set(profile "")
else()
if (ENABLE_THINLTO)
set(profile "release-thinlto")
else()
set(profile "release")
endif()
endif()
# Note, here --offline is not used, since the vendor archive is used on CI, and
# passing --offline here would be inconvenient for local development.
corrosion_import_crate(NO_STD ${ARGN} PROFILE ${profile})
endfunction()
# Add crate from the build directory.
#
# Our crates have configuration files:
# - config for cargo (see config.toml.in)
# - and possibly config for build (build.rs.in)
#
# And to avoid overlap between different builds for one source directory, the crate will
# To avoid overlap between different builds for one source directory, the crate will
# be copied from the source directory to the binary directory.
file(COPY ".cargo" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
function(add_rust_subdirectory src)
set(dst "${CMAKE_CURRENT_BINARY_DIR}/${src}")
message(STATUS "Copy ${src} to ${dst}")
file(COPY "${src}" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}"
PATTERN target EXCLUDE)
# Check if Rust is available or not.
#
# `cargo update --dry-run` will not update anything, but will check the internet connectivity.
execute_process(COMMAND ${Rust_CARGO_CACHED} update --dry-run
WORKING_DIRECTORY "${dst}"
RESULT_VARIABLE CARGO_UPDATE_RESULT
OUTPUT_VARIABLE CARGO_UPDATE_STDOUT
ERROR_VARIABLE CARGO_UPDATE_STDERR)
if (CARGO_UPDATE_RESULT)
message(FATAL_ERROR "Rust (${Rust_CARGO_CACHED}) support is not available (likely there is no internet connectivity):\n${CARGO_UPDATE_STDERR}\nYou can disable Rust support with -DENABLE_RUST=OFF")
endif()
add_subdirectory("${dst}" "${dst}")
# cmake -E copy* does not know how to exclude files
@@ -126,5 +97,4 @@ function(add_rust_subdirectory src)
VERBATIM)
endfunction()
add_rust_subdirectory (skim)
add_rust_subdirectory (prql)
add_rust_subdirectory (workspace)


@@ -1,11 +0,0 @@
# workspace is required to vendor crates for all packages.
[workspace]
members = [
"skim",
"prql",
]
resolver = "2"
# FIXME: even though the profiles should be defined in the main cargo config we
# cannot do this yet, since we compile each package separately, so you should
# ignore the warning from cargo about this.

rust/VENDOR.md Normal file

@@ -0,0 +1,51 @@
As we have multiple projects, we use a workspace to manage them (it's way simpler and leads to fewer issues). In order
to vendor all the dependencies we need to store both the registry and the packages themselves.
Note that this includes the exact `std` dependencies for the rustc version used in CI (currently nightly-2024-04-01),
so you need to run `rustup component add rust-src` for that specific version.
* First step: (Re)-generate the Cargo.lock file (run under `workspace/`).
```bash
cargo generate-lockfile
```
* Generate the local registry:
Note that we use both commands to vendor both the registry and the crates. No idea why both are necessary.
* First, we need to install the tool if you don't already have it:
```bash
cargo install --version 0.2.6 cargo-local-registry
```
* Now add the local packages:
```bash
export CH_TOP_DIR=$(git rev-parse --show-toplevel)
export RUSTC_ROOT=$(rustc --print=sysroot)
cd "$CH_TOP_DIR"/rust/workspace
cargo local-registry --git --sync Cargo.lock "$CH_TOP_DIR"/contrib/rust_vendor
cp "$RUSTC_ROOT"/lib/rustlib/src/rust/Cargo.lock "$RUSTC_ROOT"/lib/rustlib/src/rust/library/std/
cargo local-registry --no-delete --git --sync "$RUSTC_ROOT"/lib/rustlib/src/rust/library/std/Cargo.lock "$CH_TOP_DIR"/contrib/rust_vendor
cp "$RUSTC_ROOT"/lib/rustlib/src/rust/Cargo.lock "$RUSTC_ROOT"/lib/rustlib/src/rust/library/test/
cargo local-registry --no-delete --git --sync "$RUSTC_ROOT"/lib/rustlib/src/rust/library/test/Cargo.lock "$CH_TOP_DIR"/contrib/rust_vendor
cargo vendor --no-delete --locked "$CH_TOP_DIR"/contrib/rust_vendor
cd "$RUSTC_ROOT"/lib/rustlib/src/rust/library/std/
cargo vendor --no-delete "$CH_TOP_DIR"/contrib/rust_vendor
cd "$RUSTC_ROOT"/lib/rustlib/src/rust/library/test/
cargo vendor --no-delete "$CH_TOP_DIR"/contrib/rust_vendor
# Remove Windows-only dependencies (which are really heavy and we don't want in the repo)
rm -rf "$CH_TOP_DIR"/contrib/rust_vendor/winapi* "$CH_TOP_DIR"/contrib/rust_vendor/windows*
# Cleanup the lock files we copied
rm "$RUSTC_ROOT"/lib/rustlib/src/rust/library/std/Cargo.lock "$RUSTC_ROOT"/lib/rustlib/src/rust/library/test/Cargo.lock
cd "$CH_TOP_DIR"/rust/workspace
```
The `rustc --print=sysroot` part includes `std` dependencies, required to build with sanitizer flags. It must be kept
in sync with the rustc version used in CI.
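Before running the sequence above, a small hypothetical pre-flight check for the prerequisites it assumes:

```bash
# Needs cargo-local-registry 0.2.6 and the rust-src component of the
# pinned toolchain (same paths as used in the steps above).
command -v cargo-local-registry >/dev/null || cargo install --version 0.2.6 cargo-local-registry
test -d "$(rustc --print=sysroot)/lib/rustlib/src/rust/library/std" \
  || rustup component add rust-src
```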


@@ -1,3 +0,0 @@
clickhouse_import_crate(MANIFEST_PATH Cargo.toml)
target_include_directories(_ch_rust_prql INTERFACE include)
add_library(ch_rust::prql ALIAS _ch_rust_prql)


@@ -1,2 +0,0 @@
[env]
CXXFLAGS = "@RUST_CXXFLAGS@"


@@ -1,55 +0,0 @@
if (OS_FREEBSD)
# Recent nix/libc requires fspacectl, and it has been added only since FreeBSD 14.
# And since the sysroot has older libraries, you will get undefined references for the clickhouse binary.
#
# But likely everything should work without this syscall; however, it is not
# possible right now to gently override library versions for dependencies,
# and forking rust modules is a little bit too much for this thing.
#
# You can take a look at the details in the following issue [1].
#
# [1]: https://github.com/rust-lang/cargo/issues/5640
#
message(STATUS "skim is disabled for FreeBSD")
return()
endif()
if (SANITIZE STREQUAL "thread")
# Rust does not support Thread Sanitizer [1]
#
# [1]: https://doc.rust-lang.org/beta/unstable-book/compiler-flags/sanitizer.html#threadsanitizer
message(STATUS "skim is disabled under Thread Sanitizer")
return()
endif()
clickhouse_import_crate(MANIFEST_PATH Cargo.toml)
# -Wno-dollar-in-identifier-extension: cxx bridge compiles names with '$'
# -Wno-unused-macros: unused CXXBRIDGE1_RUST_STRING
set(CXXBRIDGE_CXXFLAGS "-Wno-dollar-in-identifier-extension -Wno-unused-macros")
set(RUST_CXXFLAGS "${RUST_CXXFLAGS} ${CXXBRIDGE_CXXFLAGS}")
message(STATUS "RUST_CXXFLAGS (for skim): ${RUST_CXXFLAGS}")
# NOTE: requires RW access for the source dir
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build.rs.in" "${CMAKE_CURRENT_SOURCE_DIR}/build.rs" @ONLY)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/.cargo/config.toml.in" "${CMAKE_CURRENT_SOURCE_DIR}/.cargo/config.toml" @ONLY)
set (ffi_binding_generated_path
${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}/cargo/build/${Rust_CARGO_TARGET_CACHED}/cxxbridge/_ch_rust_skim_rust/src/lib.rs.cc)
set (ffi_binding_final_path ${CMAKE_CURRENT_BINARY_DIR}/skim-ffi.cc)
message(STATUS "Writing FFI Binding for skim: ${ffi_binding_generated_path} => ${ffi_binding_final_path}")
add_custom_command(OUTPUT ${ffi_binding_final_path}
COMMAND ${CMAKE_COMMAND} -E copy ${ffi_binding_generated_path} ${ffi_binding_final_path}
DEPENDS cargo-build__ch_rust_skim_rust)
add_library(_ch_rust_skim_ffi ${ffi_binding_final_path})
# cxx bridge compiles such bindings
set_target_properties(_ch_rust_skim_ffi PROPERTIES COMPILE_FLAGS "${CXXBRIDGE_CXXFLAGS}")
add_library(_ch_rust_skim INTERFACE)
target_include_directories(_ch_rust_skim INTERFACE include)
target_link_libraries(_ch_rust_skim INTERFACE
_ch_rust_skim_rust
_ch_rust_skim_ffi)
add_library(ch_rust::skim ALIAS _ch_rust_skim)


@@ -1,9 +0,0 @@
fn main() {
let mut build = cxx_build::bridge("src/lib.rs");
for flag in "@RUST_CXXFLAGS@".split(' ') {
build.flag(flag);
}
build.compile("skim");
println!("cargo:rerun-if-changed=src/lib.rs");
println!("cargo:rerun-if-changed=.cargo/config.toml");
}

View File

@ -0,0 +1,26 @@
[env]
CFLAGS = "@RUST_CFLAGS@"
CXXFLAGS = "@RUST_CXXFLAGS@"
[build]
rustflags = @RUSTFLAGS@
rustdocflags = @RUSTFLAGS@
@RUSTCWRAPPER@
[unstable]
@RUST_CARGO_BUILD_STD@
[source.crates-io]
registry = 'sparse+https://index.crates.io/'
replace-with = 'local-registry'
[source."git+https://github.com/azat-rust/tuikit.git?rev=e1994c0e03ff02c49cf1471f0cc3cbf185ce0104"]
git = "https://github.com/azat-rust/tuikit.git"
rev = "e1994c0e03ff02c49cf1471f0cc3cbf185ce0104"
replace-with = "vendored-sources"
[source.local-registry]
local-registry = "@RUST_VENDOR_DIR@"
[source.vendored-sources]
directory = "@RUST_VENDOR_DIR@"

View File

@ -0,0 +1,42 @@
function(clickhouse_import_crate)
# This is a workaround for Corrosion's case-sensitive build type matching in
# _generator_add_cargo_targets(), which leads to different paths in
# IMPORTED_LOCATION and the real path of the library.
#
# It uses CMAKE_CONFIGURATION_TYPES and $<CONFIG>, so here we preserve the
# case of ${CMAKE_BUILD_TYPE} in ${CMAKE_CONFIGURATION_TYPES}.
if ("${CMAKE_BUILD_TYPE_UC}" STREQUAL "DEBUG")
set(CMAKE_CONFIGURATION_TYPES "${CMAKE_BUILD_TYPE};release")
else()
set(CMAKE_CONFIGURATION_TYPES "${CMAKE_BUILD_TYPE};debug")
endif()
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
set(profile "")
else()
if (ENABLE_THINLTO)
set(profile "release-thinlto")
else()
set(profile "release")
endif()
endif()
corrosion_import_crate(${ARGN} NO_STD PROFILE ${profile} LOCKED FLAGS --offline)
endfunction()
# -Wno-dollar-in-identifier-extension: cxx bridge generates names with '$'
# -Wno-unused-macros: unused CXXBRIDGE1_RUST_STRING
set(CXXBRIDGE_CXXFLAGS "-Wno-dollar-in-identifier-extension -Wno-unused-macros")
set(RUST_CXXFLAGS "${RUST_CXXFLAGS} ${CXXBRIDGE_CXXFLAGS}")
message(STATUS "RUST_CXXFLAGS (for skim): ${RUST_CXXFLAGS}")
configure_file(".cargo/config.toml.in" ".cargo/config.toml" @ONLY)
clickhouse_import_crate(MANIFEST_PATH Cargo.toml)
target_include_directories(_ch_rust_prql INTERFACE prql/include)
add_library(ch_rust::prql ALIAS _ch_rust_prql)
target_include_directories(_ch_rust_skim_rust INTERFACE skim/include)
add_library(ch_rust::skim ALIAS _ch_rust_skim_rust)

View File

@ -38,9 +38,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "ahash"
version = "0.8.6"
version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a"
checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
dependencies = [
"cfg-if",
"once_cell",
@ -50,9 +50,9 @@ dependencies = [
[[package]]
name = "aho-corasick"
version = "1.1.2"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
@ -94,9 +94,9 @@ dependencies = [
[[package]]
name = "anstyle"
version = "1.0.4"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc"
[[package]]
name = "anstyle-parse"
@ -128,9 +128,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.80"
version = "1.0.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1"
checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247"
dependencies = [
"backtrace",
]
@ -153,15 +153,15 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
[[package]]
name = "autocfg"
version = "1.1.0"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80"
[[package]]
name = "backtrace"
version = "0.3.69"
version = "0.3.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d"
dependencies = [
"addr2line",
"cc",
@ -186,24 +186,21 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.4.1"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
[[package]]
name = "bumpalo"
version = "3.14.0"
version = "3.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec"
checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "cc"
version = "1.0.83"
version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
dependencies = [
"libc",
]
checksum = "2678b2e3449475e95b0aa6f9b506a28e61b3dc8996592b983695e8ebb58a8b41"
[[package]]
name = "cfg-if"
@ -213,16 +210,16 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.34"
version = "0.4.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b"
checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e"
dependencies = [
"android-tzdata",
"iana-time-zone",
"js-sys",
"num-traits",
"wasm-bindgen",
"windows-targets 0.52.0",
"windows-targets",
]
[[package]]
@ -259,11 +256,10 @@ checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"
[[package]]
name = "crossbeam"
version = "0.8.2"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c"
checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
dependencies = [
"cfg-if",
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-epoch",
@ -273,55 +269,46 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
version = "0.5.9"
version = "0.5.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c3242926edf34aec4ac3a77108ad4854bffaa2e4ddc1824124ce59231302d5"
checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.4"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751"
checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
dependencies = [
"cfg-if",
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.16"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d2fe95351b870527a5d09bf563ed3c97c0cffb87cf1c78a591bf48bb218d9aa"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"autocfg",
"cfg-if",
"crossbeam-utils",
"memoffset 0.9.0",
]
[[package]]
name = "crossbeam-queue"
version = "0.3.9"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9bcf5bdbfdd6030fb4a1c497b5d5fc5921aa2f60d359a17e249c0e6df3de153"
checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.17"
version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f"
dependencies = [
"cfg-if",
]
checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
[[package]]
name = "csv"
@ -346,9 +333,9 @@ dependencies = [
[[package]]
name = "cxx"
version = "1.0.111"
version = "1.0.121"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9fc0c733f71e58dedf4f034cd2a266f80b94cc9ed512729e1798651b68c2cba"
checksum = "21db378d04296a84d8b7d047c36bb3954f0b46529db725d7e62fb02f9ba53ccc"
dependencies = [
"cc",
"cxxbridge-flags",
@ -358,9 +345,9 @@ dependencies = [
[[package]]
name = "cxx-build"
version = "1.0.111"
version = "1.0.121"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51bc81d2664db24cf1d35405f66e18a85cffd4d49ab930c71a5c6342a410f38c"
checksum = "3e5262a7fa3f0bae2a55b767c223ba98032d7c328f5c13fa5cdc980b77fc0658"
dependencies = [
"cc",
"codespan-reporting",
@ -368,24 +355,24 @@ dependencies = [
"proc-macro2",
"quote",
"scratch",
"syn 2.0.52",
"syn 2.0.58",
]
[[package]]
name = "cxxbridge-flags"
version = "1.0.111"
version = "1.0.121"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8511afbe34ea242697784da5cb2c5d4a0afb224ca8b136bdf93bfe180cbe5884"
checksum = "be8dcadd2e2fb4a501e1d9e93d6e88e6ea494306d8272069c92d5a9edf8855c0"
[[package]]
name = "cxxbridge-macro"
version = "1.0.111"
version = "1.0.121"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c6888cd161769d65134846d4d4981d5a6654307cc46ec83fb917e530aea5f84"
checksum = "ad08a837629ad949b73d032c637653d069e909cffe4ee7870b02301939ce39cc"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.58",
]
[[package]]
@ -435,9 +422,9 @@ dependencies = [
[[package]]
name = "deranged"
version = "0.3.10"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc"
checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
dependencies = [
"powerfmt",
]
@ -496,9 +483,9 @@ dependencies = [
[[package]]
name = "either"
version = "1.9.0"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a"
[[package]]
name = "enum-as-inner"
@ -509,7 +496,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.58",
]
[[package]]
@ -535,9 +522,9 @@ dependencies = [
[[package]]
name = "getrandom"
version = "0.2.11"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f"
checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c"
dependencies = [
"cfg-if",
"libc",
@ -568,9 +555,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "iana-time-zone"
version = "0.1.58"
version = "0.1.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20"
checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141"
dependencies = [
"android_system_properties",
"core-foundation-sys",
@ -597,9 +584,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "indexmap"
version = "2.2.5"
version = "2.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4"
checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26"
dependencies = [
"equivalent",
"hashbrown",
@ -607,24 +594,24 @@ dependencies = [
[[package]]
name = "itertools"
version = "0.12.0"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0"
checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.10"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
[[package]]
name = "js-sys"
version = "0.3.66"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca"
checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d"
dependencies = [
"wasm-bindgen",
]
@ -637,19 +624,18 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.151"
version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "libredox"
version = "0.0.1"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8"
checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
dependencies = [
"bitflags 2.4.1",
"bitflags 2.5.0",
"libc",
"redox_syscall",
]
[[package]]
@ -663,15 +649,15 @@ dependencies = [
[[package]]
name = "log"
version = "0.4.20"
version = "0.4.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
[[package]]
name = "memchr"
version = "2.6.4"
version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d"
[[package]]
name = "memoffset"
@ -682,15 +668,6 @@ dependencies = [
"autocfg",
]
[[package]]
name = "memoffset"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
dependencies = [
"autocfg",
]
[[package]]
name = "minimal-lexical"
version = "0.2.1"
@ -727,7 +704,7 @@ dependencies = [
"bitflags 1.3.2",
"cfg-if",
"libc",
"memoffset 0.6.5",
"memoffset",
"pin-utils",
]
@ -742,10 +719,16 @@ dependencies = [
]
[[package]]
name = "num-traits"
version = "0.2.17"
name = "num-conv"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
[[package]]
name = "num-traits"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a"
dependencies = [
"autocfg",
]
@ -779,9 +762,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
[[package]]
name = "proc-macro2"
version = "1.0.78"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae"
checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e"
dependencies = [
"unicode-ident",
]
@ -860,9 +843,9 @@ dependencies = [
[[package]]
name = "rayon"
version = "1.8.0"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
@ -870,28 +853,19 @@ dependencies = [
[[package]]
name = "rayon-core"
version = "1.12.0"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "redox_syscall"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"
dependencies = [
"bitflags 1.3.2",
]
[[package]]
name = "redox_users"
version = "0.4.4"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4"
checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891"
dependencies = [
"getrandom",
"libredox",
@ -900,9 +874,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.10.3"
version = "1.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15"
checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
dependencies = [
"aho-corasick",
"memchr",
@ -912,9 +886,9 @@ dependencies = [
[[package]]
name = "regex-automata"
version = "0.4.5"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd"
checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea"
dependencies = [
"aho-corasick",
"memchr",
@ -923,9 +897,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
version = "0.8.2"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
[[package]]
name = "rustc-demangle"
@ -935,15 +909,15 @@ checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustversion"
version = "1.0.14"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47"
[[package]]
name = "ryu"
version = "1.0.16"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
[[package]]
name = "scratch"
@ -977,14 +951,14 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.58",
]
[[package]]
name = "serde_json"
version = "1.0.114"
version = "1.0.115"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0"
checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd"
dependencies = [
"itoa",
"ryu",
@ -993,9 +967,9 @@ dependencies = [
[[package]]
name = "serde_yaml"
version = "0.9.32"
version = "0.9.34+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f"
checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
dependencies = [
"indexmap",
"itoa",
@ -1071,24 +1045,24 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "strum"
version = "0.26.1"
version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f"
checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29"
dependencies = [
"strum_macros",
]
[[package]]
name = "strum_macros"
version = "0.26.1"
version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18"
checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946"
dependencies = [
"heck",
"proc-macro2",
"quote",
"rustversion",
"syn 2.0.52",
"syn 2.0.58",
]
[[package]]
@ -1104,9 +1078,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.52"
version = "2.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07"
checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687"
dependencies = [
"proc-macro2",
"quote",
@ -1126,38 +1100,38 @@ dependencies = [
[[package]]
name = "termcolor"
version = "1.4.0"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449"
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
dependencies = [
"winapi-util",
]
[[package]]
name = "thiserror"
version = "1.0.51"
version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7"
checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.51"
version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df"
checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.58",
]
[[package]]
name = "thread_local"
version = "1.1.7"
version = "1.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c"
dependencies = [
"cfg-if",
"once_cell",
@ -1165,11 +1139,12 @@ dependencies = [
[[package]]
name = "time"
version = "0.3.30"
version = "0.3.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5"
checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"
dependencies = [
"deranged",
"num-conv",
"powerfmt",
"serde",
"time-core",
@ -1193,8 +1168,7 @@ dependencies = [
[[package]]
name = "tuikit"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e19c6ab038babee3d50c8c12ff8b910bdb2196f62278776422f50390d8e53d8"
source = "git+https://github.com/azat-rust/tuikit.git?rev=e1994c0e03ff02c49cf1471f0cc3cbf185ce0104#e1994c0e03ff02c49cf1471f0cc3cbf185ce0104"
dependencies = [
"bitflags 1.3.2",
"lazy_static",
@ -1224,9 +1198,9 @@ checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"
[[package]]
name = "unsafe-libyaml"
version = "0.2.10"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b"
checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
[[package]]
name = "utf8parse"
@ -1269,9 +1243,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
version = "0.2.89"
version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e"
checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8"
dependencies = [
"cfg-if",
"wasm-bindgen-macro",
@ -1279,24 +1253,24 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.89"
version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826"
checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.58",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.89"
version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2"
checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@ -1304,22 +1278,22 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.89"
version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283"
checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.58",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.89"
version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f"
checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
[[package]]
name = "winapi"
@ -1354,11 +1328,11 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-core"
version = "0.51.1"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64"
checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
dependencies = [
"windows-targets 0.48.5",
"windows-targets",
]
[[package]]
@ -1367,122 +1341,65 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets 0.52.0",
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.48.5"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b"
dependencies = [
"windows_aarch64_gnullvm 0.48.5",
"windows_aarch64_msvc 0.48.5",
"windows_i686_gnu 0.48.5",
"windows_i686_msvc 0.48.5",
"windows_x86_64_gnu 0.48.5",
"windows_x86_64_gnullvm 0.48.5",
"windows_x86_64_msvc 0.48.5",
]
[[package]]
name = "windows-targets"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
dependencies = [
"windows_aarch64_gnullvm 0.52.0",
"windows_aarch64_msvc 0.52.0",
"windows_i686_gnu 0.52.0",
"windows_i686_msvc 0.52.0",
"windows_x86_64_gnu 0.52.0",
"windows_x86_64_gnullvm 0.52.0",
"windows_x86_64_msvc 0.52.0",
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9"
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.5"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675"
[[package]]
name = "windows_i686_gnu"
version = "0.48.5"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3"
[[package]]
name = "windows_i686_msvc"
version = "0.48.5"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_i686_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02"
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.5"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.5"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177"
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.5"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"
[[package]]
name = "yansi"
@ -1492,20 +1409,20 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
[[package]]
name = "zerocopy"
version = "0.7.31"
version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d"
checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.31"
version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a"
checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.52",
"syn 2.0.58",
]

19
rust/workspace/Cargo.toml Normal file
View File

@ -0,0 +1,19 @@
# A workspace is required to vendor crates for all packages.
[workspace]
members = [
"skim",
"prql",
]
resolver = "2"
[profile.release]
debug = false
# We use LTO to slightly decrease binary size
[profile.release-thinlto]
inherits = "release"
lto = true
[patch.crates-io]
# Ref: https://github.com/lotabout/tuikit/pull/51
tuikit = { git = "https://github.com/azat-rust/tuikit.git", rev = "e1994c0e03ff02c49cf1471f0cc3cbf185ce0104" }

View File

@ -10,10 +10,3 @@ serde_json = "1.0"
[lib]
crate-type = ["staticlib"]
[profile.release]
debug = false
[profile.release-thinlto]
inherits = "release"
lto = true

View File

@ -15,15 +15,3 @@ cxx-build = "1.0.83"
[lib]
crate-type = ["staticlib"]
[profile.release]
debug = false
[profile.release-thinlto]
inherits = "release"
# We use LTO here as well to slightly decrease binary size
lto = true
[patch.crates-io]
# Ref: https://github.com/lotabout/tuikit/pull/51
tuikit = { git = "https://github.com/azat-rust/tuikit.git", rev = "e1994c0e03ff02c49cf1471f0cc3cbf185ce0104" }

View File

@ -0,0 +1,4 @@
fn main() {
let build = cxx_build::bridge("src/lib.rs");
build.compile("skim");
}

View File

@ -120,7 +120,7 @@ void RoleCache::collectEnabledRoles(EnabledRoles & enabled_roles, SubscriptionsO
SubscriptionsOnRoles new_subscriptions_on_roles;
new_subscriptions_on_roles.reserve(subscriptions_on_roles.size());
auto get_role_function = [this, &subscriptions_on_roles](const UUID & id) TSA_NO_THREAD_SAFETY_ANALYSIS { return getRole(id, subscriptions_on_roles); };
auto get_role_function = [this, &new_subscriptions_on_roles](const UUID & id) TSA_NO_THREAD_SAFETY_ANALYSIS { return getRole(id, new_subscriptions_on_roles); };
for (const auto & current_role : enabled_roles.params.current_roles)
collectRoles(*new_info, skip_ids, get_role_function, current_role, true, false);

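The one-line fix above is a classic lambda-capture bug: the callback captured the pre-existing `subscriptions_on_roles` instead of the `new_subscriptions_on_roles` being rebuilt, so new subscriptions landed in the container that was about to be discarded. A minimal sketch of the shape, with hypothetical names:

```cpp
#include <cassert>
#include <vector>

int main()
{
    std::vector<int> subscriptions = {1, 2}; // current state
    std::vector<int> new_subscriptions;      // state being rebuilt

    // Buggy shape: the callback records into the container that is about
    // to be thrown away, so the subscription silently disappears.
    auto record_buggy = [&subscriptions](int id) { subscriptions.push_back(id); };
    // Fixed shape: record into the container that replaces the old one.
    auto record_fixed = [&new_subscriptions](int id) { new_subscriptions.push_back(id); };

    record_buggy(3); // ends up in the soon-to-be-discarded state
    record_fixed(4); // survives the swap below

    subscriptions.swap(new_subscriptions); // commit the rebuilt state
    assert(subscriptions == std::vector<int>{4}); // 3 was lost with the old state
}
```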
View File

@ -68,7 +68,10 @@ public:
if (data().isEqualTo(to.data()))
counter += to.counter;
else if (!data().has() || counter < to.counter)
{
data().set(to.data(), arena);
counter = to.counter - counter;
}
else
counter -= to.counter;
}

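The added braces attach the previously missing counter adjustment to the branch that adopts the other state's value. The scheme reads like a Boyer-Moore majority-vote merge (as used by heavy-hitter style aggregates): equal candidates add their counters, and a winning candidate keeps only its surplus. A standalone sketch under that reading:

```cpp
#include <cassert>

// Hypothetical standalone version of the merge step: each state holds a
// candidate value and a Boyer-Moore style counter (0 = no candidate yet).
struct MajorityState
{
    int value = 0;
    unsigned long counter = 0;

    void merge(const MajorityState & to)
    {
        if (counter != 0 && value == to.value)
            counter += to.counter;          // same candidate: votes add up
        else if (counter == 0 || counter < to.counter)
        {
            value = to.value;               // other candidate wins...
            counter = to.counter - counter; // ...keeping only its surplus
        }
        else
            counter -= to.counter;          // our candidate survives, weakened
    }
};

int main()
{
    MajorityState a{7, 3};
    MajorityState b{9, 5};
    a.merge(b);
    assert(a.value == 9 && a.counter == 2); // 5 votes against 3: surplus of 2
}
```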
View File

@ -59,13 +59,13 @@ constexpr size_t group_array_sorted_sort_strategy_max_elements_threshold = 10000
template <typename T, GroupArraySortedStrategy strategy>
struct GroupArraySortedData
{
static constexpr bool is_value_generic_field = std::is_same_v<T, Field>;
using Allocator = MixedAlignedArenaAllocator<alignof(T), 4096>;
using Array = PODArray<T, 32, Allocator>;
using Array = typename std::conditional_t<is_value_generic_field, std::vector<T>, PODArray<T, 32, Allocator>>;
static constexpr size_t partial_sort_max_elements_factor = 2;
static constexpr bool is_value_generic_field = std::is_same_v<T, Field>;
Array values;
static bool compare(const T & lhs, const T & rhs)
@ -144,7 +144,7 @@ struct GroupArraySortedData
}
if (values.size() > max_elements)
values.resize(max_elements, arena);
resize(max_elements, arena);
}
ALWAYS_INLINE void partialSortAndLimitIfNeeded(size_t max_elements, Arena * arena)
@ -153,7 +153,23 @@ struct GroupArraySortedData
return;
::nth_element(values.begin(), values.begin() + max_elements, values.end(), Comparator());
values.resize(max_elements, arena);
resize(max_elements, arena);
}
ALWAYS_INLINE void resize(size_t n, Arena * arena)
{
if constexpr (is_value_generic_field)
values.resize(n);
else
values.resize(n, arena);
}
ALWAYS_INLINE void push_back(T && element, Arena * arena)
{
if constexpr (is_value_generic_field)
values.push_back(element);
else
values.push_back(element, arena);
}
ALWAYS_INLINE void addElement(T && element, size_t max_elements, Arena * arena)
@ -171,12 +187,12 @@ struct GroupArraySortedData
return;
}
values.push_back(std::move(element), arena);
push_back(std::move(element), arena);
std::push_heap(values.begin(), values.end(), Comparator());
}
else
{
values.push_back(std::move(element), arena);
push_back(std::move(element), arena);
partialSortAndLimitIfNeeded(max_elements, arena);
}
}
@ -210,14 +226,6 @@ struct GroupArraySortedData
result_array_data[result_array_data_insert_begin + i] = values[i];
}
}
~GroupArraySortedData()
{
for (auto & value : values)
{
value.~T();
}
}
};
template <typename T>
@ -313,14 +321,12 @@ public:
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size, it should not exceed {}", max_elements);
auto & values = this->data(place).values;
values.resize_exact(size, arena);
if constexpr (std::is_same_v<T, Field>)
if constexpr (Data::is_value_generic_field)
{
values.resize(size);
for (Field & element : values)
{
/// We must initialize the Field type since some internal functions (like operator=) use them
new (&element) Field;
bool has_value = false;
readBinary(has_value, buf);
if (has_value)
@ -329,6 +335,7 @@ public:
}
else
{
values.resize_exact(size, arena);
if constexpr (std::endian::native == std::endian::little)
{
buf.readStrict(reinterpret_cast<char *>(values.data()), size * sizeof(values[0]));

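The underlying technique here is reusable: pick `std::vector<Field>` or an arena-backed `PODArray` at compile time with `std::conditional_t`, then route the calls whose signatures differ through small `if constexpr` wrappers. A self-contained sketch with a stubbed arena:

```cpp
#include <cassert>
#include <cstddef>
#include <type_traits>
#include <vector>

struct Arena {};             // stub for the real allocator-owning arena
struct Field { int v = 0; }; // stand-in for the variant-like DB::Field

// Container that needs the arena on every growth call, like an
// arena-backed PODArray.
template <typename T>
struct ArenaVector
{
    std::vector<T> data;
    void resize(size_t n, Arena *) { data.resize(n); }
    void push_back(T && v, Arena *) { data.push_back(std::move(v)); }
    size_t size() const { return data.size(); }
};

template <typename T>
struct SortedData
{
    static constexpr bool is_value_generic_field = std::is_same_v<T, Field>;
    using Array = std::conditional_t<is_value_generic_field, std::vector<T>, ArenaVector<T>>;
    Array values;

    // One call site, two signatures: if constexpr picks the right one.
    void resize(size_t n, Arena * arena)
    {
        if constexpr (is_value_generic_field)
            values.resize(n);
        else
            values.resize(n, arena);
    }

    void push_back(T && element, Arena * arena)
    {
        if constexpr (is_value_generic_field)
            values.push_back(std::move(element));
        else
            values.push_back(std::move(element), arena);
    }
};

int main()
{
    Arena arena;
    SortedData<int> ints;     // arena-backed path
    ints.push_back(1, &arena);
    SortedData<Field> fields; // plain std::vector path
    fields.resize(2, &arena);
    assert(ints.values.size() == 1 && fields.values.size() == 2);
}
```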
View File

@ -11,6 +11,7 @@
#include <IO/Operators.h>
#include <DataTypes/FieldToDataType.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ASTFunction.h>
@ -162,6 +163,7 @@ QueryTreeNodePtr ConstantNode::cloneImpl() const
ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
{
const auto & constant_value_literal = constant_value->getValue();
const auto & constant_value_type = constant_value->getType();
auto constant_value_ast = std::make_shared<ASTLiteral>(constant_value_literal);
if (!options.add_cast_for_constants)
@ -169,7 +171,26 @@ ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
if (requiresCastCall())
{
auto constant_type_name_ast = std::make_shared<ASTLiteral>(constant_value->getType()->getName());
/** Value for DateTime64 is Decimal64, which is serialized as a string literal.
* If we serialize it as is, DateTime64 would be parsed from that string literal, which can be incorrect.
* For example, DateTime64 cannot be parsed from a short value like '1', even though it is a valid Decimal64 value.
* It could also lead to ambiguous parsing because we don't know if the string literal represents a date or a Decimal64 literal.
* For this reason, we use a string literal representing a date instead of a Decimal64 literal.
*/
const auto & constant_value_end_type = removeNullable(constant_value_type); /// if Nullable
if (WhichDataType(constant_value_end_type->getTypeId()).isDateTime64())
{
const auto * date_time_type = typeid_cast<const DataTypeDateTime64 *>(constant_value_end_type.get());
DecimalField<Decimal64> decimal_value;
if (constant_value_literal.tryGet<DecimalField<Decimal64>>(decimal_value))
{
WriteBufferFromOwnString ostr;
writeDateTimeText(decimal_value.getValue(), date_time_type->getScale(), ostr, date_time_type->getTimeZone());
constant_value_ast = std::make_shared<ASTLiteral>(ostr.str());
}
}
auto constant_type_name_ast = std::make_shared<ASTLiteral>(constant_value_type->getName());
return makeASTFunction("_CAST", std::move(constant_value_ast), std::move(constant_type_name_ast));
}

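To make the comment concrete: a DateTime64 whose underlying Decimal64 is 1 at scale 3 means 1970-01-01 00:00:00.001, yet the bare literal `1` is not guaranteed to parse back as that DateTime64. A hedged sketch of the textual rendering the hunk switches to (hypothetical helper; UTC, non-negative values, POSIX `gmtime_r`):

```cpp
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <string>

// Render a DateTime64 stored as a scaled integer (e.g. scale 3 =
// milliseconds since the Unix epoch) as text that a CAST to DateTime64
// parses unambiguously.
std::string formatDateTime64(int64_t scaled, unsigned scale)
{
    int64_t pow10 = 1;
    for (unsigned i = 0; i < scale; ++i)
        pow10 *= 10;

    std::time_t seconds = static_cast<std::time_t>(scaled / pow10);
    int64_t fraction = scaled % pow10;

    std::tm tm{};
    gmtime_r(&seconds, &tm);
    char buf[64];
    int len = std::snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d",
                            tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                            tm.tm_hour, tm.tm_min, tm.tm_sec);
    if (scale > 0)
        std::snprintf(buf + len, sizeof(buf) - len, ".%0*" PRId64,
                      static_cast<int>(scale), fraction);
    return buf;
}

int main()
{
    // The raw literal 1 would be rejected or misread by a DateTime64 parse;
    // the date text below round-trips.
    return formatDateTime64(1, 3) == "1970-01-01 00:00:00.001" ? 0 : 1;
}
```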
View File

@ -48,9 +48,15 @@ ASTPtr JoinNode::toASTTableJoin() const
auto join_expression_ast = children[join_expression_child_index]->toAST();
if (children[join_expression_child_index]->getNodeType() == QueryTreeNodeType::LIST)
join_ast->using_expression_list = std::move(join_expression_ast);
{
join_ast->using_expression_list = join_expression_ast;
join_ast->children.push_back(join_ast->using_expression_list);
}
else
join_ast->on_expression = std::move(join_expression_ast);
{
join_ast->on_expression = join_expression_ast;
join_ast->children.push_back(join_ast->on_expression);
}
}
return join_ast;

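The extra `children.push_back(...)` calls matter because AST traversal and formatting walk the `children` vector: a member pointer that is not also registered there is invisible to visitors. A toy model of that invariant:

```cpp
#include <cassert>
#include <memory>
#include <vector>

// Toy AST node: visitors walk `children`, so a semantic slot that is not
// also registered there is silently skipped.
struct Node
{
    std::shared_ptr<Node> on_expression;         // semantic slot
    std::vector<std::shared_ptr<Node>> children; // what visitors see

    size_t countVisible() const
    {
        size_t n = 1;
        for (const auto & child : children)
            n += child->countVisible();
        return n;
    }
};

int main()
{
    auto join = std::make_shared<Node>();
    join->on_expression = std::make_shared<Node>();
    assert(join->countVisible() == 1);             // expression is invisible

    join->children.push_back(join->on_expression); // the fix in the hunk
    assert(join->countVisible() == 2);
}
```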
View File

@ -6724,9 +6724,10 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
const auto * constant_node = sort_node.getFillTo()->as<ConstantNode>();
if (!constant_node || !isColumnedAsNumber(constant_node->getResultType()))
throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION,
throw Exception(
ErrorCodes::INVALID_WITH_FILL_EXPRESSION,
"Sort FILL TO expression must be constant with numeric type. Actual {}. In scope {}",
sort_node.getFillFrom()->formatASTForErrorMessage(),
sort_node.getFillTo()->formatASTForErrorMessage(),
scope.scope_node->formatASTForErrorMessage());
size_t fill_to_expression_projection_names_size = fill_to_expression_projection_names.size();

View File

@ -168,7 +168,7 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
{ return tryGetEntry(pool, timeouts, fail_message, settings, &table_to_check, /*async_callback=*/ {}); };
return getManyImpl(settings, pool_mode, try_get_entry,
/*skip_unavailable_endpoints=*/ std::nullopt,
/*skip_unavailable_endpoints=*/ false, /// skip_unavailable_endpoints is used to get the min number of entries, and we need at least one
/*priority_func=*/ {},
settings.distributed_insert_skip_read_only_replicas);
}

View File

@ -42,6 +42,7 @@ public:
size_t max_error_cap = DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT);
using Entry = IConnectionPool::Entry;
using PoolWithFailoverBase<IConnectionPool>::getValidTryResult;
/** Allocates connection to work. */
Entry get(const ConnectionTimeouts & timeouts) override;
@ -97,7 +98,7 @@ public:
std::vector<Base::ShuffledPool> getShuffledPools(const Settings & settings, GetPriorityFunc priority_func = {}, bool use_slowdown_count = false);
size_t getMaxErrorCup() const { return Base::max_error_cap; }
size_t getMaxErrorCap() const { return Base::max_error_cap; }
void updateSharedError(std::vector<ShuffledPool> & shuffled_pools)
{

View File

@ -195,6 +195,12 @@ void HedgedConnections::sendQuery(
modified_settings.parallel_replica_offset = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset;
}
/// FIXME: Remove once we make `allow_experimental_analyzer` an obsolete setting.
/// Make sure the analyzer setting is explicitly set, so it is effectively applied on the remote server.
/// In other words, the initiator always controls whether the analyzer is enabled or not for
/// all servers involved in the distributed query processing.
modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings.allow_experimental_analyzer));
replica.connection->sendQuery(timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
replica.change_replica_timeout.setRelative(timeouts.receive_data_timeout);
replica.packet_receiver->setTimeout(hedged_connections_factory.getConnectionTimeouts().receive_timeout);

View File

@ -329,7 +329,7 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::processFinishedConnect
LOG_INFO(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);
ProfileEvents::increment(ProfileEvents::DistributedConnectionFailTry);
shuffled_pool.error_count = std::min(pool->getMaxErrorCup(), shuffled_pool.error_count + 1);
shuffled_pool.error_count = std::min(pool->getMaxErrorCap(), shuffled_pool.error_count + 1);
shuffled_pool.slowdown_count = 0;
if (shuffled_pool.error_count >= max_tries)

View File

@ -150,6 +150,12 @@ void MultiplexedConnections::sendQuery(
}
}
/// FIXME: Remove once we make `allow_experimental_analyzer` an obsolete setting.
/// Make sure the analyzer setting is explicitly set, so it is effectively applied on the remote server.
/// In other words, the initiator always controls whether the analyzer is enabled or not for
/// all servers involved in the distributed query processing.
modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings.allow_experimental_analyzer));
const bool enable_sample_offset_parallel_processing = settings.max_parallel_replicas > 1 && settings.allow_experimental_parallel_reading_from_replicas == 0;
size_t num_replicas = replica_states.size();

View File
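This hunk and the HedgedConnections one above apply the same trick: re-assigning a setting its current value marks it as explicitly changed, and, assuming only changed settings are serialized to remote servers (which is how ClickHouse's Settings behave), that pins the initiator's choice on every replica instead of letting each one fall back to its own default. A toy model of the rule:

```cpp
#include <cassert>
#include <map>
#include <string>

// Toy settings model: only explicitly changed settings travel to remote
// servers; everything else is resolved from the remote's own defaults.
struct Settings
{
    bool use_new_analyzer = true;        // compiled-in default (hypothetical name)
    std::map<std::string, bool> changed; // what gets serialized

    void set(const std::string & name, bool value)
    {
        use_new_analyzer = value;
        changed[name] = value;           // now marked as changed
    }

    std::map<std::string, bool> serializeForRemote() const { return changed; }
};

int main()
{
    Settings s;
    assert(s.serializeForRemote().empty()); // nothing sent: the remote
                                            // would use ITS default

    // The trick from the hunks: re-set the current value to pin it.
    s.set("use_new_analyzer", s.use_new_analyzer);
    assert(s.serializeForRemote().count("use_new_analyzer") == 1);
}
```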

@ -319,7 +319,38 @@ ColumnPtr ColumnAggregateFunction::filter(const Filter & filter, ssize_t result_
void ColumnAggregateFunction::expand(const Filter & mask, bool inverted)
{
expandDataByMask<char *>(data, mask, inverted);
ensureOwnership();
Arena & arena = createOrGetArena();
if (mask.size() < data.size())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Mask size should be no less than data size.");
ssize_t from = data.size() - 1;
ssize_t index = mask.size() - 1;
data.resize(mask.size());
while (index >= 0)
{
if (!!mask[index] ^ inverted)
{
if (from < 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Too many bytes in mask");
/// Copy only if it makes sense.
if (index != from)
data[index] = data[from];
--from;
}
else
{
data[index] = arena.alignedAlloc(func->sizeOfData(), func->alignOfData());
func->create(data[index]);
}
--index;
}
if (from != -1)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Not enough bytes in mask");
}
ColumnPtr ColumnAggregateFunction::permute(const Permutation & perm, size_t limit) const

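The replacement expands the packed `data` array in place, walking the mask from the back so each surviving element is moved into its final slot before anything can overwrite it, while cleared positions receive freshly created aggregate states. The same backward two-index walk on a plain vector, as a sketch:

```cpp
#include <cassert>
#include <cstddef>
#include <stdexcept>
#include <vector>

// Expand a packed vector in place so its elements land where mask is true;
// false positions get a default value (standing in for func->create()).
// Walking backwards lets the same buffer be reused without clobbering.
void expandByMask(std::vector<int> & data, const std::vector<bool> & mask)
{
    if (mask.size() < data.size())
        throw std::invalid_argument("mask is smaller than data");

    ptrdiff_t from = static_cast<ptrdiff_t>(data.size()) - 1;
    data.resize(mask.size());
    for (ptrdiff_t index = static_cast<ptrdiff_t>(mask.size()) - 1; index >= 0; --index)
    {
        if (mask[index])
        {
            if (from < 0)
                throw std::logic_error("too many set bits in mask");
            data[index] = data[from--]; // move survivor to its final slot
        }
        else
            data[index] = 0;            // freshly "created" state
    }
    if (from != -1)
        throw std::logic_error("not enough set bits in mask");
}

int main()
{
    std::vector<int> data = {10, 20};
    expandByMask(data, {true, false, true});
    assert((data == std::vector<int>{10, 0, 20}));
}
```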
View File

@ -288,16 +288,28 @@ ColumnWithTypeAndName ColumnFunction::reduce() const
function->getName(), toString(args), toString(captured));
ColumnsWithTypeAndName columns = captured_columns;
IFunction::ShortCircuitSettings settings;
/// Arguments of lazy executed function can also be lazy executed.
/// But we shouldn't execute arguments if this function is short circuit,
/// because it will handle lazy executed arguments by itself.
if (is_short_circuit_argument && !function->isShortCircuit(settings, args))
if (is_short_circuit_argument)
{
for (auto & col : columns)
IFunction::ShortCircuitSettings settings;
/// We shouldn't execute all arguments if this function is short-circuit,
/// because it will handle lazily executed arguments by itself.
/// Execute only the arguments with disabled lazy execution.
if (function->isShortCircuit(settings, args))
{
if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(col.column))
col = arg->reduce();
for (size_t i : settings.arguments_with_disabled_lazy_execution)
{
if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(columns[i].column))
columns[i] = arg->reduce();
}
}
else
{
for (auto & col : columns)
{
if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(col.column))
col = arg->reduce();
}
}
}

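The restructured branch separates short-circuit functions, which defer most argument evaluation to themselves, from ordinary ones: only the arguments listed in `arguments_with_disabled_lazy_execution` are forced eagerly. A sketch of that selective forcing, with hypothetical types:

```cpp
#include <cassert>
#include <functional>
#include <vector>

using Lazy = std::function<int()>;

// Hypothetical short-circuit descriptor: argument indexes that must be
// evaluated up front even for a short-circuit function.
struct ShortCircuitSettings
{
    std::vector<size_t> arguments_with_disabled_lazy_execution;
};

void forceArguments(const std::vector<Lazy> & args, bool is_short_circuit,
                    const ShortCircuitSettings & settings, std::vector<bool> & evaluated)
{
    if (is_short_circuit)
    {
        // Pre-evaluate only what the function cannot handle lazily itself.
        for (size_t i : settings.arguments_with_disabled_lazy_execution)
        {
            args[i]();
            evaluated[i] = true;
        }
    }
    else
    {
        for (size_t i = 0; i < args.size(); ++i)
        {
            args[i]();
            evaluated[i] = true;
        }
    }
}

int main()
{
    std::vector<bool> evaluated(2, false);
    std::vector<Lazy> args = {[] { return 1; }, [] { return 2; }};
    forceArguments(args, /*is_short_circuit=*/true, {{0}}, evaluated);
    assert(evaluated[0] && !evaluated[1]); // argument 1 stays lazy
}
```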
View File

@ -587,9 +587,11 @@ ColumnPtr ColumnTuple::compress() const
return ColumnCompressed::create(size(), byte_size,
[my_compressed = std::move(compressed)]() mutable
{
for (auto & column : my_compressed)
column = column->decompress();
return ColumnTuple::create(my_compressed);
Columns decompressed;
decompressed.reserve(my_compressed.size());
for (const auto & column : my_compressed)
decompressed.push_back(column->decompress());
return ColumnTuple::create(decompressed);
});
}

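The rewritten decompression callback builds a fresh `decompressed` vector instead of decompressing into its own move-captured state; with the old code, invoking the callback a second time would have called `decompress()` on columns that were already decompressed. A minimal illustration of why mutating captured state in such a callback is fragile:

```cpp
#include <cassert>
#include <vector>

// Stand-in for a compressed column: "decompressing" doubles the value.
using Col = int;
Col decompress(Col c) { return c * 2; }

int main()
{
    std::vector<Col> compressed = {1, 2, 3};

    // Fragile: mutates captured state, so the result changes per call.
    auto bad = [cols = compressed]() mutable {
        for (auto & c : cols)
            c = decompress(c);
        return cols;
    };

    // Robust: captured state stays compressed; each call builds a fresh copy.
    auto good = [cols = compressed]() {
        std::vector<Col> out;
        out.reserve(cols.size());
        for (const auto & c : cols)
            out.push_back(decompress(c));
        return out;
    };

    assert(bad() != bad());   // {2,4,6}, then {4,8,12}: double-decompressed
    assert(good() == good()); // always {2,4,6}
}
```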
View File

@ -815,7 +815,7 @@ ColumnPtr ColumnVariant::permute(const Permutation & perm, size_t limit) const
if (hasOnlyNulls())
{
if (limit)
return cloneResized(limit);
return cloneResized(limit ? std::min(size(), limit) : size());
/// If no limit, we can just return current immutable column.
return this->getPtr();
@ -849,7 +849,7 @@ ColumnPtr ColumnVariant::index(const IColumn & indexes, size_t limit) const
{
/// If we have only NULLs, index will take no effect, just return resized column.
if (hasOnlyNulls())
return cloneResized(limit);
return cloneResized(limit == 0 ? indexes.size(): limit);
/// Optimization when we have only one non empty variant and no NULLs.
/// In this case local_discriminators column is filled with identical values and offsets column
@ -905,8 +905,16 @@ ColumnPtr ColumnVariant::indexImpl(const PaddedPODArray<Type> & indexes, size_t
new_variants.reserve(num_variants);
for (size_t i = 0; i != num_variants; ++i)
{
size_t nested_limit = nested_perms[i].size() == variants[i]->size() ? 0 : nested_perms[i].size();
new_variants.emplace_back(variants[i]->permute(nested_perms[i], nested_limit));
/// Check if no values from this variant were selected.
if (nested_perms[i].empty())
{
new_variants.emplace_back(variants[i]->cloneEmpty());
}
else
{
size_t nested_limit = nested_perms[i].size() == variants[i]->size() ? 0 : nested_perms[i].size();
new_variants.emplace_back(variants[i]->permute(nested_perms[i], nested_limit));
}
}
/// We cannot use new_offsets column as an offset column, because it became invalid after variants permutation.
@ -1257,9 +1265,11 @@ ColumnPtr ColumnVariant::compress() const
return ColumnCompressed::create(size(), byte_size,
[my_local_discriminators_compressed = std::move(local_discriminators_compressed), my_offsets_compressed = std::move(offsets_compressed), my_compressed = std::move(compressed), my_local_to_global_discriminators = this->local_to_global_discriminators]() mutable
{
for (auto & variant : my_compressed)
variant = variant->decompress();
return ColumnVariant::create(my_local_discriminators_compressed->decompress(), my_offsets_compressed->decompress(), my_compressed, my_local_to_global_discriminators);
Columns decompressed;
decompressed.reserve(my_compressed.size());
for (const auto & variant : my_compressed)
decompressed.push_back(variant->decompress());
return ColumnVariant::create(my_local_discriminators_compressed->decompress(), my_offsets_compressed->decompress(), decompressed, my_local_to_global_discriminators);
});
}

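`permute` now clamps the requested `limit`, since `limit == 0` conventionally means "all rows" and a nonzero limit may exceed the column size. Spelled out as a tiny helper:

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>

// limit == 0 means "no limit" in this convention; otherwise never request
// more rows than the source column actually has.
size_t effectiveResultSize(size_t column_size, size_t limit)
{
    return limit == 0 ? column_size : std::min(column_size, limit);
}

int main()
{
    assert(effectiveResultSize(5, 0) == 5);  // no limit: keep all rows
    assert(effectiveResultSize(5, 3) == 3);  // normal truncation
    assert(effectiveResultSize(5, 10) == 5); // limit beyond size: clamp
}
```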
View File

@ -12,7 +12,9 @@
#include <base/getMemoryAmount.h>
#include <base/sleep.h>
#include <cstdint>
#include <filesystem>
#include <memory>
#include <optional>
#include "config.h"
@ -22,24 +24,170 @@
#define STRINGIFY(x) STRINGIFY_HELPER(x)
#endif
using namespace DB;
namespace DB
{
namespace ErrorCodes
{
extern const int CANNOT_CLOSE_FILE;
extern const int CANNOT_OPEN_FILE;
extern const int FILE_DOESNT_EXIST;
extern const int INCORRECT_DATA;
extern const int FILE_DOESNT_EXIST;
extern const int INCORRECT_DATA;
}
CgroupsMemoryUsageObserver::CgroupsMemoryUsageObserver(std::chrono::seconds wait_time_)
: log(getLogger("CgroupsMemoryUsageObserver"))
, wait_time(wait_time_)
, memory_usage_file(log)
}
namespace
{
LOG_INFO(log, "Initialized cgroups memory limit observer, wait time is {} sec", wait_time.count());
/// Format is
/// kernel 5
/// rss 15
/// [...]
uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key)
{
while (!buf.eof())
{
std::string current_key;
readStringUntilWhitespace(current_key, buf);
if (current_key != key)
{
std::string dummy;
readStringUntilNewlineInto(dummy, buf);
buf.ignore();
continue;
}
assertChar(' ', buf);
uint64_t value = 0;
readIntText(value, buf);
return value;
}
throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find '{}' in '{}'", key, buf.getFileName());
}
struct CgroupsV1Reader : ICgroupsReader
{
explicit CgroupsV1Reader(const std::filesystem::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
uint64_t readMemoryUsage() override
{
std::lock_guard lock(mutex);
buf.rewind();
return readMetricFromStatFile(buf, "rss");
}
private:
std::mutex mutex;
ReadBufferFromFile buf TSA_GUARDED_BY(mutex);
};
struct CgroupsV2Reader : ICgroupsReader
{
explicit CgroupsV2Reader(const std::filesystem::path & stat_file_dir)
: current_buf(stat_file_dir / "memory.current"), stat_buf(stat_file_dir / "memory.stat")
{
}
uint64_t readMemoryUsage() override
{
std::lock_guard lock(mutex);
current_buf.rewind();
stat_buf.rewind();
int64_t mem_usage = 0;
/// memory.current contains a single number
/// the reason why we subtract it is described here: https://github.com/ClickHouse/ClickHouse/issues/64652#issuecomment-2149630667
readIntText(mem_usage, current_buf);
mem_usage -= readMetricFromStatFile(stat_buf, "inactive_file");
chassert(mem_usage >= 0, "Negative memory usage");
return mem_usage;
}
private:
std::mutex mutex;
ReadBufferFromFile current_buf TSA_GUARDED_BY(mutex);
ReadBufferFromFile stat_buf TSA_GUARDED_BY(mutex);
};
/// Caveats:
/// - All of the logic in this file assumes that the current process is the only process in the
/// containing cgroup (or more precisely: the only process with significant memory consumption).
/// If this is not the case, then other processes' memory consumption may affect the internal
/// memory tracker ...
/// - Cgroups v1 and v2 allow nested cgroup hierarchies. As v1 has been deprecated for over half a
/// decade and will go away at some point, hierarchical detection is only implemented for v2.
/// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such
/// systems existed only for a short transition period.
std::optional<std::string> getCgroupsV2Path()
{
if (!cgroupsV2Enabled())
return {};
if (!cgroupsV2MemoryControllerEnabled())
return {};
std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
if (current_cgroup.empty())
return {};
/// Return the bottom-most nested current memory file. If there is no such file at the current
/// level, try again at the parent level as memory settings are inherited.
while (current_cgroup != default_cgroups_mount.parent_path())
{
const auto current_path = current_cgroup / "memory.current";
const auto stat_path = current_cgroup / "memory.stat";
if (std::filesystem::exists(current_path) && std::filesystem::exists(stat_path))
return {current_cgroup};
current_cgroup = current_cgroup.parent_path();
}
return {};
}
std::optional<std::string> getCgroupsV1Path()
{
auto path = default_cgroups_mount / "memory/memory.stat";
if (!std::filesystem::exists(path))
return {};
return {default_cgroups_mount / "memory"};
}
std::pair<std::string, CgroupsMemoryUsageObserver::CgroupsVersion> getCgroupsPath()
{
auto v2_path = getCgroupsV2Path();
if (v2_path.has_value())
return {*v2_path, CgroupsMemoryUsageObserver::CgroupsVersion::V2};
auto v1_path = getCgroupsV1Path();
if (v1_path.has_value())
return {*v1_path, CgroupsMemoryUsageObserver::CgroupsVersion::V1};
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot find cgroups v1 or v2 current memory file");
}
}
namespace DB
{
CgroupsMemoryUsageObserver::CgroupsMemoryUsageObserver(std::chrono::seconds wait_time_)
: log(getLogger("CgroupsMemoryUsageObserver")), wait_time(wait_time_)
{
const auto [cgroup_path, version] = getCgroupsPath();
if (version == CgroupsVersion::V2)
cgroup_reader = std::make_unique<CgroupsV2Reader>(cgroup_path);
else
cgroup_reader = std::make_unique<CgroupsV1Reader>(cgroup_path);
LOG_INFO(
log,
"Will read the current memory usage from '{}' (cgroups version: {}), wait time is {} sec",
cgroup_path,
(version == CgroupsVersion::V1) ? "v1" : "v2",
wait_time.count());
}
CgroupsMemoryUsageObserver::~CgroupsMemoryUsageObserver()
@ -77,14 +225,15 @@ void CgroupsMemoryUsageObserver::setMemoryUsageLimits(uint64_t hard_limit_, uint
{
if (up)
{
LOG_WARNING(log, "Exceeded sort memory limit ({})", ReadableSize(soft_limit_));
LOG_WARNING(log, "Exceeded soft memory limit ({})", ReadableSize(soft_limit_));
#if USE_JEMALLOC
# if USE_JEMALLOC
LOG_INFO(log, "Purging jemalloc arenas");
mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0);
#endif
# endif
/// Reset current usage in memory tracker. Expect zero for free_memory_in_allocator_arenas as we just purged them.
uint64_t memory_usage = memory_usage_file.readMemoryUsage();
uint64_t memory_usage = cgroup_reader->readMemoryUsage();
LOG_TRACE(log, "Read current memory usage {} bytes ({}) from cgroups", memory_usage, ReadableSize(memory_usage));
MemoryTracker::setRSS(memory_usage, 0);
LOG_INFO(log, "Purged jemalloc arenas. Current memory usage is {}", ReadableSize(memory_usage));
@ -104,153 +253,6 @@ void CgroupsMemoryUsageObserver::setOnMemoryAmountAvailableChangedFn(OnMemoryAmo
on_memory_amount_available_changed = on_memory_amount_available_changed_;
}
namespace
{
/// Caveats:
/// - All of the logic in this file assumes that the current process is the only process in the
/// containing cgroup (or more precisely: the only process with significant memory consumption).
/// If this is not the case, then other processes' memory consumption may affect the internal
/// memory tracker ...
/// - Cgroups v1 and v2 allow nested cgroup hierarchies. As v1 has been deprecated for over half a
/// decade and will go away at some point, hierarchical detection is only implemented for v2.
/// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such
/// systems existed only for a short transition period.
std::optional<std::string> getCgroupsV2FileName()
{
if (!cgroupsV2Enabled())
return {};
if (!cgroupsV2MemoryControllerEnabled())
return {};
std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
if (current_cgroup.empty())
return {};
/// Return the bottom-most nested current memory file. If there is no such file at the current
/// level, try again at the parent level as memory settings are inherited.
while (current_cgroup != default_cgroups_mount.parent_path())
{
auto path = current_cgroup / "memory.current";
if (std::filesystem::exists(path))
return {path};
current_cgroup = current_cgroup.parent_path();
}
return {};
}
std::optional<std::string> getCgroupsV1FileName()
{
auto path = default_cgroups_mount / "memory/memory.stat";
if (!std::filesystem::exists(path))
return {};
return {path};
}
std::pair<std::string, CgroupsMemoryUsageObserver::CgroupsVersion> getCgroupsFileName()
{
auto v2_file_name = getCgroupsV2FileName();
if (v2_file_name.has_value())
return {*v2_file_name, CgroupsMemoryUsageObserver::CgroupsVersion::V2};
auto v1_file_name = getCgroupsV1FileName();
if (v1_file_name.has_value())
return {*v1_file_name, CgroupsMemoryUsageObserver::CgroupsVersion::V1};
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot find cgroups v1 or v2 current memory file");
}
}
CgroupsMemoryUsageObserver::MemoryUsageFile::MemoryUsageFile(LoggerPtr log_)
: log(log_)
{
std::tie(file_name, version) = getCgroupsFileName();
LOG_INFO(log, "Will read the current memory usage from '{}' (cgroups version: {})", file_name, (version == CgroupsVersion::V1) ? "v1" : "v2");
fd = ::open(file_name.data(), O_RDONLY);
if (fd == -1)
ErrnoException::throwFromPath(
(errno == ENOENT) ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE,
file_name, "Cannot open file '{}'", file_name);
}
CgroupsMemoryUsageObserver::MemoryUsageFile::~MemoryUsageFile()
{
assert(fd != -1);
if (::close(fd) != 0)
{
try
{
ErrnoException::throwFromPath(
ErrorCodes::CANNOT_CLOSE_FILE,
file_name, "Cannot close file '{}'", file_name);
}
catch (const ErrnoException &)
{
tryLogCurrentException(log, __PRETTY_FUNCTION__);
}
}
}
uint64_t CgroupsMemoryUsageObserver::MemoryUsageFile::readMemoryUsage() const
{
/// File reads are probably not thread-safe, take the lock just to be sure
std::lock_guard lock(mutex);
ReadBufferFromFileDescriptor buf(fd);
buf.rewind();
uint64_t mem_usage = 0;
switch (version)
{
case CgroupsVersion::V1:
{
/// Format is
/// kernel 5
/// rss 15
/// [...]
std::string key;
bool found_rss = false;
while (!buf.eof())
{
readStringUntilWhitespace(key, buf);
if (key != "rss")
{
std::string dummy;
readStringUntilNewlineInto(dummy, buf);
buf.ignore();
continue;
}
assertChar(' ', buf);
readIntText(mem_usage, buf);
found_rss = true;
break;
}
if (!found_rss)
throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find 'rss' in '{}'", file_name);
break;
}
case CgroupsVersion::V2:
{
readIntText(mem_usage, buf);
break;
}
}
LOG_TRACE(log, "Read current memory usage {} from cgroups", ReadableSize(mem_usage));
return mem_usage;
}
void CgroupsMemoryUsageObserver::startThread()
{
if (!thread.joinable())
@ -302,7 +304,8 @@ void CgroupsMemoryUsageObserver::runThread()
std::lock_guard<std::mutex> limit_lock(limit_mutex);
if (soft_limit > 0 && hard_limit > 0)
{
uint64_t memory_usage = memory_usage_file.readMemoryUsage();
uint64_t memory_usage = cgroup_reader->readMemoryUsage();
LOG_TRACE(log, "Read current memory usage {} bytes ({}) from cgroups", memory_usage, ReadableSize(memory_usage));
if (memory_usage > hard_limit)
{
if (last_memory_usage <= hard_limit)
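
The discovery logic above returns the bottom-most memory.current in the hierarchy. A minimal self-contained sketch of that walk, assuming the conventional /sys/fs/cgroup mount and the "0::<path>" line format of /proc/self/cgroup (helper names here are illustrative, not ClickHouse's):

#include <cstdint>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <optional>
#include <string>

namespace fs = std::filesystem;

static const fs::path cgroups_mount = "/sys/fs/cgroup";

// Resolve the process's cgroup v2 directory: the unified hierarchy appears in
// /proc/self/cgroup as a line of the form "0::<path>".
std::optional<fs::path> cgroupV2DirOfProcess()
{
    std::ifstream in("/proc/self/cgroup");
    for (std::string line; std::getline(in, line);)
        if (line.rfind("0::", 0) == 0)
            return cgroups_mount / fs::path(line.substr(3)).relative_path();
    return std::nullopt;
}

// Return the bottom-most ancestor that has a memory.current file; memory
// settings are inherited, so walking up mirrors the logic in this diff.
std::optional<fs::path> findMemoryCurrentFile()
{
    auto dir = cgroupV2DirOfProcess();
    if (!dir)
        return std::nullopt;
    for (fs::path p = *dir;; p = p.parent_path())
    {
        if (fs::exists(p / "memory.current"))
            return p / "memory.current";
        if (p == cgroups_mount)
            return std::nullopt;
    }
}

int main()
{
    if (auto file = findMemoryCurrentFile())
    {
        uint64_t bytes = 0;
        std::ifstream(*file) >> bytes;
        std::cout << *file << ": " << bytes << " bytes\n";
    }
    else
        std::cout << "no cgroup v2 memory controller found\n";
}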

View File

@ -3,11 +3,19 @@
#include <Common/ThreadPool.h>
#include <chrono>
#include <memory>
#include <mutex>
namespace DB
{
struct ICgroupsReader
{
virtual ~ICgroupsReader() = default;
virtual uint64_t readMemoryUsage() = 0;
};
/// Does two things:
/// 1. Periodically reads the memory usage of the process from Linux cgroups.
/// You can specify soft or hard memory limits:
@ -61,27 +69,12 @@ private:
uint64_t last_memory_usage = 0; /// how much memory does the process use
uint64_t last_available_memory_amount; /// how much memory can the process use
/// Represents the cgroup virtual file that shows the memory consumption of the process's cgroup.
struct MemoryUsageFile
{
public:
explicit MemoryUsageFile(LoggerPtr log_);
~MemoryUsageFile();
uint64_t readMemoryUsage() const;
private:
LoggerPtr log;
mutable std::mutex mutex;
int fd TSA_GUARDED_BY(mutex) = -1;
CgroupsVersion version;
std::string file_name;
};
MemoryUsageFile memory_usage_file;
void stopThread();
void runThread();
std::unique_ptr<ICgroupsReader> cgroup_reader;
std::mutex thread_mutex;
std::condition_variable cond;
ThreadFromGlobalPool thread;
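
The ICgroupsReader interface introduced above lets the observer hold a v1 or v2 reader behind one pointer. A rough standalone sketch of what the two readers boil down to (class names follow the diff; the I/O details are simplified):

#include <cstdint>
#include <fstream>
#include <memory>
#include <string>

struct ICgroupsReader
{
    virtual ~ICgroupsReader() = default;
    virtual uint64_t readMemoryUsage() = 0;
};

// v2: memory.current holds a single integer, bytes of current usage.
struct CgroupsV2Reader : ICgroupsReader
{
    explicit CgroupsV2Reader(std::string path_) : path(std::move(path_)) {}
    uint64_t readMemoryUsage() override
    {
        std::ifstream in(path + "/memory.current");
        uint64_t bytes = 0;
        in >> bytes;
        return bytes;
    }
    std::string path;
};

// v1: memory.stat is "key value" per line; the rss row is what we want.
struct CgroupsV1Reader : ICgroupsReader
{
    explicit CgroupsV1Reader(std::string path_) : path(std::move(path_)) {}
    uint64_t readMemoryUsage() override
    {
        std::ifstream in(path + "/memory.stat");
        std::string key;
        uint64_t value = 0;
        while (in >> key >> value)
            if (key == "rss")
                return value;
        return 0;
    }
    std::string path;
};

std::unique_ptr<ICgroupsReader> makeReader(bool v2, std::string path)
{
    if (v2)
        return std::make_unique<CgroupsV2Reader>(std::move(path));
    return std::make_unique<CgroupsV1Reader>(std::move(path));
}

int main()
{
    auto reader = makeReader(/*v2=*/true, "/sys/fs/cgroup");
    return reader->readMemoryUsage() > 0 ? 0 : 1;
}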

View File

@ -626,9 +626,11 @@ ConfigProcessor::Files ConfigProcessor::getConfigMergeFiles(const std::string &
XMLDocumentPtr ConfigProcessor::processConfig(
bool * has_zk_includes,
zkutil::ZooKeeperNodeCache * zk_node_cache,
const zkutil::EventPtr & zk_changed_event)
const zkutil::EventPtr & zk_changed_event,
bool is_config_changed)
{
LOG_DEBUG(log, "Processing configuration file '{}'.", path);
if (is_config_changed)
LOG_DEBUG(log, "Processing configuration file '{}'.", path);
XMLDocumentPtr config;
@ -657,7 +659,8 @@ XMLDocumentPtr ConfigProcessor::processConfig(
/// When we can use a config embedded in the binary.
if (auto it = embedded_configs.find(path); it != embedded_configs.end())
{
LOG_DEBUG(log, "There is no file '{}', will use embedded config.", path);
if (is_config_changed)
LOG_DEBUG(log, "There is no file '{}', will use embedded config.", path);
config = dom_parser.parseMemory(it->second.data(), it->second.size());
}
else
@ -671,7 +674,8 @@ XMLDocumentPtr ConfigProcessor::processConfig(
{
try
{
LOG_DEBUG(log, "Merging configuration file '{}'.", merge_file);
if (is_config_changed)
LOG_DEBUG(log, "Merging configuration file '{}'.", merge_file);
XMLDocumentPtr with;
@ -774,10 +778,10 @@ XMLDocumentPtr ConfigProcessor::processConfig(
return config;
}
ConfigProcessor::LoadedConfig ConfigProcessor::loadConfig(bool allow_zk_includes)
ConfigProcessor::LoadedConfig ConfigProcessor::loadConfig(bool allow_zk_includes, bool is_config_changed)
{
bool has_zk_includes;
XMLDocumentPtr config_xml = processConfig(&has_zk_includes);
XMLDocumentPtr config_xml = processConfig(&has_zk_includes, nullptr, nullptr, is_config_changed);
if (has_zk_includes && !allow_zk_includes)
throw Poco::Exception("Error while loading config '" + path + "': from_zk includes are not allowed!");
@ -790,14 +794,15 @@ ConfigProcessor::LoadedConfig ConfigProcessor::loadConfig(bool allow_zk_includes
ConfigProcessor::LoadedConfig ConfigProcessor::loadConfigWithZooKeeperIncludes(
zkutil::ZooKeeperNodeCache & zk_node_cache,
const zkutil::EventPtr & zk_changed_event,
bool fallback_to_preprocessed)
bool fallback_to_preprocessed,
bool is_config_changed)
{
XMLDocumentPtr config_xml;
bool has_zk_includes;
bool processed_successfully = false;
try
{
config_xml = processConfig(&has_zk_includes, &zk_node_cache, zk_changed_event);
config_xml = processConfig(&has_zk_includes, &zk_node_cache, zk_changed_event, is_config_changed);
processed_successfully = true;
}
catch (const Poco::Exception & ex)

View File

@ -63,7 +63,8 @@ public:
XMLDocumentPtr processConfig(
bool * has_zk_includes = nullptr,
zkutil::ZooKeeperNodeCache * zk_node_cache = nullptr,
const zkutil::EventPtr & zk_changed_event = nullptr);
const zkutil::EventPtr & zk_changed_event = nullptr,
bool is_config_changed = true);
/// These configurations will be used if there is no configuration file.
static void registerEmbeddedConfig(std::string name, std::string_view content);
@ -86,14 +87,15 @@ public:
/// If allow_zk_includes is true, expect that the configuration XML can contain from_zk nodes.
/// If it is the case, set has_zk_includes to true and don't write config-preprocessed.xml,
/// expecting that config would be reloaded with zookeeper later.
LoadedConfig loadConfig(bool allow_zk_includes = false);
LoadedConfig loadConfig(bool allow_zk_includes = false, bool is_config_changed = true);
/// If fallback_to_preprocessed is true, then if KeeperException is thrown during config
/// processing, load the configuration from the preprocessed file.
LoadedConfig loadConfigWithZooKeeperIncludes(
zkutil::ZooKeeperNodeCache & zk_node_cache,
const zkutil::EventPtr & zk_changed_event,
bool fallback_to_preprocessed = false);
bool fallback_to_preprocessed = false,
bool is_config_changed = true);
/// Save preprocessed config to specified directory.
/// If preprocessed_dir is empty - calculate from loaded_config.path + /preprocessed_configs/

View File

@ -97,7 +97,8 @@ void ConfigReloader::reloadIfNewer(bool force, bool throw_on_error, bool fallbac
std::lock_guard lock(reload_mutex);
FilesChangesTracker new_files = getNewFileList();
if (force || need_reload_from_zk || new_files.isDifferOrNewerThan(files))
const bool is_config_changed = new_files.isDifferOrNewerThan(files);
if (force || need_reload_from_zk || is_config_changed)
{
ConfigProcessor config_processor(config_path);
ConfigProcessor::LoadedConfig loaded_config;
@ -106,10 +107,10 @@ void ConfigReloader::reloadIfNewer(bool force, bool throw_on_error, bool fallbac
try
{
loaded_config = config_processor.loadConfig(/* allow_zk_includes = */ true);
loaded_config = config_processor.loadConfig(/* allow_zk_includes = */ true, is_config_changed);
if (loaded_config.has_zk_includes)
loaded_config = config_processor.loadConfigWithZooKeeperIncludes(
zk_node_cache, zk_changed_event, fallback_to_preprocessed);
zk_node_cache, zk_changed_event, fallback_to_preprocessed, is_config_changed);
}
catch (const Coordination::Exception & e)
{
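
The reload path now computes is_config_changed once and threads it down so the per-file "Processing configuration file" messages appear only on real changes. A toy version of the idea behind FilesChangesTracker::isDifferOrNewerThan, assuming plain mtime comparison (the real tracker also handles ZooKeeper includes):

#include <filesystem>
#include <iostream>
#include <map>
#include <string>

namespace fs = std::filesystem;

// Toy change tracker: a reload is needed only if some file is new, removed,
// or has a strictly newer modification time than what we saw last time.
struct FilesChangesTracker
{
    std::map<std::string, fs::file_time_type> files;

    void add(const std::string & path) { files[path] = fs::last_write_time(path); }

    bool isDifferOrNewerThan(const FilesChangesTracker & old) const
    {
        if (files.size() != old.files.size())
            return true;
        for (const auto & [name, time] : files)
        {
            auto it = old.files.find(name);
            if (it == old.files.end() || time > it->second)
                return true;
        }
        return false;
    }
};

int main()
{
    FilesChangesTracker old_files, new_files;
    // In the reloader, old_files is what the previous iteration recorded and
    // new_files is freshly scanned; is_config_changed then gates the log spam.
    bool is_config_changed = new_files.isDifferOrNewerThan(old_files);
    std::cout << (is_config_changed ? "reload" : "nothing to do") << '\n';
}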

View File

@ -177,6 +177,9 @@
M(MergeTreeOutdatedPartsLoaderThreads, "Number of threads in the threadpool for loading Outdated data parts.") \
M(MergeTreeOutdatedPartsLoaderThreadsActive, "Number of active threads in the threadpool for loading Outdated data parts.") \
M(MergeTreeOutdatedPartsLoaderThreadsScheduled, "Number of queued or active jobs in the threadpool for loading Outdated data parts.") \
M(MergeTreeUnexpectedPartsLoaderThreads, "Number of threads in the threadpool for loading Unexpected data parts.") \
M(MergeTreeUnexpectedPartsLoaderThreadsActive, "Number of active threads in the threadpool for loading Unexpected data parts.") \
M(MergeTreeUnexpectedPartsLoaderThreadsScheduled, "Number of queued or active jobs in the threadpool for loading Unexpected data parts.") \
M(MergeTreePartsCleanerThreads, "Number of threads in the MergeTree parts cleaner thread pool.") \
M(MergeTreePartsCleanerThreadsActive, "Number of threads in the MergeTree parts cleaner thread pool running a task.") \
M(MergeTreePartsCleanerThreadsScheduled, "Number of queued or active jobs in the MergeTree parts cleaner thread pool.") \

View File

@ -54,6 +54,7 @@ static struct InitFiu
PAUSEABLE_ONCE(finish_set_quorum_failed_parts) \
PAUSEABLE_ONCE(finish_clean_quorum_failed_parts) \
PAUSEABLE(dummy_pausable_failpoint) \
PAUSEABLE(stop_moving_part_before_swap_with_active) \
ONCE(execute_query_calling_empty_set_result_func_on_exception)
namespace FailPoints

View File

@ -67,19 +67,6 @@ struct HashTableNoState
};
/// These functions can be overloaded for custom types.
namespace ZeroTraits
{
template <typename T>
bool check(const T x) { return x == T{}; }
template <typename T>
void set(T & x) { x = T{}; }
}
/** Numbers are compared bitwise.
* Complex types are compared by operator== as usual (this is important if there are gaps).
*
@ -87,18 +74,32 @@ void set(T & x) { x = T{}; }
* Otherwise the invariants in hash table probing are not met when NaNs are present.
*/
template <typename T>
inline bool bitEquals(T && a, T && b)
inline bool bitEquals(T a, T b)
{
using RealT = std::decay_t<T>;
if constexpr (std::is_floating_point_v<RealT>)
/// Note that memcmp with constant size is compiler builtin.
return 0 == memcmp(&a, &b, sizeof(RealT)); /// NOLINT
if constexpr (std::is_floating_point_v<T>)
/// Note that memcmp with constant size is a compiler builtin.
return 0 == memcmp(&a, &b, sizeof(T)); /// NOLINT
else
return a == b;
}
/// These functions can be overloaded for custom types.
namespace ZeroTraits
{
template <typename T>
bool check(const T x)
{
return bitEquals(x, T{});
}
template <typename T>
void set(T & x) { x = T{}; }
}
/**
* getKey/Mapped -- methods to get key/"mapped" values from the LookupResult returned by find() and
* emplace() methods of HashTable. Must not be called for a null LookupResult.
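
Moving ZeroTraits below bitEquals lets check() reuse the same bitwise comparison as key lookup. A short standalone demonstration of why bitwise equality matters for floating-point keys:

#include <cassert>
#include <cstring>
#include <limits>
#include <type_traits>

// Keys are compared bitwise so that a NaN key can be found again: with
// operator== a NaN never equals itself and probing would walk past it.
template <typename T>
bool bitEquals(T a, T b)
{
    if constexpr (std::is_floating_point_v<T>)
        return 0 == std::memcmp(&a, &b, sizeof(T)); // constant-size memcmp is a compiler builtin
    else
        return a == b;
}

int main()
{
    double nan = std::numeric_limits<double>::quiet_NaN();
    assert(nan != nan);             // IEEE 754: NaN never compares equal to itself
    assert(bitEquals(nan, nan));    // the same bit pattern, however, does
    assert(!bitEquals(0.0, -0.0));  // corollary: +0.0 and -0.0 are distinct keys
    assert(bitEquals(42, 42));      // non-float types fall back to operator==
    return 0;
}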

View File

@ -3,10 +3,14 @@
#include "config.h"
#if USE_RAPIDJSON
# include <base/types.h>
# include <base/defines.h>
# include <rapidjson/document.h>
# include "ElementTypes.h"
/// Prevent stack overflow:
#define RAPIDJSON_PARSE_DEFAULT_FLAGS (kParseIterativeFlag)
#include <base/types.h>
#include <base/defines.h>
#include <rapidjson/document.h>
#include "ElementTypes.h"
namespace DB
{
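
Defining RAPIDJSON_PARSE_DEFAULT_FLAGS before the first rapidjson include switches the parser to iterative mode, trading recursion for an explicit stack. A sketch of the effect, assuming rapidjson is available (nesting depth chosen arbitrarily):

// The macro must be visible before rapidjson/document.h is included, otherwise
// the recursive parser is compiled in and deep nesting can overflow the stack.
#define RAPIDJSON_PARSE_DEFAULT_FLAGS (kParseIterativeFlag)
#include <rapidjson/document.h>

#include <cassert>
#include <string>

int main()
{
    // 100000 nested arrays: lethal for recursive descent, fine iteratively.
    std::string deep(100000, '[');
    deep.append(100000, ']');

    rapidjson::Document doc;
    doc.Parse(deep.c_str());
    assert(!doc.HasParseError());
    return 0;
}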

View File

@ -117,6 +117,26 @@ public:
const TryGetEntryFunc & try_get_entry,
const GetPriorityFunc & get_priority);
// Returns whether the provided TryResult is an invalid one that cannot be used. Used to prevent logical errors.
bool isTryResultInvalid(const TryResult & result, bool skip_read_only_replicas) const
{
return result.entry.isNull() || !result.is_usable || (skip_read_only_replicas && result.is_readonly);
}
TryResult getValidTryResult(const std::vector<TryResult> & results, bool skip_read_only_replicas) const
{
if (results.empty())
throw DB::Exception(DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED, "Cannot get any valid connection because all connection tries failed");
auto result = results.front();
if (isTryResultInvalid(result, skip_read_only_replicas))
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR,
"Got an invalid connection result: entry.isNull {}, is_usable {}, is_up_to_date {}, delay {}, is_readonly {}, skip_read_only_replicas {}",
result.entry.isNull(), result.is_usable, result.is_up_to_date, result.delay, result.is_readonly, skip_read_only_replicas);
return result;
}
size_t getPoolSize() const { return nested_pools.size(); }
protected:
@ -303,7 +323,7 @@ PoolWithFailoverBase<TNestedPool>::getMany(
throw DB::NetException(DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED,
"All connection tries failed. Log: \n\n{}\n", fail_messages);
std::erase_if(try_results, [&](const TryResult & r) { return r.entry.isNull() || !r.is_usable || (skip_read_only_replicas && r.is_readonly); });
std::erase_if(try_results, [&](const TryResult & r) { return isTryResultInvalid(r, skip_read_only_replicas); });
/// Sort so that preferred items are near the beginning.
std::stable_sort(
@ -324,6 +344,9 @@ PoolWithFailoverBase<TNestedPool>::getMany(
}
else if (up_to_date_count >= min_entries)
{
if (try_results.size() < up_to_date_count)
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Could not find enough connections for up-to-date results. Got: {}, needed: {}", try_results.size(), up_to_date_count);
/// There is enough up-to-date entries.
try_results.resize(up_to_date_count);
}
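
The new isTryResultInvalid() gives getMany()'s bulk filter and the throwing getValidTryResult() a single shared predicate, so the two paths cannot drift apart. A trimmed-down standalone model of that structure (field set and error types simplified; requires C++20 for std::erase_if):

#include <stdexcept>
#include <vector>

// Trimmed-down stand-in for the pool's TryResult.
struct TryResult
{
    bool entry_is_null = true; // no connection was established at all
    bool is_usable = false;    // handshake succeeded, table checks passed
    bool is_readonly = false;  // replica only accepts reads
};

bool isTryResultInvalid(const TryResult & r, bool skip_read_only_replicas)
{
    return r.entry_is_null || !r.is_usable || (skip_read_only_replicas && r.is_readonly);
}

TryResult getValidTryResult(const std::vector<TryResult> & results, bool skip_read_only_replicas)
{
    if (results.empty())
        throw std::runtime_error("all connection tries failed");
    if (isTryResultInvalid(results.front(), skip_read_only_replicas))
        throw std::logic_error("got an invalid connection result");
    return results.front();
}

int main()
{
    std::vector<TryResult> results = {
        {false, true, true},  // usable but read-only
        {false, true, false}, // usable and writable
    };
    // e.g. an INSERT must skip read-only replicas:
    std::erase_if(results, [](const TryResult & r) { return isTryResultInvalid(r, true); });
    return getValidTryResult(results, true).is_readonly ? 1 : 0;
}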

View File

@ -1417,7 +1417,7 @@ size_t getFailedOpIndex(Coordination::Error exception_code, const Coordination::
KeeperMultiException::KeeperMultiException(Coordination::Error exception_code, size_t failed_op_index_, const Coordination::Requests & requests_, const Coordination::Responses & responses_)
: KeeperException(exception_code, "Transaction failed: Op #{}, path", failed_op_index_),
: KeeperException(exception_code, "Transaction failed ({}): Op #{}, path", exception_code, failed_op_index_),
requests(requests_), responses(responses_), failed_op_index(failed_op_index_)
{
addMessage(getPathForFirstFailedOp());

View File

@ -1,4 +1,5 @@
#pragma once
/// Get number of CPU cores without hyper-threading.
/// The calculation respects possible cgroups limits.
unsigned getNumberOfPhysicalCPUCores();

View File

@ -2062,6 +2062,7 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro
response.responses[i]->error = failed_multi->error_codes[i];
}
response.error = failed_multi->global_error;
storage.uncommitted_state.commit(zxid);
return response_ptr;
}
@ -2395,7 +2396,19 @@ void KeeperStorage::preprocessRequest(
if (check_acl && !request_processor->checkAuth(*this, session_id, false))
{
uncommitted_state.deltas.emplace_back(new_last_zxid, Coordination::Error::ZNOAUTH);
/// Multi requests handle failures using FailedMultiDelta
if (zk_request->getOpNum() == Coordination::OpNum::Multi || zk_request->getOpNum() == Coordination::OpNum::MultiRead)
{
const auto & multi_request = dynamic_cast<const Coordination::ZooKeeperMultiRequest &>(*zk_request);
std::vector<Coordination::Error> response_errors;
response_errors.resize(multi_request.requests.size(), Coordination::Error::ZOK);
uncommitted_state.deltas.emplace_back(
new_last_zxid, KeeperStorage::FailedMultiDelta{std::move(response_errors), Coordination::Error::ZNOAUTH});
}
else
{
uncommitted_state.deltas.emplace_back(new_last_zxid, Coordination::Error::ZNOAUTH);
}
return;
}
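
Before this fix, an unauthorized multi request got a single plain-error delta, which the multi response path could not unpack. A compact model of the FailedMultiDelta shape this hunk produces instead: one error slot per subrequest plus a global error (the variant layout and enum here are illustrative; -102 is ZooKeeper's ZNOAUTH code):

#include <cstdint>
#include <iostream>
#include <variant>
#include <vector>

enum class Error : int32_t { ZOK = 0, ZNOAUTH = -102 };

// Hypothetical mirror of the delta payloads: a plain error for single requests,
// a per-subrequest error vector plus a global error for multi requests.
struct FailedMultiDelta
{
    std::vector<Error> error_codes;
    Error global_error{Error::ZOK};
};

using DeltaPayload = std::variant<Error, FailedMultiDelta>;

DeltaPayload makeAuthFailureDelta(size_t num_subrequests, bool is_multi)
{
    if (!is_multi)
        return Error::ZNOAUTH;
    // Multi responses carry one error slot per subrequest, so the client can
    // match results positionally even though nothing was executed.
    return FailedMultiDelta{std::vector<Error>(num_subrequests, Error::ZOK), Error::ZNOAUTH};
}

int main()
{
    auto delta = makeAuthFailureDelta(/*num_subrequests=*/2, /*is_multi=*/true);
    auto & multi = std::get<FailedMultiDelta>(delta);
    std::cout << "subrequests: " << multi.error_codes.size()
              << ", global error: " << static_cast<int32_t>(multi.global_error) << '\n';
}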

View File

@ -301,6 +301,7 @@ public:
struct FailedMultiDelta
{
std::vector<Coordination::Error> error_codes;
Coordination::Error global_error{Coordination::Error::ZOK};
};
// Denotes end of a subrequest in multi request

View File

@ -2199,6 +2199,58 @@ TEST_P(CoordinationTest, TestPreprocessWhenCloseSessionIsPrecommitted)
}
}
TEST_P(CoordinationTest, TestMultiRequestWithNoAuth)
{
using namespace Coordination;
using namespace DB;
ChangelogDirTest snapshots("./snapshots");
this->setSnapshotDirectory("./snapshots");
ResponsesQueue queue(std::numeric_limits<size_t>::max());
SnapshotsQueue snapshots_queue{1};
int64_t session_without_auth = 1;
int64_t session_with_auth = 2;
size_t term = 0;
auto state_machine = std::make_shared<KeeperStateMachine>(queue, snapshots_queue, keeper_context, nullptr);
state_machine->init();
auto & storage = state_machine->getStorageUnsafe();
auto auth_req = std::make_shared<ZooKeeperAuthRequest>();
auth_req->scheme = "digest";
auth_req->data = "test_user:test_password";
// Add auth data to the session
auto auth_entry = getLogEntryFromZKRequest(term, session_with_auth, state_machine->getNextZxid(), auth_req);
state_machine->pre_commit(1, auth_entry->get_buf());
state_machine->commit(1, auth_entry->get_buf());
std::string node_with_acl = "/node_with_acl";
{
auto create_req = std::make_shared<ZooKeeperCreateRequest>();
create_req->path = node_with_acl;
create_req->data = "notmodified";
create_req->acls = {{.permissions = ACL::Read, .scheme = "auth", .id = ""}};
auto create_entry = getLogEntryFromZKRequest(term, session_with_auth, state_machine->getNextZxid(), create_req);
state_machine->pre_commit(3, create_entry->get_buf());
state_machine->commit(3, create_entry->get_buf());
ASSERT_TRUE(storage.container.contains(node_with_acl));
}
Requests ops;
ops.push_back(zkutil::makeSetRequest(node_with_acl, "modified", -1));
ops.push_back(zkutil::makeCheckRequest("/nonexistentnode", -1));
auto multi_req = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});
auto multi_entry = getLogEntryFromZKRequest(term, session_without_auth, state_machine->getNextZxid(), multi_req);
state_machine->pre_commit(4, multi_entry->get_buf());
state_machine->commit(4, multi_entry->get_buf());
auto node_it = storage.container.find(node_with_acl);
ASSERT_FALSE(node_it == storage.container.end());
ASSERT_TRUE(node_it->value.getData() == "notmodified");
}
TEST_P(CoordinationTest, TestSetACLWithAuthSchemeForAclWhenAuthIsPrecommitted)
{
using namespace Coordination;

View File

@ -40,7 +40,7 @@ static constexpr auto SHOW_CHARS_ON_SYNTAX_ERROR = ptrdiff_t(160);
/// each period reduces the error counter by 2 times
/// too short a period can cause errors to disappear immediately after creation.
static constexpr auto DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_DECREASE_ERROR_PERIOD = 60;
/// replica error max cap, this is to prevent replica from accumulating too many errors and taking to long to recover.
/// replica error max cap, this is to prevent replica from accumulating too many errors and taking too long to recover.
static constexpr auto DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT = 1000;
/// The boundary on which the blocks for asynchronous file operations should be aligned.

View File

@ -893,11 +893,13 @@ NearestFieldType<std::decay_t<T>> & Field::get()
template <typename T>
auto & Field::safeGet()
{
const Types::Which requested = TypeToEnum<NearestFieldType<std::decay_t<T>>>::value;
const Types::Which target = TypeToEnum<NearestFieldType<std::decay_t<T>>>::value;
if (which != requested)
/// We allow converting int64 <-> uint64, int64 <-> bool, uint64 <-> bool in safeGet().
if (target != which
&& (!isInt64OrUInt64orBoolFieldType(target) || !isInt64OrUInt64orBoolFieldType(which)))
throw Exception(ErrorCodes::BAD_GET,
"Bad get: has {}, requested {}", getTypeName(), requested);
"Bad get: has {}, requested {}", getTypeName(), target);
return get<T>();
}
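
A stripped-down model of the relaxed tag check above: requests within the int64/uint64/bool family no longer throw, since those fields share a 64-bit representation (enum and helper names are illustrative):

#include <stdexcept>

// Illustrative stand-in for Field's type tag.
enum class Which { UInt64, Int64, Bool, String };

bool isInt64OrUInt64orBool(Which w)
{
    return w == Which::UInt64 || w == Which::Int64 || w == Which::Bool;
}

// Throw only when the stored and requested types differ AND at least one of
// them is outside the 64-bit int/bool family.
void checkSafeGet(Which stored, Which requested)
{
    if (requested != stored
        && (!isInt64OrUInt64orBool(requested) || !isInt64OrUInt64orBool(stored)))
        throw std::logic_error("bad get: type mismatch");
}

int main()
{
    checkSafeGet(Which::UInt64, Which::Int64); // allowed: same underlying family
    try
    {
        checkSafeGet(Which::String, Which::Int64); // still a genuine mismatch
        return 1;
    }
    catch (const std::logic_error &)
    {
        return 0;
    }
}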

View File

@ -3,6 +3,7 @@
#if USE_LIBPQXX
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnFixedString.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnDecimal.h>
@ -82,6 +83,8 @@ void insertPostgreSQLValue(
case ExternalResultDescription::ValueType::vtEnum8:
case ExternalResultDescription::ValueType::vtEnum16:
case ExternalResultDescription::ValueType::vtFixedString:
assert_cast<ColumnFixedString &>(column).insertData(value.data(), value.size());
break;
case ExternalResultDescription::ValueType::vtString:
assert_cast<ColumnString &>(column).insertData(value.data(), value.size());
break;

View File

@ -25,6 +25,7 @@ namespace DB
M(UInt64, io_thread_pool_queue_size, 10000, "Queue size for IO thread pool.", 0) \
M(UInt64, max_active_parts_loading_thread_pool_size, 64, "The number of threads to load active set of data parts (Active ones) at startup.", 0) \
M(UInt64, max_outdated_parts_loading_thread_pool_size, 32, "The number of threads to load inactive set of data parts (Outdated ones) at startup.", 0) \
M(UInt64, max_unexpected_parts_loading_thread_pool_size, 8, "The number of threads to load inactive set of data parts (Unexpected ones) at startup.", 0) \
M(UInt64, max_parts_cleaning_thread_pool_size, 128, "The number of threads for concurrent removal of inactive data parts.", 0) \
M(UInt64, max_mutations_bandwidth_for_server, 0, "The maximum read speed of all mutations on server in bytes per second. Zero means unlimited.", 0) \
M(UInt64, max_merges_bandwidth_for_server, 0, "The maximum read speed of all merges on server in bytes per second. Zero means unlimited.", 0) \

View File

@ -381,7 +381,7 @@ class IColumn;
M(Float, opentelemetry_start_trace_probability, 0., "Probability to start an OpenTelemetry trace for an incoming query.", 0) \
M(Bool, opentelemetry_trace_processors, false, "Collect OpenTelemetry spans for processors.", 0) \
M(Bool, prefer_column_name_to_alias, false, "Prefer using column names instead of aliases if possible.", 0) \
M(Bool, allow_experimental_analyzer, true, "Allow experimental analyzer.", 0) \
M(Bool, allow_experimental_analyzer, true, "Allow experimental analyzer.", IMPORTANT) \
M(Bool, analyzer_compatibility_join_using_top_level_identifier, false, "Force to resolve identifier in JOIN USING from projection (for example, in `SELECT a + 1 AS b FROM t1 JOIN t2 USING (b)` join will be performed by `t1.a + 1 = t2.b`, rather then `t1.b = t2.b`).", 0) \
M(Bool, prefer_global_in_and_join, false, "If enabled, all IN/JOIN operators will be rewritten as GLOBAL IN/JOIN. It's useful when the to-be-joined tables are only available on the initiator and we need to always scatter their data on-the-fly during distributed processing with the GLOBAL keyword. It's also useful to reduce the need to access the external sources joining external tables.", 0) \
M(Bool, enable_vertical_final, false, "Not recommended. If enable, remove duplicated rows during FINAL by marking rows as deleted and filtering them later instead of merging rows", 0) \

View File

@ -35,6 +35,11 @@
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
/** Cursor allows comparing rows in different blocks (and parts).
* Cursor moves inside a single block.
* It is used in a priority queue.
@ -83,21 +88,27 @@ struct SortCursorImpl
SortCursorImpl(
const Block & header,
const Columns & columns,
size_t num_rows,
const SortDescription & desc_,
size_t order_ = 0,
IColumn::Permutation * perm = nullptr)
: desc(desc_), sort_columns_size(desc.size()), order(order_), need_collation(desc.size())
{
reset(columns, header, perm);
reset(columns, header, num_rows, perm);
}
bool empty() const { return rows == 0; }
/// Set the cursor to the beginning of the new block.
void reset(const Block & block, IColumn::Permutation * perm = nullptr) { reset(block.getColumns(), block, perm); }
void reset(const Block & block, IColumn::Permutation * perm = nullptr)
{
if (block.getColumns().empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty column list in block");
reset(block.getColumns(), block, block.getColumns()[0]->size(), perm);
}
/// Set the cursor to the beginning of the new block.
void reset(const Columns & columns, const Block & block, IColumn::Permutation * perm = nullptr)
void reset(const Columns & columns, const Block & block, UInt64 num_rows, IColumn::Permutation * perm = nullptr)
{
all_columns.clear();
sort_columns.clear();
@ -125,7 +136,7 @@ struct SortCursorImpl
}
pos = 0;
rows = all_columns[0]->size();
rows = num_rows;
permutation = perm;
}
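
A toy illustration of why reset() now takes num_rows explicitly: with an empty column list there is no all_columns[0] to measure, so the caller must supply the count, and the convenience overload fails loudly instead (class shapes here are simplified stand-ins):

#include <memory>
#include <stdexcept>
#include <vector>

struct Column { std::vector<int> data; size_t size() const { return data.size(); } };
using Columns = std::vector<std::shared_ptr<Column>>;

struct Cursor
{
    size_t pos = 0;
    size_t rows = 0;

    // Caller supplies num_rows explicitly; works even without columns to ask.
    void reset(const Columns & columns, size_t num_rows)
    {
        (void)columns;
        pos = 0;
        rows = num_rows;
    }

    // Convenience overload keeps the old behaviour but now guards the access.
    void reset(const Columns & columns)
    {
        if (columns.empty())
            throw std::logic_error("Empty column list in block");
        reset(columns, columns[0]->size());
    }
};

int main()
{
    Cursor cursor;
    Columns columns{std::make_shared<Column>(Column{{1, 2, 3}})};
    cursor.reset(columns);    // derives rows == 3 from the first column
    cursor.reset(columns, 3); // or pass the count explicitly
    return cursor.rows == 3 ? 0 : 1;
}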

View File

@ -9,6 +9,7 @@ namespace DB
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
extern const int LOGICAL_ERROR;
}
void SerializationVariantElement::enumerateStreams(
@ -148,27 +149,21 @@ void SerializationVariantElement::deserializeBinaryBulkWithMultipleStreams(
assert_cast<ColumnLowCardinality &>(*variant_element_state->variant->assumeMutable()).nestedRemoveNullable();
}
/// If nothing to deserialize, just insert defaults.
if (variant_limit == 0)
{
mutable_column->insertManyDefaults(limit);
return;
}
addVariantToPath(settings.path);
nested_serialization->deserializeBinaryBulkWithMultipleStreams(variant_element_state->variant, variant_limit, settings, variant_element_state->variant_element_state, cache);
removeVariantFromPath(settings.path);
/// If nothing was deserialized when variant_limit > 0
/// it means that we don't have a stream for such sub-column.
/// It may happen during ALTER MODIFY column with Variant extension.
/// In this case we should just insert default values.
if (variant_element_state->variant->empty())
/// If there was nothing to deserialize or nothing was actually deserialized when variant_limit > 0, just insert defaults.
/// The second case means that we don't have a stream for such sub-column. It may happen during ALTER MODIFY column with Variant extension.
if (variant_limit == 0 || variant_element_state->variant->empty())
{
mutable_column->insertManyDefaults(limit);
return;
}
if (variant_element_state->variant->size() < variant_limit)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of deserialized variant column less than the limit: {} < {}", variant_element_state->variant->size(), variant_limit);
size_t variant_offset = variant_element_state->variant->size() - variant_limit;
/// If we have only our discriminator in range, insert the whole range to result column.

View File

@ -15,6 +15,7 @@
#include <Interpreters/Context.h>
#include <Interpreters/InterpreterCreateQuery.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/NormalizeSelectWithUnionQueryVisitor.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ParserCreateQuery.h>
@ -160,7 +161,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
size_t prev_tables_count = metadata.parsed_tables.size();
size_t prev_total_dictionaries = metadata.total_dictionaries;
auto process_metadata = [&metadata, is_startup, this](const String & file_name)
auto process_metadata = [&metadata, is_startup, local_context, this](const String & file_name)
{
fs::path path(getMetadataPath());
fs::path file_path(file_name);
@ -206,6 +207,8 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
convertMergeTreeToReplicatedIfNeeded(ast, qualified_name, file_name);
NormalizeSelectWithUnionQueryVisitor::Data data{local_context->getSettingsRef().union_default_mode};
NormalizeSelectWithUnionQueryVisitor{data}.visit(ast);
std::lock_guard lock{metadata.mutex};
metadata.parsed_tables[qualified_name] = ParsedTableMetadata{full_path.string(), ast};
metadata.total_dictionaries += create_query->is_dictionary;

View File

@ -35,9 +35,10 @@ class RegionsNames
M(et, ru, 11) \
M(pt, en, 12) \
M(he, en, 13) \
M(vi, en, 14)
M(vi, en, 14) \
M(es, en, 15)
static constexpr size_t total_languages = 15;
static constexpr size_t total_languages = 16;
public:
enum class Language : size_t

View File

@ -3,6 +3,7 @@
#if USE_AZURE_BLOB_STORAGE
#include <Disks/IO/ReadBufferFromAzureBlobStorage.h>
#include <IO/AzureBlobStorage/isRetryableAzureException.h>
#include <IO/ReadBufferFromString.h>
#include <Common/logger_useful.h>
#include <Common/Throttler.h>
@ -101,18 +102,6 @@ bool ReadBufferFromAzureBlobStorage::nextImpl()
size_t sleep_time_with_backoff_milliseconds = 100;
auto handle_exception = [&, this](const auto & e, size_t i)
{
LOG_DEBUG(log, "Exception caught during Azure Read for file {} at attempt {}/{}: {}", path, i + 1, max_single_read_retries, e.Message);
if (i + 1 == max_single_read_retries)
throw;
sleepForMilliseconds(sleep_time_with_backoff_milliseconds);
sleep_time_with_backoff_milliseconds *= 2;
initialized = false;
initialize();
};
for (size_t i = 0; i < max_single_read_retries; ++i)
{
try
@ -124,7 +113,14 @@ bool ReadBufferFromAzureBlobStorage::nextImpl()
}
catch (const Azure::Core::RequestFailedException & e)
{
handle_exception(e, i);
LOG_DEBUG(log, "Exception caught during Azure Read for file {} at attempt {}/{}: {}", path, i + 1, max_single_read_retries, e.Message);
if (i + 1 == max_single_read_retries || !isRetryableAzureException(e))
throw;
sleepForMilliseconds(sleep_time_with_backoff_milliseconds);
sleep_time_with_backoff_milliseconds *= 2;
initialized = false;
initialize();
}
}
@ -213,16 +209,6 @@ void ReadBufferFromAzureBlobStorage::initialize()
size_t sleep_time_with_backoff_milliseconds = 100;
auto handle_exception = [&, this](const auto & e, size_t i)
{
LOG_DEBUG(log, "Exception caught during Azure Download for file {} at offset {} at attempt {}/{}: {}", path, offset, i + 1, max_single_download_retries, e.Message);
if (i + 1 == max_single_download_retries)
throw;
sleepForMilliseconds(sleep_time_with_backoff_milliseconds);
sleep_time_with_backoff_milliseconds *= 2;
};
for (size_t i = 0; i < max_single_download_retries; ++i)
{
try
@ -233,7 +219,12 @@ void ReadBufferFromAzureBlobStorage::initialize()
}
catch (const Azure::Core::RequestFailedException & e)
{
handle_exception(e,i);
LOG_DEBUG(log, "Exception caught during Azure Download for file {} at offset {} at attempt {}/{}: {}", path, offset, i + 1, max_single_download_retries, e.Message);
if (i + 1 == max_single_download_retries || !isRetryableAzureException(e))
throw;
sleepForMilliseconds(sleep_time_with_backoff_milliseconds);
sleep_time_with_backoff_milliseconds *= 2;
}
}
@ -283,7 +274,7 @@ size_t ReadBufferFromAzureBlobStorage::readBigAt(char * to, size_t n, size_t ran
catch (const Azure::Core::RequestFailedException & e)
{
LOG_DEBUG(log, "Exception caught during Azure Download for file {} at offset {} at attempt {}/{}: {}", path, offset, i + 1, max_single_download_retries, e.Message);
if (i + 1 == max_single_download_retries)
if (i + 1 == max_single_download_retries || !isRetryableAzureException(e))
throw;
sleepForMilliseconds(sleep_time_with_backoff_milliseconds);
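
The retry handling is now inlined and consults isRetryableAzureException(), so permanent failures surface immediately instead of burning all attempts. The same pattern in generic, self-contained form (the retryability predicate is a stand-in):

#include <chrono>
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <thread>

// Stand-in for isRetryableAzureException(), e.g. inspecting an HTTP status code.
bool isRetryable(const std::runtime_error &) { return true; }

template <typename F>
auto withRetries(F func, size_t max_tries)
{
    std::chrono::milliseconds backoff{100};
    for (size_t i = 0;; ++i)
    {
        try
        {
            return func();
        }
        catch (const std::runtime_error & e)
        {
            std::cerr << "attempt " << i + 1 << "/" << max_tries << " failed: " << e.what() << '\n';
            // Give up on the last attempt or when the error cannot heal itself.
            if (i + 1 == max_tries || !isRetryable(e))
                throw;
            std::this_thread::sleep_for(backoff);
            backoff *= 2; // exponential backoff: 100ms, 200ms, 400ms, ...
        }
    }
}

int main()
{
    size_t calls = 0;
    int result = withRetries([&] {
        if (++calls < 3)
            throw std::runtime_error("transient failure");
        return 42;
    }, /*max_tries=*/5);
    std::cout << "succeeded after " << calls << " calls: " << result << '\n';
}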

View File

@ -3,6 +3,7 @@
#if USE_AZURE_BLOB_STORAGE
#include <Disks/IO/WriteBufferFromAzureBlobStorage.h>
#include <IO/AzureBlobStorage/isRetryableAzureException.h>
#include <Common/getRandomASCIIString.h>
#include <Common/logger_useful.h>
#include <Common/Throttler.h>
@ -70,17 +71,6 @@ WriteBufferFromAzureBlobStorage::~WriteBufferFromAzureBlobStorage()
void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func, size_t num_tries, size_t cost)
{
auto handle_exception = [&, this](const auto & e, size_t i)
{
if (cost)
write_settings.resource_link.accumulate(cost); // Accumulate resource for later use, because we have failed to consume it
if (i == num_tries - 1)
throw;
LOG_DEBUG(log, "Write at attempt {} for blob `{}` failed: {} {}", i + 1, blob_path, e.what(), e.Message);
};
for (size_t i = 0; i < num_tries; ++i)
{
try
@ -91,7 +81,13 @@ void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func,
}
catch (const Azure::Core::RequestFailedException & e)
{
handle_exception(e, i);
if (cost)
write_settings.resource_link.accumulate(cost); // Accumulate resource for later use, because we have failed to consume it
if (i == num_tries - 1 || !isRetryableAzureException(e))
throw;
LOG_DEBUG(log, "Write at attempt {} for blob `{}` failed: {} {}", i + 1, blob_path, e.what(), e.Message);
}
catch (...)
{

View File

@ -267,7 +267,12 @@ struct TimeWindowImpl<TUMBLE_START>
{
auto type = WhichDataType(arguments[0].type);
if (type.isTuple())
return std::static_pointer_cast<const DataTypeTuple>(arguments[0].type)->getElement(0);
{
const auto & tuple_elems = std::static_pointer_cast<const DataTypeTuple>(arguments[0].type)->getElements();
if (tuple_elems.empty())
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Tuple passed to {} should not be empty", function_name);
return tuple_elems[0];
}
else if (type.isUInt32())
return std::make_shared<DataTypeDateTime>();
else
@ -625,7 +630,12 @@ struct TimeWindowImpl<HOP_START>
{
auto type = WhichDataType(arguments[0].type);
if (type.isTuple())
return std::static_pointer_cast<const DataTypeTuple>(arguments[0].type)->getElement(0);
{
const auto & tuple_elems = std::static_pointer_cast<const DataTypeTuple>(arguments[0].type)->getElements();
if (tuple_elems.empty())
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Tuple passed to {} should not be empty", function_name);
return tuple_elems[0];
}
else if (type.isUInt32())
return std::make_shared<DataTypeDateTime>();
else

View File

@ -284,12 +284,12 @@ void OrdinalDate::init(int64_t modified_julian_day)
bool OrdinalDate::tryInit(int64_t modified_julian_day)
{
/// This function supports day number from -678941 to 2973119 (which represent 0000-01-01 and 9999-12-31 respectively).
/// This function supports day number from -678941 to 2973483 (which represent 0000-01-01 and 9999-12-31 respectively).
if (modified_julian_day < -678941)
return false;
if (modified_julian_day > 2973119)
if (modified_julian_day > 2973483)
return false;
const auto a = modified_julian_day + 678575;
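
The corrected bound is easy to verify independently: the standard Gregorian-to-Julian-day-number formula for 9999-12-31, minus the MJD epoch offset 2400001, gives 2973483; the old constant 2973119 is 364 days short, i.e. 9999-01-01. A quick check:

#include <cstdint>
#include <iostream>

// Gregorian date -> Modified Julian Day via the usual integer JDN formula.
int64_t toModifiedJulianDay(int64_t year, int64_t month, int64_t day)
{
    int64_t a = (14 - month) / 12;
    int64_t y = year + 4800 - a;
    int64_t m = month + 12 * a - 3;
    int64_t jdn = day + (153 * m + 2) / 5 + 365 * y + y / 4 - y / 100 + y / 400 - 32045;
    return jdn - 2400001; // MJD epoch is 1858-11-17
}

int main()
{
    std::cout << toModifiedJulianDay(9999, 12, 31) << '\n'; // 2973483
    std::cout << toModifiedJulianDay(0, 1, 1) << '\n';      // -678941
    return 0;
}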

View File

@ -2,6 +2,7 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <Backups/RestorerFromBackup.h>
#include <Core/Settings.h>
#include <Functions/FunctionFactory.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsStorage.h>
#include <Functions/UserDefined/UserDefinedExecutableFunctionFactory.h>
@ -9,6 +10,7 @@
#include <Functions/UserDefined/UserDefinedSQLObjectsBackup.h>
#include <Interpreters/Context.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/NormalizeSelectWithUnionQueryVisitor.h>
#include <Parsers/ASTCreateFunctionQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
@ -80,13 +82,17 @@ namespace
validateFunctionRecursiveness(*function_body, name);
}
ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query)
ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query, const ContextPtr & context)
{
auto ptr = create_function_query.clone();
auto & res = typeid_cast<ASTCreateFunctionQuery &>(*ptr);
res.if_not_exists = false;
res.or_replace = false;
FunctionNameNormalizer().visit(res.function_core.get());
NormalizeSelectWithUnionQueryVisitor::Data data{context->getSettingsRef().union_default_mode};
NormalizeSelectWithUnionQueryVisitor{data}.visit(res.function_core);
return ptr;
}
}
@ -125,7 +131,7 @@ void UserDefinedSQLFunctionFactory::checkCanBeUnregistered(const ContextPtr & co
bool UserDefinedSQLFunctionFactory::registerFunction(const ContextMutablePtr & context, const String & function_name, ASTPtr create_function_query, bool throw_if_exists, bool replace_if_exists)
{
checkCanBeRegistered(context, function_name, *create_function_query);
create_function_query = normalizeCreateFunctionQuery(*create_function_query);
create_function_query = normalizeCreateFunctionQuery(*create_function_query, context);
try
{

Some files were not shown because too many files have changed in this diff