Merge remote-tracking branch 'upstream/master' into const-adaptive-granularity

This commit is contained in:
Anton Popov 2024-11-11 19:23:11 +00:00
commit aa51f4cc88
475 changed files with 8638 additions and 3274 deletions

View File

@ -12,7 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
- Critical Bug Fix (crash, data loss, RBAC)
- Bug Fix (user-visible misbehavior in an official stable release)
- CI Fix or Improvement (changelog entry is not required)
- Not for changelog (changelog entry is not required)

View File

@ -488,6 +488,7 @@
* Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on the destination table: data is split into 2 or more blocks and those blocks are considered duplicates when inserted in parallel. - on the MV destination table: equal blocks are deduplicated; this happens when the MV often produces equal data as a result for different input data due to performing aggregation. - on the MV destination table: equal blocks which come from different MVs are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
#### New Feature
* Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
@ -599,7 +600,6 @@
* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out-of-bounds [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)).
* Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)).
* Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)).
* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)).

View File

@ -47,6 +47,7 @@ Upcoming meetups
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
* [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3
* [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9
* [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9
* [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12

2
contrib/SimSIMD vendored

@ -1 +1 @@
Subproject commit 935fef2964bc38e995c5f465b42259a35b8cf0d3
Subproject commit fa60f1b8e3582c50978f0ae86c2ebb6c9af957f3

View File

@ -1,4 +1,8 @@
set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
add_library(_simsimd INTERFACE)
target_include_directories(_simsimd SYSTEM INTERFACE "${SIMSIMD_PROJECT_DIR}/include")
# See contrib/usearch-cmake/CMakeLists.txt for why SimSIMD is only enabled on x86
if (ARCH_AMD64)
set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
set(SIMSIMD_SRCS ${SIMSIMD_PROJECT_DIR}/c/lib.c)
add_library(_simsimd ${SIMSIMD_SRCS})
target_include_directories(_simsimd SYSTEM PUBLIC "${SIMSIMD_PROJECT_DIR}/include")
target_compile_definitions(_simsimd PUBLIC SIMSIMD_DYNAMIC_DISPATCH)
endif()

2
contrib/usearch vendored

@ -1 +1 @@
Subproject commit 53799b84ca9ad708b060d0b1cfa5f039371721cd
Subproject commit 7efe8b710c9831bfe06573b1df0fad001b04a2b5

View File

@ -6,12 +6,63 @@ target_include_directories(_usearch SYSTEM INTERFACE ${USEARCH_PROJECT_DIR}/incl
target_link_libraries(_usearch INTERFACE _fp16)
target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB)
# target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD)
# ^^ simsimd is not enabled at the moment. Reasons:
# - Vectorization is important for raw scans but not so much for HNSW. We use usearch only for HNSW.
# - Simsimd does compile-time dispatch (choice of SIMD kernels determined by capabilities of the build machine) or dynamic dispatch (SIMD
# kernels chosen at runtime based on cpuid instruction). Since current builds are limited to SSE 4.2 (x86) and NEON (ARM), the speedup of
# the former would be moderate compared to AVX-512 / SVE. The latter is at the moment too fragile with respect to portability across x86
# and ARM machines ... certain combinations of quantizations / distance functions / SIMD instructions are not implemented at the moment.
# Only x86 for now. On ARM, the linker goes down in flames. To make SimSIMD compile, I had to remove macro checks in SimSIMD
# for AVX512 (x86, which worked nicely) and __ARM_BF16_FORMAT_ALTERNATIVE. The linker failure is probably because of that.
if (ARCH_AMD64)
target_link_libraries(_usearch INTERFACE _simsimd)
target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD)
target_compile_definitions(_usearch INTERFACE USEARCH_CAN_COMPILE_FLOAT16)
target_compile_definitions(_usearch INTERFACE USEARCH_CAN_COMPILE_BF16)
endif ()
add_library(ch_contrib::usearch ALIAS _usearch)
# Cf. https://github.com/llvm/llvm-project/issues/107810 (though it is not 100% the same stack)
#
# LLVM ERROR: Cannot select: 0x7996e7a73150: f32,ch = load<(load (s16) from %ir.22, !tbaa !54231), anyext from bf16> 0x79961cb737c0, 0x7996e7a1a500, undef:i64, ./contrib/SimSIMD/include/simsimd/dot.h:215:1
# 0x7996e7a1a500: i64 = add 0x79961e770d00, Constant:i64<-16>, ./contrib/SimSIMD/include/simsimd/dot.h:215:1
# 0x79961e770d00: i64,ch = CopyFromReg 0x79961cb737c0, Register:i64 %4, ./contrib/SimSIMD/include/simsimd/dot.h:215:1
# 0x7996e7a1ae10: i64 = Register %4
# 0x7996e7a1b5f0: i64 = Constant<-16>
# 0x7996e7a1a730: i64 = undef
# In function: _ZL23simsimd_dot_bf16_serialPKu6__bf16S0_yPd
# PLEASE submit a bug report to https://github.com/llvm/llvm-project/issues/ and include the crash backtrace.
# Stack dump:
# 0. Running pass 'Function Pass Manager' on module 'src/libdbms.a(MergeTreeIndexVectorSimilarity.cpp.o at 2312737440)'.
# 1. Running pass 'AArch64 Instruction Selection' on function '@_ZL23simsimd_dot_bf16_serialPKu6__bf16S0_yPd'
# #0 0x00007999e83a63bf llvm::sys::PrintStackTrace(llvm::raw_ostream&, int) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda63bf)
# #1 0x00007999e83a44f9 llvm::sys::RunSignalHandlers() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda44f9)
# #2 0x00007999e83a6b00 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda6b00)
# #3 0x00007999e6e45320 (/lib/x86_64-linux-gnu/libc.so.6+0x45320)
# #4 0x00007999e6e9eb1c pthread_kill (/lib/x86_64-linux-gnu/libc.so.6+0x9eb1c)
# #5 0x00007999e6e4526e raise (/lib/x86_64-linux-gnu/libc.so.6+0x4526e)
# #6 0x00007999e6e288ff abort (/lib/x86_64-linux-gnu/libc.so.6+0x288ff)
# #7 0x00007999e82fe0c2 llvm::report_fatal_error(llvm::Twine const&, bool) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xcfe0c2)
# #8 0x00007999e8c2f8e3 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162f8e3)
# #9 0x00007999e8c2ed76 llvm::SelectionDAGISel::SelectCodeCommon(llvm::SDNode*, unsigned char const*, unsigned int) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162ed76)
# #10 0x00007999ea1adbcb (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x2badbcb)
# #11 0x00007999e8c2611f llvm::SelectionDAGISel::DoInstructionSelection() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162611f)
# #12 0x00007999e8c25790 llvm::SelectionDAGISel::CodeGenAndEmitDAG() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x1625790)
# #13 0x00007999e8c248de llvm::SelectionDAGISel::SelectAllBasicBlocks(llvm::Function const&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x16248de)
# #14 0x00007999e8c22934 llvm::SelectionDAGISel::runOnMachineFunction(llvm::MachineFunction&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x1622934)
# #15 0x00007999e87826b9 llvm::MachineFunctionPass::runOnFunction(llvm::Function&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x11826b9)
# #16 0x00007999e84f7772 llvm::FPPassManager::runOnFunction(llvm::Function&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xef7772)
# #17 0x00007999e84fd2f4 llvm::FPPassManager::runOnModule(llvm::Module&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xefd2f4)
# #18 0x00007999e84f7e9f llvm::legacy::PassManagerImpl::run(llvm::Module&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xef7e9f)
# #19 0x00007999e99f7d61 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f7d61)
# #20 0x00007999e99f8c91 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f8c91)
# #21 0x00007999e99f8b10 llvm::lto::thinBackend(llvm::lto::Config const&, unsigned int, std::function<llvm::Expected<std::unique_ptr<llvm::CachedFileStream, std::default_delete<llvm::CachedFileStream>>> (unsigned int, llvm::Twine const&)>, llvm::Module&, llvm::ModuleSummaryIndex const&, llvm::DenseMap<llvm::StringRef, std::unordered_set<unsigned long, std::hash<unsigned long>, std::equal_to<unsigned long>, std::allocator<unsigned long>>, llvm::DenseMapInfo<llvm::StringRef, void
# >, llvm::detail::DenseMapPair<llvm::StringRef, std::unordered_set<unsigned long, std::hash<unsigned long>, std::equal_to<unsigned long>, std::allocator<unsigned long>>>> const&, llvm::DenseMap<unsigned long, llvm::GlobalValueSummary*, llvm::DenseMapInfo<unsigned long, void>, llvm::detail::DenseMapPair<unsigned long, llvm::GlobalValueSummary*>> const&, llvm::MapVector<llvm::StringRef, llvm::BitcodeModule, llvm::DenseMap<llvm::StringRef, unsigned int, llvm::DenseMapInfo<llvm::S
# tringRef, void>, llvm::detail::DenseMapPair<llvm::StringRef, unsigned int>>, llvm::SmallVector<std::pair<llvm::StringRef, llvm::BitcodeModule>, 0u>>*, std::vector<unsigned char, std::allocator<unsigned char>> const&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f8b10)
# #22 0x00007999e99f248d (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f248d)
# #23 0x00007999e99f1cd6 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f1cd6)
# #24 0x00007999e82c9beb (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xcc9beb)
# #25 0x00007999e834ebe3 llvm::ThreadPool::processTasks(llvm::ThreadPoolTaskGroup*) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xd4ebe3)
# #26 0x00007999e834f704 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xd4f704)
# #27 0x00007999e6e9ca94 (/lib/x86_64-linux-gnu/libc.so.6+0x9ca94)
# #28 0x00007999e6f29c3c (/lib/x86_64-linux-gnu/libc.so.6+0x129c3c)
# clang++-18: error: unable to execute command: Aborted (core dumped)
# clang++-18: error: linker command failed due to signal (use -v to see invocation)
# ^[[A^Cninja: build stopped: interrupted by user.

View File

@ -1,7 +1,7 @@
# The Dockerfile.ubuntu exists for the tests/ci/docker_server.py script
# If the image is built from Dockerfile.alpine, then the `-alpine` suffix is added automatically,
# so the only purpose of Dockerfile.ubuntu is to push `latest`, `head` and so on w/o suffixes
FROM ubuntu:20.04 AS glibc-donor
FROM ubuntu:22.04 AS glibc-donor
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
@ -9,7 +9,11 @@ RUN arch=${TARGETARCH:-amd64} \
amd64) rarch=x86_64 ;; \
arm64) rarch=aarch64 ;; \
esac \
&& ln -s "${rarch}-linux-gnu" /lib/linux-gnu
&& ln -s "${rarch}-linux-gnu" /lib/linux-gnu \
&& case $arch in \
amd64) ln /lib/linux-gnu/ld-linux-x86-64.so.2 /lib/linux-gnu/ld-2.35.so ;; \
arm64) ln /lib/linux-gnu/ld-linux-aarch64.so.1 /lib/linux-gnu/ld-2.35.so ;; \
esac
FROM alpine
@ -20,7 +24,7 @@ ENV LANG=en_US.UTF-8 \
TZ=UTC \
CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.35.so /lib/
COPY --from=glibc-donor /etc/nsswitch.conf /etc/
COPY entrypoint.sh /entrypoint.sh

View File

@ -1,21 +1,31 @@
#!/bin/bash
set +x
set -eo pipefail
shopt -s nullglob
DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated
# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as the desired user
# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
fi
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# support `docker run --user=xxx:xxxx`
if [[ "$(id -u)" = "0" ]]; then
if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
USER=0
GROUP=0
else
USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
fi
if command -v gosu &> /dev/null; then
gosu="gosu $USER:$GROUP"
elif command -v su-exec &> /dev/null; then
@ -82,11 +92,11 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
# There is a config file. It is already tested with gosu (if it is readable by the keeper user)
if [ -f "$KEEPER_CONFIG" ]; then
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
exec $gosu clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
fi
# There is no config file. Will use embedded one
exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
exec $gosu clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image

View File

@ -1,4 +1,4 @@
FROM ubuntu:20.04
FROM ubuntu:22.04
# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
# It could be removed after we move to version 23.04+
@ -88,34 +88,32 @@ RUN if [ -n "${single_binary_location_url}" ]; then \
#docker-official-library:on
# A fallback to installation from ClickHouse repository
RUN if ! clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \
apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& for package in ${PACKAGES}; do \
packages="${packages} ${package}=${VERSION}" \
; done \
&& apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get autoremove --purge -yq libksba8 \
&& apt-get autoremove -yq \
; fi
# It works unless the clickhouse binary already exists
RUN clickhouse local -q 'SELECT 1' >/dev/null 2>&1 && exit 0 || : \
; apt-get update \
&& apt-get install --yes --no-install-recommends \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& for package in ${PACKAGES}; do \
packages="${packages} ${package}=${VERSION}" \
; done \
&& apt-get install --yes --no-install-recommends ${packages} || exit 1 \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get autoremove --purge -yq dirmngr gnupg2
# post install
# we need to allow "others" access to clickhouse folder, because docker container
@ -126,8 +124,6 @@ RUN clickhouse-local -q 'SELECT * FROM system.build_options' \
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV TZ UTC
RUN mkdir /docker-entrypoint-initdb.d

View File

@ -1,3 +1,11 @@
<!---
The README.md is generated by README.sh from the following sources:
- README.src/content.md
- README.src/license.md
If you want to change it, edit these files
-->
# ClickHouse Server Docker Image
## What is ClickHouse?
@ -8,6 +16,7 @@ ClickHouse works 100-1000x faster than traditional database management systems,
For more information and documentation see https://clickhouse.com/.
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions
- The `latest` tag points to the latest release of the latest stable branch.
@ -16,10 +25,12 @@ For more information and documentation see https://clickhouse.com/.
- The tag `head` is built from the latest commit to the default branch.
- Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`.
<!-- REMOVE UNTIL HERE -->
### Compatibility
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. It requires a Docker version >= `20.10.10` containing this [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround, you could use `docker run --security-opt seccomp=unconfined` instead; however, that has security implications.
## How to use this image
@ -29,7 +40,7 @@ For more information and documentation see https://clickhouse.com/.
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking).
By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.
By default, the server instance started above runs as the `default` user without a password.
@ -46,7 +57,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in
### connect to it using curl
```bash
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server curlimages/curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```
More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).
@ -69,7 +80,7 @@ echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
`22.6.3.35`
or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
```bash
docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
@ -87,8 +98,8 @@ Typically you may want to mount the following folders inside your container to a
```bash
docker run -d \
-v $(realpath ./ch_data):/var/lib/clickhouse/ \
-v $(realpath ./ch_logs):/var/log/clickhouse-server/ \
-v "$PWD/ch_data:/var/lib/clickhouse/" \
-v "$PWD/ch_logs:/var/log/clickhouse-server/" \
--name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
@ -110,6 +121,8 @@ docker run -d \
--name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).
## Configuration
The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).
@ -125,8 +138,8 @@ docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /pa
### Start server as custom user
```bash
# $(pwd)/data/clickhouse should exist and be owned by current user
docker run --rm --user ${UID}:${GID} --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
# $PWD/data/clickhouse should exist and be owned by current user
docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```
When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.
@ -134,7 +147,7 @@ When you use the image with local directories mounted, you probably want to spec
### Start server from root (useful in case of enabled user namespace)
```bash
docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server-userns -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```
### How to create default database and user on starting

38
docker/server/README.sh Executable file
View File

@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -ueo pipefail
# A script to generate README.md, close to how it is done in https://github.com/docker-library/docs
WORKDIR=$(dirname "$0")
SCRIPT_NAME=$(basename "$0")
CONTENT=README.src/content.md
LICENSE=README.src/license.md
cd "$WORKDIR"
R=README.md
cat > "$R" <<EOD
<!---
The $R is generated by $SCRIPT_NAME from the following sources:
- $CONTENT
- $LICENSE
If you want to change it, edit these files
-->
EOD
cat "$CONTENT" >> "$R"
cat >> "$R" <<EOD
## License
$(cat $LICENSE)
EOD
# Remove the %%LOGO%% line and the line below it from the file
sed -i '/^%%LOGO%%/,+1d' "$R"
# Replace each %%IMAGE%% with our `clickhouse/clickhouse-server`
sed -i '/%%IMAGE%%/s:%%IMAGE%%:clickhouse/clickhouse-server:g' $R

View File

@ -0,0 +1 @@
ClickHouse is the fastest and most resource efficient OSS database for real-time apps and analytics.

View File

@ -0,0 +1,170 @@
# ClickHouse Server Docker Image
## What is ClickHouse?
%%LOGO%%
ClickHouse is an open-source column-oriented DBMS (columnar database management system) for online analytical processing (OLAP) that allows users to generate analytical reports using SQL queries in real-time.
ClickHouse works 100-1000x faster than traditional database management systems, and processes hundreds of millions to over a billion rows and tens of gigabytes of data per server per second. With a widespread user base around the globe, the technology has received praise for its reliability, ease of use, and fault tolerance.
For more information and documentation see https://clickhouse.com/.
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions
- The `latest` tag points to the latest release of the latest stable branch.
- Branch tags like `22.2` point to the latest release of the corresponding branch.
- Full version tags like `22.2.3.5` point to the corresponding release.
- The tag `head` is built from the latest commit to the default branch.
- Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`.
<!-- REMOVE UNTIL HERE -->
### Compatibility
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. It requires a Docker version >= `20.10.10` containing this [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround, you could use `docker run --security-opt seccomp=unconfined` instead; however, that has security implications.
## How to use this image
### start server instance
```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.
By default, the server instance started above runs as the `default` user without a password.
### connect to it from a native client
```bash
docker run -it --rm --link some-clickhouse-server:clickhouse-server --entrypoint clickhouse-client %%IMAGE%% --host clickhouse-server
# OR
docker exec -it some-clickhouse-server clickhouse-client
```
More information about the [ClickHouse client](https://clickhouse.com/docs/en/interfaces/cli/).
### connect to it using curl
```bash
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```
More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).
### stopping / removing the container
```bash
docker stop some-clickhouse-server
docker rm some-clickhouse-server
```
### networking
You can expose your ClickHouse running in docker by [mapping a particular port](https://docs.docker.com/config/containers/container-networking/) from inside the container using host ports:
```bash
docker run -d -p 18123:8123 -p19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
```
`22.6.3.35`
Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
```bash
docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
echo 'SELECT version()' | curl 'http://localhost:8123/' --data-binary @-
```
`22.6.3.35`
### Volumes
Typically you may want to mount the following folders inside your container to achieve persistence:
- `/var/lib/clickhouse/` - main folder where ClickHouse stores the data
- `/var/log/clickhouse-server/` - logs
```bash
docker run -d \
-v "$PWD/ch_data:/var/lib/clickhouse/" \
-v "$PWD/ch_logs:/var/log/clickhouse-server/" \
--name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
You may also want to mount:
- `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
- `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
- `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).
### Linux capabilities
ClickHouse has some advanced functionality, which requires enabling several [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html).
They are optional and can be enabled using the following [docker command-line arguments](https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities):
```bash
docker run -d \
--cap-add=SYS_NICE --cap-add=NET_ADMIN --cap-add=IPC_LOCK \
--name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).
## Configuration
The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).
ClickHouse configuration is represented with a file "config.xml" ([documentation](https://clickhouse.com/docs/en/operations/configuration_files/))
### Start server instance with custom configuration
```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /path/to/your/config.xml:/etc/clickhouse-server/config.xml %%IMAGE%%
```
### Start server as custom user
```bash
# $PWD/data/clickhouse should exist and be owned by current user
docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%%
```
When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.
### Start server from root (useful in case of enabled user namespace)
```bash
docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%%
```
### How to create default database and user on starting
Sometimes you may want to create a user (user named `default` is used by default) and database on a container start. You can do it using environment variables `CLICKHOUSE_DB`, `CLICKHOUSE_USER`, `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT` and `CLICKHOUSE_PASSWORD`:
```bash
docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 -e CLICKHOUSE_PASSWORD=password -p 9000:9000/tcp %%IMAGE%%
```
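For example, assuming `clickhouse-client` is available on the host, you could then connect with the user and database created by those variables:

```bash
# connects via the published native port 9000 using the user/password/database from the example above
clickhouse-client --port 9000 --user username --password password --database my_database \
    --query "SELECT currentDatabase()"
```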
## How to extend this image
To perform additional initialization in an image derived from this one, add one or more `*.sql`, `*.sql.gz`, or `*.sh` scripts under `/docker-entrypoint-initdb.d`. After the entrypoint calls `initdb`, it will run any `*.sql` files, run any executable `*.sh` scripts, and source any non-executable `*.sh` scripts found in that directory to do further initialization before starting the service.
Also, you can provide environment variables `CLICKHOUSE_USER` & `CLICKHOUSE_PASSWORD` that will be used for clickhouse-client during initialization.
For example, to add an additional user and database, add the following to `/docker-entrypoint-initdb.d/init-db.sh`:
```bash
#!/bin/bash
set -e
clickhouse client -n <<-EOSQL
CREATE DATABASE docker;
CREATE TABLE docker.docker (x Int32) ENGINE = Log;
EOSQL
```
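One possible way to wire such a script in is to bind-mount it into the init directory (the host path here is only an illustration):

```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 \
    -v "$PWD/init-db.sh:/docker-entrypoint-initdb.d/init-db.sh" \
    %%IMAGE%%
```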

View File

@ -0,0 +1 @@
https://github.com/ClickHouse/ClickHouse

View File

@ -0,0 +1 @@
View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.

View File

@ -0,0 +1,43 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 616 616">
<defs>
<style>
.cls-1 {
clip-path: url(#clippath);
}
.cls-2 {
fill: none;
}
.cls-2, .cls-3, .cls-4 {
stroke-width: 0px;
}
.cls-3 {
fill: #1e1e1e;
}
.cls-4 {
fill: #faff69;
}
</style>
<clipPath id="clippath">
<rect class="cls-2" x="83.23" y="71.73" width="472.55" height="472.55"/>
</clipPath>
</defs>
<g id="Layer_2" data-name="Layer 2">
<rect class="cls-4" width="616" height="616"/>
</g>
<g id="Layer_1" data-name="Layer 1">
<g class="cls-1">
<g>
<path class="cls-3" d="m120.14,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m208.75,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m297.35,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m385.94,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m474.56,268.36c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.65,2.09,4.65,4.66v79.28c0,2.57-2.09,4.66-4.65,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66v-79.28Z"/>
</g>
</g>
</g>
</svg>


View File

@ -0,0 +1 @@
[ClickHouse Inc.](%%GITHUB-REPO%%)

View File

@ -0,0 +1,7 @@
{
"hub": {
"categories": [
"databases-and-storage"
]
}
}

View File

@ -4,17 +4,28 @@ set -eo pipefail
shopt -s nullglob
DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated
# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as the desired user
# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
fi
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# support `docker run --user=xxx:xxxx`
if [[ "$(id -u)" = "0" ]]; then
if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
USER=0
GROUP=0
else
USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
fi
else
USER="$(id -u)"
GROUP="$(id -g)"
@ -55,14 +66,14 @@ function create_directory_and_do_chown() {
[ -z "$dir" ] && return
# ensure directories exist
if [ "$DO_CHOWN" = "1" ]; then
mkdir="mkdir"
mkdir=( mkdir )
else
# if DO_CHOWN=0 it means that the system does not map the root user to "admin" permissions
# it mainly happens on NFS mounts where root==nobody for security reasons
# thus mkdir MUST run with the user id/gid and not as nobody, which has zero permissions
mkdir="/usr/bin/clickhouse su "${USER}:${GROUP}" mkdir"
mkdir=( clickhouse su "${USER}:${GROUP}" mkdir )
fi
if ! $mkdir -p "$dir"; then
if ! "${mkdir[@]}" -p "$dir"; then
echo "Couldn't create necessary directory: $dir"
exit 1
fi
@ -143,7 +154,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
fi
# Listen only on localhost until the initialization is done
/usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
pid="$!"
# check if clickhouse is ready to accept connections
@ -151,7 +162,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
tries=${CLICKHOUSE_INIT_TIMEOUT:-1000}
while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do
if [ "$tries" -le "0" ]; then
echo >&2 'ClickHouse init process failed.'
echo >&2 'ClickHouse init process timeout.'
exit 1
fi
tries=$(( tries-1 ))
@ -203,18 +214,8 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0}
export CLICKHOUSE_WATCHDOG_ENABLE
# An option for easy restarting and replacing clickhouse-server in a container, especially in Kubernetes.
# For example, you can replace the clickhouse-server binary with another one and restart it while keeping the container running.
if [[ "${CLICKHOUSE_DOCKER_RESTART_ON_EXIT:-0}" -eq "1" ]]; then
while true; do
# This runs the server as a child process of the shell script:
/usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" ||:
echo >&2 'ClickHouse Server exited, and the environment variable CLICKHOUSE_DOCKER_RESTART_ON_EXIT is set to 1. Restarting the server.'
done
else
# This replaces the shell script with the server:
exec /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
fi
# This replaces the shell script with the server:
exec clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image

View File

@ -1,16 +0,0 @@
# Since right now we can't set volumes to the docker during build, we split building container in stages:
# 1. build base container
# 2. run base container with mounted volumes
# 3. commit container as image
FROM ubuntu:20.04 as clickhouse-test-runner-base
# A volume where directory with clickhouse packages to be mounted,
# for later installing.
VOLUME /packages
CMD apt-get update ;\
DEBIAN_FRONTEND=noninteractive \
apt install -y /packages/clickhouse-common-static_*.deb \
/packages/clickhouse-client_*.deb \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

View File

@ -0,0 +1,31 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.13.40-lts (7acabd77389) FIXME as compared to v24.3.12.75-lts (7cb5dff8019)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#63976](https://github.com/ClickHouse/ClickHouse/issues/63976): Fix intersect parts when restart after drop range. [#63202](https://github.com/ClickHouse/ClickHouse/pull/63202) ([Han Fei](https://github.com/hanfei1991)).
* Backported in [#71482](https://github.com/ClickHouse/ClickHouse/issues/71482): Fix `Content-Encoding` not sent in some compressed responses. [#64802](https://github.com/ClickHouse/ClickHouse/issues/64802). [#68975](https://github.com/ClickHouse/ClickHouse/pull/68975) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#70451](https://github.com/ClickHouse/ClickHouse/issues/70451): Fix crash during insertion into FixedString column in PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70619](https://github.com/ClickHouse/ClickHouse/issues/70619): Fix server segfault on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#70877](https://github.com/ClickHouse/ClickHouse/issues/70877): Fix table creation with `CREATE ... AS table_function()` with database `Replicated` and unavailable table function source on secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#70571](https://github.com/ClickHouse/ClickHouse/issues/70571): Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#71146](https://github.com/ClickHouse/ClickHouse/issues/71146): Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#70682](https://github.com/ClickHouse/ClickHouse/issues/70682): Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71113](https://github.com/ClickHouse/ClickHouse/issues/71113): Fix a crash and a leak in AggregateFunctionGroupArraySorted. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#70990](https://github.com/ClickHouse/ClickHouse/issues/70990): Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#71246](https://github.com/ClickHouse/ClickHouse/issues/71246): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)).
* Backported in [#71371](https://github.com/ClickHouse/ClickHouse/issues/71371): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)).
* Backported in [#71594](https://github.com/ClickHouse/ClickHouse/issues/71594): Prevent crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#71022](https://github.com/ClickHouse/ClickHouse/issues/71022): Fix dropping of file cache in CHECK query in case of enabled transactions. [#69256](https://github.com/ClickHouse/ClickHouse/pull/69256) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#70384](https://github.com/ClickHouse/ClickHouse/issues/70384): CI: Enable Integration Tests for backport PRs. [#70329](https://github.com/ClickHouse/ClickHouse/pull/70329) ([Max Kainov](https://github.com/maxknv)).
* Backported in [#70538](https://github.com/ClickHouse/ClickHouse/issues/70538): Remove slow poll() logs in keeper. [#70508](https://github.com/ClickHouse/ClickHouse/pull/70508) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#70971](https://github.com/ClickHouse/ClickHouse/issues/70971): Limiting logging some lines about configs. [#70879](https://github.com/ClickHouse/ClickHouse/pull/70879) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).

View File

@ -4,9 +4,13 @@ sidebar_position: 50
sidebar_label: EmbeddedRocksDB
---
import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge';
# EmbeddedRocksDB Engine
This engine allows integrating ClickHouse with [rocksdb](http://rocksdb.org/).
<CloudNotSupportedBadge />
This engine allows integrating ClickHouse with [RocksDB](http://rocksdb.org/).
## Creating a Table {#creating-a-table}
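A minimal sketch of such a table (the column names are illustrative; the engine requires a `PRIMARY KEY`):

```bash
clickhouse-client --query "
    CREATE TABLE rocksdb_demo
    (
        key String,
        value UInt32
    )
    ENGINE = EmbeddedRocksDB
    PRIMARY KEY key
"
```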

View File

@ -54,7 +54,7 @@ Parameters:
- `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a
line between two points in Euclidean space), or `cosineDistance` (the [cosine
distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance) - the angle between two non-zero vectors).
- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing the vector with reduced precision (optional, default: `bf16`)
- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing vectors with reduced precision (optional, default: `bf16`)
- `hnsw_max_connections_per_layer`: the number of neighbors per HNSW graph node, also known as `M` in the [HNSW
paper](https://doi.org/10.1109/TPAMI.2018.2889473) (optional, default: 32)
- `hnsw_candidate_list_size_for_construction`: the size of the dynamic candidate list when constructing the HNSW graph, also known as
@ -92,8 +92,8 @@ Vector similarity indexes currently support two distance functions:
- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors
([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)).
Vector similarity indexes allows storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16` or `i8`.
If no scalar kind was specified during index creation, `f16` is used as default.
Vector similarity indexes allow storing vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16`, `bf16`,
and `i8`. If no scalar kind was specified during index creation, `bf16` is used as default.
For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no
distance function was specified during index creation, `L2Distance` is used as default.
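As a rough usage sketch (table and column names are hypothetical), a nearest-neighbor search with the chosen distance function then looks like:

```bash
clickhouse-client --query "
    SELECT id
    FROM tab
    ORDER BY L2Distance(vec, [0.1, 0.2, 0.3])
    LIMIT 10
"
```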

View File

@ -33,6 +33,21 @@ Then, generate the data. Parameter `-s` specifies the scale factor. For example,
./dbgen -s 100
```
Detailed table sizes with scale factor 100:
| Table | size (in rows) | size (compressed in ClickHouse) |
|----------|----------------|---------------------------------|
| nation | 25 | 2 kB |
| region | 5 | 1 kB |
| part | 20.000.000 | 895 MB |
| supplier | 1.000.000 | 75 MB |
| partsupp | 80.000.000 | 4.37 GB |
| customer | 15.000.000 | 1.19 GB |
| orders | 150.000.000 | 6.15 GB |
| lineitem | 600.000.000 | 26.69 GB |
(Compressed sizes in ClickHouse are taken from `system.tables.total_bytes` and are based on the table definitions below.)
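For reference, a sketch of how such sizes can be read back after the data is loaded (assuming the tables live in the current database):

```bash
clickhouse-client --query "
    SELECT name, formatReadableSize(total_bytes) AS compressed_size
    FROM system.tables
    WHERE database = currentDatabase()
      AND name IN ('nation', 'region', 'part', 'supplier', 'partsupp', 'customer', 'orders', 'lineitem')
"
```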
Now create tables in ClickHouse.
We stick as closely as possible to the rules of the TPC-H specification:
@ -151,10 +166,37 @@ clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO orders FORMAT
clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO lineitem FORMAT CSV" < lineitem.tbl
```
The queries are generated by `./qgen -s <scaling_factor>`. Example queries for `s = 100`:
:::note
Instead of using tpch-kit and generating the tables yourself, you can alternatively import the data from a public S3 bucket. Make sure
to create empty tables first using the above `CREATE` statements.
```sql
-- Scaling factor 1
INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/nation.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/region.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/part.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/supplier.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/partsupp.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/customer.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/orders.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/lineitem.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
-- Scaling factor 100
INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
```
:::
## Queries
The queries are generated by `./qgen -s <scaling_factor>`. Example queries for `s = 100`:
**Correctness**
The result of the queries agrees with the official results unless mentioned otherwise. To verify, generate a TPC-H database with scale

View File

@ -9,7 +9,7 @@ sidebar_label: Prometheus protocols
## Exposing metrics {#expose}
:::note
ClickHouse Cloud does not currently support connecting to Prometheus. To be notified when this feature is supported, please contact support@clickhouse.com.
If you are using ClickHouse Cloud, you can expose metrics to Prometheus using the [Prometheus Integration](/en/integrations/prometheus).
:::
ClickHouse can expose its own metrics for scraping from Prometheus:
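Once the corresponding `<prometheus>` section is enabled in the server configuration, the endpoint can be checked with a plain HTTP request (the port and path below assume the values commonly used in examples, 9363 and `/metrics`):

```bash
curl -s http://localhost:9363/metrics | head -n 20
```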

View File

@ -65,6 +65,34 @@ sudo rm -f /etc/yum.repos.d/clickhouse.repo
After that follow the [install guide](../getting-started/install.md#from-rpm-packages)
### You Can't Run Docker Container
You are running a simple `docker run clickhouse/clickhouse-server` and it crashes with a stack trace similar to the following:
```
$ docker run -it clickhouse/clickhouse-server
........
2024.11.06 21:04:48.912036 [ 1 ] {} <Information> SentryWriter: Sending crash reports is disabled
Poco::Exception. Code: 1000, e.code() = 0, System exception: cannot start thread, Stack trace (when copying this message, always include the lines below):
0. Poco::ThreadImpl::startImpl(Poco::SharedPtr<Poco::Runnable, Poco::ReferenceCounter, Poco::ReleasePolicy<Poco::Runnable>>) @ 0x00000000157c7b34
1. Poco::Thread::start(Poco::Runnable&) @ 0x00000000157c8a0e
2. BaseDaemon::initializeTerminationAndSignalProcessing() @ 0x000000000d267a14
3. BaseDaemon::initialize(Poco::Util::Application&) @ 0x000000000d2652cb
4. DB::Server::initialize(Poco::Util::Application&) @ 0x000000000d128b38
5. Poco::Util::Application::run() @ 0x000000001581cfda
6. DB::Server::run() @ 0x000000000d1288f0
7. Poco::Util::ServerApplication::run(int, char**) @ 0x0000000015825e27
8. mainEntryClickHouseServer(int, char**) @ 0x000000000d125b38
9. main @ 0x0000000007ea4eee
10. ? @ 0x00007f67ff946d90
11. ? @ 0x00007f67ff946e40
12. _start @ 0x00000000062e802e
(version 24.10.1.2812 (official build))
```
The reason is an old Docker daemon with a version lower than `20.10.10`. The fix is either to upgrade the daemon or to run `docker run [--privileged | --security-opt seccomp=unconfined]`. The latter has security implications.
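For example, the seccomp workaround mentioned above could look like this:

```bash
# workaround for Docker daemons older than 20.10.10; note the security implications
docker run -d --security-opt seccomp=unconfined --name some-clickhouse-server \
    --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```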
## Connecting to the Server {#troubleshooting-accepts-no-connections}
Possible issues:

View File

@ -25,9 +25,10 @@ Query caches can generally be viewed as transactionally consistent or inconsiste
slowly enough that the database only needs to compute the report once (represented by the first `SELECT` query). Further queries can be
served directly from the query cache. In this example, a reasonable validity period could be 30 min.
Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result,
the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
This reduces maintenance effort and avoids redundancy.
Transactionally inconsistent caching is traditionally provided by client tools or proxy packages (e.g.
[chproxy](https://www.chproxy.org/configuration/caching/)) interacting with the database. As a result, the same caching logic and
configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side. This reduces maintenance
effort and avoids redundancy.
## Configuration Settings and Usage
@ -138,7 +139,10 @@ is only cached if the query runs longer than 5 seconds. It is also possible to s
cached - for that use setting [query_cache_min_query_runs](settings/settings.md#query-cache-min-query-runs).
Entries in the query cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a different
value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl).
value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl). The query
cache evicts entries "lazily", i.e. when an entry becomes stale, it is not immediately removed from the cache. Instead, when a new entry
is to be inserted into the query cache, the database checks whether the cache has enough free space for the new entry. If this is not the
case, the database tries to remove all stale entries. If the cache still does not have enough free space, the new entry is not inserted.
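As a hedged illustration of the TTL setting (the table name `hits` below is just a placeholder, not part of the original example):

```sql
-- Cache the result of this query; the entry stays valid for 300 seconds
-- and is then evicted lazily when space is needed for a new entry.
SELECT count()
FROM hits
SETTINGS use_query_cache = 1, query_cache_ttl = 300;
```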
Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads
from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries).
@ -188,14 +192,9 @@ Also, results of queries with non-deterministic functions are not cached by defa
To force caching of results of queries with non-deterministic functions regardless, use setting
[query_cache_nondeterministic_function_handling](settings/settings.md#query-cache-nondeterministic-function-handling).
Results of queries that involve system tables, e.g. `system.processes` or `information_schema.tables`, are not cached by default. To force
caching of results of queries with system tables regardless, use setting
[query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling).
:::note
Prior to ClickHouse v23.11, setting 'query_cache_store_results_of_queries_with_nondeterministic_functions = 0 / 1' controlled whether
results of queries with non-deterministic results were cached. In newer ClickHouse versions, this setting is obsolete and has no effect.
:::
Results of queries that involve system tables (e.g. [system.processes](system-tables/processes.md) or
[information_schema.tables](system-tables/information_schema.md)) are not cached by default. To force caching of results of queries with
system tables regardless, use setting [query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling).
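For example, a query over a system table could be forced into the cache roughly like this (a sketch; `'save'` asks the server to store the result instead of rejecting the query):

```sql
-- Store the result of a system-table query in the query cache.
SELECT count()
FROM system.tables
SETTINGS use_query_cache = 1, query_cache_system_table_handling = 'save';
```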
Finally, entries in the query cache are not shared between users due to security reasons. For example, user A must not be able to bypass a
row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can

View File

@ -131,16 +131,6 @@ Type: UInt64
Default: 8
## background_pool_size
Sets the number of threads performing background merges and mutations for tables with MergeTree engines. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance.
Before changing it, please also take a look at related MergeTree settings, such as `number_of_free_entries_in_pool_to_lower_max_size_of_merge` and `number_of_free_entries_in_pool_to_execute_mutation`.
Type: UInt64
Default: 16
## background_schedule_pool_size
The maximum number of threads that will be used for constantly executing some lightweight periodic operations for replicated tables, Kafka streaming, and DNS cache updates.

View File

@ -19,7 +19,7 @@ Columns:
- `column` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Name of a column to which access is granted.
- `is_partial_revoke` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Logical value. It shows whether some privileges have been revoked. Possible values:
- `0` — The row describes a partial revoke.
- `1` — The row describes a grant.
- `0` — The row describes a grant.
- `1` — The row describes a partial revoke.
- `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Permission is granted `WITH GRANT OPTION`, see [GRANT](../../sql-reference/statements/grant.md#granting-privilege-syntax).
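A quick way to see these two flags together is to query the table directly; a minimal sketch (the returned rows depend on the grants configured on your server):

```sql
-- Inspect explicit grants and partial revokes per user.
SELECT user_name, access_type, database, is_partial_revoke, grant_option
FROM system.grants
LIMIT 10;
```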

View File

@ -7,119 +7,4 @@ toc_hidden: true
# List of Aggregate Functions
Standard aggregate functions:
- [count](../reference/count.md)
- [min](../reference/min.md)
- [max](../reference/max.md)
- [sum](../reference/sum.md)
- [avg](../reference/avg.md)
- [any](../reference/any.md)
- [stddevPop](../reference/stddevpop.md)
- [stddevPopStable](../reference/stddevpopstable.md)
- [stddevSamp](../reference/stddevsamp.md)
- [stddevSampStable](../reference/stddevsampstable.md)
- [varPop](../reference/varpop.md)
- [varSamp](../reference/varsamp.md)
- [corr](../reference/corr.md)
- [corrStable](../reference/corrstable.md)
- [corrMatrix](../reference/corrmatrix.md)
- [covarPop](../reference/covarpop.md)
- [covarPopStable](../reference/covarpopstable.md)
- [covarPopMatrix](../reference/covarpopmatrix.md)
- [covarSamp](../reference/covarsamp.md)
- [covarSampStable](../reference/covarsampstable.md)
- [covarSampMatrix](../reference/covarsampmatrix.md)
- [entropy](../reference/entropy.md)
- [exponentialMovingAverage](../reference/exponentialmovingaverage.md)
- [intervalLengthSum](../reference/intervalLengthSum.md)
- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md)
- [mannwhitneyutest](../reference/mannwhitneyutest.md)
- [median](../reference/median.md)
- [rankCorr](../reference/rankCorr.md)
- [sumKahan](../reference/sumkahan.md)
- [studentTTest](../reference/studentttest.md)
- [welchTTest](../reference/welchttest.md)
ClickHouse-specific aggregate functions:
- [aggThrow](../reference/aggthrow.md)
- [analysisOfVariance](../reference/analysis_of_variance.md)
- [any](../reference/any.md)
- [anyHeavy](../reference/anyheavy.md)
- [anyLast](../reference/anylast.md)
- [boundingRatio](../reference/boundrat.md)
- [first_value](../reference/first_value.md)
- [last_value](../reference/last_value.md)
- [argMin](../reference/argmin.md)
- [argMax](../reference/argmax.md)
- [avgWeighted](../reference/avgweighted.md)
- [topK](../reference/topk.md)
- [topKWeighted](../reference/topkweighted.md)
- [deltaSum](../reference/deltasum.md)
- [deltaSumTimestamp](../reference/deltasumtimestamp.md)
- [flameGraph](../reference/flame_graph.md)
- [groupArray](../reference/grouparray.md)
- [groupArrayLast](../reference/grouparraylast.md)
- [groupUniqArray](../reference/groupuniqarray.md)
- [groupArrayInsertAt](../reference/grouparrayinsertat.md)
- [groupArrayMovingAvg](../reference/grouparraymovingavg.md)
- [groupArrayMovingSum](../reference/grouparraymovingsum.md)
- [groupArraySample](../reference/grouparraysample.md)
- [groupArraySorted](../reference/grouparraysorted.md)
- [groupArrayIntersect](../reference/grouparrayintersect.md)
- [groupBitAnd](../reference/groupbitand.md)
- [groupBitOr](../reference/groupbitor.md)
- [groupBitXor](../reference/groupbitxor.md)
- [groupBitmap](../reference/groupbitmap.md)
- [groupBitmapAnd](../reference/groupbitmapand.md)
- [groupBitmapOr](../reference/groupbitmapor.md)
- [groupBitmapXor](../reference/groupbitmapxor.md)
- [sumWithOverflow](../reference/sumwithoverflow.md)
- [sumMap](../reference/summap.md)
- [sumMapWithOverflow](../reference/summapwithoverflow.md)
- [sumMapFiltered](../parametric-functions.md/#summapfiltered)
- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow)
- [minMap](../reference/minmap.md)
- [maxMap](../reference/maxmap.md)
- [skewSamp](../reference/skewsamp.md)
- [skewPop](../reference/skewpop.md)
- [kurtSamp](../reference/kurtsamp.md)
- [kurtPop](../reference/kurtpop.md)
- [uniq](../reference/uniq.md)
- [uniqExact](../reference/uniqexact.md)
- [uniqCombined](../reference/uniqcombined.md)
- [uniqCombined64](../reference/uniqcombined64.md)
- [uniqHLL12](../reference/uniqhll12.md)
- [uniqTheta](../reference/uniqthetasketch.md)
- [quantile](../reference/quantile.md)
- [quantiles](../reference/quantiles.md)
- [quantileExact](../reference/quantileexact.md)
- [quantileExactLow](../reference/quantileexact.md#quantileexactlow)
- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh)
- [quantileExactWeighted](../reference/quantileexactweighted.md)
- [quantileTiming](../reference/quantiletiming.md)
- [quantileTimingWeighted](../reference/quantiletimingweighted.md)
- [quantileDeterministic](../reference/quantiledeterministic.md)
- [quantileTDigest](../reference/quantiletdigest.md)
- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md)
- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16)
- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted)
- [quantileDD](../reference/quantileddsketch.md#quantileddsketch)
- [simpleLinearRegression](../reference/simplelinearregression.md)
- [singleValueOrNull](../reference/singlevalueornull.md)
- [stochasticLinearRegression](../reference/stochasticlinearregression.md)
- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md)
- [categoricalInformationValue](../reference/categoricalinformationvalue.md)
- [contingency](../reference/contingency.md)
- [cramersV](../reference/cramersv.md)
- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md)
- [theilsU](../reference/theilsu.md)
- [maxIntersections](../reference/maxintersections.md)
- [maxIntersectionsPosition](../reference/maxintersectionsposition.md)
- [meanZTest](../reference/meanztest.md)
- [quantileGK](../reference/quantileGK.md)
- [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md)
- [sparkBar](../reference/sparkbar.md)
- [sumCount](../reference/sumcount.md)
- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md)
ClickHouse supports all standard SQL aggregate functions ([sum](../reference/sum.md), [avg](../reference/avg.md), [min](../reference/min.md), [max](../reference/max.md), [count](../reference/count.md)), as well as a wide range of other aggregate functions.

View File

@ -6,7 +6,9 @@ sidebar_label: AggregateFunction
# AggregateFunction
Aggregate functions can have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge`suffix.
Aggregate functions have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md).
The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix.
To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.
`AggregateFunction(name, types_of_arguments...)` — parametric data type.
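As a hedged sketch of the `-State`/`-Merge` round trip (the table and column names below are invented for the example):

```sql
-- Store intermediate uniq() states and merge them later.
CREATE TABLE uniq_states
(
    id UInt8,
    s AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY id;

INSERT INTO uniq_states SELECT 1, uniqState(number) FROM numbers(1000);
INSERT INTO uniq_states SELECT 1, uniqState(number) FROM numbers(500, 1000);

-- Finalize the aggregation from the stored states.
SELECT id, uniqMerge(s) FROM uniq_states GROUP BY id;
```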

View File

@ -512,6 +512,8 @@ The result of operator `<` for values `d1` with underlying type `T1` and `d2` wi
- If `T1 = T2 = T`, the result will be `d1.T < d2.T` (underlying values will be compared).
- If `T1 != T2`, the result will be `T1 < T2` (type names will be compared).
By default, the `Dynamic` type is not allowed in `GROUP BY`/`ORDER BY` keys. If you want to use it, take its special comparison rule into account and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.
Examples:
```sql
CREATE TABLE test (d Dynamic) ENGINE=Memory;
@ -535,7 +537,7 @@ SELECT d, dynamicType(d) FROM test;
```
```sql
SELECT d, dynamicType(d) FROM test ORDER BY d;
SELECT d, dynamicType(d) FROM test ORDER BY d SETTINGS allow_suspicious_types_in_order_by=1;
```
```sql
@ -557,7 +559,7 @@ Example:
```sql
CREATE TABLE test (d Dynamic) ENGINE=Memory;
INSERT INTO test VALUES (1::UInt32), (1::Int64), (100::UInt32), (100::Int64);
SELECT d, dynamicType(d) FROM test ORDER by d;
SELECT d, dynamicType(d) FROM test ORDER BY d SETTINGS allow_suspicious_types_in_order_by=1;
```
```text
@ -570,7 +572,7 @@ SELECT d, dynamicType(d) FROM test ORDER by d;
```
```sql
SELECT d, dynamicType(d) FROM test GROUP by d;
SELECT d, dynamicType(d) FROM test GROUP by d SETTINGS allow_suspicious_types_in_group_by=1;
```
```text
@ -582,7 +584,7 @@ SELECT d, dynamicType(d) FROM test GROUP by d;
└─────┴────────────────┘
```
**Note**: the described comparison rule is not applied during execution of comparison functions like `<`/`>`/`=` and others because of [special work](#using-dynamic-type-in-functions) of functions with `Dynamic` type
**Note:** the described comparison rule is not applied during execution of comparison functions like `<`/`>`/`=` and others because of the [special handling](#using-dynamic-type-in-functions) of functions with the `Dynamic` type.
## Reaching the limit in number of different data types stored inside Dynamic

View File

@ -6,29 +6,8 @@ sidebar_position: 1
# Data Types in ClickHouse
ClickHouse can store various kinds of data in table cells. This section describes the supported data types and special considerations for using and/or implementing them if any.
This section describes the data types supported by ClickHouse, for example [integers](int-uint.md), [floats](float.md) and [strings](string.md).
:::note
You can check whether a data type name is case-sensitive in the [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) table.
:::
ClickHouse data types include:
- **Integer types**: [signed and unsigned integers](./int-uint.md) (`UInt8`, `UInt16`, `UInt32`, `UInt64`, `UInt128`, `UInt256`, `Int8`, `Int16`, `Int32`, `Int64`, `Int128`, `Int256`)
- **Floating-point numbers**: [floats](./float.md)(`Float32` and `Float64`) and [`Decimal` values](./decimal.md)
- **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md)
- **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md)
- **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time
- **Object**: the [`Object`](./json.md) stores a JSON document in a single column (deprecated)
- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column
- **UUID**: a performant option for storing [`UUID` values](./uuid.md)
- **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column
- **Arrays**: any column can be defined as an [`Array` of values](./array.md)
- **Maps**: use [`Map`](./map.md) for storing key/value pairs
- **Aggregation function types**: use [`SimpleAggregateFunction`](./simpleaggregatefunction.md) and [`AggregateFunction`](./aggregatefunction.md) for storing the intermediate status of aggregate function results
- **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell
- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type.
- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column storing its default value for the data type)
- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses
- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md)
System table [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) provides an
overview of all available data types.
It also shows whether a data type is an alias to another data type and whether its name is case-sensitive (e.g. `bool` vs. `BOOL`).
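For example, the aliases and their case-sensitivity can be inspected directly (a sketch; the exact rows depend on the server version):

```sql
-- List type names that are aliases of other types and whether they are case-insensitive.
SELECT name, case_insensitive, alias_to
FROM system.data_type_families
WHERE alias_to != ''
ORDER BY name
LIMIT 10;
```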

View File

@ -7,7 +7,7 @@ keywords: [object, data type]
# Object Data Type (deprecated)
**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
**This feature is not production-ready and is deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
<hr />

View File

@ -58,10 +58,10 @@ SELECT json FROM test;
└───────────────────────────────────┘
```
Using CAST from 'String':
Using CAST from `String`:
```sql
SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json;
SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON AS json;
```
```text
@ -70,7 +70,47 @@ SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json
└────────────────────────────────────────────────┘
```
CAST from `JSON`, named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later.
Using CAST from `Tuple`:
```sql
SELECT (tuple(42 AS b) AS a, [1, 2, 3] AS c, 'Hello, World!' AS d)::JSON AS json;
```
```text
┌─json───────────────────────────────────────────┐
│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │
└────────────────────────────────────────────────┘
```
Using CAST from `Map`:
```sql
SELECT map('a', map('b', 42), 'c', [1,2,3], 'd', 'Hello, World!')::JSON AS json;
```
```text
┌─json───────────────────────────────────────────┐
│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │
└────────────────────────────────────────────────┘
```
Using CAST from deprecated `Object('json')`:
```sql
SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::Object('json')::JSON AS json;
```
```text
┌─json───────────────────────────────────────────┐
│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │
└────────────────────────────────────────────────┘
```
:::note
CAST from `Tuple`/`Map`/`Object('json')` to `JSON` is implemented via serializing the column into `String` column containing JSON objects and deserializing it back to `JSON` type column.
:::
CAST between `JSON` types with different arguments will be supported later.
## Reading JSON paths as subcolumns
@ -630,6 +670,28 @@ SELECT arrayJoin(distinctJSONPathsAndTypes(json)) FROM s3('s3://clickhouse-publi
└─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┘
```
## ALTER MODIFY COLUMN to JSON type
It's possible to alter an existing table and change the type of a column to the new `JSON` type. Right now, only altering from the `String` type is supported.
**Example**
```sql
CREATE TABLE test (json String) ENGINE=MergeTree ORDER BY tuple();
INSERT INTO test VALUES ('{"a" : 42}'), ('{"a" : 43, "b" : "Hello"}'), ('{"a" : 44, "b" : [1, 2, 3]}'), ('{"c" : "2020-01-01"}');
ALTER TABLE test MODIFY COLUMN json JSON;
SELECT json, json.a, json.b, json.c FROM test;
```
```text
┌─json─────────────────────────┬─json.a─┬─json.b──┬─json.c─────┐
│ {"a":"42"} │ 42 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │
│ {"a":"43","b":"Hello"} │ 43 │ Hello │ ᴺᵁᴸᴸ │
│ {"a":"44","b":["1","2","3"]} │ 44 │ [1,2,3] │ ᴺᵁᴸᴸ │
│ {"c":"2020-01-01"} │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-01 │
└──────────────────────────────┴────────┴─────────┴────────────┘
```
## Tips for better usage of the JSON type
Before creating `JSON` column and loading data into it, consider the following tips:

View File

@ -5,7 +5,9 @@ sidebar_label: SimpleAggregateFunction
---
# SimpleAggregateFunction
`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data.
`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value (intermediate state) of the aggregate function, but not its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does.
This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`.
This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data.
The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix.
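A minimal sketch of the `-SimpleState` workflow (names are invented for the example):

```sql
-- The column stores only the current max value, not a full aggregation state.
CREATE TABLE simple_max
(
    id UInt8,
    m SimpleAggregateFunction(max, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY id;

INSERT INTO simple_max SELECT 1, maxSimpleState(number) FROM numbers(100);
INSERT INTO simple_max SELECT 1, maxSimpleState(number) FROM numbers(200);

-- Rows with the same key are combined with max() during merges and on read.
SELECT id, max(m) FROM simple_max GROUP BY id;
```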

View File

@ -441,6 +441,8 @@ SELECT v, variantType(v) FROM test ORDER by v;
└─────┴────────────────┘
```
**Note:** by default, the `Variant` type is not allowed in `GROUP BY`/`ORDER BY` keys. If you want to use it, take its special comparison rule into account and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.
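For instance, ordering by a `Variant` column then requires enabling the setting explicitly, roughly like this (reusing the `test` table from the examples above):

```sql
-- Without the setting this ORDER BY would be rejected.
SELECT v, variantType(v) FROM test ORDER BY v SETTINGS allow_suspicious_types_in_order_by = 1;
```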
## JSONExtract functions with Variant
All `JSONExtract*` functions support `Variant` type:

View File

@ -5,70 +5,4 @@ sidebar_position: 62
title: "Geo Functions"
---
## Geographical Coordinates Functions
- [greatCircleDistance](./coordinates.md#greatcircledistance)
- [geoDistance](./coordinates.md#geodistance)
- [greatCircleAngle](./coordinates.md#greatcircleangle)
- [pointInEllipses](./coordinates.md#pointinellipses)
- [pointInPolygon](./coordinates.md#pointinpolygon)
## Geohash Functions
- [geohashEncode](./geohash.md#geohashencode)
- [geohashDecode](./geohash.md#geohashdecode)
- [geohashesInBox](./geohash.md#geohashesinbox)
## H3 Indexes Functions
- [h3IsValid](./h3.md#h3isvalid)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3EdgeAngle](./h3.md#h3edgeangle)
- [h3EdgeLengthM](./h3.md#h3edgelengthm)
- [h3EdgeLengthKm](./h3.md#h3edgelengthkm)
- [geoToH3](./h3.md#geotoh3)
- [h3ToGeo](./h3.md#h3togeo)
- [h3ToGeoBoundary](./h3.md#h3togeoboundary)
- [h3kRing](./h3.md#h3kring)
- [h3GetBaseCell](./h3.md#h3getbasecell)
- [h3HexAreaM2](./h3.md#h3hexaream2)
- [h3HexAreaKm2](./h3.md#h3hexareakm2)
- [h3IndexesAreNeighbors](./h3.md#h3indexesareneighbors)
- [h3ToChildren](./h3.md#h3tochildren)
- [h3ToParent](./h3.md#h3toparent)
- [h3ToString](./h3.md#h3tostring)
- [stringToH3](./h3.md#stringtoh3)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3IsResClassIII](./h3.md#h3isresclassiii)
- [h3IsPentagon](./h3.md#h3ispentagon)
- [h3GetFaces](./h3.md#h3getfaces)
- [h3CellAreaM2](./h3.md#h3cellaream2)
- [h3CellAreaRads2](./h3.md#h3cellarearads2)
- [h3ToCenterChild](./h3.md#h3tocenterchild)
- [h3ExactEdgeLengthM](./h3.md#h3exactedgelengthm)
- [h3ExactEdgeLengthKm](./h3.md#h3exactedgelengthkm)
- [h3ExactEdgeLengthRads](./h3.md#h3exactedgelengthrads)
- [h3NumHexagons](./h3.md#h3numhexagons)
- [h3Line](./h3.md#h3line)
- [h3Distance](./h3.md#h3distance)
- [h3HexRing](./h3.md#h3hexring)
- [h3GetUnidirectionalEdge](./h3.md#h3getunidirectionaledge)
- [h3UnidirectionalEdgeIsValid](./h3.md#h3unidirectionaledgeisvalid)
- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3getoriginindexfromunidirectionaledge)
- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3getdestinationindexfromunidirectionaledge)
- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3getindexesfromunidirectionaledge)
- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3getunidirectionaledgesfromhexagon)
- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3getunidirectionaledgeboundary)
## S2 Index Functions
- [geoToS2](./s2.md#geotos2)
- [s2ToGeo](./s2.md#s2togeo)
- [s2GetNeighbors](./s2.md#s2getneighbors)
- [s2CellsIntersect](./s2.md#s2cellsintersect)
- [s2CapContains](./s2.md#s2capcontains)
- [s2CapUnion](./s2.md#s2capunion)
- [s2RectAdd](./s2.md#s2rectadd)
- [s2RectContains](./s2.md#s2rectcontains)
- [s2RectUnion](./s2.md#s2rectunion)
- [s2RectIntersection](./s2.md#s2rectintersection)
Functions for working with geometric objects, for example [to calculate distances between points on a sphere](./coordinates.md), [compute geohashes](./geohash.md), and work with [h3 indexes](./h3.md).

View File

@ -24,7 +24,7 @@ All expressions in a query that have the same AST (the same record or same resul
## Types of Results
All functions return a single return as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.
All functions return a single value as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.
## Constants

View File

@ -6,16 +6,4 @@ sidebar_label: CREATE
# CREATE Queries
Create queries make a new entity of one of the following kinds:
- [DATABASE](/docs/en/sql-reference/statements/create/database.md)
- [TABLE](/docs/en/sql-reference/statements/create/table.md)
- [VIEW](/docs/en/sql-reference/statements/create/view.md)
- [DICTIONARY](/docs/en/sql-reference/statements/create/dictionary.md)
- [FUNCTION](/docs/en/sql-reference/statements/create/function.md)
- [USER](/docs/en/sql-reference/statements/create/user.md)
- [ROLE](/docs/en/sql-reference/statements/create/role.md)
- [ROW POLICY](/docs/en/sql-reference/statements/create/row-policy.md)
- [QUOTA](/docs/en/sql-reference/statements/create/quota.md)
- [SETTINGS PROFILE](/docs/en/sql-reference/statements/create/settings-profile.md)
- [NAMED COLLECTION](/docs/en/sql-reference/statements/create/named-collection.md)
CREATE queries create (for example) new [databases](/docs/en/sql-reference/statements/create/database.md), [tables](/docs/en/sql-reference/statements/create/table.md) and [views](/docs/en/sql-reference/statements/create/view.md).
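As a small hedged illustration (the database, table, and view names are made up), the three most common CREATE statements look like this:

```sql
CREATE DATABASE IF NOT EXISTS analytics;

CREATE TABLE analytics.events
(
    ts DateTime,
    user_id UInt64
)
ENGINE = MergeTree
ORDER BY ts;

CREATE VIEW analytics.events_per_user AS
SELECT user_id, count() AS events
FROM analytics.events
GROUP BY user_id;
```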

View File

@ -6,27 +6,4 @@ sidebar_label: List of statements
# ClickHouse SQL Statements
Statements represent various kinds of action you can perform using SQL queries. Each kind of statement has its own syntax and usage details that are described separately:
- [SELECT](/docs/en/sql-reference/statements/select/index.md)
- [INSERT INTO](/docs/en/sql-reference/statements/insert-into.md)
- [CREATE](/docs/en/sql-reference/statements/create/index.md)
- [ALTER](/docs/en/sql-reference/statements/alter/index.md)
- [SYSTEM](/docs/en/sql-reference/statements/system.md)
- [SHOW](/docs/en/sql-reference/statements/show.md)
- [GRANT](/docs/en/sql-reference/statements/grant.md)
- [REVOKE](/docs/en/sql-reference/statements/revoke.md)
- [ATTACH](/docs/en/sql-reference/statements/attach.md)
- [CHECK TABLE](/docs/en/sql-reference/statements/check-table.md)
- [DESCRIBE TABLE](/docs/en/sql-reference/statements/describe-table.md)
- [DETACH](/docs/en/sql-reference/statements/detach.md)
- [DROP](/docs/en/sql-reference/statements/drop.md)
- [EXISTS](/docs/en/sql-reference/statements/exists.md)
- [KILL](/docs/en/sql-reference/statements/kill.md)
- [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md)
- [RENAME](/docs/en/sql-reference/statements/rename.md)
- [SET](/docs/en/sql-reference/statements/set.md)
- [SET ROLE](/docs/en/sql-reference/statements/set-role.md)
- [TRUNCATE](/docs/en/sql-reference/statements/truncate.md)
- [USE](/docs/en/sql-reference/statements/use.md)
- [EXPLAIN](/docs/en/sql-reference/statements/explain.md)
Users interact with ClickHouse using SQL statements. ClickHouse supports common SQL statements like [SELECT](select/index.md) and [CREATE](create/index.md), but it also provides specialized statements like [KILL](kill.md) and [OPTIMIZE](optimize.md).

View File

@ -291,7 +291,7 @@ All missed values of `expr` column will be filled sequentially and other columns
To fill multiple columns, add `WITH FILL` modifier with optional parameters after each field name in `ORDER BY` section.
``` sql
ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr]
ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr] [STALENESS const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr] [STALENESS numeric_expr]
[INTERPOLATE [(col [AS expr], ... colN [AS exprN])]]
```
@ -300,6 +300,7 @@ When `FROM const_expr` not defined sequence of filling use minimal `expr` field
When `TO const_expr` is not defined, the sequence of filling uses the maximum `expr` field value from `ORDER BY`.
When `STEP const_numeric_expr` is defined, `const_numeric_expr` is interpreted `as is` for numeric types, as `days` for the Date type, and as `seconds` for the DateTime type. It also supports the [INTERVAL](https://clickhouse.com/docs/en/sql-reference/data-types/special-data-types/interval/) data type representing time and date intervals.
When `STEP const_numeric_expr` is omitted, the sequence of filling uses `1.0` for numeric types, `1 day` for the Date type, and `1 second` for the DateTime type.
When `STALENESS const_numeric_expr` is defined, the query will generate rows until the difference from the previous row in the original data exceeds `const_numeric_expr`.
`INTERPOLATE` can be applied to columns not participating in `ORDER BY WITH FILL`. Such columns are filled based on previous fields values by applying `expr`. If `expr` is not present will repeat previous value. Omitted list will result in including all allowed columns.
Example of a query without `WITH FILL`:
@ -497,6 +498,64 @@ Result:
└────────────┴────────────┴──────────┘
```
Example of a query without `STALENESS`:
``` sql
SELECT number as key, 5 * number value, 'original' AS source
FROM numbers(16) WHERE key % 5 == 0
ORDER BY key WITH FILL;
```
Result:
``` text
┌─key─┬─value─┬─source───┐
1. │ 0 │ 0 │ original │
2. │ 1 │ 0 │ │
3. │ 2 │ 0 │ │
4. │ 3 │ 0 │ │
5. │ 4 │ 0 │ │
6. │ 5 │ 25 │ original │
7. │ 6 │ 0 │ │
8. │ 7 │ 0 │ │
9. │ 8 │ 0 │ │
10. │ 9 │ 0 │ │
11. │ 10 │ 50 │ original │
12. │ 11 │ 0 │ │
13. │ 12 │ 0 │ │
14. │ 13 │ 0 │ │
15. │ 14 │ 0 │ │
16. │ 15 │ 75 │ original │
└─────┴───────┴──────────┘
```
Same query after applying `STALENESS 3`:
``` sql
SELECT number as key, 5 * number value, 'original' AS source
FROM numbers(16) WHERE key % 5 == 0
ORDER BY key WITH FILL STALENESS 3;
```
Result:
``` text
┌─key─┬─value─┬─source───┐
1. │ 0 │ 0 │ original │
2. │ 1 │ 0 │ │
3. │ 2 │ 0 │ │
4. │ 5 │ 25 │ original │
5. │ 6 │ 0 │ │
6. │ 7 │ 0 │ │
7. │ 10 │ 50 │ original │
8. │ 11 │ 0 │ │
9. │ 12 │ 0 │ │
10. │ 15 │ 75 │ original │
11. │ 16 │ 0 │ │
12. │ 17 │ 0 │ │
└─────┴───────┴──────────┘
```
Example of a query without `INTERPOLATE`:
``` sql

View File

@ -431,7 +431,7 @@ catch (const Exception & e)
bool need_print_stack_trace = config().getBool("stacktrace", false) && e.code() != ErrorCodes::NETWORK_ERROR;
std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl << std::endl;
/// If exception code isn't zero, we should return non-zero return code anyway.
return e.code() ? e.code() : -1;
return static_cast<UInt8>(e.code()) ? e.code() : -1;
}
catch (...)
{
@ -1390,7 +1390,8 @@ int mainEntryClickHouseClient(int argc, char ** argv)
catch (const DB::Exception & e)
{
std::cerr << DB::getExceptionMessage(e, false) << std::endl;
return 1;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
catch (const boost::program_options::error & e)
{
@ -1399,7 +1400,8 @@ int mainEntryClickHouseClient(int argc, char ** argv)
}
catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
return 1;
std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -9,6 +9,7 @@
#include <IO/WriteBufferFromFile.h>
#include <IO/ReadBufferFromFile.h>
#include <Compression/CompressedWriteBuffer.h>
#include <Compression/ParallelCompressedWriteBuffer.h>
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedReadBufferFromFile.h>
#include <IO/WriteHelpers.h>
@ -17,6 +18,8 @@
#include <Parsers/ExpressionElementParsers.h>
#include <Compression/CompressionFactory.h>
#include <Common/TerminalSize.h>
#include <Common/ThreadPool.h>
#include <Common/CurrentMetrics.h>
#include <Core/Defines.h>
@ -29,6 +32,13 @@ namespace DB
}
}
namespace CurrentMetrics
{
extern const Metric LocalThread;
extern const Metric LocalThreadActive;
extern const Metric LocalThreadScheduled;
}
namespace
{
@ -77,11 +87,12 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
("decompress,d", "decompress")
("offset-in-compressed-file", po::value<size_t>()->default_value(0ULL), "offset to the compressed block (i.e. physical file offset)")
("offset-in-decompressed-block", po::value<size_t>()->default_value(0ULL), "offset to the decompressed block (i.e. virtual offset)")
("block-size,b", po::value<unsigned>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
("block-size,b", po::value<size_t>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
("hc", "use LZ4HC instead of LZ4")
("zstd", "use ZSTD instead of LZ4")
("codec", po::value<std::vector<std::string>>()->multitoken(), "use codecs combination instead of LZ4")
("level", po::value<int>(), "compression level for codecs specified via flags")
("threads", po::value<size_t>()->default_value(1), "number of threads for parallel compression")
("none", "use no compression instead of LZ4")
("stat", "print block statistics of compressed data")
("stacktrace", "print stacktrace of exception")
@ -109,7 +120,8 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
bool stat_mode = options.count("stat");
bool use_none = options.count("none");
print_stacktrace = options.count("stacktrace");
unsigned block_size = options["block-size"].as<unsigned>();
size_t block_size = options["block-size"].as<size_t>();
size_t num_threads = options["threads"].as<size_t>();
std::vector<std::string> codecs;
if (options.count("codec"))
codecs = options["codec"].as<std::vector<std::string>>();
@ -117,6 +129,12 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
if ((use_lz4hc || use_zstd || use_none) && !codecs.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, codec flags like --zstd and --codec options are mutually exclusive");
if (num_threads < 1)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid value of `threads` parameter");
if (num_threads > 1 && decompress)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parallel mode is only implemented for compression (not for decompression)");
if (!codecs.empty() && options.count("level"))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, --level is not compatible with --codec list");
@ -145,7 +163,6 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
else
codec = CompressionCodecFactory::instance().get(method_family, level);
std::unique_ptr<ReadBufferFromFileBase> rb;
std::unique_ptr<WriteBufferFromFileBase> wb;
@ -186,9 +203,20 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
else
{
/// Compression
CompressedWriteBuffer to(*wb, codec, block_size);
copyData(*rb, to);
to.finalize();
if (num_threads == 1)
{
CompressedWriteBuffer to(*wb, codec, block_size);
copyData(*rb, to);
to.finalize();
}
else
{
ThreadPool pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads);
ParallelCompressedWriteBuffer to(*wb, codec, block_size, num_threads, pool);
copyData(*rb, to);
to.finalize();
}
}
}
catch (...)

View File

@ -546,16 +546,18 @@ int mainEntryClickHouseDisks(int argc, char ** argv)
catch (const DB::Exception & e)
{
std::cerr << DB::getExceptionMessage(e, false) << std::endl;
return 0;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
catch (const boost::program_options::error & e)
{
std::cerr << "Bad arguments: " << e.what() << std::endl;
return 0;
return DB::ErrorCodes::BAD_ARGUMENTS;
}
catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
return 0;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -448,7 +448,8 @@ int mainEntryClickHouseKeeperClient(int argc, char ** argv)
catch (const DB::Exception & e)
{
std::cerr << DB::getExceptionMessage(e, false) << std::endl;
return 1;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
catch (const boost::program_options::error & e)
{
@ -458,6 +459,7 @@ int mainEntryClickHouseKeeperClient(int argc, char ** argv)
catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
return 1;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -81,7 +81,7 @@ int mainEntryClickHouseKeeper(int argc, char ** argv)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
}
@ -672,7 +672,7 @@ catch (...)
/// Poco does not provide stacktrace.
tryLogCurrentException("Application");
auto code = getCurrentExceptionCode();
return code ? code : -1;
return static_cast<UInt8>(code) ? code : -1;
}

View File

@ -13,7 +13,7 @@ int mainEntryClickHouseLibraryBridge(int argc, char ** argv)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -14,6 +14,7 @@
#include <Databases/registerDatabases.h>
#include <Databases/DatabaseFilesystem.h>
#include <Databases/DatabaseMemory.h>
#include <Databases/DatabaseAtomic.h>
#include <Databases/DatabasesOverlay.h>
#include <Storages/System/attachSystemTables.h>
#include <Storages/System/attachInformationSchemaTables.h>
@ -22,7 +23,6 @@
#include <Interpreters/ProcessList.h>
#include <Interpreters/loadMetadata.h>
#include <Interpreters/registerInterpreters.h>
#include <base/getFQDNOrHostName.h>
#include <Access/AccessControl.h>
#include <Common/PoolId.h>
#include <Common/Exception.h>
@ -31,7 +31,6 @@
#include <Common/ThreadStatus.h>
#include <Common/TLDListsHolder.h>
#include <Common/quoteString.h>
#include <Common/randomSeed.h>
#include <Common/ThreadPool.h>
#include <Common/CurrentMetrics.h>
#include <Loggers/OwnFormattingChannel.h>
@ -50,7 +49,6 @@
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Formats/registerFormats.h>
#include <boost/algorithm/string/replace.hpp>
#include <boost/program_options/options_description.hpp>
#include <base/argsToConfig.h>
#include <filesystem>
@ -71,9 +69,11 @@ namespace CurrentMetrics
namespace DB
{
namespace Setting
{
extern const SettingsBool allow_introspection_functions;
extern const SettingsBool implicit_select;
extern const SettingsLocalFSReadMethod storage_file_read_method;
}
@ -126,6 +126,7 @@ void applySettingsOverridesForLocal(ContextMutablePtr context)
settings[Setting::allow_introspection_functions] = true;
settings[Setting::storage_file_read_method] = LocalFSReadMethod::mmap;
settings[Setting::implicit_select] = true;
context->setSettings(settings);
}
@ -257,12 +258,12 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
return system_database;
}
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context)
{
auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
return databaseCombiner;
auto overlay = std::make_shared<DatabasesOverlay>(name_, context);
overlay->registerNextDatabase(std::make_shared<DatabaseAtomic>(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context));
overlay->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context));
return overlay;
}
/// If path is specified and not empty, will try to setup server environment and load existing metadata
@ -615,12 +616,14 @@ catch (const DB::Exception & e)
{
bool need_print_stack_trace = getClientConfiguration().getBool("stacktrace", false);
std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl;
return e.code() ? e.code() : -1;
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
catch (...)
{
std::cerr << getCurrentExceptionMessage(false) << std::endl;
return getCurrentExceptionCode();
std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
auto code = DB::getCurrentExceptionCode();
return static_cast<UInt8>(code) ? code : 1;
}
void LocalServer::updateLoggerLevel(const String & logs_level)
@ -809,7 +812,12 @@ void LocalServer::processConfig()
DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase();
std::string default_database = server_settings[ServerSetting::default_database];
DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
{
DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context);
if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil)
DatabaseCatalog::instance().addUUIDMapping(uuid);
DatabaseCatalog::instance().attachDatabase(default_database, database);
}
global_context->setCurrentDatabase(default_database);
if (getClientConfiguration().has("path"))
@ -1029,7 +1037,7 @@ int mainEntryClickHouseLocal(int argc, char ** argv)
{
std::cerr << DB::getExceptionMessage(e, false) << std::endl;
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
catch (const boost::program_options::error & e)
{
@ -1040,6 +1048,6 @@ int mainEntryClickHouseLocal(int argc, char ** argv)
{
std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -1,27 +1,22 @@
#include <unistd.h>
#include <fcntl.h>
#include <base/phdr_cache.h>
#include <Common/EnvironmentChecks.h>
#include <Common/StringUtils.h>
#include <Common/getHashOfLoadedBinary.h>
#include <new>
#include <iostream>
#include <vector>
#include <string>
#include <string_view>
#include <utility> /// pair
#include <fmt/format.h>
#if defined(SANITIZE_COVERAGE)
# include <Common/Coverage.h>
#endif
#include "config.h"
#include "config_tools.h"
#include <Common/EnvironmentChecks.h>
#include <Common/Coverage.h>
#include <Common/StringUtils.h>
#include <Common/getHashOfLoadedBinary.h>
#include <Common/IO.h>
#include <base/phdr_cache.h>
#include <base/coverage.h>
#include <filesystem>
#include <iostream>
#include <new>
#include <string>
#include <string_view>
#include <utility> /// pair
#include <vector>
/// Universal executable for various clickhouse applications
int mainEntryClickHouseServer(int argc, char ** argv);
@ -238,9 +233,12 @@ int main(int argc_, char ** argv_)
/// clickhouse # spawn local
/// clickhouse local # spawn local
/// clickhouse "select ..." # spawn local
/// clickhouse /tmp/repro --enable-analyzer
///
if (main_func == printHelp && !argv.empty() && (argv.size() == 1 || argv[1][0] == '-'
|| std::string_view(argv[1]).contains(' ')))
std::error_code ec;
if (main_func == printHelp && !argv.empty()
&& (argv.size() == 1 || argv[1][0] == '-' || std::string_view(argv[1]).contains(' ')
|| std::filesystem::is_regular_file(std::filesystem::path{argv[1]}, ec)))
{
main_func = mainEntryClickHouseLocal;
}

View File

@ -1480,5 +1480,5 @@ catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}

View File

@ -13,7 +13,7 @@ int mainEntryClickHouseODBCBridge(int argc, char ** argv)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
}

View File

@ -343,7 +343,7 @@ int mainEntryClickHouseServer(int argc, char ** argv)
{
std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
auto code = DB::getCurrentExceptionCode();
return code ? code : 1;
return static_cast<UInt8>(code) ? code : 1;
}
}
@ -2537,7 +2537,7 @@ catch (...)
/// Poco does not provide stacktrace.
tryLogCurrentException("Application");
auto code = getCurrentExceptionCode();
return code ? code : -1;
return static_cast<UInt8>(code) ? code : -1;
}
std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(

View File

@ -59,7 +59,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
{
if (0 != getgrgid_r(gid, &entry, buf.get(), buf_size, &result))
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
}
gid = entry.gr_gid;
}
@ -84,7 +90,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
{
if (0 != getpwuid_r(uid, &entry, buf.get(), buf_size, &result))
throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwuid_r' to obtain uid from user name ({})", uid);
if (!result)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
}
uid = entry.pw_uid;
}

View File

@ -387,7 +387,7 @@ template <typename Value, bool return_float, bool interpolated>
using FuncQuantileExactWeighted = AggregateFunctionQuantile<
Value,
QuantileExactWeighted<Value, interpolated>,
NameQuantileExactWeighted,
std::conditional_t<interpolated, NameQuantileExactWeightedInterpolated, NameQuantileExactWeighted>,
true,
std::conditional_t<return_float, Float64, void>,
false,
@ -396,7 +396,7 @@ template <typename Value, bool return_float, bool interpolated>
using FuncQuantilesExactWeighted = AggregateFunctionQuantile<
Value,
QuantileExactWeighted<Value, interpolated>,
NameQuantilesExactWeighted,
std::conditional_t<interpolated, NameQuantilesExactWeightedInterpolated, NameQuantilesExactWeighted>,
true,
std::conditional_t<return_float, Float64, void>,
true,

View File

@ -602,9 +602,21 @@ public:
return projection_columns;
}
/// Returns true if query node is resolved, false otherwise
bool isResolved() const
{
return !projection_columns.empty();
}
/// Resolve query node projection columns
void resolveProjectionColumns(NamesAndTypes projection_columns_value);
/// Clear query node projection columns
void clearProjectionColumns()
{
projection_columns.clear();
}
/// Remove unused projection columns
void removeUnusedProjectionColumns(const std::unordered_set<std::string> & used_projection_columns);

View File

@ -498,6 +498,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express
sort_node->getFillTo() = buildExpression(order_by_element.getFillTo(), context);
if (order_by_element.getFillStep())
sort_node->getFillStep() = buildExpression(order_by_element.getFillStep(), context);
if (order_by_element.getFillStaleness())
sort_node->getFillStaleness() = buildExpression(order_by_element.getFillStaleness(), context);
list_node->getNodes().push_back(std::move(sort_node));
}

View File

@ -51,7 +51,6 @@
#include <Analyzer/ArrayJoinNode.h>
#include <Analyzer/JoinNode.h>
#include <Analyzer/UnionNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryTreeBuilder.h>
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/Identifier.h>
@ -103,6 +102,8 @@ namespace Setting
extern const SettingsBool single_join_prefer_left_table;
extern const SettingsBool transform_null_in;
extern const SettingsUInt64 use_structure_from_insertion_table_in_table_functions;
extern const SettingsBool allow_suspicious_types_in_group_by;
extern const SettingsBool allow_suspicious_types_in_order_by;
extern const SettingsBool use_concurrency_control;
}
@ -437,8 +438,13 @@ ProjectionName QueryAnalyzer::calculateWindowProjectionName(const QueryTreeNodeP
return buffer.str();
}
ProjectionName QueryAnalyzer::calculateSortColumnProjectionName(const QueryTreeNodePtr & sort_column_node, const ProjectionName & sort_expression_projection_name,
const ProjectionName & fill_from_expression_projection_name, const ProjectionName & fill_to_expression_projection_name, const ProjectionName & fill_step_expression_projection_name)
ProjectionName QueryAnalyzer::calculateSortColumnProjectionName(
const QueryTreeNodePtr & sort_column_node,
const ProjectionName & sort_expression_projection_name,
const ProjectionName & fill_from_expression_projection_name,
const ProjectionName & fill_to_expression_projection_name,
const ProjectionName & fill_step_expression_projection_name,
const ProjectionName & fill_staleness_expression_projection_name)
{
auto & sort_node_typed = sort_column_node->as<SortNode &>();
@ -468,6 +474,9 @@ ProjectionName QueryAnalyzer::calculateSortColumnProjectionName(const QueryTreeN
if (sort_node_typed.hasFillStep())
sort_column_projection_name_buffer << " STEP " << fill_step_expression_projection_name;
if (sort_node_typed.hasFillStaleness())
sort_column_projection_name_buffer << " STALENESS " << fill_staleness_expression_projection_name;
}
return sort_column_projection_name_buffer.str();
@ -2958,27 +2967,29 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
/// Replace storage with values storage of insertion block
if (StoragePtr storage = scope.context->getViewSource())
{
QueryTreeNodePtr table_expression;
/// Process possibly nested sub-selects
for (auto * query_node = in_second_argument->as<QueryNode>(); query_node; query_node = table_expression->as<QueryNode>())
table_expression = extractLeftTableExpression(query_node->getJoinTree());
QueryTreeNodePtr table_expression = in_second_argument;
if (table_expression)
/// Process possibly nested sub-selects
while (table_expression)
{
if (auto * query_table_node = table_expression->as<TableNode>())
{
if (query_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted())
{
auto replacement_table_expression = std::make_shared<TableNode>(storage, scope.context);
if (std::optional<TableExpressionModifiers> table_expression_modifiers = query_table_node->getTableExpressionModifiers())
replacement_table_expression->setTableExpressionModifiers(*table_expression_modifiers);
in_second_argument = in_second_argument->cloneAndReplace(table_expression, std::move(replacement_table_expression));
}
}
if (auto * query_node = table_expression->as<QueryNode>())
table_expression = extractLeftTableExpression(query_node->getJoinTree());
else if (auto * union_node = table_expression->as<UnionNode>())
table_expression = union_node->getQueries().getNodes().at(0);
else
break;
}
TableNode * table_expression_table_node = table_expression ? table_expression->as<TableNode>() : nullptr;
if (table_expression_table_node &&
table_expression_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted())
{
auto replacement_table_expression_table_node = table_expression_table_node->clone();
replacement_table_expression_table_node->as<TableNode &>().updateStorage(storage, scope.context);
in_second_argument = in_second_argument->cloneAndReplace(table_expression, std::move(replacement_table_expression_table_node));
}
}
resolveExpressionNode(in_second_argument, scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/);
}
/// Edge case when the first argument of IN is scalar subquery.
@ -3011,9 +3022,10 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
argument_column.name = arguments_projection_names[function_argument_index];
/** If function argument is lambda, save lambda argument index and initialize argument type as DataTypeFunction
* where function argument types are initialized with empty array of lambda arguments size.
* where function argument types are initialized with empty arrays of lambda arguments size.
*/
if (const auto * lambda_node = function_argument->as<const LambdaNode>())
const auto * lambda_node = function_argument->as<const LambdaNode>();
if (lambda_node)
{
size_t lambda_arguments_size = lambda_node->getArguments().getNodes().size();
argument_column.type = std::make_shared<DataTypeFunction>(DataTypes(lambda_arguments_size, nullptr), nullptr);
@ -3485,15 +3497,11 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
else
function_base = function->build(argument_columns);
/// Do not constant fold get scalar functions
// bool disable_constant_folding = function_name == "__getScalar" || function_name == "shardNum" ||
// function_name == "shardCount" || function_name == "hostName" || function_name == "tcpPort";
/** If function is suitable for constant folding try to convert it to constant.
* Example: SELECT plus(1, 1);
* Result: SELECT 2;
*/
if (function_base->isSuitableForConstantFolding()) // && !disable_constant_folding)
if (function_base->isSuitableForConstantFolding())
{
auto result_type = function_base->getResultType();
auto executable_function = function_base->prepare(argument_columns);
@ -3502,7 +3510,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
if (all_arguments_constants)
{
size_t num_rows = function_arguments.empty() ? 0 : argument_columns.front().column->size();
size_t num_rows = 0;
if (!argument_columns.empty())
num_rows = argument_columns.front().column->size();
column = executable_function->execute(argument_columns, result_type, num_rows, true);
}
else
@ -3998,6 +4008,7 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
ProjectionNames fill_from_expression_projection_names;
ProjectionNames fill_to_expression_projection_names;
ProjectionNames fill_step_expression_projection_names;
ProjectionNames fill_staleness_expression_projection_names;
auto & sort_node_list_typed = sort_node_list->as<ListNode &>();
for (auto & node : sort_node_list_typed.getNodes())
@ -4019,6 +4030,8 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
sort_node.getExpression() = sort_column_list_node->getNodes().front();
}
validateSortingKeyType(sort_node.getExpression()->getResultType(), scope);
size_t sort_expression_projection_names_size = sort_expression_projection_names.size();
if (sort_expression_projection_names_size != 1)
throw Exception(ErrorCodes::LOGICAL_ERROR,
@ -4088,11 +4101,38 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
fill_step_expression_projection_names_size);
}
if (sort_node.hasFillStaleness())
{
fill_staleness_expression_projection_names = resolveExpressionNode(sort_node.getFillStaleness(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);
const auto * constant_node = sort_node.getFillStaleness()->as<ConstantNode>();
if (!constant_node)
throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION,
"Sort FILL STALENESS expression must be constant with numeric or interval type. Actual {}. In scope {}",
sort_node.getFillStaleness()->formatASTForErrorMessage(),
scope.scope_node->formatASTForErrorMessage());
bool is_number = isColumnedAsNumber(constant_node->getResultType());
bool is_interval = WhichDataType(constant_node->getResultType()).isInterval();
if (!is_number && !is_interval)
throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION,
"Sort FILL STALENESS expression must be constant with numeric or interval type. Actual {}. In scope {}",
sort_node.getFillStaleness()->formatASTForErrorMessage(),
scope.scope_node->formatASTForErrorMessage());
size_t fill_staleness_expression_projection_names_size = fill_staleness_expression_projection_names.size();
if (fill_staleness_expression_projection_names_size != 1)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Sort FILL STALENESS expression expected 1 projection name. Actual {}",
fill_staleness_expression_projection_names_size);
}
auto sort_column_projection_name = calculateSortColumnProjectionName(node,
sort_expression_projection_names[0],
fill_from_expression_projection_names.empty() ? "" : fill_from_expression_projection_names.front(),
fill_to_expression_projection_names.empty() ? "" : fill_to_expression_projection_names.front(),
fill_step_expression_projection_names.empty() ? "" : fill_step_expression_projection_names.front());
fill_step_expression_projection_names.empty() ? "" : fill_step_expression_projection_names.front(),
fill_staleness_expression_projection_names.empty() ? "" : fill_staleness_expression_projection_names.front());
result_projection_names.push_back(std::move(sort_column_projection_name));
@ -4100,11 +4140,32 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
fill_from_expression_projection_names.clear();
fill_to_expression_projection_names.clear();
fill_step_expression_projection_names.clear();
fill_staleness_expression_projection_names.clear();
}
return result_projection_names;
}
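Note on the new FILL STALENESS validation above: the expression must be a constant of numeric or interval type. A minimal SQL sketch of the intended usage, assuming a hypothetical table events(ts DateTime, value UInt64) and that the SQL modifier is spelled STALENESS, as in the error message:

    -- Fill one-minute gaps, but stop filling once the distance to the last original row exceeds 5 minutes.
    SELECT ts, value
    FROM events
    ORDER BY ts WITH FILL STEP toIntervalMinute(1) STALENESS toIntervalMinute(5);

    -- A non-constant STALENESS expression (for example a column reference) is rejected
    -- with INVALID_WITH_FILL_EXPRESSION by the check above.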
void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const
{
if (scope.context->getSettingsRef()[Setting::allow_suspicious_types_in_order_by])
return;
auto check = [](const IDataType & type)
{
if (isDynamic(type) || isVariant(type))
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. "
"Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if "
"its a JSON path subcolumn) or casting this column to a specific data type. "
"Set setting allow_suspicious_types_in_order_by = 1 in order to allow it");
};
check(*sorting_key_type);
sorting_key_type->forEachChild(check);
}
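As an illustration of the ORDER BY check above (hypothetical table t with a column dyn of type Dynamic; Variant columns behave the same way):

    -- Rejected with ILLEGAL_COLUMN under the default settings:
    SELECT * FROM t ORDER BY dyn;

    -- Allowed: order by a typed subcolumn, or opt in explicitly.
    SELECT * FROM t ORDER BY dyn.Int64;
    SELECT * FROM t ORDER BY dyn SETTINGS allow_suspicious_types_in_order_by = 1;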
namespace
{
@ -4144,11 +4205,12 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR
expandTuplesInList(group_by_list);
}
if (scope.group_by_use_nulls)
for (const auto & grouping_set : query_node_typed.getGroupBy().getNodes())
{
for (const auto & grouping_set : query_node_typed.getGroupBy().getNodes())
for (const auto & group_by_elem : grouping_set->as<ListNode>()->getNodes())
{
for (const auto & group_by_elem : grouping_set->as<ListNode>()->getNodes())
validateGroupByKeyType(group_by_elem->getResultType(), scope);
if (scope.group_by_use_nulls)
scope.nullable_group_by_keys.insert(group_by_elem);
}
}
@ -4164,14 +4226,37 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR
auto & group_by_list = query_node_typed.getGroupBy().getNodes();
expandTuplesInList(group_by_list);
if (scope.group_by_use_nulls)
for (const auto & group_by_elem : query_node_typed.getGroupBy().getNodes())
{
for (const auto & group_by_elem : query_node_typed.getGroupBy().getNodes())
validateGroupByKeyType(group_by_elem->getResultType(), scope);
if (scope.group_by_use_nulls)
scope.nullable_group_by_keys.insert(group_by_elem);
}
}
}
/** Validate data types of GROUP BY key.
*/
void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const
{
if (scope.context->getSettingsRef()[Setting::allow_suspicious_types_in_group_by])
return;
auto check = [](const IDataType & type)
{
if (isDynamic(type) || isVariant(type))
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. "
"Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if "
"its a JSON path subcolumn) or casting this column to a specific data type. "
"Set setting allow_suspicious_types_in_group_by = 1 in order to allow it");
};
check(*group_by_key_type);
group_by_key_type->forEachChild(check);
}
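The GROUP BY check mirrors the ORDER BY one; a sketch with the same hypothetical table t and Dynamic column dyn:

    -- Rejected with ILLEGAL_COLUMN under the default settings:
    SELECT count() FROM t GROUP BY dyn;

    -- Allowed: group by a typed subcolumn, or opt in explicitly.
    SELECT count() FROM t GROUP BY dyn.Int64;
    SELECT count() FROM t GROUP BY dyn SETTINGS allow_suspicious_types_in_group_by = 1;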
/** Resolve interpolate columns nodes list.
*/
void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope)
@ -5310,6 +5395,16 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
auto & query_node_typed = query_node->as<QueryNode &>();
/** It is unsafe to call resolveQuery on an already resolved query node, because during the identifier resolution process
* we replace identifiers with expressions without aliases; also, at the end of resolveQuery all aliases from all nodes are removed.
* For subsequent resolveQuery executions it is possible to get a wrong projection header, because for nodes
* with aliases the projection name is the alias.
*
* If a client needs to resolve a query node after cloning it, the client must clear the projection columns from the query node before resolving.
*/
if (query_node_typed.isResolved())
return;
if (query_node_typed.isCTE())
ctes_in_resolve_process.insert(query_node_typed.getCTEName());
@ -5448,16 +5543,13 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
*/
scope.use_identifier_lookup_to_result_cache = false;
if (query_node_typed.getJoinTree())
{
TableExpressionsAliasVisitor table_expressions_visitor(scope);
table_expressions_visitor.visit(query_node_typed.getJoinTree());
TableExpressionsAliasVisitor table_expressions_visitor(scope);
table_expressions_visitor.visit(query_node_typed.getJoinTree());
initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope);
scope.aliases.alias_name_to_table_expression_node.clear();
initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope);
scope.aliases.alias_name_to_table_expression_node.clear();
resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor);
}
resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor);
if (!scope.group_by_use_nulls)
scope.use_identifier_lookup_to_result_cache = true;
@ -5675,6 +5767,9 @@ void QueryAnalyzer::resolveUnion(const QueryTreeNodePtr & union_node, Identifier
{
auto & union_node_typed = union_node->as<UnionNode &>();
if (union_node_typed.isResolved())
return;
if (union_node_typed.isCTE())
ctes_in_resolve_process.insert(union_node_typed.getCTEName());

View File

@ -140,7 +140,8 @@ private:
const ProjectionName & sort_expression_projection_name,
const ProjectionName & fill_from_expression_projection_name,
const ProjectionName & fill_to_expression_projection_name,
const ProjectionName & fill_step_expression_projection_name);
const ProjectionName & fill_step_expression_projection_name,
const ProjectionName & fill_staleness_expression_projection_name);
QueryTreeNodePtr tryGetLambdaFromSQLUserDefinedFunctions(const std::string & function_name, ContextPtr context);
@ -219,8 +220,12 @@ private:
ProjectionNames resolveSortNodeList(QueryTreeNodePtr & sort_node_list, IdentifierResolveScope & scope);
void validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const;
void resolveGroupByNode(QueryNode & query_node_typed, IdentifierResolveScope & scope);
void validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const;
void resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope);
void resolveWindowNodeList(QueryTreeNodePtr & window_node_list, IdentifierResolveScope & scope);

View File

@ -69,6 +69,12 @@ void SortNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, si
buffer << '\n' << std::string(indent + 2, ' ') << "FILL STEP\n";
getFillStep()->dumpTreeImpl(buffer, format_state, indent + 4);
}
if (hasFillStaleness())
{
buffer << '\n' << std::string(indent + 2, ' ') << "FILL STALENESS\n";
getFillStaleness()->dumpTreeImpl(buffer, format_state, indent + 4);
}
}
bool SortNode::isEqualImpl(const IQueryTreeNode & rhs, CompareOptions) const
@ -132,6 +138,8 @@ ASTPtr SortNode::toASTImpl(const ConvertToASTOptions & options) const
result->setFillTo(getFillTo()->toAST(options));
if (hasFillStep())
result->setFillStep(getFillStep()->toAST(options));
if (hasFillStaleness())
result->setFillStaleness(getFillStaleness()->toAST(options));
return result;
}

View File

@ -105,6 +105,24 @@ public:
return children[fill_step_child_index];
}
/// Returns true if sort node has fill staleness, false otherwise
bool hasFillStaleness() const
{
return children[fill_staleness_child_index] != nullptr;
}
/// Get fill staleness
const QueryTreeNodePtr & getFillStaleness() const
{
return children[fill_staleness_child_index];
}
/// Get fill staleness
QueryTreeNodePtr & getFillStaleness()
{
return children[fill_staleness_child_index];
}
/// Get collator
const std::shared_ptr<Collator> & getCollator() const
{
@ -144,7 +162,8 @@ private:
static constexpr size_t fill_from_child_index = 1;
static constexpr size_t fill_to_child_index = 2;
static constexpr size_t fill_step_child_index = 3;
static constexpr size_t children_size = fill_step_child_index + 1;
static constexpr size_t fill_staleness_child_index = 4;
static constexpr size_t children_size = fill_staleness_child_index + 1;
SortDirection sort_direction = SortDirection::ASCENDING;
std::optional<SortDirection> nulls_sort_direction;

View File

@ -35,6 +35,7 @@ namespace ErrorCodes
{
extern const int TYPE_MISMATCH;
extern const int BAD_ARGUMENTS;
extern const int LOGICAL_ERROR;
}
UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_)
@ -50,6 +51,26 @@ UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_)
children[queries_child_index] = std::make_shared<ListNode>();
}
bool UnionNode::isResolved() const
{
for (const auto & query_node : getQueries().getNodes())
{
bool is_resolved = false;
if (auto * query_node_typed = query_node->as<QueryNode>())
is_resolved = query_node_typed->isResolved();
else if (auto * union_node_typed = query_node->as<UnionNode>())
is_resolved = union_node_typed->isResolved();
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected query tree node type in UNION node");
if (!is_resolved)
return false;
}
return true;
}
NamesAndTypes UnionNode::computeProjectionColumns() const
{
if (recursive_cte_table)

View File

@ -163,6 +163,9 @@ public:
return children[queries_child_index];
}
/// Returns true if union node is resolved, false otherwise
bool isResolved() const;
/// Compute union node projection columns
NamesAndTypes computeProjectionColumns() const;

View File

@ -1650,6 +1650,11 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
if (!parsed_insert_query)
return;
/// If it's clickhouse-local, and the input data reading is already baked into the query pipeline,
/// don't read the data again here. This happens in some cases (e.g. input() table function) but not others (e.g. INFILE).
if (!connection->isSendDataNeeded())
return;
bool have_data_in_stdin = !is_interactive && !stdin_is_a_tty && isStdinNotEmptyAndValid(std_in);
if (need_render_progress)
@ -2674,7 +2679,10 @@ void ClientBase::runInteractive()
#if USE_REPLXX
replxx::Replxx::highlighter_callback_t highlight_callback{};
if (getClientConfiguration().getBool("highlight", true))
highlight_callback = highlight;
highlight_callback = [this](const String & query, std::vector<replxx::Replxx::Color> & colors)
{
highlight(query, colors, *client_context);
};
ReplxxLineReader lr(
*suggest,

View File

@ -5,6 +5,8 @@
#include <Parsers/ParserQuery.h>
#include <Parsers/parseQuery.h>
#include <Common/UTF8Helpers.h>
#include <Core/Settings.h>
#include <Interpreters/Context.h>
#include <iostream>
@ -12,6 +14,11 @@
namespace DB
{
namespace Setting
{
extern const SettingsBool implicit_select;
}
/// Should we celebrate a bit?
bool isNewYearMode()
{
@ -95,7 +102,7 @@ bool isChineseNewYearMode(const String & local_tz)
}
#if USE_REPLXX
void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors)
void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors, const Context & context)
{
using namespace replxx;
@ -135,13 +142,27 @@ void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors
/// Currently we highlight only the first query in the multi-query mode.
ParserQuery parser(end);
ParserQuery parser(end, false, context.getSettingsRef()[Setting::implicit_select]);
ASTPtr ast;
bool parse_res = false;
try
{
parse_res = parser.parse(token_iterator, ast, expected);
while (!token_iterator->isEnd())
{
parse_res = parser.parse(token_iterator, ast, expected);
if (!parse_res)
break;
if (!token_iterator->isEnd() && token_iterator->type != TokenType::Semicolon)
{
parse_res = false;
break;
}
while (token_iterator->type == TokenType::Semicolon)
++token_iterator;
}
}
catch (...)
{
@ -175,7 +196,7 @@ void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors
/// Highlight the last error in red. If the parser failed or the lexer found an invalid token,
/// or if it didn't parse all the data (except, the data for INSERT query, which is legitimately unparsed)
if ((!parse_res || last_token.isError() || (!token_iterator->isEnd() && token_iterator->type != TokenType::Semicolon))
if ((!parse_res || last_token.isError())
&& !(insert_data && expected.max_parsed_pos >= insert_data)
&& expected.max_parsed_pos >= prev)
{

View File

@ -11,13 +11,15 @@
namespace DB
{
class Context;
/// Should we celebrate a bit?
bool isNewYearMode();
bool isChineseNewYearMode(const String & local_tz);
#if USE_REPLXX
void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors);
void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors, const Context & context);
#endif
}

View File

@ -1,5 +1,7 @@
#include <Client/ClientApplicationBase.h>
#include <filesystem>
namespace DB
{
@ -108,6 +110,7 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de
{
/// Two special cases for better usability:
/// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1"
/// - if the option is a filesystem file, then it's likely a queries file (clickhouse repro.sql)
/// These are relevant for interactive usage - user-friendly, but questionable in general.
/// In case of ambiguity or for scripts, prefer using proper options.
@ -115,8 +118,11 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de
po::variable_value value(boost::any(op.value), false);
const char * option;
std::error_code ec;
if (token.contains(' '))
option = "query";
else if (std::filesystem::is_regular_file(std::filesystem::path{token}, ec))
option = "queries-file";
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token);

View File

@ -109,6 +109,10 @@ public:
/// Send block of data; if name is specified, server will write it to external (temporary) table of that name.
virtual void sendData(const Block & block, const String & name, bool scalar) = 0;
/// Whether the client needs to read and send the data for the INSERT.
/// False if the server will read the data through other means (in particular if clickhouse-local added input reading step directly into the query pipeline).
virtual bool isSendDataNeeded() const { return true; }
/// Send all contents of external (temporary) tables.
virtual void sendExternalTablesData(ExternalTablesData & data) = 0;
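For context, a sketch of the two INSERT paths this distinguishes in clickhouse-local (hypothetical table t; behavior as described in the comments above, not an exhaustive list of cases):

    -- input(): reading the data is baked into the query pipeline, so isSendDataNeeded()
    -- returns false and the client does not read and send the data again.
    INSERT INTO t SELECT a, b FROM input('a UInt64, b String') FORMAT CSV

    -- INFILE: the data is still read and sent by the client, so the default
    -- isSendDataNeeded() == true path applies.
    INSERT INTO t FROM INFILE 'data.csv' FORMAT CSV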

View File

@ -328,6 +328,11 @@ void LocalConnection::sendData(const Block & block, const String &, bool)
sendProfileEvents();
}
bool LocalConnection::isSendDataNeeded() const
{
return !state || state->input_pipeline == nullptr;
}
void LocalConnection::sendCancel()
{
state->is_cancelled = true;

View File

@ -120,6 +120,8 @@ public:
void sendData(const Block & block, const String & name/* = "" */, bool scalar/* = false */) override;
bool isSendDataNeeded() const override;
void sendExternalTablesData(ExternalTablesData &) override;
void sendMergeTreeReadTaskResponse(const ParallelReadResponse & response) override;

View File

@ -196,6 +196,13 @@ public:
bool hasDynamicStructure() const override { return getData().hasDynamicStructure(); }
void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override;
bool dynamicStructureEquals(const IColumn & rhs) const override
{
if (const auto * rhs_concrete = typeid_cast<const ColumnArray *>(&rhs))
return data->dynamicStructureEquals(*rhs_concrete->data);
return false;
}
private:
WrappedPtr data;
WrappedPtr offsets;

View File

@ -1208,6 +1208,15 @@ void ColumnDynamic::prepareVariantsForSquashing(const Columns & source_columns)
}
}
bool ColumnDynamic::dynamicStructureEquals(const IColumn & rhs) const
{
if (const auto * rhs_concrete = typeid_cast<const ColumnDynamic *>(&rhs))
return max_dynamic_types == rhs_concrete->max_dynamic_types && global_max_dynamic_types == rhs_concrete->global_max_dynamic_types
&& variant_info.variant_name == rhs_concrete->variant_info.variant_name
&& variant_column->dynamicStructureEquals(*rhs_concrete->variant_column);
return false;
}
void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source_columns)
{
if (!empty())

View File

@ -376,6 +376,7 @@ public:
bool addNewVariant(const DataTypePtr & new_variant) { return addNewVariant(new_variant, new_variant->getName()); }
bool hasDynamicStructure() const override { return true; }
bool dynamicStructureEquals(const IColumn & rhs) const override;
void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override;
const StatisticsPtr & getStatistics() const { return statistics; }

View File

@ -72,6 +72,26 @@ ColumnPtr ColumnFunction::cut(size_t start, size_t length) const
return ColumnFunction::create(length, function, capture, is_short_circuit_argument, is_function_compiled);
}
Field ColumnFunction::operator[](size_t n) const
{
Field res;
get(n, res);
return res;
}
void ColumnFunction::get(size_t n, Field & res) const
{
const size_t tuple_size = captured_columns.size();
res = Tuple();
Tuple & res_tuple = res.safeGet<Tuple &>();
res_tuple.reserve(tuple_size);
for (size_t i = 0; i < tuple_size; ++i)
res_tuple.push_back((*captured_columns[i].column)[n]);
}
#if !defined(DEBUG_OR_SANITIZER_BUILD)
void ColumnFunction::insertFrom(const IColumn & src, size_t n)
#else

View File

@ -60,15 +60,9 @@ public:
void appendArguments(const ColumnsWithTypeAndName & columns);
ColumnWithTypeAndName reduce() const;
Field operator[](size_t) const override
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot get value from {}", getName());
}
Field operator[](size_t n) const override;
void get(size_t, Field &) const override
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot get value from {}", getName());
}
void get(size_t n, Field & res) const override;
StringRef getDataAt(size_t) const override
{

View File

@ -345,6 +345,13 @@ bool ColumnMap::structureEquals(const IColumn & rhs) const
return false;
}
bool ColumnMap::dynamicStructureEquals(const IColumn & rhs) const
{
if (const auto * rhs_map = typeid_cast<const ColumnMap *>(&rhs))
return nested->dynamicStructureEquals(*rhs_map->nested);
return false;
}
ColumnPtr ColumnMap::compress() const
{
auto compressed = nested->compress();

View File

@ -123,6 +123,7 @@ public:
ColumnPtr compress() const override;
bool hasDynamicStructure() const override { return nested->hasDynamicStructure(); }
bool dynamicStructureEquals(const IColumn & rhs) const override;
void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override;
};

View File

@ -1415,6 +1415,31 @@ void ColumnObject::prepareForSquashing(const std::vector<ColumnPtr> & source_col
}
}
bool ColumnObject::dynamicStructureEquals(const IColumn & rhs) const
{
const auto * rhs_object = typeid_cast<const ColumnObject *>(&rhs);
if (!rhs_object || typed_paths.size() != rhs_object->typed_paths.size()
|| global_max_dynamic_paths != rhs_object->global_max_dynamic_paths || max_dynamic_types != rhs_object->max_dynamic_types
|| dynamic_paths.size() != rhs_object->dynamic_paths.size())
return false;
for (const auto & [path, column] : typed_paths)
{
auto it = rhs_object->typed_paths.find(path);
if (it == rhs_object->typed_paths.end() || !it->second->dynamicStructureEquals(*column))
return false;
}
for (const auto & [path, column] : dynamic_paths)
{
auto it = rhs_object->dynamic_paths.find(path);
if (it == rhs_object->dynamic_paths.end() || !it->second->dynamicStructureEquals(*column))
return false;
}
return true;
}
void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & source_columns)
{
if (!empty())

View File

@ -177,6 +177,7 @@ public:
bool isFinalized() const override;
bool hasDynamicStructure() const override { return true; }
bool dynamicStructureEquals(const IColumn & rhs) const override;
void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override;
const PathToColumnMap & getTypedPaths() const { return typed_paths; }
@ -227,6 +228,7 @@ public:
void setDynamicPaths(const std::vector<String> & paths);
void setDynamicPaths(const std::vector<std::pair<String, ColumnPtr>> & paths);
void setMaxDynamicPaths(size_t max_dynamic_paths_);
void setGlobalMaxDynamicPaths(size_t global_max_dynamic_paths_);
void setStatistics(const StatisticsPtr & statistics_) { statistics = statistics_; }
void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, std::string_view path, const IColumn & column, size_t n);

View File

@ -757,6 +757,26 @@ bool ColumnTuple::hasDynamicStructure() const
return false;
}
bool ColumnTuple::dynamicStructureEquals(const IColumn & rhs) const
{
if (const auto * rhs_tuple = typeid_cast<const ColumnTuple *>(&rhs))
{
const size_t tuple_size = columns.size();
if (tuple_size != rhs_tuple->columns.size())
return false;
for (size_t i = 0; i < tuple_size; ++i)
if (!columns[i]->dynamicStructureEquals(*rhs_tuple->columns[i]))
return false;
return true;
}
else
{
return false;
}
}
void ColumnTuple::takeDynamicStructureFromSourceColumns(const Columns & source_columns)
{
std::vector<Columns> nested_source_columns;

View File

@ -141,6 +141,7 @@ public:
ColumnPtr & getColumnPtr(size_t idx) { return columns[idx]; }
bool hasDynamicStructure() const override;
bool dynamicStructureEquals(const IColumn & rhs) const override;
void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override;
/// Empty tuple needs a public method to manage its size.

View File

@ -952,7 +952,7 @@ ColumnPtr ColumnVariant::permute(const Permutation & perm, size_t limit) const
if (hasOnlyNulls())
{
if (limit)
return cloneResized(limit);
return cloneResized(limit ? std::min(size(), limit) : size());
/// If no limit, we can just return current immutable column.
return this->getPtr();
@ -1409,6 +1409,23 @@ bool ColumnVariant::structureEquals(const IColumn & rhs) const
return true;
}
bool ColumnVariant::dynamicStructureEquals(const IColumn & rhs) const
{
const auto * rhs_variant = typeid_cast<const ColumnVariant *>(&rhs);
if (!rhs_variant)
return false;
const size_t num_variants = variants.size();
if (num_variants != rhs_variant->variants.size())
return false;
for (size_t i = 0; i < num_variants; ++i)
if (!variants[i]->dynamicStructureEquals(rhs_variant->getVariantByGlobalDiscriminator(globalDiscriminatorByLocal(i))))
return false;
return true;
}
ColumnPtr ColumnVariant::compress() const
{
ColumnPtr local_discriminators_compressed = local_discriminators->compress();

View File

@ -336,6 +336,7 @@ public:
void extend(const std::vector<Discriminator> & old_to_new_global_discriminators, std::vector<std::pair<MutableColumnPtr, Discriminator>> && new_variants_and_discriminators);
bool hasDynamicStructure() const override;
bool dynamicStructureEquals(const IColumn & rhs) const override;
void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override;
private:

View File

@ -635,6 +635,9 @@ public:
/// Checks if column has dynamic subcolumns.
virtual bool hasDynamicStructure() const { return false; }
/// For columns with dynamic subcolumns checks if columns have equal dynamic structure.
[[nodiscard]] virtual bool dynamicStructureEquals(const IColumn & rhs) const { return structureEquals(rhs); }
/// For columns with dynamic subcolumns this method takes dynamic structure from source columns
/// and creates proper resulting dynamic structure in advance for merge of these source columns.
virtual void takeDynamicStructureFromSourceColumns(const std::vector<Ptr> & /*source_columns*/) {}

View File

@ -41,6 +41,10 @@
M(PostgreSQLConnection, "Number of client connections using PostgreSQL protocol") \
M(OpenFileForRead, "Number of files open for reading") \
M(OpenFileForWrite, "Number of files open for writing") \
M(Compressing, "Number of compress operations using internal compression codecs") \
M(Decompressing, "Number of decompress operations using internal compression codecs") \
M(ParallelCompressedWriteBufferThreads, "Number of threads in all instances of ParallelCompressedWriteBuffer - these threads are doing parallel compression and writing") \
M(ParallelCompressedWriteBufferWait, "Number of threads in all instances of ParallelCompressedWriteBuffer that are currently waiting for buffer to become available for writing") \
M(TotalTemporaryFiles, "Number of temporary files created") \
M(TemporaryFilesForSort, "Number of temporary files created for external sorting") \
M(TemporaryFilesForAggregation, "Number of temporary files created for external aggregation") \
@ -99,6 +103,9 @@
M(IOThreads, "Number of threads in the IO thread pool.") \
M(IOThreadsActive, "Number of threads in the IO thread pool running a task.") \
M(IOThreadsScheduled, "Number of queued or active jobs in the IO thread pool.") \
M(CompressionThread, "Number of threads in compression thread pools.") \
M(CompressionThreadActive, "Number of threads in compression thread pools running a task.") \
M(CompressionThreadScheduled, "Number of queued or active jobs in compression thread pools.") \
M(ThreadPoolRemoteFSReaderThreads, "Number of threads in the thread pool for remote_filesystem_read_method=threadpool.") \
M(ThreadPoolRemoteFSReaderThreadsActive, "Number of threads in the thread pool for remote_filesystem_read_method=threadpool running a task.") \
M(ThreadPoolRemoteFSReaderThreadsScheduled, "Number of queued or active jobs in the thread pool for remote_filesystem_read_method=threadpool.") \

View File

@ -0,0 +1,30 @@
#include <Common/FieldVisitorScale.h>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
FieldVisitorScale::FieldVisitorScale(Int32 rhs_) : rhs(rhs_) {}
void FieldVisitorScale::operator() (Int64 & x) const { x *= rhs; }
void FieldVisitorScale::operator() (UInt64 & x) const { x *= rhs; }
void FieldVisitorScale::operator() (Float64 & x) const { x *= rhs; }
void FieldVisitorScale::operator() (Null &) const { /*Do not scale anything*/ }
void FieldVisitorScale::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Strings"); }
void FieldVisitorScale::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Arrays"); }
void FieldVisitorScale::operator() (Tuple &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Tuples"); }
void FieldVisitorScale::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Maps"); }
void FieldVisitorScale::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Objects"); }
void FieldVisitorScale::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale UUIDs"); }
void FieldVisitorScale::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale IPv4s"); }
void FieldVisitorScale::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale IPv6s"); }
void FieldVisitorScale::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale custom type {}", x.getTypeName()); }
void FieldVisitorScale::operator() (AggregateFunctionStateData &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale AggregateFunctionStates"); }
void FieldVisitorScale::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Bools"); }
}

View File

@ -0,0 +1,43 @@
#pragma once
#include <Common/FieldVisitors.h>
#include <Common/FieldVisitorConvertToNumber.h>
namespace DB
{
/** Implements `*=` operation by number
*/
class FieldVisitorScale : public StaticVisitor<void>
{
private:
Int32 rhs;
public:
explicit FieldVisitorScale(Int32 rhs_);
void operator() (Int64 & x) const;
void operator() (UInt64 & x) const;
void operator() (Float64 & x) const;
void operator() (Null &) const;
[[noreturn]] void operator() (String &) const;
[[noreturn]] void operator() (Array &) const;
[[noreturn]] void operator() (Tuple &) const;
[[noreturn]] void operator() (Map &) const;
[[noreturn]] void operator() (Object &) const;
[[noreturn]] void operator() (UUID &) const;
[[noreturn]] void operator() (IPv4 &) const;
[[noreturn]] void operator() (IPv6 &) const;
[[noreturn]] void operator() (AggregateFunctionStateData &) const;
[[noreturn]] void operator() (CustomType &) const;
[[noreturn]] void operator() (bool &) const;
template <typename T>
void operator() (DecimalField<T> & x) const { x = DecimalField<T>(x.getValue() * T(rhs), x.getScale()); }
template <typename T>
requires is_big_int_v<T>
void operator() (T & x) const { x *= rhs; }
};
}

View File

@ -658,16 +658,11 @@ protected:
{
if (!std::is_trivially_destructible_v<Cell>)
{
for (iterator it = begin(), it_end = end(); it != it_end; ++it)
for (iterator it = begin(), it_end = end(); it != it_end;)
{
it.ptr->~Cell();
/// In case of poison_in_dtor=1 it will be poisoned,
/// but it maybe used later, during iteration.
///
/// NOTE, that technically this is UB [1], but OK for now.
///
/// [1]: https://github.com/google/sanitizers/issues/854#issuecomment-329661378
__msan_unpoison(it.ptr, sizeof(*it.ptr));
auto ptr = it.ptr;
++it;
ptr->~Cell();
}
/// Everything had been destroyed in the loop above, reset the flag

View File

@ -547,6 +547,7 @@ The server successfully detected this situation and will download merged part fr
M(FilesystemCacheLoadMetadataMicroseconds, "Time spent loading filesystem cache metadata", ValueType::Microseconds) \
M(FilesystemCacheEvictedBytes, "Number of bytes evicted from filesystem cache", ValueType::Bytes) \
M(FilesystemCacheEvictedFileSegments, "Number of file segments evicted from filesystem cache", ValueType::Number) \
M(FilesystemCacheBackgroundDownloadQueuePush, "Number of file segments sent for background download in filesystem cache", ValueType::Number) \
M(FilesystemCacheEvictionSkippedFileSegments, "Number of file segments skipped for eviction because of being in unreleasable state", ValueType::Number) \
M(FilesystemCacheEvictionSkippedEvictingFileSegments, "Number of file segments skipped for eviction because of being in evicting state", ValueType::Number) \
M(FilesystemCacheEvictionTries, "Number of filesystem cache eviction attempts", ValueType::Number) \
@ -745,6 +746,12 @@ The server successfully detected this situation and will download merged part fr
M(ReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for s3Cluster table function and similar). Measured on the remote server side.", ValueType::Microseconds) \
M(MergeTreeReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.", ValueType::Microseconds) \
M(MergeTreeAllRangesAnnouncementsSentElapsedMicroseconds, "Time spent in sending the announcement from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.", ValueType::Microseconds) \
M(MergerMutatorsGetPartsForMergeElapsedMicroseconds, "Time spent taking a snapshot of data parts to build ranges from them.", ValueType::Microseconds) \
M(MergerMutatorPrepareRangesForMergeElapsedMicroseconds, "Time spent preparing ranges of parts that can be merged according to the merge predicate.", ValueType::Microseconds) \
M(MergerMutatorSelectPartsForMergeElapsedMicroseconds, "Time spent selecting parts from ranges that can be merged.", ValueType::Microseconds) \
M(MergerMutatorRangesForMergeCount, "Number of candidate ranges for merge", ValueType::Number) \
M(MergerMutatorPartsInRangesForMergeCount, "Number of candidate parts for merge", ValueType::Number) \
M(MergerMutatorSelectRangePartsCount, "Number of parts in the selected range for merge", ValueType::Number) \
\
M(ConnectionPoolIsFullMicroseconds, "Total time spent waiting for a slot in connection pool.", ValueType::Microseconds) \
M(AsyncLoaderWaitMicroseconds, "Total time a query was waiting for async loader jobs.", ValueType::Microseconds) \

View File

@ -122,7 +122,7 @@ public:
void scheduleOrThrowOnError(Job job, Priority priority = {});
/// Similar to scheduleOrThrowOnError(...). Wait for specified amount of time and schedule a job or return false.
bool trySchedule(Job job, Priority priority = {}, uint64_t wait_microseconds = 0) noexcept;
[[nodiscard]] bool trySchedule(Job job, Priority priority = {}, uint64_t wait_microseconds = 0) noexcept;
/// Similar to scheduleOrThrowOnError(...). Wait for specified amount of time and schedule a job or throw an exception.
void scheduleOrThrow(Job job, Priority priority = {}, uint64_t wait_microseconds = 0, bool propagate_opentelemetry_tracing_context = true);
@ -142,7 +142,7 @@ public:
/// Returns true if the pool already terminated
/// (and any further scheduling will produce CANNOT_SCHEDULE_TASK exception)
bool finished() const;
[[nodiscard]] bool finished() const;
void setMaxThreads(size_t value);
void setMaxFreeThreads(size_t value);

View File

@ -2,7 +2,6 @@
#include <cstring>
#include <base/types.h>
#include <base/unaligned.h>
#include <base/defines.h>
#include <IO/WriteHelpers.h>

View File

@ -5,11 +5,18 @@
#include <Parsers/ASTFunction.h>
#include <base/unaligned.h>
#include <Common/Exception.h>
#include <Common/CurrentMetrics.h>
#include <Parsers/queryToString.h>
#include <Parsers/ASTIdentifier.h>
#include <Compression/CompressionCodecMultiple.h>
namespace CurrentMetrics
{
extern const Metric Compressing;
extern const Metric Decompressing;
}
namespace DB
{
@ -80,6 +87,8 @@ UInt32 ICompressionCodec::compress(const char * source, UInt32 source_size, char
{
assert(source != nullptr && dest != nullptr);
CurrentMetrics::Increment metric_increment(CurrentMetrics::Compressing);
dest[0] = getMethodByte();
UInt8 header_size = getHeaderSize();
/// Write data from header_size
@ -93,6 +102,8 @@ UInt32 ICompressionCodec::decompress(const char * source, UInt32 source_size, ch
{
assert(source != nullptr && dest != nullptr);
CurrentMetrics::Increment metric_increment(CurrentMetrics::Decompressing);
UInt8 header_size = getHeaderSize();
if (source_size < header_size)
throw Exception(decompression_error_code,

View File

@ -0,0 +1,166 @@
#include <city.h>
#include <base/types.h>
#include <base/defines.h>
#include <IO/WriteHelpers.h>
#include <Common/setThreadName.h>
#include <Common/scope_guard_safe.h>
#include <Common/CurrentThread.h>
#include <Common/CurrentMetrics.h>
#include <Compression/ParallelCompressedWriteBuffer.h>
namespace CurrentMetrics
{
extern const Metric ParallelCompressedWriteBufferThreads;
extern const Metric ParallelCompressedWriteBufferWait;
}
namespace DB
{
ParallelCompressedWriteBuffer::ParallelCompressedWriteBuffer(
WriteBuffer & out_,
CompressionCodecPtr codec_,
size_t buf_size_,
size_t num_threads_,
ThreadPool & pool_)
: WriteBuffer(nullptr, 0), out(out_), codec(codec_), buf_size(buf_size_), num_threads(num_threads_), pool(pool_)
{
buffers.emplace_back(buf_size);
current_buffer = buffers.begin();
BufferBase::set(current_buffer->uncompressed.data(), buf_size, 0);
}
void ParallelCompressedWriteBuffer::nextImpl()
{
if (!offset())
return;
std::unique_lock lock(mutex);
/// The buffer will be compressed and processed in the thread.
current_buffer->busy = true;
current_buffer->sequence_num = current_sequence_num;
++current_sequence_num;
current_buffer->uncompressed_size = offset();
pool.scheduleOrThrowOnError([this, my_current_buffer = current_buffer, thread_group = CurrentThread::getGroup()]
{
SCOPE_EXIT_SAFE(
if (thread_group)
CurrentThread::detachFromGroupIfNotDetached();
);
if (thread_group)
CurrentThread::attachToGroupIfDetached(thread_group);
setThreadName("ParallelCompres");
compress(my_current_buffer);
});
BufferPair * previous_buffer = &*current_buffer;
++current_buffer;
if (current_buffer == buffers.end())
{
if (buffers.size() < num_threads)
{
/// If we didn't use all num_threads buffers yet, create a new one.
current_buffer = buffers.emplace(current_buffer, buf_size);
}
else
{
/// Otherwise, wrap around to the first buffer in the list.
current_buffer = buffers.begin();
}
}
/// Wait until the buffer is no longer busy
if (current_buffer->busy)
{
CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferWait);
cond.wait(lock, [&]{ return !current_buffer->busy; });
}
/// Now this buffer can be used.
current_buffer->previous = previous_buffer;
BufferBase::set(current_buffer->uncompressed.data(), buf_size, 0);
}
void ParallelCompressedWriteBuffer::finalizeImpl()
{
next();
pool.wait();
}
void ParallelCompressedWriteBuffer::compress(Iterator buffer)
{
CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferThreads);
chassert(buffer->uncompressed_size <= INT_MAX);
UInt32 uncompressed_size = static_cast<UInt32>(buffer->uncompressed_size);
UInt32 compressed_reserve_size = codec->getCompressedReserveSize(uncompressed_size);
/// If all previous buffers have been written,
/// and if the output buffer has the required capacity,
/// we can compress data directly into the output buffer.
size_t required_out_capacity = compressed_reserve_size + sizeof(CityHash_v1_0_2::uint128);
bool can_write_directly = false;
if (!buffer->previous)
{
can_write_directly = out.available() >= required_out_capacity;
}
else
{
std::unique_lock lock(mutex);
can_write_directly = (!buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num)
&& out.available() >= required_out_capacity;
}
if (can_write_directly)
{
char * out_compressed_ptr = out.position() + sizeof(CityHash_v1_0_2::uint128);
UInt32 compressed_size = codec->compress(buffer->uncompressed.data(), uncompressed_size, out_compressed_ptr);
CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(out_compressed_ptr, compressed_size);
writeBinaryLittleEndian(checksum.low64, out);
writeBinaryLittleEndian(checksum.high64, out);
out.position() += compressed_size;
}
else
{
buffer->compressed.resize(compressed_reserve_size);
UInt32 compressed_size = codec->compress(buffer->uncompressed.data(), uncompressed_size, buffer->compressed.data());
CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(buffer->compressed.data(), compressed_size);
/// Wait until all previous buffers have been written.
if (buffer->previous)
{
CurrentMetrics::Increment metric_wait_increment(CurrentMetrics::ParallelCompressedWriteBufferWait);
std::unique_lock lock(mutex);
cond.wait(lock, [&]{ return !buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num; });
}
writeBinaryLittleEndian(checksum.low64, out);
writeBinaryLittleEndian(checksum.high64, out);
out.write(buffer->compressed.data(), compressed_size);
}
std::unique_lock lock(mutex);
buffer->busy = false;
cond.notify_all();
}
ParallelCompressedWriteBuffer::~ParallelCompressedWriteBuffer()
{
if (!canceled)
finalize();
}
}

View File

@ -0,0 +1,70 @@
#pragma once
#include <list>
#include <memory>
#include <Common/PODArray.h>
#include <IO/WriteBuffer.h>
#include <IO/BufferWithOwnMemory.h>
#include <Compression/ICompressionCodec.h>
#include <Compression/CompressionFactory.h>
#include <Common/ThreadPool.h>
namespace DB
{
/** Uses multi-buffering for parallel compression.
* When the buffer is filled, it will be compressed in the background,
* and a new buffer is created for the next input data.
*/
class ParallelCompressedWriteBuffer final : public WriteBuffer
{
public:
explicit ParallelCompressedWriteBuffer(
WriteBuffer & out_,
CompressionCodecPtr codec_,
size_t buf_size_,
size_t num_threads_,
ThreadPool & pool_);
~ParallelCompressedWriteBuffer() override;
private:
void nextImpl() override;
void finalizeImpl() override;
WriteBuffer & out;
CompressionCodecPtr codec;
size_t buf_size;
size_t num_threads;
ThreadPool & pool;
struct BufferPair
{
explicit BufferPair(size_t input_size)
: uncompressed(input_size)
{
}
Memory<> uncompressed;
size_t uncompressed_size = 0;
PODArray<char> compressed;
BufferPair * previous = nullptr;
size_t sequence_num = 0;
bool busy = false;
};
std::mutex mutex;
std::condition_variable cond;
std::list<BufferPair> buffers;
using Iterator = std::list<BufferPair>::iterator;
Iterator current_buffer;
size_t current_sequence_num = 0;
void compress(Iterator buffer);
};
}

View File

@ -330,7 +330,7 @@ TYPED_TEST(CoordinationTest, TestSummingRaft1)
this->setLogDirectory("./logs");
this->setStateFileDirectory(".");
SummingRaftServer s1(1, "localhost", 44444, this->keeper_context);
SummingRaftServer s1(1, "localhost", 0, this->keeper_context);
SCOPE_EXIT(if (std::filesystem::exists("./state")) std::filesystem::remove("./state"););
/// Single node is leader

View File

@ -119,15 +119,4 @@ enum class JoinTableSide : uint8_t
const char * toString(JoinTableSide join_table_side);
/// Setting to choose which table to use as the inner table in hash join
enum class JoinInnerTableSelectionMode : uint8_t
{
/// Use left table
Left,
/// Use right table
Right,
/// Use the table with the smallest number of rows
Auto,
};
}

View File

@ -873,6 +873,12 @@ In CREATE TABLE statement allows specifying Variant type with similar variant ty
)", 0) \
DECLARE(Bool, allow_suspicious_primary_key, false, R"(
Allow suspicious `PRIMARY KEY`/`ORDER BY` for MergeTree (e.g. SimpleAggregateFunction).
)", 0) \
DECLARE(Bool, allow_suspicious_types_in_group_by, false, R"(
Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in GROUP BY keys.
)", 0) \
DECLARE(Bool, allow_suspicious_types_in_order_by, false, R"(
Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in ORDER BY keys.
)", 0) \
DECLARE(Bool, compile_expressions, false, R"(
Compile some scalar functions and operators to native code. Due to a bug in the LLVM compiler infrastructure, on AArch64 machines, it is known to lead to a nullptr dereference and, consequently, server crash. Do not enable this setting.
@ -1912,9 +1918,6 @@ See also:
For single JOIN in case of identifier ambiguity prefer left table
)", IMPORTANT) \
\
DECLARE(JoinInnerTableSelectionMode, query_plan_join_inner_table_selection, JoinInnerTableSelectionMode::Auto, R"(
Select the side of the join to be the inner table in the query plan. Supported only for `ALL` join strictness with `JOIN ON` clause. Possible values: 'auto', 'left', 'right'.
)", 0) \
DECLARE(UInt64, preferred_block_size_bytes, 1000000, R"(
This setting adjusts the data block size for query processing and represents additional fine-tuning to the more rough 'max_block_size' setting. If the columns are large and with 'max_block_size' rows the block size is likely to be larger than the specified amount of bytes, its size will be lowered for better CPU cache locality.
)", 0) \
@ -2866,7 +2869,7 @@ Limit on size of multipart/form-data content. This setting cannot be parsed from
DECLARE(Bool, calculate_text_stack_trace, true, R"(
Calculate text stack trace in case of exceptions during query execution. This is the default. It requires symbol lookups that may slow down fuzzing tests when a huge amount of wrong queries are executed. In normal cases, you should not disable this option.
)", 0) \
DECLARE(Bool, enable_job_stack_trace, false, R"(
DECLARE(Bool, enable_job_stack_trace, true, R"(
Output stack trace of a job creator when job results in exception
)", 0) \
DECLARE(Bool, allow_ddl, true, R"(
@ -2889,6 +2892,9 @@ Possible values:
**See Also**
- [ORDER BY Clause](../../sql-reference/statements/select/order-by.md/#optimize_read_in_order)
)", 0) \
DECLARE(Bool, read_in_order_use_virtual_row, false, R"(
Use a virtual row while reading in order of the primary key or a monotonic function of it. It is useful when searching over multiple parts, as only the relevant ones are touched.
)", 0) \
DECLARE(Bool, optimize_read_in_window_order, true, R"(
Enable ORDER BY optimization in window clause for reading data in corresponding order in MergeTree tables.
@ -4239,7 +4245,7 @@ Rewrite aggregate functions with if expression as argument when logically equiva
For example, `avg(if(cond, col, null))` can be rewritten to `avgOrNullIf(cond, col)`. It may improve performance.
:::note
Supported only with experimental analyzer (`enable_analyzer = 1`).
Supported only with the analyzer (`enable_analyzer = 1`).
:::
)", 0) \
DECLARE(Bool, optimize_rewrite_array_exists_to_has, false, R"(
@ -5132,6 +5138,12 @@ Only in ClickHouse Cloud. A window for sending ACK for DataPacket sequence in a
)", 0) \
DECLARE(Bool, distributed_cache_discard_connection_if_unread_data, true, R"(
Only in ClickHouse Cloud. Discard connection if some data is unread.
)", 0) \
DECLARE(Bool, filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage, true, R"(
Only in ClickHouse Cloud. Enable background download for metadata files in packed storage in the filesystem cache
)", 0) \
DECLARE(Bool, filesystem_cache_enable_background_download_during_fetch, true, R"(
Only in ClickHouse Cloud. Enable background download in the filesystem cache during fetch
)", 0) \
\
DECLARE(Bool, parallelize_output_from_storages, true, R"(
@ -5142,6 +5154,7 @@ The setting allows a user to provide own deduplication semantic in MergeTree/Rep
For example, by providing a unique value for the setting in each INSERT statement,
user can avoid the same inserted data being deduplicated.
Possible values:
- Any string
@ -5616,7 +5629,7 @@ If true, and JOIN can be executed with parallel replicas algorithm, and all stor
DECLARE(UInt64, parallel_replicas_mark_segment_size, 0, R"(
Parts are virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure of what you're doing. Value should be in range [128; 16384]
)", BETA) \
DECLARE(Bool, parallel_replicas_local_plan, false, R"(
DECLARE(Bool, parallel_replicas_local_plan, true, R"(
Build local plan for local replica
)", BETA) \
\
@ -5698,6 +5711,8 @@ If enabled, MongoDB tables will return an error when a MongoDB query cannot be b
)", 0) \
DECLARE(Bool, implicit_select, false, R"(
Allow writing simple SELECT queries without the leading SELECT keyword, which makes it simple for calculator-style usage, e.g. `1 + 2` becomes a valid query.
In `clickhouse-local` it is enabled by default and can be explicitly disabled.
)", 0) \
\
\
@ -5855,7 +5870,7 @@ Experimental data deduplication for SELECT queries based on part UUIDs
// Please add settings related to formats in Core/FormatFactorySettings.h, move obsolete settings to OBSOLETE_SETTINGS and obsolete format settings to OBSOLETE_FORMAT_SETTINGS.
#define OBSOLETE_SETTINGS(M, ALIAS) \
/** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
/** Obsolete settings which are kept around for compatibility reasons. They have no effect anymore. */ \
MAKE_OBSOLETE(M, Bool, update_insert_deduplication_token_in_dependent_materialized_views, 0) \
MAKE_OBSOLETE(M, UInt64, max_memory_usage_for_all_queries, 0) \
MAKE_OBSOLETE(M, UInt64, multiple_joins_rewriter_version, 0) \

View File

@ -66,7 +66,6 @@ class WriteBuffer;
M(CLASS_NAME, IntervalOutputFormat) \
M(CLASS_NAME, JoinAlgorithm) \
M(CLASS_NAME, JoinStrictness) \
M(CLASS_NAME, JoinInnerTableSelectionMode) \
M(CLASS_NAME, LightweightMutationProjectionMode) \
M(CLASS_NAME, LoadBalancing) \
M(CLASS_NAME, LocalFSReadMethod) \

View File

@ -64,14 +64,20 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
},
{"24.11",
{
{"enable_job_stack_trace", false, true, "Enable by default collecting stack traces from job's scheduling."},
{"allow_suspicious_types_in_group_by", true, false, "Don't allow Variant/Dynamic types in GROUP BY by default"},
{"allow_suspicious_types_in_order_by", true, false, "Don't allow Variant/Dynamic types in ORDER BY by default"},
{"distributed_cache_discard_connection_if_unread_data", true, true, "New setting"},
{"filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage", true, true, "New setting"},
{"filesystem_cache_enable_background_download_during_fetch", true, true, "New setting"},
{"azure_check_objects_after_upload", false, false, "Check each uploaded object in azure blob storage to be sure that upload was successful"},
{"backup_restore_keeper_max_retries", 20, 1000, "Should be big enough so the whole operation BACKUP or RESTORE operation won't fail because of a temporary [Zoo]Keeper failure in the middle of it."},
{"backup_restore_failure_after_host_disconnected_for_seconds", 0, 3600, "New setting."},
{"backup_restore_keeper_max_retries_while_initializing", 0, 20, "New setting."},
{"backup_restore_keeper_max_retries_while_handling_error", 0, 20, "New setting."},
{"backup_restore_finish_timeout_after_error_sec", 0, 180, "New setting."},
{"query_plan_join_inner_table_selection", "auto", "auto", "New setting."},
{"parallel_replicas_local_plan", false, true, "Use local plan for local replica in a query with parallel replicas"},
{"read_in_order_use_virtual_row", false, false, "Use virtual row while reading in order of primary key or its monotonic function fashion. It is useful when searching over multiple parts as only relevant ones are touched."},
}
},
{"24.10",

Some files were not shown because too many files have changed in this diff.