diff --git a/.clang-tidy b/.clang-tidy index 33d7a66d14c..bc854d57f8a 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -62,6 +62,7 @@ Checks: '*, -google-build-using-namespace, -google-readability-braces-around-statements, + -google-readability-casting, -google-readability-function-size, -google-readability-namespace-comments, -google-readability-todo, diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index c890488ea80..cd516ba9674 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -2911,7 +2911,7 @@ jobs: cat >> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{runner.temp}}/performance_comparison REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison (actions) + CHECK_NAME=Performance Comparison REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse RUN_BY_HASH_NUM=0 RUN_BY_HASH_TOTAL=4 @@ -2949,7 +2949,7 @@ jobs: cat >> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{runner.temp}}/performance_comparison REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison (actions) + CHECK_NAME=Performance Comparison REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse RUN_BY_HASH_NUM=1 RUN_BY_HASH_TOTAL=4 @@ -2987,7 +2987,7 @@ jobs: cat >> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{runner.temp}}/performance_comparison REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison (actions) + CHECK_NAME=Performance Comparison REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse RUN_BY_HASH_NUM=2 RUN_BY_HASH_TOTAL=4 @@ -3025,7 +3025,7 @@ jobs: cat >> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{runner.temp}}/performance_comparison REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison (actions) + CHECK_NAME=Performance Comparison REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse RUN_BY_HASH_NUM=3 RUN_BY_HASH_TOTAL=4 diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 76a26d685c5..79d54d77f06 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -3127,7 +3127,7 @@ jobs: cat >> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{runner.temp}}/performance_comparison REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison (actions) + CHECK_NAME=Performance Comparison REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse RUN_BY_HASH_NUM=0 RUN_BY_HASH_TOTAL=4 @@ -3165,7 +3165,7 @@ jobs: cat >> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{runner.temp}}/performance_comparison REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison (actions) + CHECK_NAME=Performance Comparison REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse RUN_BY_HASH_NUM=1 RUN_BY_HASH_TOTAL=4 @@ -3203,7 +3203,7 @@ jobs: cat >> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{runner.temp}}/performance_comparison REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison (actions) + CHECK_NAME=Performance Comparison REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse RUN_BY_HASH_NUM=2 RUN_BY_HASH_TOTAL=4 @@ -3241,7 +3241,159 @@ jobs: cat >> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{runner.temp}}/performance_comparison REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison (actions) + CHECK_NAME=Performance Comparison + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=3 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm 
-fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparisonAarch0: + needs: [BuilderDebAarch64] + runs-on: [self-hosted, func-tester-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison Aarch64 + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparisonAarch1: + needs: [BuilderDebAarch64] + runs-on: [self-hosted, func-tester-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison Aarch64 + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparisonAarch2: + needs: [BuilderDebAarch64] + runs-on: [self-hosted, func-tester-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison Aarch64 + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=2 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p 
"$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparisonAarch3: + needs: [BuilderDebAarch64] + runs-on: [self-hosted, func-tester-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison Aarch64 REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse RUN_BY_HASH_NUM=3 RUN_BY_HASH_TOTAL=4 @@ -3333,6 +3485,10 @@ jobs: - PerformanceComparison1 - PerformanceComparison2 - PerformanceComparison3 + - PerformanceComparisonAarch0 + - PerformanceComparisonAarch1 + - PerformanceComparisonAarch2 + - PerformanceComparisonAarch3 - UnitTestsAsan - UnitTestsTsan - UnitTestsMsan diff --git a/base/base/ReplxxLineReader.cpp b/base/base/ReplxxLineReader.cpp index 0569567d4f8..bd26ec69c89 100644 --- a/base/base/ReplxxLineReader.cpp +++ b/base/base/ReplxxLineReader.cpp @@ -45,14 +45,16 @@ std::string replxx_now_ms_str() time_t t = ms.count() / 1000; tm broken; if (!localtime_r(&t, &broken)) - { - return std::string(); - } + return {}; static int const BUFF_SIZE(32); char str[BUFF_SIZE]; - strftime(str, BUFF_SIZE, "%Y-%m-%d %H:%M:%S.", &broken); - snprintf(str + sizeof("YYYY-mm-dd HH:MM:SS"), 5, "%03d", static_cast(ms.count() % 1000)); + if (strftime(str, BUFF_SIZE, "%Y-%m-%d %H:%M:%S.", &broken) <= 0) + return {}; + + if (snprintf(str + sizeof("YYYY-mm-dd HH:MM:SS"), 5, "%03d", static_cast(ms.count() % 1000)) <= 0) + return {}; + return str; } diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h index ed2c2972cfe..e469e1683c8 100644 --- a/base/base/wide_integer_impl.h +++ b/base/base/wide_integer_impl.h @@ -576,8 +576,8 @@ private: else if constexpr (Bits == 128 && sizeof(base_type) == 8) { using CompilerUInt128 = unsigned __int128; - CompilerUInt128 a = (CompilerUInt128(lhs.items[1]) << 64) + lhs.items[0]; - CompilerUInt128 b = (CompilerUInt128(rhs.items[1]) << 64) + rhs.items[0]; + CompilerUInt128 a = (CompilerUInt128(lhs.items[1]) << 64) + lhs.items[0]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult) + CompilerUInt128 b = (CompilerUInt128(rhs.items[1]) << 64) + rhs.items[0]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult) CompilerUInt128 c = a * b; integer res; res.items[0] = c; @@ -841,8 +841,8 @@ public: { using CompilerUInt128 = unsigned __int128; - CompilerUInt128 a = (CompilerUInt128(numerator.items[1]) << 64) + numerator.items[0]; - CompilerUInt128 b = (CompilerUInt128(denominator.items[1]) << 64) + denominator.items[0]; + CompilerUInt128 a = (CompilerUInt128(numerator.items[1]) << 64) + numerator.items[0]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult) + CompilerUInt128 b = (CompilerUInt128(denominator.items[1]) << 64) + denominator.items[0]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult) CompilerUInt128 c = a / b; // NOLINT integer res; @@ -1204,7 +1204,7 @@ constexpr integer::operator T() const noexcept UnsignedT res{}; for (unsigned i = 0; i < _impl::item_count && i < (sizeof(T) + sizeof(base_type) - 1) / sizeof(base_type); ++i) - res += UnsignedT(items[i]) << (sizeof(base_type) * 8 * i); + res += UnsignedT(items[i]) << (sizeof(base_type) * 8 * i); // 
NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult) return res; } diff --git a/contrib/libxml2 b/contrib/libxml2 index a075d256fd9..7846b0a677f 160000 --- a/contrib/libxml2 +++ b/contrib/libxml2 @@ -1 +1 @@ -Subproject commit a075d256fd9ff15590b86d981b75a50ead124fca +Subproject commit 7846b0a677f8d3ce72486125fa281e92ac9970e8 diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index d4e824838c2..995cecfebc7 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -6,7 +6,7 @@ FROM ubuntu:20.04 ARG apt_archive="http://archive.ubuntu.com" RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=14 RUN apt-get update \ && apt-get install \ diff --git a/docker/packager/packager b/docker/packager/packager index 578b5a38bfb..36f794cad4d 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -319,25 +319,16 @@ if __name__ == "__main__": ) parser.add_argument("--output-dir", type=dir_name, required=True) parser.add_argument("--build-type", choices=("debug", ""), default="") + parser.add_argument( "--compiler", choices=( - "clang-11", - "clang-11-darwin", - "clang-11-darwin-aarch64", - "clang-11-aarch64", - "clang-12", - "clang-12-darwin", - "clang-12-darwin-aarch64", - "clang-12-aarch64", - "clang-13", - "clang-13-darwin", - "clang-13-darwin-aarch64", - "clang-13-aarch64", - "clang-13-ppc64le", - "clang-11-freebsd", - "clang-12-freebsd", - "clang-13-freebsd", + "clang-14", + "clang-14-darwin", + "clang-14-darwin-aarch64", + "clang-14-aarch64", + "clang-14-ppc64le", + "clang-14-freebsd", "gcc-11", ), default="clang-13", @@ -348,6 +339,7 @@ if __name__ == "__main__": choices=("address", "thread", "memory", "undefined", ""), default="", ) + parser.add_argument("--split-binary", action="store_true") parser.add_argument("--clang-tidy", action="store_true") parser.add_argument("--cache", choices=("ccache", "distcc", ""), default="") diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index 6beab2e5bb7..ca44354620f 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -7,7 +7,7 @@ FROM clickhouse/test-util:$FROM_TAG ARG apt_archive="http://archive.ubuntu.com" RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=14 RUN apt-get update \ && apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \ diff --git a/docker/test/codebrowser/Dockerfile b/docker/test/codebrowser/Dockerfile index 97f3f54ad98..c7aed618f6a 100644 --- a/docker/test/codebrowser/Dockerfile +++ b/docker/test/codebrowser/Dockerfile @@ -8,14 +8,18 @@ FROM clickhouse/binary-builder:$FROM_TAG ARG apt_archive="http://archive.ubuntu.com" RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list -RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-13 libllvm13 libclang-13-dev libmlir-13-dev +RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-14 libllvm14 libclang-14-dev libmlir-14-dev # repo versions doesn't work correctly with C++17 # also we push reports to s3, so we add index.html to subfolder urls # https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b -RUN git clone https://github.com/ClickHouse-Extras/woboq_codebrowser - -RUN cd 
woboq_codebrowser && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-13 -DCMAKE_C_COMPILER=clang-13 && make -j +# TODO: remove branch in a few weeks after merge, e.g. in May or June 2022 +RUN git clone https://github.com/ClickHouse-Extras/woboq_codebrowser --branch llvm-14 \ + && cd woboq_codebrowser \ + && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-14 -DCMAKE_C_COMPILER=clang-14 \ + && make -j \ + && cd .. \ + && rm -rf woboq_codebrowser ENV CODEGEN=/woboq_codebrowser/generator/codebrowser_generator ENV CODEINDEX=/woboq_codebrowser/indexgenerator/codebrowser_indexgenerator @@ -28,7 +32,7 @@ ENV SHA=nosha ENV DATA="https://s3.amazonaws.com/clickhouse-test-reports/codebrowser/data" CMD mkdir -p $BUILD_DIRECTORY && cd $BUILD_DIRECTORY && \ - cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-13 -DCMAKE_C_COMPILER=/usr/bin/clang-13 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \ + cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-14 -DCMAKE_C_COMPILER=/usr/bin/clang-14 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \ mkdir -p $HTML_RESULT_DIRECTORY && \ $CODEGEN -b $BUILD_DIRECTORY -a -o $HTML_RESULT_DIRECTORY -p ClickHouse:$SOURCE_DIRECTORY:$SHA -d $DATA | ts '%Y-%m-%d %H:%M:%S' && \ cp -r $STATIC_DATA $HTML_RESULT_DIRECTORY/ &&\ diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 46b74d89e13..03a79b45a10 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -7,7 +7,7 @@ FROM clickhouse/test-util:$FROM_TAG ARG apt_archive="http://archive.ubuntu.com" RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=14 RUN apt-get update \ && apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \ diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 69e13973783..4bc10aee95f 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -12,7 +12,7 @@ stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" echo "$script_dir" repo_dir=ch -BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-13_debug_none_bundled_unsplitted_disable_False_binary"} +BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-14_debug_none_bundled_unsplitted_disable_False_binary"} BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"} function clone diff --git a/docker/test/keeper-jepsen/run.sh b/docker/test/keeper-jepsen/run.sh index 4dec82234bc..460c0db54c3 100644 --- a/docker/test/keeper-jepsen/run.sh +++ b/docker/test/keeper-jepsen/run.sh @@ -2,7 +2,7 @@ set -euo pipefail -CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-13_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"} +CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-14_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"} CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""} diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 16503eed778..046a394a686 
100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -207,6 +207,13 @@ function run_tests test_files=($(ls "$test_prefix"/*.xml)) fi + # We can filter out certain tests + if [ -v CHPC_TEST_GREP_EXCLUDE ]; then + # filter tests array in bash https://stackoverflow.com/a/40375567 + filtered_test_files=( $( for i in ${test_files[@]} ; do echo $i ; done | grep -v ${CHPC_TEST_GREP_EXCLUDE} ) ) + test_files=("${filtered_test_files[@]}") + fi + # We split perf tests into multiple checks to make them faster if [ -v CHPC_TEST_RUN_BY_HASH_TOTAL ]; then # filter tests array in bash https://stackoverflow.com/a/40375567 diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 97b9225a2d2..b2064317db1 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -338,6 +338,7 @@ then -e "Code: 1000, e.code() = 111, Connection refused" \ -e "UNFINISHED" \ -e "Renaming unexpected part" \ + -e "PART_IS_TEMPORARILY_LOCKED" \ /var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "" > /test_output/bc_check_error_messages.txt \ && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv diff --git a/docs/_includes/cmake_in_clickhouse_header.md b/docs/_includes/cmake_in_clickhouse_header.md index c35668c2c40..2f2e0421946 100644 --- a/docs/_includes/cmake_in_clickhouse_header.md +++ b/docs/_includes/cmake_in_clickhouse_header.md @@ -6,8 +6,8 @@ Minimal ClickHouse build example: ```bash cmake .. \ - -DCMAKE_C_COMPILER=$(which clang-13) \ - -DCMAKE_CXX_COMPILER=$(which clang++-13) \ + -DCMAKE_C_COMPILER=$(which clang-14) \ + -DCMAKE_CXX_COMPILER=$(which clang++-14) \ -DCMAKE_BUILD_TYPE=Debug \ -DENABLE_UTILS=OFF \ -DENABLE_TESTS=OFF diff --git a/docs/en/development/build-cross-arm.md b/docs/en/development/build-cross-arm.md index 305c09ae217..ed968ee8e77 100644 --- a/docs/en/development/build-cross-arm.md +++ b/docs/en/development/build-cross-arm.md @@ -10,7 +10,7 @@ This is intended for continuous integration checks that run on Linux servers. The cross-build for AARCH64 is based on the [Build instructions](../development/build.md), follow them first. -## Install Clang-13 +## Install Clang-14 or newer Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do ``` @@ -31,7 +31,7 @@ tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cma ``` bash cd ClickHouse mkdir build-arm64 -CC=clang-13 CXX=clang++-13 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake +CC=clang-14 CXX=clang++-14 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake ninja -C build-arm64 ``` diff --git a/docs/en/development/build-cross-osx.md b/docs/en/development/build-cross-osx.md index 1dbd0ec6430..d09552e06e9 100644 --- a/docs/en/development/build-cross-osx.md +++ b/docs/en/development/build-cross-osx.md @@ -10,14 +10,14 @@ This is intended for continuous integration checks that run on Linux servers. If The cross-build for Mac OS X is based on the [Build instructions](../development/build.md), follow them first. -## Install Clang-13 +## Install Clang-14 Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup. 
For example the commands for Bionic are like: ``` bash -sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-13 main" >> /etc/apt/sources.list -sudo apt-get install clang-13 +sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-14 main" >> /etc/apt/sources.list +sudo apt-get install clang-14 ``` ## Install Cross-Compilation Toolset {#install-cross-compilation-toolset} diff --git a/docs/en/development/build-cross-riscv.md b/docs/en/development/build-cross-riscv.md index 94c0f47a05d..a17063e7d8d 100644 --- a/docs/en/development/build-cross-riscv.md +++ b/docs/en/development/build-cross-riscv.md @@ -23,7 +23,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" ``` bash cd ClickHouse mkdir build-riscv64 -CC=clang-13 CXX=clang++-13 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_ORC=OFF -DUSE_UNWIND=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF +CC=clang-14 CXX=clang++-14 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_ORC=OFF -DUSE_UNWIND=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF ninja -C build-riscv64 ``` diff --git a/docs/en/development/build.md b/docs/en/development/build.md index 7f2d61d2fd0..ef744b57608 100644 --- a/docs/en/development/build.md +++ b/docs/en/development/build.md @@ -77,7 +77,7 @@ The build requires the following components: - Git (is used only to checkout the sources, it’s not needed for the build) - CMake 3.14 or newer - Ninja -- C++ compiler: clang-13 or newer +- C++ compiler: clang-14 or newer - Linker: lld If all the components are installed, you may build in the same way as the steps above. diff --git a/docs/en/development/developer-instruction.md b/docs/en/development/developer-instruction.md index 291e57fef66..fc031b1db49 100644 --- a/docs/en/development/developer-instruction.md +++ b/docs/en/development/developer-instruction.md @@ -155,7 +155,7 @@ While inside the `build` directory, configure your build by running CMake. Befor export CC=clang CXX=clang++ cmake .. -If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-13 CXX=clang++-13`. The clang version will be in the script output. +If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-14 CXX=clang++-14`. The clang version will be in the script output. The `CC` variable specifies the compiler for C (short for C Compiler), and `CXX` variable instructs which C++ compiler is to be used for building. 
diff --git a/docs/en/development/index.md b/docs/en/development/index.md index 55e03091a3e..7849c736229 100644 --- a/docs/en/development/index.md +++ b/docs/en/development/index.md @@ -3,6 +3,6 @@ sidebar_label: Development sidebar_position: 58 --- -# ClickHouse Development {#clickhouse-development} +# ClickHouse Development [Original article](https://clickhouse.com/docs/en/development/) diff --git a/docs/en/engines/database-engines/index.md b/docs/en/engines/database-engines/index.md index 0cee580abcd..8e36aca695c 100644 --- a/docs/en/engines/database-engines/index.md +++ b/docs/en/engines/database-engines/index.md @@ -4,7 +4,7 @@ toc_priority: 27 toc_title: Introduction --- -# Database Engines {#database-engines} +# Database Engines Database engines allow you to work with tables. By default, ClickHouse uses the [Atomic](../../engines/database-engines/atomic.md) database engine, which provides configurable [table engines](../../engines/table-engines/index.md) and an [SQL dialect](../../sql-reference/syntax.md). diff --git a/docs/en/engines/database-engines/lazy.md b/docs/en/engines/database-engines/lazy.md index b95ade19df4..170e101d387 100644 --- a/docs/en/engines/database-engines/lazy.md +++ b/docs/en/engines/database-engines/lazy.md @@ -3,7 +3,7 @@ sidebar_label: Lazy sidebar_position: 20 --- -# Lazy {#lazy} +# Lazy Keeps tables in RAM only `expiration_time_in_seconds` seconds after last access. Can be used only with \*Log tables. diff --git a/docs/en/engines/database-engines/materialized-postgresql.md b/docs/en/engines/database-engines/materialized-postgresql.md index 18b7512d11b..66f918f01d6 100644 --- a/docs/en/engines/database-engines/materialized-postgresql.md +++ b/docs/en/engines/database-engines/materialized-postgresql.md @@ -3,7 +3,7 @@ sidebar_label: MaterializedPostgreSQL sidebar_position: 60 --- -# [experimental] MaterializedPostgreSQL {#materialize-postgresql} +# [experimental] MaterializedPostgreSQL Creates a ClickHouse database with tables from PostgreSQL database. Firstly, database with engine `MaterializedPostgreSQL` creates a snapshot of PostgreSQL database and loads required tables. Required tables can include any subset of tables from any subset of schemas from specified database. Along with the snapshot database engine acquires LSN and once initial dump of tables is performed - it starts pulling updates from WAL. After database is created, newly added tables to PostgreSQL database are not automatically added to replication. They have to be added manually with `ATTACH TABLE db.table` query. diff --git a/docs/en/engines/database-engines/postgresql.md b/docs/en/engines/database-engines/postgresql.md index bc5e93d0923..843c10c7a48 100644 --- a/docs/en/engines/database-engines/postgresql.md +++ b/docs/en/engines/database-engines/postgresql.md @@ -3,7 +3,7 @@ sidebar_position: 40 sidebar_label: PostgreSQL --- -# PostgreSQL {#postgresql} +# PostgreSQL Allows to connect to databases on a remote [PostgreSQL](https://www.postgresql.org) server. Supports read and write operations (`SELECT` and `INSERT` queries) to exchange data between ClickHouse and PostgreSQL. 
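For reference, the PostgreSQL database engine described above is typically attached as follows; a minimal sketch, assuming a reachable PostgreSQL server (host, database name, and credentials are placeholders):

``` sql
-- Attach a remote PostgreSQL database; its tables become queryable
-- from ClickHouse. All connection details here are hypothetical.
CREATE DATABASE postgres_db
ENGINE = PostgreSQL('postgres-host:5432', 'mydb', 'user', 'password');

-- Reads and writes are proxied to the remote server.
SELECT * FROM postgres_db.some_table LIMIT 10;
```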
diff --git a/docs/en/engines/database-engines/replicated.md b/docs/en/engines/database-engines/replicated.md index 63d955dc889..8ddff32bb2c 100644 --- a/docs/en/engines/database-engines/replicated.md +++ b/docs/en/engines/database-engines/replicated.md @@ -3,7 +3,7 @@ sidebar_position: 30 sidebar_label: Replicated --- -# [experimental] Replicated {#replicated} +# [experimental] Replicated The engine is based on the [Atomic](../../engines/database-engines/atomic.md) engine. It supports replication of metadata via DDL log being written to ZooKeeper and executed on all of the replicas for a given database. @@ -20,7 +20,7 @@ One ClickHouse server can have multiple replicated databases running and updatin - `shard_name` — Shard name. Database replicas are grouped into shards by `shard_name`. - `replica_name` — Replica name. Replica names must be different for all replicas of the same shard. -:::warning +:::warning For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables if no arguments provided, then default arguments are used: `/clickhouse/tables/{uuid}/{shard}` and `{replica}`. These can be changed in the server settings [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). Macro `{uuid}` is unfolded to table's uuid, `{shard}` and `{replica}` are unfolded to values from server config, not from database engine arguments. But in the future, it will be possible to use `shard_name` and `replica_name` of Replicated database. ::: diff --git a/docs/en/engines/database-engines/sqlite.md b/docs/en/engines/database-engines/sqlite.md index 2f8b44c9a09..555f3e0b12b 100644 --- a/docs/en/engines/database-engines/sqlite.md +++ b/docs/en/engines/database-engines/sqlite.md @@ -3,21 +3,21 @@ sidebar_position: 55 sidebar_label: SQLite --- -# SQLite {#sqlite} +# SQLite -Allows to connect to [SQLite](https://www.sqlite.org/index.html) database and perform `INSERT` and `SELECT` queries to exchange data between ClickHouse and SQLite. +Allows to connect to [SQLite](https://www.sqlite.org/index.html) database and perform `INSERT` and `SELECT` queries to exchange data between ClickHouse and SQLite. ## Creating a Database {#creating-a-database} ``` sql - CREATE DATABASE sqlite_database + CREATE DATABASE sqlite_database ENGINE = SQLite('db_path') ``` **Engine Parameters** - `db_path` — Path to a file with SQLite database. 
- + ## Data Types Support {#data_types-support} | SQLite | ClickHouse | @@ -44,7 +44,7 @@ SHOW TABLES FROM sqlite_db; ``` text ┌──name───┐ │ table1 │ -│ table2 │ +│ table2 │ └─────────┘ ``` diff --git a/docs/en/engines/table-engines/index.md b/docs/en/engines/table-engines/index.md index 09e0147bbf7..556d21a82d9 100644 --- a/docs/en/engines/table-engines/index.md +++ b/docs/en/engines/table-engines/index.md @@ -4,7 +4,7 @@ toc_priority: 26 toc_title: Introduction --- -# Table Engines {#table_engines} +# Table Engines The table engine (type of table) determines: diff --git a/docs/en/engines/table-engines/integrations/ExternalDistributed.md b/docs/en/engines/table-engines/integrations/ExternalDistributed.md index c9aae1934db..a318e8c3a35 100644 --- a/docs/en/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/en/engines/table-engines/integrations/ExternalDistributed.md @@ -3,7 +3,7 @@ sidebar_position: 12 sidebar_label: ExternalDistributed --- -# ExternalDistributed {#externaldistributed} +# ExternalDistributed The `ExternalDistributed` engine allows to perform `SELECT` queries on data that is stored on a remote servers MySQL or PostgreSQL. Accepts [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) engines as an argument so sharding is possible. diff --git a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md index 701d190f022..2c7484f34bd 100644 --- a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md @@ -3,7 +3,7 @@ sidebar_position: 9 sidebar_label: EmbeddedRocksDB --- -# EmbeddedRocksDB Engine {#EmbeddedRocksDB-engine} +# EmbeddedRocksDB Engine This engine allows integrating ClickHouse with [rocksdb](http://rocksdb.org/). diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md index 503bd779abf..ae32f5dd80f 100644 --- a/docs/en/engines/table-engines/integrations/hdfs.md +++ b/docs/en/engines/table-engines/integrations/hdfs.md @@ -3,7 +3,7 @@ sidebar_position: 6 sidebar_label: HDFS --- -# HDFS {#table_engines-hdfs} +# HDFS This engine provides integration with the [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing to manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) via ClickHouse. This engine is similar to the [File](../../../engines/table-engines/special/file.md#table_engines-file) and [URL](../../../engines/table-engines/special/url.md#table_engines-url) engines, but provides Hadoop-specific features. @@ -51,7 +51,7 @@ SELECT * FROM hdfs_engine_table LIMIT 2 ## Implementation Details {#implementation-details} - Reads and writes can be parallel. -- [Zero-copy](../../../operations/storing-data.md#zero-copy) replication is supported. +- [Zero-copy](../../../operations/storing-data.md#zero-copy) replication is supported. - Not supported: - `ALTER` and `SELECT...SAMPLE` operations. - Indexes. @@ -98,7 +98,7 @@ Table consists of all the files in both directories (all files should satisfy fo CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV') ``` -:::warning +:::warning If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. 
::: diff --git a/docs/en/engines/table-engines/integrations/hive.md b/docs/en/engines/table-engines/integrations/hive.md index 6731f0e7559..52250b17d7a 100644 --- a/docs/en/engines/table-engines/integrations/hive.md +++ b/docs/en/engines/table-engines/integrations/hive.md @@ -3,11 +3,11 @@ sidebar_position: 4 sidebar_label: Hive --- -# Hive {#hive} +# Hive The Hive engine allows you to perform `SELECT` quries on HDFS Hive table. Currently it supports input formats as below: -- Text: only supports simple scalar column types except `binary` +- Text: only supports simple scalar column types except `binary` - ORC: support simple scalar columns types except `char`; only support complex types like `array` @@ -66,33 +66,33 @@ When ClickHouse is started up with local cache for remote filesystem enabled, us #### Create Table in Hive ``` text hive > CREATE TABLE `test`.`test_orc`( - `f_tinyint` tinyint, - `f_smallint` smallint, - `f_int` int, - `f_integer` int, - `f_bigint` bigint, - `f_float` float, - `f_double` double, - `f_decimal` decimal(10,0), - `f_timestamp` timestamp, - `f_date` date, - `f_string` string, - `f_varchar` varchar(100), - `f_bool` boolean, - `f_binary` binary, - `f_array_int` array, - `f_array_string` array, - `f_array_float` array, - `f_array_array_int` array>, - `f_array_array_string` array>, + `f_tinyint` tinyint, + `f_smallint` smallint, + `f_int` int, + `f_integer` int, + `f_bigint` bigint, + `f_float` float, + `f_double` double, + `f_decimal` decimal(10,0), + `f_timestamp` timestamp, + `f_date` date, + `f_string` string, + `f_varchar` varchar(100), + `f_bool` boolean, + `f_binary` binary, + `f_array_int` array, + `f_array_string` array, + `f_array_float` array, + `f_array_array_int` array>, + `f_array_array_string` array>, `f_array_array_float` array>) -PARTITIONED BY ( +PARTITIONED BY ( `day` string) -ROW FORMAT SERDE - 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' -STORED AS INPUTFORMAT - 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' -OUTPUTFORMAT +ROW FORMAT SERDE + 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' +STORED AS INPUTFORMAT + 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' +OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat' LOCATION 'hdfs://testcluster/data/hive/test.db/test_orc' @@ -178,7 +178,7 @@ f_array_array_float: [[1.11,2.22],[3.33,4.44]] day: 2021-09-18 -1 rows in set. Elapsed: 0.078 sec. +1 rows in set. Elapsed: 0.078 sec. 
``` ### Query Hive Table with Parquet Input Format @@ -187,34 +187,34 @@ day: 2021-09-18 ``` text hive > CREATE TABLE `test`.`test_parquet`( - `f_tinyint` tinyint, - `f_smallint` smallint, - `f_int` int, - `f_integer` int, - `f_bigint` bigint, - `f_float` float, - `f_double` double, - `f_decimal` decimal(10,0), - `f_timestamp` timestamp, - `f_date` date, - `f_string` string, - `f_varchar` varchar(100), - `f_char` char(100), - `f_bool` boolean, - `f_binary` binary, - `f_array_int` array, - `f_array_string` array, - `f_array_float` array, - `f_array_array_int` array>, - `f_array_array_string` array>, + `f_tinyint` tinyint, + `f_smallint` smallint, + `f_int` int, + `f_integer` int, + `f_bigint` bigint, + `f_float` float, + `f_double` double, + `f_decimal` decimal(10,0), + `f_timestamp` timestamp, + `f_date` date, + `f_string` string, + `f_varchar` varchar(100), + `f_char` char(100), + `f_bool` boolean, + `f_binary` binary, + `f_array_int` array, + `f_array_string` array, + `f_array_float` array, + `f_array_array_int` array>, + `f_array_array_string` array>, `f_array_array_float` array>) -PARTITIONED BY ( +PARTITIONED BY ( `day` string) -ROW FORMAT SERDE - 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' -STORED AS INPUTFORMAT - 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' -OUTPUTFORMAT +ROW FORMAT SERDE + 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' +STORED AS INPUTFORMAT + 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' +OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat' LOCATION 'hdfs://testcluster/data/hive/test.db/test_parquet' @@ -299,7 +299,7 @@ f_array_array_string: [['a','b'],['c','d']] f_array_array_float: [[1.11,2.22],[3.33,4.44]] day: 2021-09-18 -1 rows in set. Elapsed: 0.357 sec. +1 rows in set. Elapsed: 0.357 sec. 
``` ### Query Hive Table with Text Input Format @@ -307,34 +307,34 @@ day: 2021-09-18 ``` text hive > CREATE TABLE `test`.`test_text`( - `f_tinyint` tinyint, - `f_smallint` smallint, - `f_int` int, - `f_integer` int, - `f_bigint` bigint, - `f_float` float, - `f_double` double, - `f_decimal` decimal(10,0), - `f_timestamp` timestamp, - `f_date` date, - `f_string` string, - `f_varchar` varchar(100), - `f_char` char(100), - `f_bool` boolean, - `f_binary` binary, - `f_array_int` array, - `f_array_string` array, - `f_array_float` array, - `f_array_array_int` array>, - `f_array_array_string` array>, + `f_tinyint` tinyint, + `f_smallint` smallint, + `f_int` int, + `f_integer` int, + `f_bigint` bigint, + `f_float` float, + `f_double` double, + `f_decimal` decimal(10,0), + `f_timestamp` timestamp, + `f_date` date, + `f_string` string, + `f_varchar` varchar(100), + `f_char` char(100), + `f_bool` boolean, + `f_binary` binary, + `f_array_int` array, + `f_array_string` array, + `f_array_float` array, + `f_array_array_int` array>, + `f_array_array_string` array>, `f_array_array_float` array>) -PARTITIONED BY ( +PARTITIONED BY ( `day` string) -ROW FORMAT SERDE - 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' -STORED AS INPUTFORMAT - 'org.apache.hadoop.mapred.TextInputFormat' -OUTPUTFORMAT +ROW FORMAT SERDE + 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' +STORED AS INPUTFORMAT + 'org.apache.hadoop.mapred.TextInputFormat' +OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' LOCATION 'hdfs://testcluster/data/hive/test.db/test_text' @@ -374,7 +374,7 @@ CREATE TABLE test.test_text `day` String ) ENGINE = Hive('thrift://localhost:9083', 'test', 'test_text') -PARTITION BY day +PARTITION BY day ``` ``` sql diff --git a/docs/en/engines/table-engines/integrations/index.md b/docs/en/engines/table-engines/integrations/index.md index 9230ad624ba..8c8728c7f17 100644 --- a/docs/en/engines/table-engines/integrations/index.md +++ b/docs/en/engines/table-engines/integrations/index.md @@ -3,7 +3,7 @@ sidebar_position: 40 sidebar_label: Integrations --- -# Table Engines for Integrations {#table-engines-for-integrations} +# Table Engines for Integrations ClickHouse provides various means for integrating with external systems, including table engines. Like with all other table engines, the configuration is done using `CREATE TABLE` or `ALTER TABLE` queries. Then from a user perspective, the configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods, like external dictionaries or table functions, which require to use custom query methods on each use. diff --git a/docs/en/engines/table-engines/integrations/jdbc.md b/docs/en/engines/table-engines/integrations/jdbc.md index 0ce31f36070..f9907d53672 100644 --- a/docs/en/engines/table-engines/integrations/jdbc.md +++ b/docs/en/engines/table-engines/integrations/jdbc.md @@ -3,7 +3,7 @@ sidebar_position: 3 sidebar_label: JDBC --- -# JDBC {#table-engine-jdbc} +# JDBC Allows ClickHouse to connect to external databases via [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity). 
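A minimal sketch of the JDBC engine just introduced, assuming the external clickhouse-jdbc-bridge daemon is running and reachable (the datasource URI and the external database/table names are placeholders):

``` sql
-- The table structure is fetched from the remote side via the JDBC
-- bridge; queries against jdbc_table are proxied to external_db.external_table.
CREATE TABLE jdbc_table
ENGINE = JDBC('jdbc:mysql://mysql-host:3306/?user=root&password=root', 'external_db', 'external_table');

SELECT * FROM jdbc_table LIMIT 10;
```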
diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index a9d13194a59..2de931c9e51 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -3,7 +3,7 @@ sidebar_position: 8 sidebar_label: Kafka --- -# Kafka {#kafka} +# Kafka This engine works with [Apache Kafka](http://kafka.apache.org/). @@ -87,7 +87,7 @@ Examples: Deprecated Method for Creating a Table -:::warning +:::warning Do not use this method in new projects. If possible, switch old projects to the method described above. ::: diff --git a/docs/en/engines/table-engines/integrations/materialized-postgresql.md b/docs/en/engines/table-engines/integrations/materialized-postgresql.md index 61f97961ddb..d3b70419290 100644 --- a/docs/en/engines/table-engines/integrations/materialized-postgresql.md +++ b/docs/en/engines/table-engines/integrations/materialized-postgresql.md @@ -3,7 +3,7 @@ sidebar_position: 12 sidebar_label: MaterializedPostgreSQL --- -# MaterializedPostgreSQL {#materialize-postgresql} +# MaterializedPostgreSQL Creates ClickHouse table with an initial data dump of PostgreSQL table and starts replication process, i.e. executes background job to apply new changes as they happen on PostgreSQL table in the remote PostgreSQL database. @@ -52,7 +52,7 @@ PRIMARY KEY key; SELECT key, value, _version FROM postgresql_db.postgresql_replica; ``` -:::warning +:::warning Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.html) values is not supported. The default value for the data type will be used. ::: diff --git a/docs/en/engines/table-engines/integrations/mongodb.md b/docs/en/engines/table-engines/integrations/mongodb.md index d212ab4720f..664e4722bbb 100644 --- a/docs/en/engines/table-engines/integrations/mongodb.md +++ b/docs/en/engines/table-engines/integrations/mongodb.md @@ -3,7 +3,7 @@ sidebar_position: 5 sidebar_label: MongoDB --- -# MongoDB {#mongodb} +# MongoDB MongoDB engine is read-only table engine which allows to read data (`SELECT` queries) from remote MongoDB collection. Engine supports only non-nested data types. `INSERT` queries are not supported. diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index e962db58873..54d71daa88a 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -3,7 +3,7 @@ sidebar_position: 4 sidebar_label: MySQL --- -# MySQL {#mysql} +# MySQL The MySQL engine allows you to perform `SELECT` and `INSERT` queries on data that is stored on a remote MySQL server. diff --git a/docs/en/engines/table-engines/integrations/odbc.md b/docs/en/engines/table-engines/integrations/odbc.md index ed2b77d7ca3..e82edc92fe9 100644 --- a/docs/en/engines/table-engines/integrations/odbc.md +++ b/docs/en/engines/table-engines/integrations/odbc.md @@ -3,7 +3,7 @@ sidebar_position: 2 sidebar_label: ODBC --- -# ODBC {#table-engine-odbc} +# ODBC Allows ClickHouse to connect to external databases via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). 
diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index 9a6fead25bd..d029aef240f 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -3,7 +3,7 @@ sidebar_position: 11 sidebar_label: PostgreSQL --- -# PostgreSQL {#postgresql} +# PostgreSQL The PostgreSQL engine allows to perform `SELECT` and `INSERT` queries on data that is stored on a remote PostgreSQL server. diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md index 6653b76594a..b2b672fb1ef 100644 --- a/docs/en/engines/table-engines/integrations/rabbitmq.md +++ b/docs/en/engines/table-engines/integrations/rabbitmq.md @@ -3,7 +3,7 @@ sidebar_position: 10 sidebar_label: RabbitMQ --- -# RabbitMQ Engine {#rabbitmq-engine} +# RabbitMQ Engine This engine allows integrating ClickHouse with [RabbitMQ](https://www.rabbitmq.com). diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index 42abc2a0b1e..09566a08dd6 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -3,7 +3,7 @@ sidebar_position: 7 sidebar_label: S3 --- -# S3 Table Engine {#table-engine-s3} +# S3 Table Engine This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ecosystem. This engine is similar to the [HDFS](../../../engines/table-engines/special/file.md#table_engines-hdfs) engine, but provides S3-specific features. @@ -25,7 +25,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32) **Example** ``` sql -CREATE TABLE s3_engine_table (name String, value UInt32) +CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip') SETTINGS input_format_with_names_use_header = 0; @@ -50,7 +50,7 @@ For more information about virtual columns see [here](../../../engines/table-eng ## Implementation Details {#implementation-details} - Reads and writes can be parallel -- [Zero-copy](../../../operations/storing-data.md#zero-copy) replication is supported. +- [Zero-copy](../../../operations/storing-data.md#zero-copy) replication is supported. - Not supported: - `ALTER` and `SELECT...SAMPLE` operations. - Indexes. @@ -66,7 +66,7 @@ For more information about virtual columns see [here](../../../engines/table-eng Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function. -:::warning +:::warning If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. ::: diff --git a/docs/en/engines/table-engines/integrations/sqlite.md b/docs/en/engines/table-engines/integrations/sqlite.md index 45cc1cfc28a..2676f912350 100644 --- a/docs/en/engines/table-engines/integrations/sqlite.md +++ b/docs/en/engines/table-engines/integrations/sqlite.md @@ -3,16 +3,16 @@ sidebar_position: 7 sidebar_label: SQLite --- -# SQLite {#sqlite} +# SQLite The engine allows to import and export data to SQLite and supports queries to SQLite tables directly from ClickHouse. ## Creating a Table {#creating-a-table} ``` sql - CREATE TABLE [IF NOT EXISTS] [db.]table_name + CREATE TABLE [IF NOT EXISTS] [db.]table_name ( - name1 [type1], + name1 [type1], name2 [type2], ... 
) ENGINE = SQLite('db_path', 'table') ``` @@ -32,10 +32,10 @@ SHOW CREATE TABLE sqlite_db.table2; ``` text CREATE TABLE SQLite.table2 -( - `col1` Nullable(Int32), +( + `col1` Nullable(Int32), `col2` Nullable(String) -) +) ENGINE = SQLite('sqlite.db','table2'); ``` diff --git a/docs/en/engines/table-engines/log-family/index.md b/docs/en/engines/table-engines/log-family/index.md index 89eb08ad7b9..8e772341733 100644 --- a/docs/en/engines/table-engines/log-family/index.md +++ b/docs/en/engines/table-engines/log-family/index.md @@ -3,7 +3,7 @@ sidebar_position: 20 sidebar_label: Log Family --- -# Log Engine Family {#log-engine-family} +# Log Engine Family These engines were developed for scenarios when you need to quickly write many small tables (up to about 1 million rows) and read them later as a whole. diff --git a/docs/en/engines/table-engines/log-family/log.md b/docs/en/engines/table-engines/log-family/log.md index 8858699f045..d8cabfd25cd 100644 --- a/docs/en/engines/table-engines/log-family/log.md +++ b/docs/en/engines/table-engines/log-family/log.md @@ -3,7 +3,7 @@ toc_priority: 33 toc_title: Log --- -# Log {#log} +# Log The engine belongs to the family of `Log` engines. See the common properties of `Log` engines and their differences in the [Log Engine Family](../../../engines/table-engines/log-family/index.md) article. diff --git a/docs/en/engines/table-engines/log-family/stripelog.md b/docs/en/engines/table-engines/log-family/stripelog.md index 62703245062..759cbe532aa 100644 --- a/docs/en/engines/table-engines/log-family/stripelog.md +++ b/docs/en/engines/table-engines/log-family/stripelog.md @@ -3,7 +3,7 @@ toc_priority: 32 toc_title: StripeLog --- -# Stripelog {#stripelog} +# Stripelog This engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](../../../engines/table-engines/log-family/index.md) article. diff --git a/docs/en/engines/table-engines/log-family/tinylog.md b/docs/en/engines/table-engines/log-family/tinylog.md index 2407355a857..b23ec3e1d81 100644 --- a/docs/en/engines/table-engines/log-family/tinylog.md +++ b/docs/en/engines/table-engines/log-family/tinylog.md @@ -3,7 +3,7 @@ toc_priority: 34 toc_title: TinyLog --- -# TinyLog {#tinylog} +# TinyLog The engine belongs to the log engine family. See [Log Engine Family](../../../engines/table-engines/log-family/index.md) for common properties of log engines and their differences. diff --git a/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md index 7be10cec2f5..5c3143c6c18 100644 --- a/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md @@ -3,7 +3,7 @@ sidebar_position: 60 sidebar_label: AggregatingMergeTree --- -# AggregatingMergeTree {#aggregatingmergetree} +# AggregatingMergeTree The engine inherits from [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree), altering the logic for data parts merging. ClickHouse replaces all rows with the same primary key (or more accurately, with the same [sorting key](../../../engines/table-engines/mergetree-family/mergetree.md)) with a single row (within a one data part) that stores a combination of states of aggregate functions. 
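To make the AggregatingMergeTree behaviour above concrete, a minimal sketch with an illustrative schema: intermediate aggregate-function states are stored per sorting key and finalized at read time.

``` sql
-- One row per (day, user_id) survives merges; the visits column holds
-- the intermediate state of sum(), not a plain number.
CREATE TABLE agg_visits
(
    day Date,
    user_id UInt64,
    visits AggregateFunction(sum, UInt64)
)
ENGINE = AggregatingMergeTree()
ORDER BY (day, user_id);

-- States are written with the -State combinator ...
INSERT INTO agg_visits
SELECT today(), 42, sumState(toUInt64(1));

-- ... and finalized with the -Merge combinator.
SELECT day, user_id, sumMerge(visits)
FROM agg_visits
GROUP BY day, user_id;
```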
diff --git a/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md b/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md index 22863611e79..afe323441ab 100644 --- a/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md @@ -3,7 +3,7 @@ sidebar_position: 70 sidebar_label: CollapsingMergeTree --- -# CollapsingMergeTree {#table_engine-collapsingmergetree} +# CollapsingMergeTree The engine inherits from [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) and adds the logic of rows collapsing to data parts merge algorithm. @@ -42,7 +42,7 @@ When creating a `CollapsingMergeTree` table, the same [query clauses](../../../e Deprecated Method for Creating a Table -:::warning +:::warning Do not use this method in new projects and, if possible, switch old projects to the method described above. ::: diff --git a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md index 716528f8d77..1191becbb25 100644 --- a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -3,10 +3,10 @@ sidebar_position: 30 sidebar_label: Custom Partitioning Key --- -# Custom Partitioning Key {#custom-partitioning-key} +# Custom Partitioning Key -:::warning -In most cases you do not need a partition key, and in most other cases you do not need a partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression). +:::warning +In most cases you do not need a partition key, and in most other cases you do not need a partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular of partitioning. Don't partition your data by client identifiers or names. Instead, make a client identifier or name the first column in the ORDER BY expression. ::: @@ -43,7 +43,7 @@ By default, the floating-point partition key is not supported. To use it enable When inserting new data to a table, this data is stored as a separate part (chunk) sorted by the primary key. In 10-15 minutes after inserting, the parts of the same partition are merged into the entire part. -:::info +:::info A merge only works for data parts that have the same value for the partitioning expression. This means **you shouldn’t make overly granular partitions** (more than about a thousand partitions). Otherwise, the `SELECT` query performs poorly because of an unreasonably large number of files in the file system and open file descriptors. ::: diff --git a/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md b/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md index 35f3f99d5a9..c1011e69ba6 100644 --- a/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md @@ -3,7 +3,7 @@ sidebar_position: 90 sidebar_label: GraphiteMergeTree --- -# GraphiteMergeTree {#graphitemergetree} +# GraphiteMergeTree This engine is designed for thinning and aggregating/averaging (rollup) [Graphite](http://graphite.readthedocs.io/en/latest/index.html) data. It may be helpful to developers who want to use ClickHouse as a data store for Graphite. 
@@ -54,7 +54,7 @@ When creating a `GraphiteMergeTree` table, the same [clauses](../../../engines/t Deprecated Method for Creating a Table -:::warning +:::warning Do not use this method in new projects and, if possible, switch old projects to the method described above. ::: @@ -120,7 +120,7 @@ default ... ``` -:::warning +:::warning Patterns must be strictly ordered: 1. Patterns without `function` or `retention`. @@ -132,9 +132,9 @@ When processing a row, ClickHouse checks the rules in the `pattern` sections. Ea Fields for `pattern` and `default` sections: -- `rule_type` - a rule's type. It's applied only to a particular metrics. The engine use it to separate plain and tagged metrics. Optional parameter. Default value: `all`. -It's unnecessary when performance is not critical, or only one metrics type is used, e.g. plain metrics. By default only one type of rules set is created. Otherwise, if any of special types is defined, two different sets are created. One for plain metrics (root.branch.leaf) and one for tagged metrics (root.branch.leaf;tag1=value1). -The default rules are ended up in both sets. +- `rule_type` - a rule's type. It's applied only to a particular metrics. The engine use it to separate plain and tagged metrics. Optional parameter. Default value: `all`. +It's unnecessary when performance is not critical, or only one metrics type is used, e.g. plain metrics. By default only one type of rules set is created. Otherwise, if any of special types is defined, two different sets are created. One for plain metrics (root.branch.leaf) and one for tagged metrics (root.branch.leaf;tag1=value1). +The default rules are ended up in both sets. Valid values: - `all` (default) - a universal rule, used when `rule_type` is omitted. - `plain` - a rule for plain metrics. The field `regexp` is processed as regular expression. @@ -143,7 +143,7 @@ Valid values: - `regexp` – A pattern for the metric name (a regular or DSL). - `age` – The minimum age of the data in seconds. - `precision`– How precisely to define the age of the data in seconds. Should be a divisor for 86400 (seconds in a day). -- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`. Accepted functions: min / max / any / avg. The average is calculated imprecisely, like the average of the averages. +- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`. Accepted functions: min / max / any / avg. The average is calculated imprecisely, like the average of the averages. ### Configuration Example without rules types {#configuration-example} diff --git a/docs/en/engines/table-engines/mergetree-family/index.md b/docs/en/engines/table-engines/mergetree-family/index.md index 37e7bf5b589..45a671da76a 100644 --- a/docs/en/engines/table-engines/mergetree-family/index.md +++ b/docs/en/engines/table-engines/mergetree-family/index.md @@ -3,7 +3,7 @@ sidebar_position: 10 sidebar_label: MergeTree Family --- -# MergeTree Engine Family {#mergetree-engine-family} +# MergeTree Engine Family Table engines from the MergeTree family are the core of ClickHouse data storage capabilities. They provide most features for resilience and high-performance data retrieval: columnar storage, custom partitioning, sparse primary index, secondary data-skipping indexes, etc. 
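As a reference point for the MergeTree-family features listed above, a typical table declaration; a sketch with an illustrative schema:

``` sql
-- Columnar storage with monthly partitions, a sparse primary index
-- defined by ORDER BY, and a secondary data-skipping index on url.
CREATE TABLE hits
(
    event_date Date,
    user_id UInt64,
    url String,
    INDEX url_idx url TYPE bloom_filter GRANULARITY 4
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(event_date)
ORDER BY (event_date, user_id);
```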
diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index d59b07b5dd6..3e2c7618181 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -3,7 +3,7 @@ sidebar_position: 11 sidebar_label: MergeTree --- -# MergeTree {#table_engines-mergetree} +# MergeTree The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most robust ClickHouse table engines. @@ -694,7 +694,7 @@ Tags: - `volume_name_N` — Volume name. Volume names must be unique. - `disk` — a disk within a volume. - `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks. If the a size of a merged part estimated to be bigger than `max_data_part_size_bytes` then this part will be written to a next volume. Basically this feature allows to keep new/small parts on a hot (SSD) volume and move them to a cold (HDD) volume when they reach large size. Do not use this setting if your policy has only one volume. -- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move on the next volume if any (by default, 0.1). ClickHouse sorts existing parts by size from largest to smallest (in descending order) and selects parts with the total size that is sufficient to meet the `move_factor` condition. If the total size of all parts is insufficient, all parts will be moved. +- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move on the next volume if any (by default, 0.1). ClickHouse sorts existing parts by size from largest to smallest (in descending order) and selects parts with the total size that is sufficient to meet the `move_factor` condition. If the total size of all parts is insufficient, all parts will be moved. - `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks. - `perform_ttl_move_on_insert` — Disables TTL move on data part INSERT. By default if we insert a data part that already expired by the TTL move rule it immediately goes to a volume/disk declared in move rule. This can significantly slowdown insert in case if destination volume/disk is slow (e.g. S3). - `load_balancing` - Policy for disk balancing, `round_robin` or `least_used`. diff --git a/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md b/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md index 47651527f99..12a5861c7fb 100644 --- a/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md @@ -3,7 +3,7 @@ sidebar_position: 40 sidebar_label: ReplacingMergeTree --- -# ReplacingMergeTree {#replacingmergetree} +# ReplacingMergeTree The engine differs from [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree) in that it removes duplicate entries with the same [sorting key](../../../engines/table-engines/mergetree-family/mergetree.md) value (`ORDER BY` table section, not `PRIMARY KEY`). 
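A sketch of the idea, with an invented table: rows sharing the sorting key are collapsed to a single row during background merges, so deduplication is eventual rather than immediate.

``` sql
-- Rows with equal `key` collapse during merges; with a version column,
-- the row with the highest `ver` survives.
CREATE TABLE events
(
    key UInt64,
    value String,
    ver UInt32
)
ENGINE = ReplacingMergeTree(ver)
ORDER BY key;

-- Merges run at an unspecified time; OPTIMIZE ... FINAL forces one,
-- but it is expensive and should not be relied on routinely.
OPTIMIZE TABLE events FINAL;
```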
diff --git a/docs/en/engines/table-engines/mergetree-family/replication.md b/docs/en/engines/table-engines/mergetree-family/replication.md index 67c503854a9..cbe586d75a3 100644 --- a/docs/en/engines/table-engines/mergetree-family/replication.md +++ b/docs/en/engines/table-engines/mergetree-family/replication.md @@ -3,7 +3,7 @@ sidebar_position: 20 sidebar_label: Data Replication --- -# Data Replication {#table_engines-replication} +# Data Replication Replication is only supported for tables in the MergeTree family: @@ -112,7 +112,7 @@ Data blocks are deduplicated. For multiple writes of the same data block (data b During replication, only the source data to insert is transferred over the network. Further data transformation (merging) is coordinated and performed on all the replicas in the same way. This minimizes network usage, which means that replication works well when replicas reside in different datacenters. (Note that duplicating data in different datacenters is the main goal of replication.) -You can have any number of replicas of the same data. Based on our experiences, a relatively reliable and convenient solution could use double replication in production, with each server using RAID-5 or RAID-6 (and RAID-10 in some cases). +You can have any number of replicas of the same data. Based on our experiences, a relatively reliable and convenient solution could use double replication in production, with each server using RAID-5 or RAID-6 (and RAID-10 in some cases). The system monitors data synchronicity on replicas and is able to recover after a failure. Failover is automatic (for small differences in data) or semi-automatic (when data differs too much, which may indicate a configuration error). diff --git a/docs/en/engines/table-engines/mergetree-family/summingmergetree.md b/docs/en/engines/table-engines/mergetree-family/summingmergetree.md index 5d180782ed3..b532aef1980 100644 --- a/docs/en/engines/table-engines/mergetree-family/summingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/summingmergetree.md @@ -3,7 +3,7 @@ sidebar_position: 50 sidebar_label: SummingMergeTree --- -# SummingMergeTree {#summingmergetree} +# SummingMergeTree The engine inherits from [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree). The difference is that when merging data parts for `SummingMergeTree` tables ClickHouse replaces all the rows with the same primary key (or more accurately, with the same [sorting key](../../../engines/table-engines/mergetree-family/mergetree.md)) with one row which contains summarized values for the columns with the numeric data type. If the sorting key is composed in a way that a single key value corresponds to large number of rows, this significantly reduces storage volume and speeds up data selection. diff --git a/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md index 77cf192dcda..c1fe5dfffdf 100644 --- a/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md @@ -3,7 +3,7 @@ sidebar_position: 80 sidebar_label: VersionedCollapsingMergeTree --- -# VersionedCollapsingMergeTree {#versionedcollapsingmergetree} +# VersionedCollapsingMergeTree This engine: @@ -53,7 +53,7 @@ When creating a `VersionedCollapsingMergeTree` table, the same [clauses](../../. 
Deprecated Method for Creating a Table -:::warning +:::warning Do not use this method in new projects. If possible, switch old projects to the method described above. ::: diff --git a/docs/en/engines/table-engines/special/buffer.md b/docs/en/engines/table-engines/special/buffer.md index b7c91bec75c..5f81bd76ae4 100644 --- a/docs/en/engines/table-engines/special/buffer.md +++ b/docs/en/engines/table-engines/special/buffer.md @@ -3,7 +3,7 @@ sidebar_position: 120 sidebar_label: Buffer --- -# Buffer Table Engine {#buffer} +# Buffer Table Engine Buffers the data to write in RAM, periodically flushing it to another table. During the read operation, data is read from the buffer and the other table simultaneously. @@ -54,7 +54,7 @@ If the set of columns in the Buffer table does not match the set of columns in a If the types do not match for one of the columns in the Buffer table and a subordinate table, an error message is entered in the server log, and the buffer is cleared. The same thing happens if the subordinate table does not exist when the buffer is flushed. -:::warning +:::warning Running ALTER on the Buffer table in releases made before 26 Oct 2021 will cause a `Block structure mismatch` error (see [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117) and [#30565](https://github.com/ClickHouse/ClickHouse/pull/30565)), so deleting the Buffer table and then recreating is the only option. It is advisable to check that this error is fixed in your release before trying to run ALTER on the Buffer table. ::: diff --git a/docs/en/engines/table-engines/special/dictionary.md b/docs/en/engines/table-engines/special/dictionary.md index 67b97e37d44..d73d3c65fb0 100644 --- a/docs/en/engines/table-engines/special/dictionary.md +++ b/docs/en/engines/table-engines/special/dictionary.md @@ -3,7 +3,7 @@ sidebar_position: 20 sidebar_label: Dictionary --- -# Dictionary Table Engine {#dictionary} +# Dictionary Table Engine The `Dictionary` engine displays the [dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) data as a ClickHouse table. diff --git a/docs/en/engines/table-engines/special/distributed.md b/docs/en/engines/table-engines/special/distributed.md index db89175e4d9..fe3348c4d78 100644 --- a/docs/en/engines/table-engines/special/distributed.md +++ b/docs/en/engines/table-engines/special/distributed.md @@ -3,7 +3,7 @@ sidebar_position: 10 sidebar_label: Distributed --- -# Distributed Table Engine {#distributed} +# Distributed Table Engine Tables with Distributed engine do not store any data of their own, but allow distributed query processing on multiple servers. Reading is automatically parallelized. During a read, the table indexes on remote servers are used, if there are any. @@ -209,13 +209,13 @@ When querying a `Distributed` table, `SELECT` queries are sent to all shards and When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas). -To learn more about how distibuted `in` and `global in` queries are processed, refer to [this](../../../sql-reference/operators/in.md#select-distributed-subqueries) documentation. +To learn more about how distributed `in` and `global in` queries are processed, refer to [this](../../../sql-reference/operators/in.md#select-distributed-subqueries) documentation.
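For illustration, a sketch of the typical setup. The cluster name `my_cluster` and the local table `hits` are assumptions: the cluster must be declared in the server configuration, and the local table must exist on every shard.

``` sql
-- The Distributed table stores no data itself; reads and writes fan out
-- to the local `hits` table on every shard of `my_cluster`.
CREATE TABLE hits_distributed AS hits
ENGINE = Distributed(my_cluster, default, hits, rand());
```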
## Virtual Columns {#virtual-columns} - `_shard_num` — Contains the `shard_num` value from the table `system.clusters`. Type: [UInt32](../../../sql-reference/data-types/int-uint.md). -:::note +:::note Since [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.md) table functions internally create temporary Distributed table, `_shard_num` is available there too. ::: diff --git a/docs/en/engines/table-engines/special/external-data.md b/docs/en/engines/table-engines/special/external-data.md index 1f4336c74fe..2aa90be617f 100644 --- a/docs/en/engines/table-engines/special/external-data.md +++ b/docs/en/engines/table-engines/special/external-data.md @@ -3,7 +3,7 @@ sidebar_position: 130 sidebar_label: External Data --- -# External Data for Query Processing {#external-data-for-query-processing} +# External Data for Query Processing ClickHouse allows sending a server the data that is needed for processing a query, together with a `SELECT` query. This data is put in a temporary table (see the section “Temporary tables”) and can be used in the query (for example, in `IN` operators). diff --git a/docs/en/engines/table-engines/special/file.md b/docs/en/engines/table-engines/special/file.md index 6e4449bf1a9..5f27bc73e1d 100644 --- a/docs/en/engines/table-engines/special/file.md +++ b/docs/en/engines/table-engines/special/file.md @@ -3,7 +3,7 @@ sidebar_position: 40 sidebar_label: File --- -# File Table Engine {#table_engines-file} +# File Table Engine The File table engine keeps the data in a file in one of the supported [file formats](../../../interfaces/formats.md#formats) (`TabSeparated`, `Native`, etc.). diff --git a/docs/en/engines/table-engines/special/generate.md b/docs/en/engines/table-engines/special/generate.md index 453f3b5db0b..a217c240b1c 100644 --- a/docs/en/engines/table-engines/special/generate.md +++ b/docs/en/engines/table-engines/special/generate.md @@ -3,7 +3,7 @@ sidebar_position: 140 sidebar_label: GenerateRandom --- -# GenerateRandom Table Engine {#table_engines-generate} +# GenerateRandom Table Engine The GenerateRandom table engine produces random data for given table schema. diff --git a/docs/en/engines/table-engines/special/index.md b/docs/en/engines/table-engines/special/index.md index f87cd86c891..be5ec79caf2 100644 --- a/docs/en/engines/table-engines/special/index.md +++ b/docs/en/engines/table-engines/special/index.md @@ -3,7 +3,7 @@ sidebar_position: 50 sidebar_label: Special --- -# Special Table Engines {#special-table-engines} +# Special Table Engines There are three main categories of table engines: diff --git a/docs/en/engines/table-engines/special/join.md b/docs/en/engines/table-engines/special/join.md index 7d6f6e99b9f..bb9744103f7 100644 --- a/docs/en/engines/table-engines/special/join.md +++ b/docs/en/engines/table-engines/special/join.md @@ -3,7 +3,7 @@ sidebar_position: 70 sidebar_label: Join --- -# Join Table Engine {#join} +# Join Table Engine Optional prepared data structure for usage in [JOIN](../../../sql-reference/statements/select/join.md#select-join) operations. 
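A short sketch of the pattern, with an invented table and data: the engine materializes the right-hand side of a join in RAM, and the prepared data can also be probed directly with `joinGet`.

``` sql
-- Join(join_strictness, join_type, key_columns...): here the table serves
-- as the right-hand side of an ANY LEFT JOIN keyed on `id`.
CREATE TABLE id_to_name
(
    id UInt32,
    name String
)
ENGINE = Join(ANY, LEFT, id);

INSERT INTO id_to_name VALUES (1, 'alice'), (2, 'bob');

-- Dictionary-style lookup against the prepared structure; the key type
-- must match the table's key column.
SELECT joinGet('id_to_name', 'name', toUInt32(1));
```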
diff --git a/docs/en/engines/table-engines/special/materializedview.md b/docs/en/engines/table-engines/special/materializedview.md index 6c9a5e84f60..8c77a9ce087 100644 --- a/docs/en/engines/table-engines/special/materializedview.md +++ b/docs/en/engines/table-engines/special/materializedview.md @@ -3,7 +3,7 @@ sidebar_position: 100 sidebar_label: MaterializedView --- -# MaterializedView Table Engine {#materializedview} +# MaterializedView Table Engine Used for implementing materialized views (for more information, see [CREATE VIEW](../../../sql-reference/statements/create/view.md#materialized)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses that engine. diff --git a/docs/en/engines/table-engines/special/memory.md b/docs/en/engines/table-engines/special/memory.md index 1e154a323d1..1f822d2f96d 100644 --- a/docs/en/engines/table-engines/special/memory.md +++ b/docs/en/engines/table-engines/special/memory.md @@ -3,7 +3,7 @@ sidebar_position: 110 sidebar_label: Memory --- -# Memory Table Engine {#memory} +# Memory Table Engine The Memory engine stores data in RAM, in uncompressed form. Data is stored in exactly the same form as it is received when read. In other words, reading from this table is completely free. Concurrent data access is synchronized. Locks are short: read and write operations do not block each other. diff --git a/docs/en/engines/table-engines/special/merge.md b/docs/en/engines/table-engines/special/merge.md index bcad7a0c1f6..0f97acda8b5 100644 --- a/docs/en/engines/table-engines/special/merge.md +++ b/docs/en/engines/table-engines/special/merge.md @@ -3,7 +3,7 @@ sidebar_position: 30 sidebar_label: Merge --- -# Merge Table Engine {#merge} +# Merge Table Engine The `Merge` engine (not to be confused with `MergeTree`) does not store data itself, but allows reading from any number of other tables simultaneously. @@ -18,7 +18,7 @@ CREATE TABLE ... Engine=Merge(db_name, tables_regexp) **Engine Parameters** - `db_name` — Possible values: - - database name, + - database name, - constant expression that returns a string with a database name, for example, `currentDatabase()`, - `REGEXP(expression)`, where `expression` is a regular expression to match the DB names. @@ -49,11 +49,11 @@ CREATE TABLE all_visitors (id UInt32) ENGINE=Merge(REGEXP('ABC_*'), 'visitors'); Let's say you have an old table `WatchLog_old` and decided to change partitioning without moving data to a new table `WatchLog_new`, and you need to see data from both tables. 
``` sql -CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64) +CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64) ENGINE=MergeTree(date, (UserId, EventType), 8192); INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3); -CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64) +CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64) ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192; INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3); diff --git a/docs/en/engines/table-engines/special/null.md b/docs/en/engines/table-engines/special/null.md index 309b09ba779..5e775227dab 100644 --- a/docs/en/engines/table-engines/special/null.md +++ b/docs/en/engines/table-engines/special/null.md @@ -3,7 +3,7 @@ sidebar_position: 50 sidebar_label: 'Null' --- -# Null Table Engine {#null} +# Null Table Engine When writing to a `Null` table, data is ignored. When reading from a `Null` table, the response is empty. diff --git a/docs/en/engines/table-engines/special/set.md b/docs/en/engines/table-engines/special/set.md index 5fd80ba55fe..46e31af7ff1 100644 --- a/docs/en/engines/table-engines/special/set.md +++ b/docs/en/engines/table-engines/special/set.md @@ -3,7 +3,7 @@ sidebar_position: 60 sidebar_label: Set --- -# Set Table Engine {#set} +# Set Table Engine A data set that is always in RAM. It is intended for use on the right side of the `IN` operator (see the section “IN operators”). diff --git a/docs/en/engines/table-engines/special/url.md b/docs/en/engines/table-engines/special/url.md index 64642623f88..19246b82219 100644 --- a/docs/en/engines/table-engines/special/url.md +++ b/docs/en/engines/table-engines/special/url.md @@ -3,7 +3,7 @@ sidebar_position: 80 sidebar_label: URL --- -# URL Table Engine {#table_engines-url} +# URL Table Engine Queries data to/from a remote HTTP/HTTPS server. This engine is similar to the [File](../../../engines/table-engines/special/file.md) engine. @@ -13,10 +13,10 @@ Syntax: `URL(URL [,Format] [,CompressionMethod])` - The `Format` must be one that ClickHouse can use in `SELECT` queries and, if necessary, in `INSERTs`. For the full list of supported formats, see [Formats](../../../interfaces/formats.md#formats). -- `CompressionMethod` indicates that whether the HTTP body should be compressed. If the compression is enabled, the HTTP packets sent by the URL engine contain 'Content-Encoding' header to indicate which compression method is used. +- `CompressionMethod` indicates whether the HTTP body should be compressed. If compression is enabled, the HTTP packets sent by the URL engine contain a 'Content-Encoding' header to indicate which compression method is used. To enable compression, please first make sure the remote HTTP endpoint indicated by the `URL` parameter supports corresponding compression algorithm. - + The supported `CompressionMethod` should be one of following: - gzip or gz - deflate diff --git a/docs/en/engines/table-engines/special/view.md b/docs/en/engines/table-engines/special/view.md index 455c301fb01..5e646cf2fd3 100644 --- a/docs/en/engines/table-engines/special/view.md +++ b/docs/en/engines/table-engines/special/view.md @@ -3,7 +3,7 @@ sidebar_position: 90 sidebar_label: View --- -# View Table Engine {#table_engines-view} +# View Table Engine Used for implementing views (for more information, see the `CREATE VIEW query`).
It does not store data, but only stores the specified `SELECT` query. When reading from a table, it runs this query (and deletes all unnecessary columns from the query). diff --git a/docs/en/getting-started/example-datasets/amplab-benchmark.md b/docs/en/getting-started/example-datasets/amplab-benchmark.md index a87ac53e2e3..e50c71009bd 100644 --- a/docs/en/getting-started/example-datasets/amplab-benchmark.md +++ b/docs/en/getting-started/example-datasets/amplab-benchmark.md @@ -3,7 +3,7 @@ sidebar_label: AMPLab Big Data Benchmark description: A benchmark dataset used for comparing the performance of data warehousing solutions. --- -# AMPLab Big Data Benchmark {#amplab-big-data-benchmark} +# AMPLab Big Data Benchmark See https://amplab.cs.berkeley.edu/benchmark/ diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index 12775749a25..b6cd62bb8fc 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -6,7 +6,7 @@ description: ClickHouse can run on any Linux, FreeBSD, or Mac OS X with x86_64, slug: /en/getting-started/install --- -# Installation {#installation} +# Installation ## System Requirements {#system-requirements} @@ -70,7 +70,7 @@ You can also download and install packages manually from [here](https://packages - `clickhouse-client` — Creates a symbolic link for `clickhouse-client` and other client-related tools. and installs client configuration files. - `clickhouse-common-static-dbg` — Installs ClickHouse compiled binary files with debug info. -:::info +:::info If you need to install specific version of ClickHouse you have to install all packages with the same version: `sudo apt-get install clickhouse-server=21.8.5.7 clickhouse-client=21.8.5.7 clickhouse-common-static=21.8.5.7` ::: @@ -190,22 +190,22 @@ sudo ./clickhouse install ### From Precompiled Binaries for Non-Standard Environments {#from-binaries-non-linux} -For non-Linux operating systems and for AArch64 CPU arhitecture, ClickHouse builds are provided as a cross-compiled binary from the latest commit of the `master` branch (with a few hours delay). +For non-Linux operating systems and for AArch64 CPU architecture, ClickHouse builds are provided as a cross-compiled binary from the latest commit of the `master` branch (with a few hours delay).
- [MacOS x86_64](https://builds.clickhouse.com/master/macos/clickhouse) ```bash curl -O 'https://builds.clickhouse.com/master/macos/clickhouse' && chmod a+x ./clickhouse ``` -- [MacOS Aarch64 (Apple Silicon)](https://builds.clickhouse.com/master/macos-aarch64/clickhouse) +- [MacOS Aarch64 (Apple Silicon)](https://builds.clickhouse.com/master/macos-aarch64/clickhouse) ```bash curl -O 'https://builds.clickhouse.com/master/macos-aarch64/clickhouse' && chmod a+x ./clickhouse ``` -- [FreeBSD x86_64](https://builds.clickhouse.com/master/freebsd/clickhouse) +- [FreeBSD x86_64](https://builds.clickhouse.com/master/freebsd/clickhouse) ```bash curl -O 'https://builds.clickhouse.com/master/freebsd/clickhouse' && chmod a+x ./clickhouse ``` -- [Linux AArch64](https://builds.clickhouse.com/master/aarch64/clickhouse) +- [Linux AArch64](https://builds.clickhouse.com/master/aarch64/clickhouse) ```bash curl -O 'https://builds.clickhouse.com/master/aarch64/clickhouse' && chmod a+x ./clickhouse ``` diff --git a/docs/en/getting-started/playground.md b/docs/en/getting-started/playground.md index 73bc0936b2d..e995ea6ef8b 100644 --- a/docs/en/getting-started/playground.md +++ b/docs/en/getting-started/playground.md @@ -6,7 +6,7 @@ description: The ClickHouse Playground allows people to experiment with ClickHou slug: /en/getting-started/playground --- -# ClickHouse Playground {#clickhouse-playground} +# ClickHouse Playground [ClickHouse Playground](https://play.clickhouse.com/play?user=play) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster. Several example datasets are available in Playground. diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index 622742a3c9a..c8c430c4e03 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -3,7 +3,7 @@ sidebar_position: 17 sidebar_label: Command-Line Client --- -# Command-line Client {#command-line-client} +# Command-line Client ClickHouse provides a native command-line client: `clickhouse-client`. The client supports command-line options and configuration files. For more information, see [Configuring](#interfaces_cli_configuration). diff --git a/docs/en/interfaces/cpp.md b/docs/en/interfaces/cpp.md index a7b4188799e..b23e11399d9 100644 --- a/docs/en/interfaces/cpp.md +++ b/docs/en/interfaces/cpp.md @@ -3,7 +3,7 @@ sidebar_position: 24 sidebar_label: C++ Client Library --- -# C++ Client Library {#c-client-library} +# C++ Client Library See README at [clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp) repository. diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 1828a1d0be6..89d3554105f 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -3,7 +3,7 @@ sidebar_position: 21 sidebar_label: Input and Output Formats --- -# Formats for Input and Output Data {#formats} +# Formats for Input and Output Data ClickHouse can accept and return data in various formats. A format supported for input can be used to parse the data provided to `INSERT`s, to perform `SELECT`s from a file-backed table such as File, URL or HDFS, or to read an external dictionary. A format supported for output can be used to arrange the results of a `SELECT`, and to perform `INSERT`s into a file-backed table. 
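As a small, hypothetical illustration of the same data crossing this boundary in two different formats (the table `events` and its columns are invented; when run in clickhouse-client, the inline data rows follow the `INSERT` statement):

``` sql
-- Input format: parse inline JSONEachRow rows into the table.
INSERT INTO events FORMAT JSONEachRow {"id": 1, "msg": "hello"}

-- Output format: return the result as CSV with a header row.
SELECT * FROM events FORMAT CSVWithNames;
```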
@@ -625,7 +625,7 @@ Example: "name": "str", "type": "String" }, - + { "name": "arr", "type": "Array(UInt8)" diff --git a/docs/en/interfaces/grpc.md b/docs/en/interfaces/grpc.md index 6ada38c6220..d5590d1cfb1 100644 --- a/docs/en/interfaces/grpc.md +++ b/docs/en/interfaces/grpc.md @@ -3,19 +3,19 @@ sidebar_position: 19 sidebar_label: gRPC Interface --- -# gRPC Interface {#grpc-interface} +# gRPC Interface ## Introduction {#grpc-interface-introduction} ClickHouse supports [gRPC](https://grpc.io/) interface. It is an open source remote procedure call system that uses HTTP/2 and [Protocol Buffers](https://en.wikipedia.org/wiki/Protocol_Buffers). The implementation of gRPC in ClickHouse supports: -- SSL; -- authentication; -- sessions; -- compression; -- parallel queries through the same channel; -- cancellation of queries; -- getting progress and logs; +- SSL; +- authentication; +- sessions; +- compression; +- parallel queries through the same channel; +- cancellation of queries; +- getting progress and logs; - external tables. The specification of the interface is described in [clickhouse_grpc.proto](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto). @@ -59,7 +59,7 @@ To use the gRPC interface set `grpc_port` in the main [server configuration](../ ## Built-in Client {#grpc-client} You can write a client in any of the programming languages supported by gRPC using the provided [specification](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto). -Or you can use a built-in Python client. It is placed in [utils/grpc-client/clickhouse-grpc-client.py](https://github.com/ClickHouse/ClickHouse/blob/master/utils/grpc-client/clickhouse-grpc-client.py) in the repository. The built-in client requires [grpcio and grpcio-tools](https://grpc.io/docs/languages/python/quickstart) Python modules. +Or you can use a built-in Python client. It is placed in [utils/grpc-client/clickhouse-grpc-client.py](https://github.com/ClickHouse/ClickHouse/blob/master/utils/grpc-client/clickhouse-grpc-client.py) in the repository. The built-in client requires [grpcio and grpcio-tools](https://grpc.io/docs/languages/python/quickstart) Python modules. The client supports the following arguments: diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index 3c009082360..b8ff26f7799 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -3,7 +3,7 @@ sidebar_position: 19 sidebar_label: HTTP Interface --- -# HTTP Interface {#http-interface} +# HTTP Interface The HTTP interface lets you use ClickHouse on any platform from any programming language in a form of REST API. The HTTP interface is more limited than the native interface, but it has better language support. @@ -178,7 +178,7 @@ You can also choose to use [HTTP compression](https://en.wikipedia.org/wiki/HTTP To send a compressed `POST` request, append the request header `Content-Encoding: compression_method`. In order for ClickHouse to compress the response, enable compression with [enable_http_compression](../operations/settings/settings.md#settings-enable_http_compression) setting and append `Accept-Encoding: compression_method` header to the request. You can configure the data compression level in the [http_zlib_compression_level](../operations/settings/settings.md#settings-http_zlib_compression_level) setting for all compression methods. 
-:::info +:::info Some HTTP clients might decompress data from the server by default (with `gzip` and `deflate`) and you might get decompressed data even if you use the compression settings correctly. ::: diff --git a/docs/en/interfaces/jdbc.md b/docs/en/interfaces/jdbc.md index 4bea0600a2a..c508b540eaf 100644 --- a/docs/en/interfaces/jdbc.md +++ b/docs/en/interfaces/jdbc.md @@ -3,7 +3,7 @@ sidebar_position: 22 sidebar_label: JDBC Driver --- -# JDBC Driver {#jdbc-driver} +# JDBC Driver Use the [official JDBC driver](https://github.com/ClickHouse/clickhouse-jdbc) (and Java client) to access ClickHouse from your Java applications. diff --git a/docs/en/interfaces/mysql.md b/docs/en/interfaces/mysql.md index df8ef38d671..fbaa49a66aa 100644 --- a/docs/en/interfaces/mysql.md +++ b/docs/en/interfaces/mysql.md @@ -3,7 +3,7 @@ sidebar_position: 20 sidebar_label: MySQL Interface --- -# MySQL Interface {#mysql-interface} +# MySQL Interface ClickHouse supports MySQL wire protocol. It can be enabled by [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) setting in configuration file: diff --git a/docs/en/interfaces/odbc.md b/docs/en/interfaces/odbc.md index 4c807654c28..48410fcdbad 100644 --- a/docs/en/interfaces/odbc.md +++ b/docs/en/interfaces/odbc.md @@ -3,10 +3,10 @@ sidebar_position: 23 sidebar_label: ODBC Driver --- -# ODBC Driver {#odbc-driver} +# ODBC Driver Use the [official ODBC driver](https://github.com/ClickHouse/clickhouse-odbc) for accessing ClickHouse as a data source. - + [Original article](https://clickhouse.com/docs/en/interfaces/odbc/) diff --git a/docs/en/interfaces/overview.md b/docs/en/interfaces/overview.md index b689636d292..0c7378bf075 100644 --- a/docs/en/interfaces/overview.md +++ b/docs/en/interfaces/overview.md @@ -5,7 +5,7 @@ keywords: [clickhouse, network, interfaces, http, tcp, grpc, command-line, clien description: ClickHouse provides three network interfaces --- -# Interfaces {#interfaces} +# Interfaces ClickHouse provides three network interfaces (they can be optionally wrapped in TLS for additional security): diff --git a/docs/en/interfaces/tcp.md b/docs/en/interfaces/tcp.md index 5f2f400799f..16189f11a12 100644 --- a/docs/en/interfaces/tcp.md +++ b/docs/en/interfaces/tcp.md @@ -3,7 +3,7 @@ sidebar_position: 18 sidebar_label: Native Interface (TCP) --- -# Native Interface (TCP) {#native-interface-tcp} +# Native Interface (TCP) The native protocol is used in the [command-line client](../interfaces/cli.md), for inter-server communication during distributed query processing, and also in other C++ programs. Unfortunately, native ClickHouse protocol does not have formal specification yet, but it can be reverse-engineered from ClickHouse source code (starting [around here](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) and/or by intercepting and analyzing TCP traffic. 
diff --git a/docs/en/interfaces/third-party/client-libraries.md b/docs/en/interfaces/third-party/client-libraries.md index 885e9f430f2..7a4b9846521 100644 --- a/docs/en/interfaces/third-party/client-libraries.md +++ b/docs/en/interfaces/third-party/client-libraries.md @@ -3,9 +3,9 @@ sidebar_position: 26 sidebar_label: Client Libraries --- -# Client Libraries from Third-party Developers {#client-libraries-from-third-party-developers} +# Client Libraries from Third-party Developers -:::warning +:::warning ClickHouse Inc does **not** maintain the libraries listed below and hasn’t done any extensive testing to ensure their quality. ::: diff --git a/docs/en/interfaces/third-party/gui.md b/docs/en/interfaces/third-party/gui.md index 210c60cc71f..7de38cb3cfd 100644 --- a/docs/en/interfaces/third-party/gui.md +++ b/docs/en/interfaces/third-party/gui.md @@ -3,7 +3,7 @@ sidebar_position: 28 sidebar_label: Visual Interfaces --- -# Visual Interfaces from Third-party Developers {#visual-interfaces-from-third-party-developers} +# Visual Interfaces from Third-party Developers ## Open-Source {#open-source} @@ -249,7 +249,7 @@ Features: - Web-based interface. TABLUM.IO can be run as a self-hosted solution (as a docker image) or in the cloud. -License: [commercial](https://tablum.io/pricing) product with 3-month free period. +License: [commercial](https://tablum.io/pricing) product with 3-month free period. Try it out for free [in the cloud](https://tablum.io/try). Learn more about the product at [TABLUM.IO](https://tablum.io/) diff --git a/docs/en/interfaces/third-party/index.md b/docs/en/interfaces/third-party/index.md index c9be2b6ada9..f2d2f39f7f8 100644 --- a/docs/en/interfaces/third-party/index.md +++ b/docs/en/interfaces/third-party/index.md @@ -3,7 +3,7 @@ toc_folder_title: Third-Party sidebar_position: 24 --- -# Third-Party Interfaces {#third-party-interfaces} +# Third-Party Interfaces This is a collection of links to third-party tools that provide some sort of interface to ClickHouse. It can be either visual interface, command-line interface or an API: diff --git a/docs/en/interfaces/third-party/integrations.md b/docs/en/interfaces/third-party/integrations.md index ae055d63a9d..317e5ca5bda 100644 --- a/docs/en/interfaces/third-party/integrations.md +++ b/docs/en/interfaces/third-party/integrations.md @@ -3,7 +3,7 @@ sidebar_position: 27 sidebar_label: Integrations --- -# Integration Libraries from Third-party Developers {#integration-libraries-from-third-party-developers} +# Integration Libraries from Third-party Developers :::warning Disclaimer ClickHouse, Inc. does **not** maintain the tools and libraries listed below and haven’t done extensive testing to ensure their quality. 
diff --git a/docs/en/interfaces/third-party/proxy.md b/docs/en/interfaces/third-party/proxy.md index 45077cb6a89..2e395355c7d 100644 --- a/docs/en/interfaces/third-party/proxy.md +++ b/docs/en/interfaces/third-party/proxy.md @@ -3,7 +3,7 @@ sidebar_position: 29 sidebar_label: Proxies --- -# Proxy Servers from Third-party Developers {#proxy-servers-from-third-party-developers} +# Proxy Servers from Third-party Developers ## chproxy {#chproxy} diff --git a/docs/en/operations/access-rights.md b/docs/en/operations/access-rights.md index 7d75c47df2b..34d79aa99d9 100644 --- a/docs/en/operations/access-rights.md +++ b/docs/en/operations/access-rights.md @@ -3,7 +3,7 @@ sidebar_position: 48 sidebar_label: Access Control and Account Management --- -# Access Control and Account Management {#access-control} +# Access Control and Account Management ClickHouse supports access control management based on [RBAC](https://en.wikipedia.org/wiki/Role-based_access_control) approach. @@ -24,7 +24,7 @@ You can configure access entities using: We recommend using SQL-driven workflow. Both of the configuration methods work simultaneously, so if you use the server configuration files for managing accounts and access rights, you can smoothly switch to SQL-driven workflow. -:::warning +:::warning You can’t manage the same access entity by both configuration methods simultaneously. ::: @@ -102,7 +102,7 @@ Privileges can be granted to a role by the [GRANT](../sql-reference/statements/g Row policy is a filter that defines which of the rows are available to a user or a role. Row policy contains filters for one particular table, as well as a list of roles and/or users which should use this row policy. -:::warning +:::warning Row policies makes sense only for users with readonly access. If user can modify table or copy partitions between tables, it defeats the restrictions of row policies. ::: diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md index c39658aa4b0..76f63db9c2e 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/backup.md @@ -3,7 +3,7 @@ sidebar_position: 49 sidebar_label: Data Backup --- -# Data Backup {#data-backup} +# Data Backup While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented. @@ -11,7 +11,7 @@ In order to effectively mitigate possible human errors, you should carefully pre Each company has different resources available and business requirements, so there’s no universal solution for ClickHouse backups and restores that will fit every situation. What works for one gigabyte of data likely won’t work for tens of petabytes. There are a variety of possible approaches with their own pros and cons, which will be discussed below. It is a good idea to use several approaches instead of just one in order to compensate for their various shortcomings. 
-:::note +:::note Keep in mind that if you backed something up and never tried to restore it, chances are that restore will not work properly when you actually need it (or at least it will take longer than business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly. ::: diff --git a/docs/en/operations/caches.md b/docs/en/operations/caches.md index 27e81256315..28ddb14a511 100644 --- a/docs/en/operations/caches.md +++ b/docs/en/operations/caches.md @@ -3,7 +3,7 @@ sidebar_position: 65 sidebar_label: Caches --- -# Cache Types {#cache-types} +# Cache Types When performing queries, ClickHouse uses different caches. diff --git a/docs/en/operations/clickhouse-keeper.md b/docs/en/operations/clickhouse-keeper.md index e4d10967bc8..6bf97b3a470 100644 --- a/docs/en/operations/clickhouse-keeper.md +++ b/docs/en/operations/clickhouse-keeper.md @@ -3,7 +3,7 @@ sidebar_position: 66 sidebar_label: ClickHouse Keeper --- -# ClickHouse Keeper {#clickHouse-keeper} +# ClickHouse Keeper ClickHouse Keeper provides the coordination system for data [replication](../engines/table-engines/mergetree-family/replication.md) and [distributed DDL](../sql-reference/distributed-ddl.md) queries execution. ClickHouse Keeper is compatible with ZooKeeper. diff --git a/docs/en/operations/configuration-files.md b/docs/en/operations/configuration-files.md index 4a5431fa57c..8676d4900f4 100644 --- a/docs/en/operations/configuration-files.md +++ b/docs/en/operations/configuration-files.md @@ -3,7 +3,7 @@ sidebar_position: 50 sidebar_label: Configuration Files --- -# Configuration Files {#configuration_files} +# Configuration Files ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml` or `/etc/clickhouse-server/config.yaml`. Other files must be in the `/etc/clickhouse-server/config.d` directory. Note, that any configuration file can be written either in XML or YAML, but mixing formats in one file is not supported. For example, you can have main configs as `config.xml` and `users.xml` and write additional files in `config.d` and `users.d` directories in `.yaml`. diff --git a/docs/en/operations/external-authenticators/kerberos.md b/docs/en/operations/external-authenticators/kerberos.md index 3711bac79c3..8b47ec2c809 100644 --- a/docs/en/operations/external-authenticators/kerberos.md +++ b/docs/en/operations/external-authenticators/kerberos.md @@ -1,4 +1,4 @@ -# Kerberos {#external-authenticators-kerberos} +# Kerberos Existing and properly configured ClickHouse users can be authenticated via Kerberos authentication protocol. @@ -99,7 +99,7 @@ Example (goes into `users.xml`): Note that Kerberos authentication cannot be used alongside with any other authentication mechanism. The presence of any other sections like `password` alongside `kerberos` will force ClickHouse to shutdown. ::: -:::info Reminder +:::info Reminder Note, that now, once user `my_user` uses `kerberos`, Kerberos must be enabled in the main `config.xml` file as described previously. ::: diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index 57e6ec1a087..0c79e2438ff 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -1,4 +1,4 @@ -# LDAP {#external-authenticators-ldap} +# LDAP LDAP server can be used to authenticate ClickHouse users. 
There are two different approaches for doing this: diff --git a/docs/en/operations/external-authenticators/ssl-x509.md b/docs/en/operations/external-authenticators/ssl-x509.md index 15b5990d00e..c0d83005a7e 100644 --- a/docs/en/operations/external-authenticators/ssl-x509.md +++ b/docs/en/operations/external-authenticators/ssl-x509.md @@ -1,4 +1,4 @@ -# SSL X.509 certificate authentication {#ssl-external-authentication} +# SSL X.509 certificate authentication [SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for the incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows to uniquely authenticate an incoming connection. `Common Name` field of the certificate is used to identify connected user. This allows to associate multiple certificates with the same user. Additionally, reissuing and revoking of the certificates does not affect the ClickHouse configuration. diff --git a/docs/en/operations/monitoring.md b/docs/en/operations/monitoring.md index 437122e106d..8a5358b146c 100644 --- a/docs/en/operations/monitoring.md +++ b/docs/en/operations/monitoring.md @@ -3,7 +3,7 @@ sidebar_position: 45 sidebar_label: Monitoring --- -# Monitoring {#monitoring} +# Monitoring You can monitor: diff --git a/docs/en/operations/named-collections.md b/docs/en/operations/named-collections.md index 52520ba76b7..aea218f6ad5 100644 --- a/docs/en/operations/named-collections.md +++ b/docs/en/operations/named-collections.md @@ -3,7 +3,7 @@ sidebar_position: 69 sidebar_label: "Named connections" --- -# Storing details for connecting to external sources in configuration files {#named-collections} +# Storing details for connecting to external sources in configuration files Details for connecting to external sources (dictionaries, tables, table functions) can be saved in configuration files and thus simplify the creation of objects and hide credentials diff --git a/docs/en/operations/optimizing-performance/index.md b/docs/en/operations/optimizing-performance/index.md index ef9c6a4b664..cde1ca9614b 100644 --- a/docs/en/operations/optimizing-performance/index.md +++ b/docs/en/operations/optimizing-performance/index.md @@ -3,6 +3,6 @@ sidebar_label: Optimizing Performance sidebar_position: 52 --- -# Optimizing Performance {#optimizing-performance} +# Optimizing Performance - [Sampling query profiler](../../operations/optimizing-performance/sampling-query-profiler.md) diff --git a/docs/en/operations/optimizing-performance/sampling-query-profiler.md b/docs/en/operations/optimizing-performance/sampling-query-profiler.md index 8555cebaf00..83bab2a3204 100644 --- a/docs/en/operations/optimizing-performance/sampling-query-profiler.md +++ b/docs/en/operations/optimizing-performance/sampling-query-profiler.md @@ -3,7 +3,7 @@ sidebar_position: 54 sidebar_label: Query Profiling --- -# Sampling Query Profiler {#sampling-query-profiler} +# Sampling Query Profiler ClickHouse runs sampling profiler that allows analyzing query execution. Using profiler you can find source code routines that used the most frequently during query execution. You can trace CPU time and wall-clock time spent including idle time. 
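A hedged sketch of a typical profiling session, assuming `trace_log` collection is enabled in the server configuration (the sampled query is arbitrary):

``` sql
SET allow_introspection_functions = 1;
SET query_profiler_real_time_period_ns = 10000000; -- sample every 10 ms

SELECT count() FROM numbers(100000000) WHERE number % 7 = 0;

-- Each trace_log row is one sampled stack; aggregate to find hot routines.
SELECT
    count() AS samples,
    arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS stack
FROM system.trace_log
WHERE trace_type = 'Real'
GROUP BY trace
ORDER BY samples DESC
LIMIT 3;
```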
diff --git a/docs/en/operations/performance-test.md b/docs/en/operations/performance-test.md index 47827f331c7..b0b9a30fe2c 100644 --- a/docs/en/operations/performance-test.md +++ b/docs/en/operations/performance-test.md @@ -3,7 +3,7 @@ sidebar_position: 54 sidebar_label: Testing Hardware --- -# How to Test Your Hardware with ClickHouse {#how-to-test-your-hardware-with-clickhouse} +# How to Test Your Hardware with ClickHouse You can run basic ClickHouse performance test on any server without installation of ClickHouse packages. diff --git a/docs/en/operations/quotas.md b/docs/en/operations/quotas.md index 77b0697d483..2aa1c09386d 100644 --- a/docs/en/operations/quotas.md +++ b/docs/en/operations/quotas.md @@ -3,7 +3,7 @@ sidebar_position: 51 sidebar_label: Quotas --- -# Quotas {#quotas} +# Quotas Quotas allow you to limit resource usage over a period of time or track the use of resources. Quotas are set up in the user config, which is usually ‘users.xml’. diff --git a/docs/en/operations/requirements.md b/docs/en/operations/requirements.md index b879934fef3..753b4ee2b94 100644 --- a/docs/en/operations/requirements.md +++ b/docs/en/operations/requirements.md @@ -3,9 +3,9 @@ sidebar_position: 44 sidebar_label: Requirements --- -# Requirements +# Requirements -## CPU {#cpu} +## CPU For installation from prebuilt deb packages, use a CPU with x86_64 architecture and support for SSE 4.2 instructions. To run ClickHouse with processors that do not support SSE 4.2 or have AArch64 or PowerPC64LE architecture, you should build ClickHouse from sources. diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index f235fba84f7..ad879679a3d 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -3,7 +3,7 @@ sidebar_position: 57 sidebar_label: Server Settings --- -# Server Settings {#server-settings} +# Server Settings ## builtin_dictionaries_reload_interval {#builtin-dictionaries-reload-interval} diff --git a/docs/en/operations/settings/constraints-on-settings.md b/docs/en/operations/settings/constraints-on-settings.md index 5adde60a460..d240fde8ff3 100644 --- a/docs/en/operations/settings/constraints-on-settings.md +++ b/docs/en/operations/settings/constraints-on-settings.md @@ -3,7 +3,7 @@ sidebar_position: 62 sidebar_label: Constraints on Settings --- -# Constraints on Settings {#constraints-on-settings} +# Constraints on Settings The constraints on settings can be defined in the `profiles` section of the `user.xml` configuration file and prohibit users from changing some of the settings with the `SET` query. The constraints are defined as the following: diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md index b672da83441..cf6b3459fe8 100644 --- a/docs/en/operations/settings/merge-tree-settings.md +++ b/docs/en/operations/settings/merge-tree-settings.md @@ -1,4 +1,4 @@ -# MergeTree tables settings {#merge-tree-settings} +# MergeTree tables settings The values of `merge_tree` settings (for all MergeTree tables) can be viewed in the table `system.merge_tree_settings`, they can be overridden in `config.xml` in the `merge_tree` section, or set in the `SETTINGS` section of each table. @@ -27,7 +27,7 @@ An example of changing the settings for a specific table with the `ALTER TABLE . 
``` sql ALTER TABLE foo MODIFY SETTING max_suspicious_broken_parts = 100; - + -- reset to default (use value from system.merge_tree_settings) ALTER TABLE foo RESET SETTING max_suspicious_broken_parts; diff --git a/docs/en/operations/settings/permissions-for-queries.md b/docs/en/operations/settings/permissions-for-queries.md index ff63f524b7d..668cb9993eb 100644 --- a/docs/en/operations/settings/permissions-for-queries.md +++ b/docs/en/operations/settings/permissions-for-queries.md @@ -3,7 +3,7 @@ sidebar_position: 58 sidebar_label: Permissions for Queries --- -# Permissions for Queries {#permissions_for_queries} +# Permissions for Queries Queries in ClickHouse can be divided into several types: diff --git a/docs/en/operations/settings/query-complexity.md b/docs/en/operations/settings/query-complexity.md index c0c77bc809a..342c8002572 100644 --- a/docs/en/operations/settings/query-complexity.md +++ b/docs/en/operations/settings/query-complexity.md @@ -3,7 +3,7 @@ sidebar_position: 59 sidebar_label: Restrictions on Query Complexity --- -# Restrictions on Query Complexity {#restrictions-on-query-complexity} +# Restrictions on Query Complexity Restrictions on query complexity are part of the settings. They are used to provide safer execution from the user interface. diff --git a/docs/en/operations/settings/settings-profiles.md b/docs/en/operations/settings/settings-profiles.md index b8e1e3c21c4..ea6c88a0f86 100644 --- a/docs/en/operations/settings/settings-profiles.md +++ b/docs/en/operations/settings/settings-profiles.md @@ -3,7 +3,7 @@ sidebar_position: 61 sidebar_label: Settings Profiles --- -# Settings Profiles {#settings-profiles} +# Settings Profiles A settings profile is a collection of settings grouped under the same name. diff --git a/docs/en/operations/settings/settings-users.md b/docs/en/operations/settings/settings-users.md index 6a020be2afc..6b3340bfce2 100644 --- a/docs/en/operations/settings/settings-users.md +++ b/docs/en/operations/settings/settings-users.md @@ -3,7 +3,7 @@ sidebar_position: 63 sidebar_label: User Settings --- -# User Settings {#user-settings} +# User Settings The `users` section of the `user.xml` configuration file contains user settings. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 5db116aa6e5..0928f5c4ee7 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4,7 +4,7 @@ sidebar_position: 52 slug: /en/operations/settings/settings --- -# Settings {#settings} +# Settings ## allow_nondeterministic_mutations {#allow_nondeterministic_mutations} @@ -20,7 +20,7 @@ Default value: 0. 1 - + diff --git a/docs/en/operations/ssl-zookeeper.md b/docs/en/operations/ssl-zookeeper.md index d6043d521e7..79c65853d34 100644 --- a/docs/en/operations/ssl-zookeeper.md +++ b/docs/en/operations/ssl-zookeeper.md @@ -3,7 +3,7 @@ sidebar_position: 45 sidebar_label: Secured Communication with Zookeeper --- -# Optional secured communication between ClickHouse and Zookeeper {#secured-communication-with-zookeeper} +# Optional secured communication between ClickHouse and Zookeeper You should specify `ssl.keyStore.location`, `ssl.keyStore.password` and `ssl.trustStore.location`, `ssl.trustStore.password` for communication with ClickHouse client over SSL. These options are available from Zookeeper version 3.5.2. 
diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 98e36f5e41b..6afd5d4b726 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -3,9 +3,9 @@ sidebar_position: 68 sidebar_label: External Disks for Storing Data --- -# External Disks for Storing Data {#external-disks} +# External Disks for Storing Data -Data, processed in ClickHouse, is usually stored in the local file system — on the same machine with the ClickHouse server. That requires large-capacity disks, which can be expensive enough. To avoid that you can store the data remotely — on [Amazon S3](https://aws.amazon.com/s3/) disks or in the Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)). +Data processed in ClickHouse is usually stored in the local file system — on the same machine as the ClickHouse server. That requires large-capacity disks, which can be expensive. To avoid that, you can store the data remotely — on [Amazon S3](https://aws.amazon.com/s3/) disks or in the Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)). To work with data stored on `Amazon S3` disks use [S3](../engines/table-engines/integrations/s3.md) table engine, and to work with data in the Hadoop Distributed File System — [HDFS](../engines/table-engines/integrations/hdfs.md) table engine. diff --git a/docs/en/operations/system-tables/asynchronous_metric_log.md b/docs/en/operations/system-tables/asynchronous_metric_log.md index 2233406162b..f40b1e500c2 100644 --- a/docs/en/operations/system-tables/asynchronous_metric_log.md +++ b/docs/en/operations/system-tables/asynchronous_metric_log.md @@ -1,4 +1,4 @@ -# asynchronous_metric_log {#system-tables-async-log} +# asynchronous_metric_log Contains the historical values for `system.asynchronous_metrics`, which are saved once per minute. Enabled by default. diff --git a/docs/en/operations/system-tables/asynchronous_metrics.md b/docs/en/operations/system-tables/asynchronous_metrics.md index 162048b06ee..17ee1b27e3d 100644 --- a/docs/en/operations/system-tables/asynchronous_metrics.md +++ b/docs/en/operations/system-tables/asynchronous_metrics.md @@ -1,4 +1,4 @@ -# asynchronous_metrics {#system_tables-asynchronous_metrics} +# asynchronous_metrics Contains metrics that are calculated periodically in the background. For example, the amount of RAM in use. diff --git a/docs/en/operations/system-tables/clusters.md b/docs/en/operations/system-tables/clusters.md index 776c90b9936..9e086ef7808 100644 --- a/docs/en/operations/system-tables/clusters.md +++ b/docs/en/operations/system-tables/clusters.md @@ -1,4 +1,4 @@ -# clusters {#system-clusters} +# clusters Contains information about clusters available in the config file and the servers in them. diff --git a/docs/en/operations/system-tables/columns.md b/docs/en/operations/system-tables/columns.md index dd5674fe5b1..1945963352b 100644 --- a/docs/en/operations/system-tables/columns.md +++ b/docs/en/operations/system-tables/columns.md @@ -1,4 +1,4 @@ -# columns {#system-columns} +# columns Contains information about columns in all the tables.
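For example, a query of this shape lists the columns of a single table (the database and table names are placeholders):

``` sql
SELECT name, type, default_expression
FROM system.columns
WHERE database = 'default' AND table = 'hits';
```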
diff --git a/docs/en/operations/system-tables/contributors.md b/docs/en/operations/system-tables/contributors.md index 3b76684b44b..51a625bb844 100644 --- a/docs/en/operations/system-tables/contributors.md +++ b/docs/en/operations/system-tables/contributors.md @@ -1,4 +1,4 @@ -# contributors {#system-contributors} +# contributors Contains information about contributors. The order is random at query execution time. diff --git a/docs/en/operations/system-tables/crash-log.md b/docs/en/operations/system-tables/crash-log.md index be85cb78c9f..670ae7bc080 100644 --- a/docs/en/operations/system-tables/crash-log.md +++ b/docs/en/operations/system-tables/crash-log.md @@ -1,4 +1,4 @@ -# crash_log {#system-tables_crash_log} +# crash_log Contains information about stack traces for fatal errors. The table does not exist in the database by default, it is created only when fatal errors occur. diff --git a/docs/en/operations/system-tables/current-roles.md b/docs/en/operations/system-tables/current-roles.md index 81d4fad24a8..af4559ce6f7 100644 --- a/docs/en/operations/system-tables/current-roles.md +++ b/docs/en/operations/system-tables/current-roles.md @@ -1,4 +1,4 @@ -# current_roles {#system_tables-current_roles} +# current_roles Contains active roles of a current user. `SET ROLE` changes the contents of this table. diff --git a/docs/en/operations/system-tables/data_skipping_indices.md b/docs/en/operations/system-tables/data_skipping_indices.md index 71dfb046dbb..b3c7cbe2b23 100644 --- a/docs/en/operations/system-tables/data_skipping_indices.md +++ b/docs/en/operations/system-tables/data_skipping_indices.md @@ -1,4 +1,4 @@ -# data_skipping_indices {#system-data-skipping-indices} +# data_skipping_indices Contains information about existing data skipping indices in all the tables. diff --git a/docs/en/operations/system-tables/data_type_families.md b/docs/en/operations/system-tables/data_type_families.md index 2e5e7b74c66..0202ba78ffe 100644 --- a/docs/en/operations/system-tables/data_type_families.md +++ b/docs/en/operations/system-tables/data_type_families.md @@ -1,4 +1,4 @@ -# data_type_families {#system_tables-data_type_families} +# data_type_families Contains information about supported [data types](../../sql-reference/data-types/index.md). diff --git a/docs/en/operations/system-tables/databases.md b/docs/en/operations/system-tables/databases.md index 7245ecdcdec..6dbe02ca706 100644 --- a/docs/en/operations/system-tables/databases.md +++ b/docs/en/operations/system-tables/databases.md @@ -1,4 +1,4 @@ -# databases {#system-databases} +# databases Contains information about the databases that are available to the current user. diff --git a/docs/en/operations/system-tables/detached_parts.md b/docs/en/operations/system-tables/detached_parts.md index 2fe354a4471..9c0717fb062 100644 --- a/docs/en/operations/system-tables/detached_parts.md +++ b/docs/en/operations/system-tables/detached_parts.md @@ -1,4 +1,4 @@ -# detached_parts {#system_tables-detached_parts} +# detached_parts Contains information about detached parts of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. The `reason` column specifies why the part was detached. 
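A sketch of the usual workflow (the table and part names are placeholders):

``` sql
-- Inspect what has been detached and why.
SELECT database, table, partition_id, name, reason
FROM system.detached_parts;

-- Dropping a detached part is guarded by a setting.
SET allow_drop_detached = 1;
ALTER TABLE mydb.mytable DROP DETACHED PART 'all_1_1_0';
```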
diff --git a/docs/en/operations/system-tables/dictionaries.md b/docs/en/operations/system-tables/dictionaries.md index c41d506ff0a..e0f2a7baa03 100644 --- a/docs/en/operations/system-tables/dictionaries.md +++ b/docs/en/operations/system-tables/dictionaries.md @@ -1,4 +1,4 @@ -# dictionaries {#system_tables-dictionaries} +# dictionaries Contains information about [external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). diff --git a/docs/en/operations/system-tables/disks.md b/docs/en/operations/system-tables/disks.md index 869c0f3cee5..0fe557bf985 100644 --- a/docs/en/operations/system-tables/disks.md +++ b/docs/en/operations/system-tables/disks.md @@ -1,4 +1,4 @@ -# disks {#system_tables-disks} +# disks Contains information about disks defined in the [server configuration](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). diff --git a/docs/en/operations/system-tables/distributed_ddl_queue.md b/docs/en/operations/system-tables/distributed_ddl_queue.md index ac2663bba19..5a2478b22d9 100644 --- a/docs/en/operations/system-tables/distributed_ddl_queue.md +++ b/docs/en/operations/system-tables/distributed_ddl_queue.md @@ -1,4 +1,4 @@ -# distributed_ddl_queue {#system_tables-distributed_ddl_queue} +# distributed_ddl_queue Contains information about [distributed DDL queries (ON CLUSTER clause)](../../sql-reference/distributed-ddl.md) that were executed on a cluster. diff --git a/docs/en/operations/system-tables/distribution_queue.md b/docs/en/operations/system-tables/distribution_queue.md index 231a06458c8..88d376c7553 100644 --- a/docs/en/operations/system-tables/distribution_queue.md +++ b/docs/en/operations/system-tables/distribution_queue.md @@ -1,4 +1,4 @@ -# distribution_queue {#system_tables-distribution_queue} +# distribution_queue Contains information about local files that are in the queue to be sent to the shards. These local files contain new parts that are created by inserting new data into the Distributed table in asynchronous mode. diff --git a/docs/en/operations/system-tables/enabled-roles.md b/docs/en/operations/system-tables/enabled-roles.md index 832fc6aba42..a1649df875a 100644 --- a/docs/en/operations/system-tables/enabled-roles.md +++ b/docs/en/operations/system-tables/enabled-roles.md @@ -1,4 +1,4 @@ -# enabled_roles {#system_tables-enabled_roles} +# enabled_roles Contains all roles that are active at the moment, including the current role of the current user and the roles granted to the current role. diff --git a/docs/en/operations/system-tables/errors.md b/docs/en/operations/system-tables/errors.md index 8e60cf93bfa..3e40e898a78 100644 --- a/docs/en/operations/system-tables/errors.md +++ b/docs/en/operations/system-tables/errors.md @@ -1,4 +1,4 @@ -# errors {#system_tables-errors} +# errors Contains error codes with the number of times they have been triggered. diff --git a/docs/en/operations/system-tables/events.md b/docs/en/operations/system-tables/events.md index 445573ec978..4525733a775 100644 --- a/docs/en/operations/system-tables/events.md +++ b/docs/en/operations/system-tables/events.md @@ -1,4 +1,4 @@ -# events {#system_tables-events} +# events Contains information about the number of events that have occurred in the system. For example, in the table, you can find how many `SELECT` queries were processed since the ClickHouse server started.
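For instance, the `system.errors` table described above lends itself to a quick health check; a minimal sketch:

```sql
-- Error codes triggered most often since server start.
SELECT name, code, value
FROM system.errors
ORDER BY value DESC
LIMIT 5;
```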
diff --git a/docs/en/operations/system-tables/functions.md b/docs/en/operations/system-tables/functions.md index 097b6ccd22a..8dcad0b48a7 100644 --- a/docs/en/operations/system-tables/functions.md +++ b/docs/en/operations/system-tables/functions.md @@ -1,4 +1,4 @@ -# functions {#system-functions} +# functions Contains information about normal and aggregate functions. diff --git a/docs/en/operations/system-tables/grants.md b/docs/en/operations/system-tables/grants.md index c848972c2d8..d7cbc4ea556 100644 --- a/docs/en/operations/system-tables/grants.md +++ b/docs/en/operations/system-tables/grants.md @@ -1,4 +1,4 @@ -# grants {#system_tables-grants} +# grants Privileges granted to ClickHouse user accounts. diff --git a/docs/en/operations/system-tables/graphite_retentions.md b/docs/en/operations/system-tables/graphite_retentions.md index 10e265815f4..697e272e810 100644 --- a/docs/en/operations/system-tables/graphite_retentions.md +++ b/docs/en/operations/system-tables/graphite_retentions.md @@ -1,4 +1,4 @@ -# graphite_retentions {#system-graphite-retentions} +# graphite_retentions Contains information about parameters [graphite_rollup](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-graphite) which are used in tables with [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md) engines. diff --git a/docs/en/operations/system-tables/index.md b/docs/en/operations/system-tables/index.md index 7b977ab4d51..e33c7bde1e5 100644 --- a/docs/en/operations/system-tables/index.md +++ b/docs/en/operations/system-tables/index.md @@ -3,7 +3,7 @@ sidebar_position: 52 sidebar_label: System Tables --- -# System Tables {#system-tables} +# System Tables ## Introduction {#system-tables-introduction} diff --git a/docs/en/operations/system-tables/information_schema.md b/docs/en/operations/system-tables/information_schema.md index df5b012f2b6..50c15dacd13 100644 --- a/docs/en/operations/system-tables/information_schema.md +++ b/docs/en/operations/system-tables/information_schema.md @@ -1,4 +1,4 @@ -# INFORMATION_SCHEMA {#information-schema} +# INFORMATION_SCHEMA `INFORMATION_SCHEMA` (`information_schema`) is a system database that contains views. Using these views, you can get information about the metadata of database objects. These views read data from the columns of the [system.columns](../../operations/system-tables/columns.md), [system.databases](../../operations/system-tables/databases.md) and [system.tables](../../operations/system-tables/tables.md) system tables. diff --git a/docs/en/operations/system-tables/merge_tree_settings.md b/docs/en/operations/system-tables/merge_tree_settings.md index 0324d5c633d..49c5b951352 100644 --- a/docs/en/operations/system-tables/merge_tree_settings.md +++ b/docs/en/operations/system-tables/merge_tree_settings.md @@ -1,4 +1,4 @@ -# merge_tree_settings {#system-merge_tree_settings} +# merge_tree_settings Contains information about settings for `MergeTree` tables. diff --git a/docs/en/operations/system-tables/merges.md b/docs/en/operations/system-tables/merges.md index f512e00fc89..829be3e3147 100644 --- a/docs/en/operations/system-tables/merges.md +++ b/docs/en/operations/system-tables/merges.md @@ -1,4 +1,4 @@ -# merges {#system-merges} +# merges Contains information about merges and part mutations currently in process for tables in the MergeTree family. 
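Since the `INFORMATION_SCHEMA` views described above are backed by `system.columns`, `system.databases`, and `system.tables`, a standard-SQL style query works as expected; a minimal sketch:

```sql
-- List tables of the current database through the standard views.
SELECT table_schema, table_name
FROM information_schema.tables
WHERE table_schema = currentDatabase();
```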
diff --git a/docs/en/operations/system-tables/metric_log.md b/docs/en/operations/system-tables/metric_log.md index 55b0d800ead..bb637d006d4 100644 --- a/docs/en/operations/system-tables/metric_log.md +++ b/docs/en/operations/system-tables/metric_log.md @@ -1,4 +1,4 @@ -# metric_log {#system_tables-metric_log} +# metric_log Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk. diff --git a/docs/en/operations/system-tables/metrics.md b/docs/en/operations/system-tables/metrics.md index 75725ae035a..66a56cf3618 100644 --- a/docs/en/operations/system-tables/metrics.md +++ b/docs/en/operations/system-tables/metrics.md @@ -1,4 +1,4 @@ -# metrics {#system_tables-metrics} +# metrics Contains metrics which can be calculated instantly, or have a current value. For example, the number of simultaneously processed queries or the current replica delay. This table is always up to date. diff --git a/docs/en/operations/system-tables/mutations.md b/docs/en/operations/system-tables/mutations.md index 2878a19a1e7..57fa3684c34 100644 --- a/docs/en/operations/system-tables/mutations.md +++ b/docs/en/operations/system-tables/mutations.md @@ -1,4 +1,4 @@ -# mutations {#system_tables-mutations} +# mutations The table contains information about [mutations](../../sql-reference/statements/alter/index.md#mutations) of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row. diff --git a/docs/en/operations/system-tables/numbers.md b/docs/en/operations/system-tables/numbers.md index 29828bfe796..4b18e9177e0 100644 --- a/docs/en/operations/system-tables/numbers.md +++ b/docs/en/operations/system-tables/numbers.md @@ -1,4 +1,4 @@ -# numbers {#system-numbers} +# numbers This table contains a single UInt64 column named `number` that contains almost all the natural numbers starting from zero. diff --git a/docs/en/operations/system-tables/numbers_mt.md b/docs/en/operations/system-tables/numbers_mt.md index 02155db4711..d420186aec4 100644 --- a/docs/en/operations/system-tables/numbers_mt.md +++ b/docs/en/operations/system-tables/numbers_mt.md @@ -1,4 +1,4 @@ -# numbers_mt {#system-numbers-mt} +# numbers_mt The same as [system.numbers](../../operations/system-tables/numbers.md) but reads are parallelized. The numbers can be returned in any order. diff --git a/docs/en/operations/system-tables/one.md b/docs/en/operations/system-tables/one.md index 9b84c0bfcd6..6a4991a5190 100644 --- a/docs/en/operations/system-tables/one.md +++ b/docs/en/operations/system-tables/one.md @@ -1,4 +1,4 @@ -# one {#system-one} +# one This table contains a single row with a single `dummy` UInt8 column containing the value 0. diff --git a/docs/en/operations/system-tables/opentelemetry_span_log.md b/docs/en/operations/system-tables/opentelemetry_span_log.md index 89af72d6620..1b3b97af019 100644 --- a/docs/en/operations/system-tables/opentelemetry_span_log.md +++ b/docs/en/operations/system-tables/opentelemetry_span_log.md @@ -1,4 +1,4 @@ -# opentelemetry_span_log {#system_tables-opentelemetry_span_log} +# opentelemetry_span_log Contains information about [trace spans](https://opentracing.io/docs/overview/spans/) for executed queries. 
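Because `system.numbers` described above is effectively infinite, reads from it should always be bounded; for example:

```sql
-- Always pair system.numbers with LIMIT (or use the numbers() table function).
SELECT number FROM system.numbers LIMIT 10;
```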
diff --git a/docs/en/operations/system-tables/part_log.md b/docs/en/operations/system-tables/part_log.md index 1b567367c97..298667f895b 100644 --- a/docs/en/operations/system-tables/part_log.md +++ b/docs/en/operations/system-tables/part_log.md @@ -1,4 +1,4 @@ -# part_log {#system_tables-part-log} +# part_log The `system.part_log` table is created only if the [part_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-part-log) server setting is specified. diff --git a/docs/en/operations/system-tables/parts_columns.md b/docs/en/operations/system-tables/parts_columns.md index e87be3fcd43..0439da79ab3 100644 --- a/docs/en/operations/system-tables/parts_columns.md +++ b/docs/en/operations/system-tables/parts_columns.md @@ -1,4 +1,4 @@ -# parts_columns {#system_tables-parts_columns} +# parts_columns Contains information about parts and columns of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. diff --git a/docs/en/operations/system-tables/processes.md b/docs/en/operations/system-tables/processes.md index a77fbf2a109..b808e801819 100644 --- a/docs/en/operations/system-tables/processes.md +++ b/docs/en/operations/system-tables/processes.md @@ -1,4 +1,4 @@ -# processes {#system_tables-processes} +# processes This system table is used for implementing the `SHOW PROCESSLIST` query. diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md index a8fda41f7c2..80343a1cc2b 100644 --- a/docs/en/operations/system-tables/query_log.md +++ b/docs/en/operations/system-tables/query_log.md @@ -1,4 +1,4 @@ -# query_log {#system_tables-query_log} +# query_log Contains information about executed queries, for example, start time, duration of processing, error messages. diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md index 072a311b7db..2b4c4bab841 100644 --- a/docs/en/operations/system-tables/query_thread_log.md +++ b/docs/en/operations/system-tables/query_thread_log.md @@ -1,4 +1,4 @@ -# query_thread_log {#system_tables-query_thread_log} +# query_thread_log Contains information about threads that execute queries, for example, thread name, thread start time, duration of query processing. diff --git a/docs/en/operations/system-tables/query_views_log.md b/docs/en/operations/system-tables/query_views_log.md index 5aa69522869..007128f5619 100644 --- a/docs/en/operations/system-tables/query_views_log.md +++ b/docs/en/operations/system-tables/query_views_log.md @@ -1,4 +1,4 @@ -# query_views_log {#system_tables-query_views_log} +# query_views_log Contains information about the dependent views executed when running a query, for example, the view type or the execution time. diff --git a/docs/en/operations/system-tables/quota_limits.md b/docs/en/operations/system-tables/quota_limits.md index e1873ecfa92..e3fea42c009 100644 --- a/docs/en/operations/system-tables/quota_limits.md +++ b/docs/en/operations/system-tables/quota_limits.md @@ -1,4 +1,4 @@ -# quota_limits {#system_tables-quota_limits} +# quota_limits Contains information about maximums for all intervals of all quotas. Any number of rows or zero can correspond to one quota. 
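As an illustration of the `query_log` table described above, assuming the default configuration where query logging is enabled:

```sql
-- The most recent successfully finished queries.
SELECT event_time, query_duration_ms, query
FROM system.query_log
WHERE type = 'QueryFinish'
ORDER BY event_time DESC
LIMIT 5;
```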
diff --git a/docs/en/operations/system-tables/quota_usage.md b/docs/en/operations/system-tables/quota_usage.md index ad9f9b8c44f..059c073babb 100644 --- a/docs/en/operations/system-tables/quota_usage.md +++ b/docs/en/operations/system-tables/quota_usage.md @@ -1,4 +1,4 @@ -# quota_usage {#system_tables-quota_usage} +# quota_usage Quota usage by the current user: how much is used and how much is left. diff --git a/docs/en/operations/system-tables/quotas.md b/docs/en/operations/system-tables/quotas.md index 0a435919b14..2ef89f6749c 100644 --- a/docs/en/operations/system-tables/quotas.md +++ b/docs/en/operations/system-tables/quotas.md @@ -1,4 +1,4 @@ -# quotas {#system_tables-quotas} +# quotas Contains information about [quotas](../../operations/quotas.md). diff --git a/docs/en/operations/system-tables/quotas_usage.md b/docs/en/operations/system-tables/quotas_usage.md index 43811a75187..f295187f2ac 100644 --- a/docs/en/operations/system-tables/quotas_usage.md +++ b/docs/en/operations/system-tables/quotas_usage.md @@ -1,4 +1,4 @@ -# quotas_usage {#system_tables-quotas_usage} +# quotas_usage Quota usage by all users. diff --git a/docs/en/operations/system-tables/replicas.md b/docs/en/operations/system-tables/replicas.md index c65b0d294b0..c32014b8f48 100644 --- a/docs/en/operations/system-tables/replicas.md +++ b/docs/en/operations/system-tables/replicas.md @@ -1,4 +1,4 @@ -# replicas {#system_tables-replicas} +# replicas Contains information and status for replicated tables residing on the local server. This table can be used for monitoring. The table contains a row for every Replicated\* table. @@ -39,16 +39,16 @@ inserts_oldest_time: 2021-10-12 14:48:48 merges_oldest_time: 1970-01-01 03:00:00 part_mutations_oldest_time: 1970-01-01 03:00:00 oldest_part_to_get: 1_17_17_0 -oldest_part_to_merge_to: -oldest_part_to_mutate_to: +oldest_part_to_merge_to: +oldest_part_to_mutate_to: log_max_index: 206 log_pointer: 207 last_queue_update: 2021-10-12 14:50:08 absolute_delay: 99 total_replicas: 5 active_replicas: 5 -last_queue_update_exception: -zookeeper_exception: +last_queue_update_exception: +zookeeper_exception: replica_is_active: {'r1':1,'r2':1} ``` @@ -88,7 +88,7 @@ The next 4 columns have a non-zero value only where there is an active session w - `total_replicas` (`UInt8`) - The total number of known replicas of this table. - `active_replicas` (`UInt8`) - The number of replicas of this table that have a session in ClickHouse Keeper (i.e., the number of functioning replicas). - `last_queue_update_exception` (`String`) - When the queue contains broken entries. Especially important when ClickHouse breaks backward compatibility between versions and log entries written by newer versions aren't parseable by old versions. -- `zookeeper_exception` (`String`) - The last exception message, got if the error happened when fetching the info from ClickHouse Keeper. +- `zookeeper_exception` (`String`) - The last exception message, received if an error occurred when fetching the info from ClickHouse Keeper. - `replica_is_active` ([Map(String, UInt8)](../../sql-reference/data-types/map.md)) — Map between the replica name and whether the replica is active. If you request all the columns, the table may work a bit slowly, since several reads from ClickHouse Keeper are made for each row.
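A sketch of the kind of monitoring query the `replicas` table described above is intended for; the threshold is illustrative, not a recommendation:

```sql
-- Replicated tables that are read-only or noticeably behind.
SELECT database, table, absolute_delay, queue_size
FROM system.replicas
WHERE is_readonly OR absolute_delay > 30;
```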
diff --git a/docs/en/operations/system-tables/replicated_fetches.md b/docs/en/operations/system-tables/replicated_fetches.md index 438d1572109..fd6d7b54778 100644 --- a/docs/en/operations/system-tables/replicated_fetches.md +++ b/docs/en/operations/system-tables/replicated_fetches.md @@ -1,4 +1,4 @@ -# replicated_fetches {#system_tables-replicated_fetches} +# replicated_fetches Contains information about currently running background fetches. diff --git a/docs/en/operations/system-tables/replication_queue.md b/docs/en/operations/system-tables/replication_queue.md index 834f4a04757..ba2eac1a854 100644 --- a/docs/en/operations/system-tables/replication_queue.md +++ b/docs/en/operations/system-tables/replication_queue.md @@ -1,4 +1,4 @@ -# replication_queue {#system_tables-replication_queue} +# replication_queue Contains information about tasks from replication queues stored in ClickHouse Keeper or ZooKeeper for tables in the `ReplicatedMergeTree` family. diff --git a/docs/en/operations/system-tables/roles.md b/docs/en/operations/system-tables/roles.md index 1f8fe349c7f..6e1d112a6e5 100644 --- a/docs/en/operations/system-tables/roles.md +++ b/docs/en/operations/system-tables/roles.md @@ -1,4 +1,4 @@ -# roles {#system_tables-roles} +# roles Contains information about configured [roles](../../operations/access-rights.md#role-management). diff --git a/docs/en/operations/system-tables/row_policies.md b/docs/en/operations/system-tables/row_policies.md index 2bff037751b..d7869c62499 100644 --- a/docs/en/operations/system-tables/row_policies.md +++ b/docs/en/operations/system-tables/row_policies.md @@ -1,4 +1,4 @@ -# row_policies {#system_tables-row_policies} +# row_policies Contains filters for one particular table, as well as a list of roles and/or users which should use this row policy. diff --git a/docs/en/operations/system-tables/session_log.md b/docs/en/operations/system-tables/session_log.md index 9ee7e294bfd..a42f0e79fe7 100644 --- a/docs/en/operations/system-tables/session_log.md +++ b/docs/en/operations/system-tables/session_log.md @@ -1,4 +1,4 @@ -# session_log {#system_tables-session_log} +# session_log Contains information about all successful and failed login and logout events. diff --git a/docs/en/operations/system-tables/settings.md b/docs/en/operations/system-tables/settings.md index ce6f3cd4724..35af1e286d8 100644 --- a/docs/en/operations/system-tables/settings.md +++ b/docs/en/operations/system-tables/settings.md @@ -1,4 +1,4 @@ -# settings {#system-tables-system-settings} +# settings Contains information about session settings for the current user. diff --git a/docs/en/operations/system-tables/settings_profile_elements.md b/docs/en/operations/system-tables/settings_profile_elements.md index 5a010d6239a..9afde010d0f 100644 --- a/docs/en/operations/system-tables/settings_profile_elements.md +++ b/docs/en/operations/system-tables/settings_profile_elements.md @@ -1,4 +1,4 @@ -# settings_profile_elements {#system_tables-settings_profile_elements} +# settings_profile_elements Describes the content of the settings profile: diff --git a/docs/en/operations/system-tables/settings_profiles.md b/docs/en/operations/system-tables/settings_profiles.md index ab2020b375d..f14f8077143 100644 --- a/docs/en/operations/system-tables/settings_profiles.md +++ b/docs/en/operations/system-tables/settings_profiles.md @@ -1,4 +1,4 @@ -# settings_profiles {#system_tables-settings_profiles} +# settings_profiles Contains properties of configured setting profiles.
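For example, the `settings` table described above makes it easy to see which session settings deviate from their defaults:

```sql
-- Only the settings changed for the current session.
SELECT name, value, description
FROM system.settings
WHERE changed;
```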
diff --git a/docs/en/operations/system-tables/stack_trace.md b/docs/en/operations/system-tables/stack_trace.md index 2aa1c5af125..3cc3b4043e3 100644 --- a/docs/en/operations/system-tables/stack_trace.md +++ b/docs/en/operations/system-tables/stack_trace.md @@ -1,4 +1,4 @@ -# stack_trace {#system-tables_stack_trace} +# stack_trace Contains stack traces of all server threads. Allows developers to introspect the server state. diff --git a/docs/en/operations/system-tables/storage_policies.md b/docs/en/operations/system-tables/storage_policies.md index adbb2f8434e..85e745dd0f8 100644 --- a/docs/en/operations/system-tables/storage_policies.md +++ b/docs/en/operations/system-tables/storage_policies.md @@ -1,4 +1,4 @@ -# storage_policies {#system_tables-storage_policies} +# storage_policies Contains information about storage policies and volumes defined in the [server configuration](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). diff --git a/docs/en/operations/system-tables/table_engines.md b/docs/en/operations/system-tables/table_engines.md index d3ac8da1d70..f5da0777d7c 100644 --- a/docs/en/operations/system-tables/table_engines.md +++ b/docs/en/operations/system-tables/table_engines.md @@ -1,4 +1,4 @@ -# table_engines {#system-table-engines} +# table_engines Contains description of table engines supported by server and their feature support information. diff --git a/docs/en/operations/system-tables/tables.md b/docs/en/operations/system-tables/tables.md index 6cf1490f14e..804b5862b34 100644 --- a/docs/en/operations/system-tables/tables.md +++ b/docs/en/operations/system-tables/tables.md @@ -1,4 +1,4 @@ -# tables {#system-tables} +# tables Contains metadata of each table that the server knows about. diff --git a/docs/en/operations/system-tables/text_log.md b/docs/en/operations/system-tables/text_log.md index e4967dc8d0b..f2d2042f5da 100644 --- a/docs/en/operations/system-tables/text_log.md +++ b/docs/en/operations/system-tables/text_log.md @@ -1,4 +1,4 @@ -# text_log {#system_tables-text_log} +# text_log Contains logging entries. The logging level which goes to this table can be limited to the `text_log.level` server setting. diff --git a/docs/en/operations/system-tables/time_zones.md b/docs/en/operations/system-tables/time_zones.md index 899e115152f..78ce02ba3ae 100644 --- a/docs/en/operations/system-tables/time_zones.md +++ b/docs/en/operations/system-tables/time_zones.md @@ -1,4 +1,4 @@ -# time_zones {#system-time_zones} +# time_zones Contains a list of time zones that are supported by the ClickHouse server. This list of timezones might vary depending on the version of ClickHouse. diff --git a/docs/en/operations/system-tables/trace_log.md b/docs/en/operations/system-tables/trace_log.md index ace5662e919..8d9936b5097 100644 --- a/docs/en/operations/system-tables/trace_log.md +++ b/docs/en/operations/system-tables/trace_log.md @@ -1,4 +1,4 @@ -# trace_log {#system_tables-trace_log} +# trace_log Contains stack traces collected by the sampling query profiler. diff --git a/docs/en/operations/system-tables/users.md b/docs/en/operations/system-tables/users.md index 95691f4497c..4543b35c0ff 100644 --- a/docs/en/operations/system-tables/users.md +++ b/docs/en/operations/system-tables/users.md @@ -1,4 +1,4 @@ -# users {#system_tables-users} +# users Contains a list of [user accounts](../../operations/access-rights.md#user-account-management) configured at the server. 
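As an illustration of the feature-support flags exposed by the `table_engines` table described above:

```sql
SELECT name, supports_replication, supports_skipping_indices, supports_ttl
FROM system.table_engines
WHERE name = 'MergeTree';
```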
diff --git a/docs/en/operations/system-tables/zookeeper.md b/docs/en/operations/system-tables/zookeeper.md index ec67d2780e3..923676b31e6 100644 --- a/docs/en/operations/system-tables/zookeeper.md +++ b/docs/en/operations/system-tables/zookeeper.md @@ -1,4 +1,4 @@ -# zookeeper {#system-zookeeper} +# zookeeper The table does not exist unless ClickHouse Keeper or ZooKeeper is configured. The `system.zookeeper` table exposes data from the Keeper cluster defined in the config. The query must either have a ‘path =’ condition or a `path IN` condition set with the `WHERE` clause as shown below. This corresponds to the path of the children that you want to get data for. diff --git a/docs/en/operations/system-tables/zookeeper_log.md b/docs/en/operations/system-tables/zookeeper_log.md index 919c4245d5d..2a5d7dbe8d2 100644 --- a/docs/en/operations/system-tables/zookeeper_log.md +++ b/docs/en/operations/system-tables/zookeeper_log.md @@ -1,4 +1,4 @@ -# zookeeper_log {#system-zookeeper_log} +# zookeeper_log This table contains information about the parameters of the request to the ZooKeeper server and the response from it. @@ -70,7 +70,7 @@ xid: 10858 has_watch: 1 op_num: List path: /clickhouse/task_queue/ddl -data: +data: is_ephemeral: 0 is_sequential: 0 version: ᴺᵁᴸᴸ @@ -80,7 +80,7 @@ zxid: 0 error: ᴺᵁᴸᴸ watch_type: ᴺᵁᴸᴸ watch_state: ᴺᵁᴸᴸ -path_created: +path_created: stat_czxid: 0 stat_mzxid: 0 stat_pzxid: 0 @@ -102,7 +102,7 @@ xid: 10858 has_watch: 1 op_num: List path: /clickhouse/task_queue/ddl -data: +data: is_ephemeral: 0 is_sequential: 0 version: ᴺᵁᴸᴸ @@ -112,7 +112,7 @@ zxid: 16926267 error: ZOK watch_type: ᴺᵁᴸᴸ watch_state: ᴺᵁᴸᴸ -path_created: +path_created: stat_czxid: 16925469 stat_mzxid: 16925469 stat_pzxid: 16926179 diff --git a/docs/en/operations/tips.md b/docs/en/operations/tips.md index a0a0391fb09..3c9f3948cea 100644 --- a/docs/en/operations/tips.md +++ b/docs/en/operations/tips.md @@ -3,9 +3,9 @@ sidebar_position: 58 sidebar_label: Usage Recommendations --- -# Usage Recommendations {#usage-recommendations} +# Usage Recommendations -## CPU Scaling Governor {#cpu-scaling-governor} +## CPU Scaling Governor Always use the `performance` scaling governor. The `on-demand` scaling governor works much worse with constantly high demand. @@ -33,7 +33,7 @@ $ echo 0 | sudo tee /proc/sys/vm/overcommit_memory Use `perf top` to watch the time spent in the kernel for memory management. Permanent huge pages also do not need to be allocated. -:::warning +:::warning If your system has less than 16 GB of RAM, you may experience various memory exceptions because default settings do not match this amount of memory. The recommended amount of RAM is 32 GB or more. You can use ClickHouse in a system with a small amount of RAM, even with 2 GB of RAM, but it requires additional tuning and can ingest at a low rate. ::: diff --git a/docs/en/operations/update.md b/docs/en/operations/update.md index fb5fb7803a9..95e9bed675b 100644 --- a/docs/en/operations/update.md +++ b/docs/en/operations/update.md @@ -3,7 +3,7 @@ sidebar_position: 47 sidebar_label: ClickHouse Upgrade --- -# ClickHouse Upgrade {#clickhouse-upgrade} +# ClickHouse Upgrade If ClickHouse was installed from `deb` packages, execute the following commands on the server: @@ -15,7 +15,7 @@ $ sudo service clickhouse-server restart If you installed ClickHouse using something other than the recommended `deb` packages, use the appropriate update method. 
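A minimal sketch of the mandatory `path` condition for the `system.zookeeper` table described above; `/clickhouse` is a typical root but depends on your configuration:

```sql
-- Children of one Keeper node; the WHERE clause must pin down a path.
SELECT name, value, numChildren
FROM system.zookeeper
WHERE path = '/clickhouse';
```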
-:::note +:::note You can update multiple servers at once as long as there is no moment when all replicas of one shard are offline. ::: diff --git a/docs/en/sql-reference/aggregate-functions/_category_.yml b/docs/en/sql-reference/aggregate-functions/_category_.yml index df57c1f1651..9e289e6c83b 100644 --- a/docs/en/sql-reference/aggregate-functions/_category_.yml +++ b/docs/en/sql-reference/aggregate-functions/_category_.yml @@ -5,3 +5,5 @@ collapsed: true link: type: doc id: en/sql-reference/aggregate-functions/index +customProps: + description: List of Aggregate Functions in ClickHouse diff --git a/docs/en/sql-reference/aggregate-functions/combinators.md b/docs/en/sql-reference/aggregate-functions/combinators.md index 6a8c178919c..d89dad1b94d 100644 --- a/docs/en/sql-reference/aggregate-functions/combinators.md +++ b/docs/en/sql-reference/aggregate-functions/combinators.md @@ -3,11 +3,11 @@ sidebar_position: 37 sidebar_label: Combinators --- -# Aggregate Function Combinators {#aggregate_functions_combinators} +# Aggregate Function Combinators The name of an aggregate function can have a suffix appended to it. This changes the way the aggregate function works. -## -If {#agg-functions-combinator-if} +## -If The suffix -If can be appended to the name of any aggregate function. In this case, the aggregate function accepts an extra argument – a condition (UInt8 type). The aggregate function processes only the rows that trigger the condition. If the condition was not triggered even once, it returns a default value (usually zeros or empty strings). @@ -15,7 +15,7 @@ Examples: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTi With conditional aggregate functions, you can calculate aggregates for several conditions at once, without using subqueries and `JOIN`s. For example, conditional aggregate functions can be used to implement the segment comparison functionality. -## -Array {#agg-functions-combinator-array} +## -Array The -Array suffix can be appended to any aggregate function. In this case, the aggregate function takes arguments of the ‘Array(T)’ type (arrays) instead of ‘T’ type arguments. If the aggregate function accepts multiple arguments, these must be arrays of equal length. When processing arrays, the aggregate function works like the original aggregate function across all array elements. @@ -25,13 +25,13 @@ Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘a -If and -Array can be combined. However, ‘Array’ must come first, then ‘If’. Examples: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Due to this order, the ‘cond’ argument won’t be an array. -## -Map {#agg-functions-combinator-map} +## -Map The -Map suffix can be appended to any aggregate function. This will create an aggregate function which gets Map type as an argument, and aggregates values of each key of the map separately using the specified aggregate function. The result is also of a Map type. Examples: `sumMap(map(1,1))`, `avgMap(map('a', 1))`. -## -SimpleState {#agg-functions-combinator-simplestate} +## -SimpleState If you apply this combinator, the aggregate function returns the same value but with a different type. This is a [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md) that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) tables.
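A small sketch of the `-If` and `-Array` combinators described above in action:

```sql
SELECT
    countIf(number % 2 = 0)   AS even_count,  -- counts only rows matching the condition
    avgIf(number, number > 4) AS avg_of_tail, -- averages numbers 5..9, giving 7
    sumArray([1, 2, 3])       AS arr_total    -- sums the array elements on every row (60 total)
FROM numbers(10);
```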
@@ -65,7 +65,7 @@ Result: └──────────────────────────────────────┴───┘ ``` -## -State {#agg-functions-combinator-state} +## -State If you apply this combinator, the aggregate function does not return the resulting value (such as the number of unique values for the [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later. @@ -77,24 +77,24 @@ To work with these states, use: - [-Merge](#aggregate_functions_combinators-merge) combinator. - [-MergeState](#aggregate_functions_combinators-mergestate) combinator. -## -Merge {#aggregate_functions_combinators-merge} +## -Merge If you apply this combinator, the aggregate function takes the intermediate aggregation state as an argument, combines the states to finish aggregation, and returns the resulting value. -## -MergeState {#aggregate_functions_combinators-mergestate} +## -MergeState Merges the intermediate aggregation states in the same way as the -Merge combinator. However, it does not return the resulting value, but an intermediate aggregation state, similar to the -State combinator. -## -ForEach {#agg-functions-combinator-foreach} +## -ForEach Converts an aggregate function for tables into an aggregate function for arrays that aggregates the corresponding array items and returns an array of results. For example, `sumForEach` for the arrays `[1, 2]`, `[3, 4, 5]` and `[6, 7]` returns the result `[10, 13, 5]` after adding together the corresponding array items. -## -Distinct {#agg-functions-combinator-distinct} +## -Distinct Every unique combination of arguments will be aggregated only once. Repeating values are ignored. Examples: `sum(DISTINCT x)`, `groupArray(DISTINCT x)`, `corrStableDistinct(DISTINCT x, y)` and so on. -## -OrDefault {#agg-functions-combinator-ordefault} +## -OrDefault Changes the behavior of an aggregate function. @@ -154,7 +154,7 @@ Result: └───────────────────────────────────┘ ``` -## -OrNull {#agg-functions-combinator-ornull} +## -OrNull Changes the behavior of an aggregate function. @@ -217,7 +217,7 @@ Result: └────────────────────────────────┘ ``` -## -Resample {#agg-functions-combinator-resample} +## -Resample Lets you divide data into groups, and then separately aggregates the data in those groups. Groups are created by splitting the values from one column into intervals. diff --git a/docs/en/sql-reference/aggregate-functions/index.md b/docs/en/sql-reference/aggregate-functions/index.md index 1e6cc0f88c2..2a13e9a0bae 100644 --- a/docs/en/sql-reference/aggregate-functions/index.md +++ b/docs/en/sql-reference/aggregate-functions/index.md @@ -13,7 +13,7 @@ ClickHouse also supports: - [Combinators](../../sql-reference/aggregate-functions/combinators.md#aggregate_functions_combinators), which change the behavior of aggregate functions. -## NULL Processing {#null-processing} +## NULL Processing During aggregation, all `NULL`s are skipped.
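A minimal sketch of storing intermediate states with `-State` and finishing them with `-Merge`, as described above:

```sql
-- Ten partial uniq states are merged into the final count (100).
SELECT uniqMerge(state) AS total_uniques
FROM
(
    SELECT uniqState(number) AS state
    FROM numbers(100)
    GROUP BY number % 10
);
```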
diff --git a/docs/en/sql-reference/aggregate-functions/parametric-functions.md b/docs/en/sql-reference/aggregate-functions/parametric-functions.md index 7708bcb8129..112f94e9261 100644 --- a/docs/en/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/en/sql-reference/aggregate-functions/parametric-functions.md @@ -3,11 +3,11 @@ sidebar_position: 38 sidebar_label: Parametric --- -# Parametric Aggregate Functions {#aggregate_functions_parametric} +# Parametric Aggregate Functions Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. The first is for parameters, and the second is for arguments. -## histogram {#histogram} +## histogram Calculates an adaptive histogram. It does not guarantee precise results. @@ -81,7 +81,7 @@ FROM In this case, you should remember that you do not know the histogram bin borders. -## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch} +## sequenceMatch(pattern)(timestamp, cond1, cond2, …) Checks whether the sequence contains an event chain that matches the pattern. @@ -89,7 +89,7 @@ Checks whether the sequence contains an event chain that matches the pattern. sequenceMatch(pattern)(timestamp, cond1, cond2, ...) ``` -:::warning +:::warning Events that occur at the same second may lie in the sequence in an undefined order affecting the result. ::: @@ -171,11 +171,11 @@ SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM - [sequenceCount](#function-sequencecount) -## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount} +## sequenceCount(pattern)(time, cond1, cond2, …) Counts the number of event chains that matched the pattern. The function searches event chains that do not overlap. It starts to search for the next chain after the current chain is matched. -:::warning +:::warning Events that occur at the same second may lie in the sequence in an undefined order affecting the result. ::: @@ -230,7 +230,7 @@ SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t - [sequenceMatch](#function-sequencematch) -## windowFunnel {#windowfunnel} +## windowFunnel Searches for event chains in a sliding time window and calculates the maximum number of events that occurred from the chain. @@ -325,7 +325,7 @@ Result: └───────┴───┘ ``` -## retention {#retention} +## retention The function takes as arguments a set of 1 to 32 conditions of type `UInt8` that indicate whether a certain condition was met for the event. Any condition can be specified as an argument (as in [WHERE](../../sql-reference/statements/select/where.md#select-where)). @@ -484,7 +484,7 @@ Where: - `r2` - the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-02 (`cond1` and `cond2` conditions). - `r3` - the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions). -## uniqUpTo(N)(x) {#uniquptonx} +## uniqUpTo(N)(x) Calculates the number of different argument values if it is less than or equal to N. If the number of different argument values is greater than N, it returns N + 1.
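A self-contained sketch of `sequenceMatch` as described above, using the `values` table function in place of a real events table:

```sql
-- Event 1 is followed (not necessarily immediately) by event 2 along the time axis.
SELECT sequenceMatch('(?1).*(?2)')(time, number = 1, number = 2)
FROM values('time UInt32, number UInt8', (1, 1), (2, 3), (3, 2));
```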
@@ -505,11 +505,11 @@ Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= ``` -## sumMapFiltered(keys_to_keep)(keys, values) {#summapfilteredkeys-to-keepkeys-values} +## sumMapFiltered(keys_to_keep)(keys, values) Same behavior as [sumMap](../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap) except that an array of keys is passed as a parameter. This can be especially useful when working with a high cardinality of keys. -## sequenceNextNode {#sequenceNextNode} +## sequenceNextNode Returns a value of the next event that matched an event chain. diff --git a/docs/en/sql-reference/aggregate-functions/reference/any.md b/docs/en/sql-reference/aggregate-functions/reference/any.md index 3b5539c5b8d..096f1415d11 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/any.md +++ b/docs/en/sql-reference/aggregate-functions/reference/any.md @@ -2,7 +2,7 @@ sidebar_position: 6 --- -# any {#agg_function-any} +# any Selects the first encountered value. The query can be executed in any order and even in a different order each time, so the result of this function is indeterminate. diff --git a/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md b/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md index 491754453e3..ab19b145ddc 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md +++ b/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md @@ -2,7 +2,7 @@ sidebar_position: 103 --- -# anyHeavy {#anyheavyx} +# anyHeavy Selects a frequently occurring value using the [heavy hitters](http://www.cs.umd.edu/~samir/498/karp.pdf) algorithm. If there is a value that occurs in more than half the cases in each of the query’s execution threads, this value is returned. Normally, the result is nondeterministic. diff --git a/docs/en/sql-reference/aggregate-functions/reference/anylast.md b/docs/en/sql-reference/aggregate-functions/reference/anylast.md index 2a01a587f70..06d7603853f 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/anylast.md +++ b/docs/en/sql-reference/aggregate-functions/reference/anylast.md @@ -2,7 +2,7 @@ sidebar_position: 104 --- -## anyLast {#anylastx} +## anyLast Selects the last value encountered. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function. diff --git a/docs/en/sql-reference/aggregate-functions/reference/argmax.md b/docs/en/sql-reference/aggregate-functions/reference/argmax.md index f09bcd0bba2..42da27e320f 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/argmax.md +++ b/docs/en/sql-reference/aggregate-functions/reference/argmax.md @@ -2,7 +2,7 @@ sidebar_position: 106 --- -# argMax {#agg-function-argmax} +# argMax Calculates the `arg` value for a maximum `val` value. If there are several different values of `arg` for maximum values of `val`, returns the first of these values encountered. diff --git a/docs/en/sql-reference/aggregate-functions/reference/argmin.md b/docs/en/sql-reference/aggregate-functions/reference/argmin.md index 926fda5a512..2d3ad337d3e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/argmin.md +++ b/docs/en/sql-reference/aggregate-functions/reference/argmin.md @@ -2,7 +2,7 @@ sidebar_position: 105 --- -# argMin {#agg-function-argmin} +# argMin Calculates the `arg` value for a minimum `val` value. If there are several different values of `arg` for minimum values of `val`, returns the first of these values encountered.
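A self-contained sketch of `argMax` as described above (and, symmetrically, `argMin`):

```sql
-- Returns 'b', the name on the row with the maximum score.
SELECT argMax(name, score)
FROM values('name String, score Int32', ('a', 1), ('b', 3), ('c', 2));
```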
diff --git a/docs/en/sql-reference/aggregate-functions/reference/avg.md b/docs/en/sql-reference/aggregate-functions/reference/avg.md index b7b5e9fbed4..63702d56111 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/avg.md +++ b/docs/en/sql-reference/aggregate-functions/reference/avg.md @@ -2,7 +2,7 @@ sidebar_position: 5 --- -# avg {#agg_function-avg} +# avg Calculates the arithmetic mean. diff --git a/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md b/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md index 126c0c2f1d7..dbcfd3e3071 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md @@ -2,7 +2,7 @@ sidebar_position: 107 --- -# avgWeighted {#avgweighted} +# avgWeighted Calculates the [weighted arithmetic mean](https://en.wikipedia.org/wiki/Weighted_arithmetic_mean). diff --git a/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md b/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md index e836dbe868a..e497d72b519 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md +++ b/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md @@ -2,7 +2,7 @@ sidebar_position: 250 --- -# categoricalInformationValue {#categoricalinformationvalue} +# categoricalInformationValue Calculates the value of `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` for each category. diff --git a/docs/en/sql-reference/aggregate-functions/reference/corr.md b/docs/en/sql-reference/aggregate-functions/reference/corr.md index c6d7fd5baed..df491968309 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/corr.md +++ b/docs/en/sql-reference/aggregate-functions/reference/corr.md @@ -2,12 +2,12 @@ sidebar_position: 107 --- -# corr {#corrx-y} +# corr Syntax: `corr(x, y)` Calculates the Pearson correlation coefficient: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`. -:::note +:::note This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `corrStable` function. It works slower but provides a lower computational error. ::: \ No newline at end of file diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md index 8df4aef9d03..1d6080f683d 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/count.md +++ b/docs/en/sql-reference/aggregate-functions/reference/count.md @@ -2,7 +2,7 @@ sidebar_position: 1 --- -# count {#agg_function-count} +# count Counts the number of rows or not-NULL values. diff --git a/docs/en/sql-reference/aggregate-functions/reference/covarpop.md b/docs/en/sql-reference/aggregate-functions/reference/covarpop.md index 363a98c3f16..7858d53bbbd 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/covarpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/covarpop.md @@ -2,12 +2,12 @@ sidebar_position: 36 --- -# covarPop {#covarpop} +# covarPop Syntax: `covarPop(x, y)` Calculates the value of `Σ((x - x̅)(y - y̅)) / n`. -:::note +:::note This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `covarPopStable` function. It works slower but provides a lower computational error. 
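A worked example of `avgWeighted` described above:

```sql
-- (4*1 + 1*0 + 10*2) / (1 + 0 + 2) = 8
SELECT avgWeighted(x, w)
FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2));
```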
::: \ No newline at end of file diff --git a/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md b/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md index 977b3f3b5b4..fb25aaa9602 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md @@ -2,12 +2,12 @@ sidebar_position: 37 --- -# covarSamp {#covarsamp} +# covarSamp Calculates the value of `Σ((x - x̅)(y - y̅)) / (n - 1)`. Returns Float64. When `n <= 1`, returns +∞. -:::note +:::note This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `covarSampStable` function. It works slower but provides a lower computational error. ::: \ No newline at end of file diff --git a/docs/en/sql-reference/aggregate-functions/reference/deltasum.md b/docs/en/sql-reference/aggregate-functions/reference/deltasum.md index ac35938e26d..d1318c6d830 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/deltasum.md +++ b/docs/en/sql-reference/aggregate-functions/reference/deltasum.md @@ -2,11 +2,11 @@ sidebar_position: 141 --- -# deltaSum {#agg_functions-deltasum} +# deltaSum Sums the arithmetic difference between consecutive rows. If the difference is negative, it is ignored. -:::note +:::note The underlying data must be sorted for this function to work properly. If you would like to use this function in a [materialized view](../../../sql-reference/statements/create/view.md#materialized), you most likely want to use the [deltaSumTimestamp](../../../sql-reference/aggregate-functions/reference/deltasumtimestamp.md#agg_functions-deltasumtimestamp) method instead. ::: @@ -68,6 +68,6 @@ Result: └─────────────────────────────────────┘ ``` -## See Also {#see-also} +## See Also - [runningDifference](../../functions/other-functions.md#other_functions-runningdifference) diff --git a/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md b/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md index e1024e58328..f8283178c6e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md @@ -2,7 +2,7 @@ sidebar_position: 141 --- -# deltaSumTimestamp {#agg_functions-deltasumtimestamp} +# deltaSumTimestamp Adds the difference between consecutive rows. If the difference is negative, it is ignored. diff --git a/docs/en/sql-reference/aggregate-functions/reference/entropy.md b/docs/en/sql-reference/aggregate-functions/reference/entropy.md index 9f1576c3ed8..b563dbb1b79 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/entropy.md +++ b/docs/en/sql-reference/aggregate-functions/reference/entropy.md @@ -2,7 +2,7 @@ sidebar_position: 302 --- -# entropy {#entropy} +# entropy Calculates [Shannon entropy](https://en.wikipedia.org/wiki/Entropy_(information_theory)) of a column of values. 
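A worked example of `deltaSum` described above, on an already sorted sequence:

```sql
-- Only positive differences are summed: (2 - 1) + (3 - 2) = 2.
SELECT deltaSum(arrayJoin([1, 2, 3]));
```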
diff --git a/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md b/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md index 2337a0c8dab..7db8f2b6ef2 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md +++ b/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md @@ -2,9 +2,9 @@ sidebar_position: 108 --- -## exponentialMovingAverage {#exponential-moving-average} +## exponentialMovingAverage -Сalculates the exponential moving average of values for the determined time. +Calculates the exponential moving average of values for the specified time period. **Syntax** @@ -58,7 +58,7 @@ Input table: └──────────────┴────────────┘ ``` -Query: +Query: ```sql SELECT exponentialMovingAverage(5)(temperature, timestamp); ``` @@ -72,7 +72,7 @@ Result: └───────────────────────────────────────────────────────┘ ``` -Query: +Query: ```sql SELECT diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparray.md b/docs/en/sql-reference/aggregate-functions/reference/grouparray.md index 072e460da26..f1d9e60f778 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparray.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparray.md @@ -2,7 +2,7 @@ sidebar_position: 110 --- -# groupArray {#agg_function-grouparray} +# groupArray Syntax: `groupArray(x)` or `groupArray(max_size)(x)` diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md b/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md index 0699326725e..b5b3656860d 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md @@ -2,7 +2,7 @@ sidebar_position: 112 --- -# groupArrayInsertAt {#grouparrayinsertat} +# groupArrayInsertAt Inserts a value into the array at the specified position. diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md index dc3cc74721e..5c58c314577 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md @@ -2,7 +2,7 @@ sidebar_position: 114 --- -# groupArrayMovingAvg {#agg_function-grouparraymovingavg} +# groupArrayMovingAvg Calculates the moving average of input values. diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md index 563280b7dec..93dc4e5da47 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md @@ -2,7 +2,7 @@ sidebar_position: 113 --- -# groupArrayMovingSum {#agg_function-grouparraymovingsum} +# groupArrayMovingSum Calculates the moving sum of input values. diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md index f0406ddc93c..ca54d49d827 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md @@ -2,7 +2,7 @@ sidebar_position: 114 --- -# groupArraySample {#grouparraysample} +# groupArraySample Creates an array of sample argument values.
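A small sketch of the `groupArray*` family described above; note that with parallel input the element order of `groupArray` is not guaranteed:

```sql
SELECT
    groupArray(number)          AS arr,        -- [0, 1, 2, 3, 4]
    groupArrayMovingSum(number) AS running_sum -- [0, 1, 3, 6, 10]
FROM numbers(5);
```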
The size of the resulting array is limited to `max_size` elements. Argument values are selected and added to the array randomly. diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md index 0ebb9aec495..f3b89d530af 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md @@ -2,7 +2,7 @@ sidebar_position: 125 --- -# groupBitAnd {#groupbitand} +# groupBitAnd Applies bitwise `AND` to a series of numbers. diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md index 7f1fee6a9f0..39373c59aba 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md @@ -2,7 +2,7 @@ sidebar_position: 128 --- -# groupBitmap {#groupbitmap} +# groupBitmap Performs bitmap or aggregate calculations on an unsigned integer column and returns the cardinality as UInt64; if the -State suffix is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md). diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md index 89c94547f8b..377b81c4ebf 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md @@ -2,7 +2,7 @@ sidebar_position: 129 --- -# groupBitmapAnd {#groupbitmapand} +# groupBitmapAnd Calculates the AND of a bitmap column and returns the cardinality as UInt64; if the -State suffix is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md). diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md index 172a3bb29ac..7e3973a00f0 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md @@ -2,7 +2,7 @@ sidebar_position: 130 --- -# groupBitmapOr {#groupbitmapor} +# groupBitmapOr Calculates the OR of a bitmap column and returns the cardinality as UInt64; if the -State suffix is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md). This is equivalent to `groupBitmapMerge`. diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md index 52c45815cc5..13548665c98 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md @@ -2,7 +2,7 @@ sidebar_position: 131 --- -# groupBitmapXor {#groupbitmapxor} +# groupBitmapXor Calculates the XOR of a bitmap column and returns the cardinality as UInt64; if the -State suffix is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md).
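A self-contained sketch of `groupBitmap` described above:

```sql
-- Cardinality of the distinct IDs: 3.
SELECT groupBitmap(user_id)
FROM values('user_id UInt32', 1, 1, 2, 3);
```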
diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md index c1ee1c40894..fc3569b3e98 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md @@ -2,7 +2,7 @@ sidebar_position: 126 --- -# groupBitOr {#groupbitor} +# groupBitOr Applies bitwise `OR` to a series of numbers. diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md index 472bcdf65c1..70f080827cc 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md @@ -2,7 +2,7 @@ sidebar_position: 127 --- -# groupBitXor {#groupbitxor} +# groupBitXor Applies bitwise `XOR` to a series of numbers. diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md b/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md index 9b5058032e5..65edbbdf3e9 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md @@ -2,7 +2,7 @@ sidebar_position: 111 --- -# groupUniqArray {#groupuniqarray} +# groupUniqArray Syntax: `groupUniqArray(x)` or `groupUniqArray(max_size)(x)` diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md index cd71bca2556..4854a19f475 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/index.md +++ b/docs/en/sql-reference/aggregate-functions/reference/index.md @@ -4,7 +4,7 @@ sidebar_position: 36 toc_hidden: true --- -# List of Aggregate Functions {#aggregate-functions-reference} +# List of Aggregate Functions Standard aggregate functions: diff --git a/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md b/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md index 33c5686cbbc..54469f3b56d 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md +++ b/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md @@ -3,7 +3,7 @@ sidebar_position: 146 sidebar_label: intervalLengthSum --- -# intervalLengthSum {#agg_function-intervallengthsum} +# intervalLengthSum Calculates the total length of the union of all ranges (segments on a numeric axis). @@ -18,7 +18,7 @@ intervalLengthSum(start, end) - `start` — The starting value of the interval. [Int32](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64), [Int64](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64), [UInt32](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64), [UInt64](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64), [Float32](../../../sql-reference/data-types/float.md#float32-float64), [Float64](../../../sql-reference/data-types/float.md#float32-float64), [DateTime](../../../sql-reference/data-types/datetime.md#data_type-datetime) or [Date](../../../sql-reference/data-types/date.md#data_type-date). - `end` — The ending value of the interval.
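A worked example of `intervalLengthSum` described above:

```sql
-- [1, 3] and [2, 5] merge into [1, 5] (length 4); [10, 11] adds 1, so the result is 5.
SELECT intervalLengthSum(start, end)
FROM values('start Int64, end Int64', (1, 3), (2, 5), (10, 11));
```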
[Int32](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64), [Int64](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64), [UInt32](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64), [UInt64](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64), [Float32](../../../sql-reference/data-types/float.md#float32-float64), [Float64](../../../sql-reference/data-types/float.md#float32-float64), [DateTime](../../../sql-reference/data-types/datetime.md#data_type-datetime) or [Date](../../../sql-reference/data-types/date.md#data_type-date). -:::note +:::note Arguments must be of the same data type. Otherwise, an exception will be thrown. ::: diff --git a/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md b/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md index 5640e69ba7c..c21e780991c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md @@ -2,7 +2,7 @@ sidebar_position: 153 --- -# kurtPop {#kurtpop} +# kurtPop Computes the [kurtosis](https://en.wikipedia.org/wiki/Kurtosis) of a sequence. diff --git a/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md b/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md index c0768edaf2d..601eebd6d9c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md @@ -2,7 +2,7 @@ sidebar_position: 154 --- -# kurtSamp {#kurtsamp} +# kurtSamp Computes the [sample kurtosis](https://en.wikipedia.org/wiki/Kurtosis) of a sequence. diff --git a/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md b/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md index 32e56b8de10..a9661fea8f9 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md @@ -3,7 +3,7 @@ sidebar_position: 310 sidebar_label: mannWhitneyUTest --- -# mannWhitneyUTest {#mannwhitneyutest} +# mannWhitneyUTest Applies the Mann-Whitney rank test to samples from two populations. diff --git a/docs/en/sql-reference/aggregate-functions/reference/max.md b/docs/en/sql-reference/aggregate-functions/reference/max.md index 845d0c5ecee..481e8a4a21b 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/max.md +++ b/docs/en/sql-reference/aggregate-functions/reference/max.md @@ -2,7 +2,7 @@ sidebar_position: 3 --- -# max {#agg_function-max} +# max Aggregate function that calculates the maximum across a group of values. 
diff --git a/docs/en/sql-reference/aggregate-functions/reference/maxmap.md b/docs/en/sql-reference/aggregate-functions/reference/maxmap.md index 243a3375552..6f53d9f0ae0 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/maxmap.md +++ b/docs/en/sql-reference/aggregate-functions/reference/maxmap.md @@ -2,7 +2,7 @@ sidebar_position: 143 --- -# maxMap {#agg_functions-maxmap} +# maxMap Syntax: `maxMap(key, value)` or `maxMap(Tuple(key, value))` diff --git a/docs/en/sql-reference/aggregate-functions/reference/meanztest.md b/docs/en/sql-reference/aggregate-functions/reference/meanztest.md index 02b89b1b31d..d129e5722bc 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/meanztest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/meanztest.md @@ -3,7 +3,7 @@ sidebar_position: 303 sidebar_label: meanZTest --- -# meanZTest {#meanztest} +# meanZTest Applies mean z-test to samples from two populations. diff --git a/docs/en/sql-reference/aggregate-functions/reference/median.md b/docs/en/sql-reference/aggregate-functions/reference/median.md index 3e84b4b169c..1c798f7bbf9 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/median.md +++ b/docs/en/sql-reference/aggregate-functions/reference/median.md @@ -2,7 +2,7 @@ sidebar_position: 212 --- -# median {#median} +# median The `median*` functions are the aliases for the corresponding `quantile*` functions. They calculate median of a numeric data sample. diff --git a/docs/en/sql-reference/aggregate-functions/reference/min.md b/docs/en/sql-reference/aggregate-functions/reference/min.md index 0525066e9f3..7252494f5ca 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/min.md +++ b/docs/en/sql-reference/aggregate-functions/reference/min.md @@ -2,7 +2,7 @@ sidebar_position: 2 --- -## min {#agg_function-min} +## min Aggregate function that calculates the minimum across a group of values. diff --git a/docs/en/sql-reference/aggregate-functions/reference/minmap.md b/docs/en/sql-reference/aggregate-functions/reference/minmap.md index 8a4d50dd46c..61c7f4358b6 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/minmap.md +++ b/docs/en/sql-reference/aggregate-functions/reference/minmap.md @@ -2,7 +2,7 @@ sidebar_position: 142 --- -# minMap {#agg_functions-minmap} +# minMap Syntax: `minMap(key, value)` or `minMap(Tuple(key, value))` diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantile.md b/docs/en/sql-reference/aggregate-functions/reference/quantile.md index 6a0479da77f..99346a50b33 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantile.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantile.md @@ -2,7 +2,7 @@ sidebar_position: 200 --- -# quantile {#quantile} +# quantile Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md index f0bd51f0add..01fd1ea8fb2 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -2,7 +2,7 @@ sidebar_position: 209 --- -# quantileBFloat16 {#quantilebfloat16} +# quantileBFloat16 Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. 
`bfloat16` is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits. The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates `bfloat16` quantile value and converts the result to a 64-bit float by appending zero bits. @@ -58,7 +58,7 @@ Result: ``` Note that all floating point values in the example are truncated to 1.0 when converting to `bfloat16`. -# quantileBFloat16Weighted {#quantilebfloat16weighted} +# quantileBFloat16Weighted Like `quantileBFloat16` but takes into account the weight of each sequence member. diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md b/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md index bb23ce63cea..50e0f089b72 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md @@ -2,7 +2,7 @@ sidebar_position: 206 --- -# quantileDeterministic {#quantiledeterministic} +# quantileDeterministic Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md index b3a384b0cfd..ba2fe5c1eaa 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md @@ -2,9 +2,9 @@ sidebar_position: 202 --- -# quantileExact Functions {#quantileexact-functions} +# quantileExact Functions -## quantileExact {#quantileexact} +## quantileExact Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. @@ -51,7 +51,7 @@ Result: └───────────────────────┘ ``` -## quantileExactLow {#quantileexactlow} +## quantileExactLow Similar to `quantileExact`, this computes the exact [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. @@ -109,7 +109,7 @@ Result: │ 4 │ └──────────────────────────┘ ``` -## quantileExactHigh {#quantileexacthigh} +## quantileExactHigh Similar to `quantileExact`, this computes the exact [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. @@ -160,7 +160,7 @@ Result: └───────────────────────────┘ ``` -## quantileExactExclusive {#quantileexactexclusive} +## quantileExactExclusive Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. @@ -212,7 +212,7 @@ Result: └────────────────────────────────┘ ``` -## quantileExactInclusive {#quantileexactinclusive} +## quantileExactInclusive Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md index 4740d4a26f8..593ad3a0e4c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md @@ -2,7 +2,7 @@ sidebar_position: 203 --- -# quantileExactWeighted {#quantileexactweighted} +# quantileExactWeighted Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence, taking into account the weight of each element. 
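A minimal sketch of `quantileExactWeighted` may be useful here; the values and weights below are invented for illustration (a weight acts as if the value were repeated that many times):

``` sql
-- Median of {0 x3, 1 x2, 2 x1}, i.e. of [0, 0, 0, 1, 1, 2].
SELECT quantileExactWeighted(0.5)(t.1, t.2)
FROM (SELECT arrayJoin([(0, 3), (1, 2), (2, 1)]) AS t);
```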
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md index 6d0cf37f25e..a38d3cb141e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md @@ -2,15 +2,15 @@ sidebar_position: 201 --- -# quantiles Functions {#quantiles-functions} +# quantiles Functions -## quantiles {#quantiles} +## quantiles Syntax: `quantiles(level1, level2, …)(x)` All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`, `quantilesBFloat16`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values. -## quantilesExactExclusive {#quantilesexactexclusive} +## quantilesExactExclusive Exactly computes the [quantiles](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. @@ -62,7 +62,7 @@ Result: └─────────────────────────────────────────────────────────────────────┘ ``` -## quantilesExactInclusive {#quantilesexactinclusive} +## quantilesExactInclusive Exactly computes the [quantiles](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md index f42c88b2aca..be06e562334 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md @@ -2,7 +2,7 @@ sidebar_position: 207 --- -# quantileTDigest {#quantiletdigest} +# quantileTDigest Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md index 684e438f0c7..afde202dd15 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -2,7 +2,7 @@ sidebar_position: 208 --- -# quantileTDigestWeighted {#quantiletdigestweighted} +# quantileTDigestWeighted Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. The function takes into account the weight of each sequence member. The maximum error is 1%. Memory consumption is `log(n)`, where `n` is a number of values. diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md index f282f7e2004..8bcdbbc23aa 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md @@ -2,7 +2,7 @@ sidebar_position: 204 --- -# quantileTiming {#quantiletiming} +# quantileTiming With the determined precision computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. 
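`quantileTiming` is intended for sequences such as response times in milliseconds; a hedged sketch with made-up timings:

``` sql
-- Median response time over a small, invented sample.
SELECT quantileTiming(0.5)(t) AS median_ms
FROM (SELECT arrayJoin([72, 104, 108, 112, 126, 145, 168, 242, 313]) AS t);
```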
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md index c773f900764..4b56423c7d9 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md @@ -2,7 +2,7 @@ sidebar_position: 205 --- -# quantileTimingWeighted {#quantiletimingweighted} +# quantileTimingWeighted With the determined precision computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence according to the weight of each sequence member. @@ -81,7 +81,7 @@ Result: └───────────────────────────────────────────────┘ ``` -# quantilesTimingWeighted {#quantilestimingweighted} +# quantilesTimingWeighted Same as `quantileTimingWeighted`, but accepts multiple quantile levels and returns an array filled with the values of those quantiles. diff --git a/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md b/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md index 399fd88cf0e..3b8477340b6 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md +++ b/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md @@ -2,7 +2,7 @@ sidebar_position: 145 --- -# rankCorr {#agg_function-rankcorr} +# rankCorr Computes a rank correlation coefficient. diff --git a/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md b/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md index 8684cd4c3bb..b6f7a94acad 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md +++ b/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md @@ -2,7 +2,7 @@ sidebar_position: 220 --- -# simpleLinearRegression {#simplelinearregression} +# simpleLinearRegression Performs simple (unidimensional) linear regression. diff --git a/docs/en/sql-reference/aggregate-functions/reference/skewpop.md b/docs/en/sql-reference/aggregate-functions/reference/skewpop.md index 4cb3d58304f..87fa7e136f1 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/skewpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/skewpop.md @@ -2,7 +2,7 @@ sidebar_position: 150 --- -# skewPop {#skewpop} +# skewPop Computes the [skewness](https://en.wikipedia.org/wiki/Skewness) of a sequence. diff --git a/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md b/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md index 92e807d2d7d..cec74896deb 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md @@ -2,7 +2,7 @@ sidebar_position: 151 --- -# skewSamp {#skewsamp} +# skewSamp Computes the [sample skewness](https://en.wikipedia.org/wiki/Skewness) of a sequence. diff --git a/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md b/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md index ebb9cccbd40..16b3e2ddba0 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md @@ -3,7 +3,7 @@ sidebar_position: 311 sidebar_label: sparkbar --- -# sparkbar {#sparkbar} +# sparkbar The function plots a frequency histogram for values `x` and the repetition rate `y` of these values over the interval `[min_x, max_x]`. 
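To make the `sparkbar` signature concrete, here is a minimal sketch over synthetic daily counts; the inner query fabricates nine days of data, and the bucket width of 9 is an arbitrary choice:

``` sql
-- Draw a 9-bucket sparkline of counts per day.
SELECT sparkbar(9)(event_date, cnt)
FROM
(
    SELECT toDate('2020-01-01') + number AS event_date, number + 1 AS cnt
    FROM numbers(9)
);
```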
diff --git a/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md b/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md index 2b22320ae7a..015c0871dda 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md @@ -2,7 +2,7 @@ sidebar_position: 30 --- -# stddevPop {#stddevpop} +# stddevPop The result is equal to the square root of [varPop](../../../sql-reference/aggregate-functions/reference/varpop.md). diff --git a/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md b/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md index 3dcee821606..50dfa10a0d7 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md @@ -2,7 +2,7 @@ sidebar_position: 31 --- -# stddevSamp {#stddevsamp} +# stddevSamp The result is equal to the square root of [varSamp](../../../sql-reference/aggregate-functions/reference/varsamp.md). diff --git a/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md b/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md index e171629e90d..f4a79fd588b 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md +++ b/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md @@ -2,11 +2,11 @@ sidebar_position: 221 --- -# stochasticLinearRegression {#agg_functions-stochasticlinearregression} +# stochasticLinearRegression This function implements stochastic linear regression. It supports custom parameters for learning rate, L2 regularization coefficient, and mini-batch size, and has a few methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)). -### Parameters {#agg_functions-stochasticlinearregression-parameters} +### Parameters There are 4 customizable parameters. They are passed to the function sequentially, but there is no need to pass all four - default values will be used; however, a good model requires some parameter tuning. @@ -19,7 +19,7 @@ stochasticLinearRegression(1.0, 1.0, 10, 'SGD') 3. `mini-batch size` sets the number of elements whose gradients will be computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element; however, having small batches (about 10 elements) makes gradient steps more stable. Default is `15`. 4. `method for updating weights`: `Adam` (by default), `SGD`, `Momentum`, `Nesterov`. `Momentum` and `Nesterov` require a little more computation and memory, but they are often useful for the convergence speed and stability of stochastic gradient methods. -### Usage {#agg_functions-stochasticlinearregression-usage} +### Usage `stochasticLinearRegression` is used in two steps: fitting the model and predicting on new data. In order to fit the model and save its state for later usage, we use the `-State` combinator, which saves the state (model weights, etc.). To predict, we use the function [evalMLMethod](../../../sql-reference/functions/machine-learning-functions.md#machine_learning_methods-evalmlmethod), which takes a state as an argument as well as features to predict on. 
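The two-step flow can be sketched as follows; `train_data`, `test_data`, the feature columns, and `your_model` are placeholders, not names fixed by this diff:

``` sql
-- Step 1: fit, keeping the serialized state in a Memory table.
CREATE TABLE your_model ENGINE = Memory AS
SELECT stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2) AS state
FROM train_data;

-- Step 2: predict on new data with evalMLMethod.
WITH (SELECT state FROM your_model) AS model
SELECT evalMLMethod(model, param1, param2) FROM test_data;
```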
@@ -59,7 +59,7 @@ The query will return a column of predicted values. Note that the first argument of `test_data` is a table like `train_data` but may not contain the target value. -### Notes {#agg_functions-stochasticlinearregression-notes} +### Notes 1. To merge two models, a user may create such a query: `sql SELECT state1 + state2 FROM your_models` diff --git a/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md b/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md index a7d4c640126..ea1cff0ddf8 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md +++ b/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md @@ -2,11 +2,11 @@ sidebar_position: 222 --- -# stochasticLogisticRegression {#agg_functions-stochasticlogisticregression} +# stochasticLogisticRegression This function implements stochastic logistic regression. It can be used for binary classification problems, supports the same custom parameters as stochasticLinearRegression, and works the same way. -### Parameters {#agg_functions-stochasticlogisticregression-parameters} +### Parameters Parameters are exactly the same as in stochasticLinearRegression: `learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`. diff --git a/docs/en/sql-reference/aggregate-functions/reference/studentttest.md b/docs/en/sql-reference/aggregate-functions/reference/studentttest.md index 86207a35c04..b4a86d15597 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/studentttest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/studentttest.md @@ -3,7 +3,7 @@ sidebar_position: 300 sidebar_label: studentTTest --- -# studentTTest {#studentttest} +# studentTTest Applies Student's t-test to samples from two populations. diff --git a/docs/en/sql-reference/aggregate-functions/reference/sum.md b/docs/en/sql-reference/aggregate-functions/reference/sum.md index b72cb84e74f..527dc15d5a7 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sum.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sum.md @@ -2,6 +2,6 @@ sidebar_position: 4 --- -# sum {#agg_function-sum} +# sum Calculates the sum. Only works for numbers. diff --git a/docs/en/sql-reference/aggregate-functions/reference/sumcount.md b/docs/en/sql-reference/aggregate-functions/reference/sumcount.md index dbc0601241e..c4a302b076a 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sumcount.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sumcount.md @@ -2,7 +2,7 @@ sidebar_position: 144 --- -# sumCount {#agg_function-sumCount} +# sumCount Calculates the sum of the numbers and counts the number of rows at the same time. The function is used by the ClickHouse query optimizer: if there are multiple `sum`, `count` or `avg` functions in a query, they can be replaced with a single `sumCount` function to reuse the calculations. The function rarely needs to be used explicitly. 
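A small, hedged example of what `sumCount` returns (a tuple of the sum and the row count):

``` sql
SELECT sumCount(x) FROM (SELECT arrayJoin([0, 1, 2, 3]) AS x);
-- Returns the tuple (6, 4): sum = 6 over 4 rows.
```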
diff --git a/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md b/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md index 8c96464dfd5..cbcb3362b64 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md @@ -2,7 +2,7 @@ sidebar_position: 145 --- -# sumKahan {#agg_function-sumKahan} +# sumKahan Calculates the sum of the numbers with the [Kahan compensated summation algorithm](https://en.wikipedia.org/wiki/Kahan_summation_algorithm). It is slower than the [sum](./sum.md) function. diff --git a/docs/en/sql-reference/aggregate-functions/reference/summap.md b/docs/en/sql-reference/aggregate-functions/reference/summap.md index 78ce6a9e835..88af347c88f 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/summap.md +++ b/docs/en/sql-reference/aggregate-functions/reference/summap.md @@ -2,7 +2,7 @@ sidebar_position: 141 --- -# sumMap {#agg_functions-summap} +# sumMap Syntax: `sumMap(key, value)` or `sumMap(Tuple(key, value))` diff --git a/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md b/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md index 0582eb5fb7b..9f8b4b4f577 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md @@ -2,7 +2,7 @@ sidebar_position: 140 --- -# sumWithOverflow {#sumwithoverflowx} +# sumWithOverflow Computes the sum of the numbers, using the same data type for the result as for the input parameters. If the sum exceeds the maximum value for this data type, it is calculated with overflow. diff --git a/docs/en/sql-reference/aggregate-functions/reference/topk.md b/docs/en/sql-reference/aggregate-functions/reference/topk.md index 19e98262899..d968ca22b16 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/topk.md +++ b/docs/en/sql-reference/aggregate-functions/reference/topk.md @@ -2,7 +2,7 @@ sidebar_position: 108 --- -# topK {#topk} +# topK Returns an array of the approximately most frequent values in the specified column. The resulting array is sorted in descending order of approximate frequency of values (not by the values themselves). diff --git a/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md b/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md index 2d6e86667ef..daa5d05e99f 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md @@ -2,7 +2,7 @@ sidebar_position: 109 --- -# topKWeighted {#topkweighted} +# topKWeighted Returns an array of the approximately most frequent values in the specified column. The resulting array is sorted in descending order of approximate frequency of values (not by the values themselves). Additionally, the weight of the value is taken into account. diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniq.md b/docs/en/sql-reference/aggregate-functions/reference/uniq.md index 6e6791702ef..942ad73dfd9 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniq.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniq.md @@ -2,7 +2,7 @@ sidebar_position: 190 --- -# uniq {#agg_function-uniq} +# uniq Calculates the approximate number of different values of the argument. 
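As a quick sketch (for a tiny set like this, the approximate result is normally exact):

``` sql
SELECT uniq(x) FROM (SELECT arrayJoin([1, 1, 2, 3, 3, 3]) AS x);
-- 3 distinct values
```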
diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md index 79357cb14ce..652032eb575 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md @@ -2,7 +2,7 @@ sidebar_position: 192 --- -# uniqCombined {#agg_function-uniqcombined} +# uniqCombined Calculates the approximate number of different argument values. diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md index fb0be23c768..d2aa51954fe 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md @@ -2,6 +2,6 @@ sidebar_position: 193 --- -# uniqCombined64 {#agg_function-uniqcombined64} +# uniqCombined64 Same as [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined), but uses 64-bit hash for all data types. diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md b/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md index 68e6bc562f9..9b3da4e317a 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md @@ -2,7 +2,7 @@ sidebar_position: 191 --- -# uniqExact {#agg_function-uniqexact} +# uniqExact Calculates the exact number of different argument values. diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md index 1a13b365560..5514eb692b7 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md @@ -2,7 +2,7 @@ sidebar_position: 194 --- -# uniqHLL12 {#agg_function-uniqhll12} +# uniqHLL12 Calculates the approximate number of different argument values, using the [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) algorithm. diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md b/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md index 9b9c16922b1..ab3661f07d9 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md @@ -2,7 +2,7 @@ sidebar_position: 195 --- -# uniqTheta {#agg_function-uniqthetasketch} +# uniqTheta Calculates the approximate number of different argument values, using the [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html). diff --git a/docs/en/sql-reference/aggregate-functions/reference/varpop.md b/docs/en/sql-reference/aggregate-functions/reference/varpop.md index f16cfcdc63f..ec0d2b51185 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varpop.md @@ -2,7 +2,7 @@ sidebar_position: 32 --- -# varPop(x) {#varpopx} +# varPop(x) Calculates the amount `Σ((x - x̅)^2) / n`, where `n` is the sample size and `x̅`is the average value of `x`. 
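A short worked example of the `varPop` formula above, using invented values:

``` sql
-- For [1, 2, 3, 4]: mean = 2.5, so
-- (2.25 + 0.25 + 0.25 + 2.25) / 4 = 1.25.
SELECT varPop(x) FROM (SELECT arrayJoin([1, 2, 3, 4]) AS x);
```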
diff --git a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md index b323f78fbd1..844b2006c91 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md @@ -2,7 +2,7 @@ sidebar_position: 33 --- -# varSamp {#varsamp} +# varSamp Calculates the amount `Σ((x - x̅)^2) / (n - 1)`, where `n` is the sample size and `x̅`is the average value of `x`. diff --git a/docs/en/sql-reference/aggregate-functions/reference/welchttest.md b/docs/en/sql-reference/aggregate-functions/reference/welchttest.md index 0aff60e7bbf..82c09ed606e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/welchttest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/welchttest.md @@ -3,7 +3,7 @@ sidebar_position: 301 sidebar_label: welchTTest --- -# welchTTest {#welchttest} +# welchTTest Applies Welch's t-test to samples from two populations. diff --git a/docs/en/sql-reference/ansi.md b/docs/en/sql-reference/ansi.md index d6473e64872..7b307b5d1be 100644 --- a/docs/en/sql-reference/ansi.md +++ b/docs/en/sql-reference/ansi.md @@ -3,13 +3,13 @@ sidebar_position: 40 sidebar_label: ANSI Compatibility --- -# ANSI SQL Compatibility of ClickHouse SQL Dialect {#ansi-sql-compatibility-of-clickhouse-sql-dialect} +# ANSI SQL Compatibility of ClickHouse SQL Dialect :::note This article relies on Table 38, “Feature taxonomy and definition for mandatory features”, Annex F of [ISO/IEC CD 9075-2:2011](https://www.iso.org/obp/ui/#iso:std:iso-iec:9075:-2:ed-4:v1:en:sec:8). ::: -## Differences in Behaviour {#differences-in-behaviour} +## Differences in Behaviour The following table lists cases when query feature works in ClickHouse, but behaves not as specified in ANSI SQL. @@ -20,7 +20,7 @@ The following table lists cases when query feature works in ClickHouse, but beha | E141-01 | NOT NULL constraints | `NOT NULL` is implied for table columns by default | | E011-04 | Arithmetic operators | ClickHouse overflows instead of checked arithmetic and changes the result data type based on custom rules | -## Feature Status {#feature-status} +## Feature Status | Feature ID | Feature Name | Status | Comment | |------------|--------------------------------------------------------------------------------------------------------------------------|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| diff --git a/docs/en/sql-reference/data-types/aggregatefunction.md b/docs/en/sql-reference/data-types/aggregatefunction.md index 6dc89e2864f..6220c6b2d6f 100644 --- a/docs/en/sql-reference/data-types/aggregatefunction.md +++ b/docs/en/sql-reference/data-types/aggregatefunction.md @@ -3,7 +3,7 @@ sidebar_position: 53 sidebar_label: AggregateFunction --- -# AggregateFunction {#data-type-aggregatefunction} +# AggregateFunction Aggregate functions can have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(…)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge`suffix. 
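A minimal sketch of the `-State`/`-Merge` round trip described above; the `visits` source table and its columns are assumed for illustration:

``` sql
CREATE TABLE visit_states
(
    region UInt32,
    users AggregateFunction(uniq, UInt64)
) ENGINE = AggregatingMergeTree() ORDER BY region;

-- Store partial aggregation states per region.
INSERT INTO visit_states
SELECT region, uniqState(user_id) FROM visits GROUP BY region;

-- Finalize across all stored states later.
SELECT uniqMerge(users) FROM visit_states;
```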
@@ -28,9 +28,9 @@ CREATE TABLE t [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq), anyIf ([any](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any)+[If](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-if)) and [quantiles](../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) are the aggregate functions supported in ClickHouse. -## Usage {#usage} +## Usage -### Data Insertion {#data-insertion} +### Data Insertion To insert data, use `INSERT SELECT` with aggregate `-State`- functions. @@ -45,7 +45,7 @@ In contrast to the corresponding functions `uniq` and `quantiles`, `-State`- fun In the results of a `SELECT` query, the values of the `AggregateFunction` type have an implementation-specific binary representation for all of the ClickHouse output formats. If you dump data into, for example, the `TabSeparated` format with a `SELECT` query, then this dump can be loaded back using an `INSERT` query. -### Data Selection {#data-selection} +### Data Selection When selecting data from an `AggregatingMergeTree` table, use the `GROUP BY` clause and the same aggregate functions as when inserting data, but with the `-Merge` suffix. @@ -59,7 +59,7 @@ SELECT uniq(UserID) FROM table SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP BY RegionID) ``` -## Usage Example {#usage-example} +## Usage Example See [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) engine description. diff --git a/docs/en/sql-reference/data-types/array.md b/docs/en/sql-reference/data-types/array.md index 909df86ec2f..c0e9d217479 100644 --- a/docs/en/sql-reference/data-types/array.md +++ b/docs/en/sql-reference/data-types/array.md @@ -3,11 +3,11 @@ sidebar_position: 52 sidebar_label: Array(T) --- -# Array(t) {#data-type-array} +# Array(t) An array of `T`-type items, with the starting array index as 1. `T` can be any data type, including an array. -## Creating an Array {#creating-an-array} +## Creating an Array You can use a function to create an array: @@ -43,7 +43,7 @@ SELECT [1, 2] AS x, toTypeName(x) └───────┴────────────────────┘ ``` -## Working with Data Types {#working-with-data-types} +## Working with Data Types The maximum size of an array is limited to one million elements. @@ -74,7 +74,7 @@ Received exception from server (version 1.1.54388): Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. ``` -## Array Size {#array-size} +## Array Size It is possible to find the size of an array by using the `size0` subcolumn without reading the whole column. For multi-dimensional arrays you can use `sizeN-1`, where `N` is the wanted dimension. diff --git a/docs/en/sql-reference/data-types/boolean.md b/docs/en/sql-reference/data-types/boolean.md index 7b61579c4a9..02e257bec43 100644 --- a/docs/en/sql-reference/data-types/boolean.md +++ b/docs/en/sql-reference/data-types/boolean.md @@ -3,7 +3,7 @@ sidebar_position: 43 sidebar_label: Boolean --- -# Boolean Values bool (boolean) {#boolean-values} +# Boolean Values bool (boolean) Type `bool` is stored as UInt8. Possible values `true` (1), `false` (0). 
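A small sketch of the `bool` type in practice (exact behaviour may vary by server version, since `bool` support changed over releases):

``` sql
CREATE TABLE test_bool (a Int64, b Bool) ENGINE = Memory;
INSERT INTO test_bool VALUES (1, true), (2, 0); -- 0/1 and false/true are interchangeable
SELECT * FROM test_bool;
```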
diff --git a/docs/en/sql-reference/data-types/date.md b/docs/en/sql-reference/data-types/date.md index bc3fda4a9d0..e6aabb7aa79 100644 --- a/docs/en/sql-reference/data-types/date.md +++ b/docs/en/sql-reference/data-types/date.md @@ -3,7 +3,7 @@ sidebar_position: 47 sidebar_label: Date --- -# Date {#data_type-date} +# Date A date. Stored in two bytes as the number of days since 1970-01-01 (unsigned). Allows storing values from just after the beginning of the Unix Epoch to the upper threshold defined by a constant at the compilation stage (currently, this is until the year 2149, but the final fully-supported year is 2148). diff --git a/docs/en/sql-reference/data-types/date32.md b/docs/en/sql-reference/data-types/date32.md index 46c9fe00b34..e1d6e2363e8 100644 --- a/docs/en/sql-reference/data-types/date32.md +++ b/docs/en/sql-reference/data-types/date32.md @@ -3,7 +3,7 @@ sidebar_position: 48 sidebar_label: Date32 --- -# Date32 {#data_type-datetime32} +# Date32 A date. Supports the same date range as [Datetime64](../../sql-reference/data-types/datetime64.md). Stored in four bytes as the number of days since 1925-01-01. Allows storing values up to 2283-11-11. diff --git a/docs/en/sql-reference/data-types/datetime.md b/docs/en/sql-reference/data-types/datetime.md index cae83ac9a31..cc58c33115d 100644 --- a/docs/en/sql-reference/data-types/datetime.md +++ b/docs/en/sql-reference/data-types/datetime.md @@ -3,7 +3,7 @@ sidebar_position: 48 sidebar_label: DateTime --- -# Datetime {#data_type-datetime} +# Datetime Allows storing an instant in time that can be expressed as a calendar date and a time of day. @@ -17,7 +17,7 @@ Supported range of values: \[1970-01-01 00:00:00, 2106-02-07 06:28:15\]. Resolution: 1 second. -## Usage Remarks {#usage-remarks} +## Usage Remarks The point in time is saved as a [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time), regardless of the time zone or daylight saving time. The time zone affects how values of the `DateTime` type are displayed in text format and how values specified as strings are parsed (‘2020-01-01 05:00:01’). @@ -33,7 +33,7 @@ ClickHouse outputs values depending on the value of the [date_time_output_format When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings.md#settings-date_time_input_format) setting. -## Examples {#examples} +## Examples **1.** Creating a table with a `DateTime`-type column and inserting data into it: @@ -134,7 +134,7 @@ Similar issue exists for Casey Antarctic station in year 2010. They changed time Time shifts for multiple days. Some Pacific islands changed their timezone offset from UTC+14 to UTC-12. That's alright, but some inaccuracies may be present if you do calculations with their timezone for historical time points at the days of conversion. 
-## See Also {#see-also} +## See Also - [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md) - [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md) diff --git a/docs/en/sql-reference/data-types/datetime64.md b/docs/en/sql-reference/data-types/datetime64.md index aefd7e4a18b..e67f86743a9 100644 --- a/docs/en/sql-reference/data-types/datetime64.md +++ b/docs/en/sql-reference/data-types/datetime64.md @@ -3,7 +3,7 @@ sidebar_position: 49 sidebar_label: DateTime64 --- -# Datetime64 {#data_type-datetime64} +# Datetime64 Allows to store an instant in time, that can be expressed as a calendar date and a time of a day, with defined sub-second precision @@ -20,7 +20,7 @@ Internally, stores data as a number of ‘ticks’ since epoch start (1970-01-01 Supported range of values: \[1925-01-01 00:00:00, 2283-11-11 23:59:59.99999999\] (Note: The precision of the maximum value is 8). -## Examples {#examples} +## Examples 1. Creating a table with `DateTime64`-type column and inserting data into it: diff --git a/docs/en/sql-reference/data-types/decimal.md b/docs/en/sql-reference/data-types/decimal.md index 33b4addb54f..ddb1c091c7c 100644 --- a/docs/en/sql-reference/data-types/decimal.md +++ b/docs/en/sql-reference/data-types/decimal.md @@ -3,11 +3,11 @@ sidebar_position: 42 sidebar_label: Decimal --- -# Decimal(P, S), Decimal32(S), Decimal64(S), Decimal128(S), Decimal256(S) {#decimal} +# Decimal(P, S), Decimal32(S), Decimal64(S), Decimal128(S), Decimal256(S) Signed fixed-point numbers that keep precision during add, subtract and multiply operations. For division least significant digits are discarded (not rounded). -## Parameters {#parameters} +## Parameters - P - precision. Valid range: \[ 1 : 76 \]. Determines how many decimal digits number can have (including fraction). - S - scale. Valid range: \[ 0 : P \]. Determines how many decimal digits fraction can have. @@ -18,7 +18,7 @@ Depending on P parameter value Decimal(P, S) is a synonym for: - P from \[ 19 : 38 \] - for Decimal128(S) - P from \[ 39 : 76 \] - for Decimal256(S) -## Decimal Value Ranges {#decimal-value-ranges} +## Decimal Value Ranges - Decimal32(S) - ( -1 \* 10^(9 - S), 1 \* 10^(9 - S) ) - Decimal64(S) - ( -1 \* 10^(18 - S), 1 \* 10^(18 - S) ) @@ -27,13 +27,13 @@ Depending on P parameter value Decimal(P, S) is a synonym for: For example, Decimal32(4) can contain numbers from -99999.9999 to 99999.9999 with 0.0001 step. -## Internal Representation {#internal-representation} +## Internal Representation Internally data is represented as normal signed integers with respective bit width. Real value ranges that can be stored in memory are a bit larger than specified above, which are checked only on conversion from a string. Because modern CPUs do not support 128-bit integers natively, operations on Decimal128 are emulated. Because of this Decimal128 works significantly slower than Decimal32/Decimal64. -## Operations and Result Type {#operations-and-result-type} +## Operations and Result Type Binary operations on Decimal result in wider result type (with any order of arguments). @@ -54,7 +54,7 @@ Operations between Decimal and Float32/Float64 are not defined. If you need them Some functions on Decimal return result as Float64 (for example, var or stddev). Intermediate calculations might still be performed in Decimal, which might lead to different results between Float64 and Decimal inputs with the same values. 
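A brief sketch of how scales combine in Decimal arithmetic; the result types follow the widening rules described above:

``` sql
SELECT
    toDecimal32(2, 4) AS x,
    toTypeName(x) AS tx,         -- Decimal(9, 4)
    x * toDecimal32(-2, 4) AS y,
    toTypeName(y) AS ty          -- a wider Decimal; scales add on multiplication
```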
-## Overflow Checks {#overflow-checks} +## Overflow Checks During calculations on Decimal, integer overflows might happen. Excessive digits in a fraction are discarded (not rounded). Excessive digits in the integer part will lead to an exception. diff --git a/docs/en/sql-reference/data-types/domains/index.md b/docs/en/sql-reference/data-types/domains/index.md index e27bf9a6d37..f9bd6eea07e 100644 --- a/docs/en/sql-reference/data-types/domains/index.md +++ b/docs/en/sql-reference/data-types/domains/index.md @@ -3,7 +3,7 @@ sidebar_position: 56 sidebar_label: Domains --- -# Domains {#domains} +# Domains Domains are special-purpose types that add some extra features atop an existing base type, while leaving the on-wire and on-disk format of the underlying data type intact. At the moment, ClickHouse does not support user-defined domains. @@ -14,14 +14,14 @@ You can use domains anywhere the corresponding base type can be used, for example: - Use it as an index if a base type can be used as an index - Call functions with values of a domain column -### Extra Features of Domains {#extra-features-of-domains} +### Extra Features of Domains - Explicit column type name in `SHOW CREATE TABLE` or `DESCRIBE TABLE` - Input from human-friendly format with `INSERT INTO domain_table(domain_column) VALUES(...)` - Output to human-friendly format for `SELECT domain_column FROM domain_table` - Loading data from an external source in the human-friendly format: `INSERT INTO domain_table FORMAT CSV ...` -### Limitations {#limitations} +### Limitations - Can’t convert an index column of the base type to the domain type via `ALTER TABLE`. - Can’t implicitly convert string values into domain values when inserting data from another column or table. diff --git a/docs/en/sql-reference/data-types/domains/ipv4.md b/docs/en/sql-reference/data-types/domains/ipv4.md index 76d285fe34a..00d3a03ee29 100644 --- a/docs/en/sql-reference/data-types/domains/ipv4.md +++ b/docs/en/sql-reference/data-types/domains/ipv4.md @@ -3,11 +3,11 @@ sidebar_position: 59 sidebar_label: IPv4 --- -## IPv4 {#ipv4} +## IPv4 `IPv4` is a domain based on the `UInt32` type and serves as a typed replacement for storing IPv4 values. It provides compact storage with a human-friendly input-output format and column type information on inspection. -### Basic Usage {#basic-usage} +### Basic Usage ``` sql CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY url; diff --git a/docs/en/sql-reference/data-types/domains/ipv6.md b/docs/en/sql-reference/data-types/domains/ipv6.md index c5745dcb80f..3863b085a14 100644 --- a/docs/en/sql-reference/data-types/domains/ipv6.md +++ b/docs/en/sql-reference/data-types/domains/ipv6.md @@ -3,11 +3,11 @@ sidebar_position: 60 sidebar_label: IPv6 --- -## IPv6 {#ipv6} +## IPv6 `IPv6` is a domain based on the `FixedString(16)` type and serves as a typed replacement for storing IPv6 values. It provides compact storage with a human-friendly input-output format and column type information on inspection. -### Basic Usage {#basic-usage} +### Basic Usage ``` sql CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY url; diff --git a/docs/en/sql-reference/data-types/enum.md b/docs/en/sql-reference/data-types/enum.md index fa85d48c93f..5b975c83844 100644 --- a/docs/en/sql-reference/data-types/enum.md +++ b/docs/en/sql-reference/data-types/enum.md @@ -3,7 +3,7 @@ sidebar_position: 50 sidebar_label: Enum --- -# Enum {#enum} +# Enum Enumerated type consisting of named values. 
@@ -16,7 +16,7 @@ ClickHouse supports: ClickHouse automatically chooses the type of `Enum` when data is inserted. You can also use `Enum8` or `Enum16` types to be sure in the size of storage. -## Usage Examples {#usage-examples} +## Usage Examples Here we create a table with an `Enum8('hello' = 1, 'world' = 2)` type column: @@ -120,7 +120,7 @@ SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)')) └─────────────────────────────────────────────────────┘ ``` -## General Rules and Usage {#general-rules-and-usage} +## General Rules and Usage Each of the values is assigned a number in the range `-128 ... 127` for `Enum8` or in the range `-32768 ... 32767` for `Enum16`. All the strings and numbers must be different. An empty string is allowed. If this type is specified (in a table definition), numbers can be in an arbitrary order. However, the order does not matter. diff --git a/docs/en/sql-reference/data-types/fixedstring.md b/docs/en/sql-reference/data-types/fixedstring.md index 230792c19bb..444e6cbcd47 100644 --- a/docs/en/sql-reference/data-types/fixedstring.md +++ b/docs/en/sql-reference/data-types/fixedstring.md @@ -3,7 +3,7 @@ sidebar_position: 45 sidebar_label: FixedString(N) --- -# Fixedstring {#fixedstring} +# Fixedstring A fixed-length string of `N` bytes (neither characters nor code points). diff --git a/docs/en/sql-reference/data-types/float.md b/docs/en/sql-reference/data-types/float.md index 46076e29525..fbf1088b190 100644 --- a/docs/en/sql-reference/data-types/float.md +++ b/docs/en/sql-reference/data-types/float.md @@ -3,7 +3,7 @@ sidebar_position: 41 sidebar_label: Float32, Float64 --- -# Float32, Float64 {#float32-float64} +# Float32, Float64 [Floating point numbers](https://en.wikipedia.org/wiki/IEEE_754). @@ -21,7 +21,7 @@ Aliases: When creating tables, numeric parameters for floating point numbers can be set (e.g. `FLOAT(12)`, `FLOAT(15, 22)`, `DOUBLE(12)`, `DOUBLE(4, 18)`), but ClickHouse ignores them. -## Using Floating-point Numbers {#using-floating-point-numbers} +## Using Floating-point Numbers - Computations with floating-point numbers might produce a rounding error. @@ -41,7 +41,7 @@ SELECT 1 - 0.9 - Floating-point calculations might result in numbers such as infinity (`Inf`) and “not-a-number” (`NaN`). This should be taken into account when processing the results of calculations. - When parsing floating-point numbers from text, the result might not be the nearest machine-representable number. -## NaN and Inf {#data_type-float-nan-inf} +## NaN and Inf In contrast to standard SQL, ClickHouse supports the following categories of floating-point numbers: diff --git a/docs/en/sql-reference/data-types/geo.md b/docs/en/sql-reference/data-types/geo.md index 7ce863a5a10..c8edf985582 100644 --- a/docs/en/sql-reference/data-types/geo.md +++ b/docs/en/sql-reference/data-types/geo.md @@ -3,7 +3,7 @@ sidebar_position: 62 sidebar_label: Geo --- -# Geo Data Types {#geo-data-types} +# Geo Data Types ClickHouse supports data types for representing geographical objects — locations, lands, etc. @@ -15,7 +15,7 @@ Currently geo data types are an experimental feature. To work with them you must - [Representing simple geographical features](https://en.wikipedia.org/wiki/GeoJSON). - [allow_experimental_geo_types](../../operations/settings/settings.md#allow-experimental-geo-types) setting. -## Point {#point-data-type} +## Point `Point` is represented by its X and Y coordinates, stored as a [Tuple](tuple.md)([Float64](float.md), [Float64](float.md)). 
@@ -37,7 +37,7 @@ Result: └───────┴───────────────┘ ``` -## Ring {#ring-data-type} +## Ring `Ring` is a simple polygon without holes stored as an array of points: [Array](array.md)([Point](#point-data-type)). @@ -59,7 +59,7 @@ Result: └───────────────────────────────┴───────────────┘ ``` -## Polygon {#polygon-data-type} +## Polygon `Polygon` is a polygon with holes stored as an array of rings: [Array](array.md)([Ring](#ring-data-type)). First element of outer array is the outer shape of polygon and all the following elements are holes. @@ -82,7 +82,7 @@ Result: └───────────────────────────────────────────────────────────────┴────────────────┘ ``` -## MultiPolygon {#multipolygon-data-type} +## MultiPolygon `MultiPolygon` consists of multiple polygons and is stored as an array of polygons: [Array](array.md)([Polygon](#polygon-data-type)). diff --git a/docs/en/sql-reference/data-types/int-uint.md b/docs/en/sql-reference/data-types/int-uint.md index 86d587cfb55..c63f6780154 100644 --- a/docs/en/sql-reference/data-types/int-uint.md +++ b/docs/en/sql-reference/data-types/int-uint.md @@ -9,7 +9,7 @@ Fixed-length integers, with or without a sign. When creating tables, numeric parameters for integer numbers can be set (e.g. `TINYINT(8)`, `SMALLINT(16)`, `INT(32)`, `BIGINT(64)`), but ClickHouse ignores them. -## Int Ranges {#int-ranges} +## Int Ranges - `Int8` — \[-128 : 127\] - `Int16` — \[-32768 : 32767\] @@ -25,7 +25,7 @@ Aliases: - `Int32` — `INT`, `INT4`, `INTEGER`. - `Int64` — `BIGINT`. -## UInt Ranges {#uint-ranges} +## UInt Ranges - `UInt8` — \[0 : 255\] - `UInt16` — \[0 : 65535\] diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index 41235d17cdb..718e5279980 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -3,7 +3,7 @@ sidebar_position: 54 sidebar_label: JSON --- -# JSON {#json-data-type} +# JSON Stores JavaScript Object Notation (JSON) documents in a single column. @@ -13,7 +13,7 @@ Stores JavaScript Object Notation (JSON) documents in a single column. The JSON data type is an experimental feature. To use it, set `allow_experimental_object_type = 1`. ::: -## Example {#usage-example} +## Example **Example 1** diff --git a/docs/en/sql-reference/data-types/lowcardinality.md b/docs/en/sql-reference/data-types/lowcardinality.md index 40105446cbe..3bfe7b8a14e 100644 --- a/docs/en/sql-reference/data-types/lowcardinality.md +++ b/docs/en/sql-reference/data-types/lowcardinality.md @@ -3,11 +3,11 @@ sidebar_position: 51 sidebar_label: LowCardinality --- -# LowCardinality Data Type {#lowcardinality-data-type} +# LowCardinality Data Type Changes the internal representation of other data types to be dictionary-encoded. -## Syntax {#lowcardinality-syntax} +## Syntax ``` sql LowCardinality(data_type) @@ -17,7 +17,7 @@ LowCardinality(data_type) - `data_type` — [String](../../sql-reference/data-types/string.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), and numbers excepting [Decimal](../../sql-reference/data-types/decimal.md). `LowCardinality` is not efficient for some data types, see the [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types) setting description. -## Description {#lowcardinality-dscr} +## Description `LowCardinality` is a superstructure that changes a data storage method and rules of data processing. 
ClickHouse applies [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) to `LowCardinality`-columns. Operating with dictionary encoded data significantly increases performance of [SELECT](../../sql-reference/statements/select/index.md) queries for many applications. @@ -25,7 +25,7 @@ The efficiency of using `LowCardinality` data type depends on data diversity. If Consider using `LowCardinality` instead of [Enum](../../sql-reference/data-types/enum.md) when working with strings. `LowCardinality` provides more flexibility in use and often reveals the same or higher efficiency. -## Example {#example} +## Example Create a table with a `LowCardinality`-column: @@ -39,7 +39,7 @@ ENGINE = MergeTree() ORDER BY id ``` -## Related Settings and Functions {#related-settings-and-functions} +## Related Settings and Functions Settings: @@ -53,7 +53,7 @@ Functions: - [toLowCardinality](../../sql-reference/functions/type-conversion-functions.md#tolowcardinality) -## See Also {#see-also} +## See Also - [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/). - [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf). diff --git a/docs/en/sql-reference/data-types/map.md b/docs/en/sql-reference/data-types/map.md index 56f4442fe5a..e913a5f34e3 100644 --- a/docs/en/sql-reference/data-types/map.md +++ b/docs/en/sql-reference/data-types/map.md @@ -3,14 +3,14 @@ sidebar_position: 65 sidebar_label: Map(key, value) --- -# Map(key, value) {#data_type-map} +# Map(key, value) `Map(key, value)` data type stores `key:value` pairs. **Parameters** -- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), or [FixedString](../../sql-reference/data-types/fixedstring.md). -- `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [Array](../../sql-reference/data-types/array.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), or [FixedString](../../sql-reference/data-types/fixedstring.md). +- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UUID](../../sql-reference/data-types/uuid.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), [Date32](../../sql-reference/data-types/date32.md), [Enum](../../sql-reference/data-types/enum.md). +- `value` — The value part of the pair. Arbitrary type, including [Map](../../sql-reference/data-types/map.md) and [Array](../../sql-reference/data-types/array.md). To get the value from an `a Map('key', 'value')` column, use `a['key']` syntax. This lookup works now with a linear complexity. 
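A compact sketch of a `Map` column and the bracket lookup described above:

``` sql
CREATE TABLE table_map (a Map(String, UInt64)) ENGINE = Memory;
INSERT INTO table_map VALUES ({'key1': 1, 'key2': 10});
SELECT a['key1'] FROM table_map; -- 1
```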
@@ -59,7 +59,7 @@ Result: └───────────────────────────────┴───────────────┘ ``` -## Convert Tuple to Map Type {#map-and-tuple} +## Convert Tuple to Map Type You can cast `Tuple()` as `Map()` using the [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) function: @@ -73,7 +73,7 @@ SELECT CAST(([1, 2, 3], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map └───────────────────────────────┘ ``` -## Map.keys and Map.values Subcolumns {#map-subcolumns} +## Map.keys and Map.values Subcolumns To optimize `Map` column processing, in some cases you can use the `keys` and `values` subcolumns instead of reading the whole column. diff --git a/docs/en/sql-reference/data-types/multiword-types.md b/docs/en/sql-reference/data-types/multiword-types.md index ae57037b6e2..913f5325e6f 100644 --- a/docs/en/sql-reference/data-types/multiword-types.md +++ b/docs/en/sql-reference/data-types/multiword-types.md @@ -3,11 +3,11 @@ sidebar_position: 61 sidebar_label: Multiword Type Names --- -# Multiword Types {#multiword-types} +# Multiword Types When creating tables, you can use data types with a name consisting of several words. This is implemented for better SQL compatibility. -## Multiword Types Support {#multiword-types-support} +## Multiword Types Support | Multiword types | Simple types | |----------------------------------|--------------------------------------------------------------| diff --git a/docs/en/sql-reference/data-types/nested-data-structures/index.md b/docs/en/sql-reference/data-types/nested-data-structures/index.md index c0f016ea41d..90150f3acc2 100644 --- a/docs/en/sql-reference/data-types/nested-data-structures/index.md +++ b/docs/en/sql-reference/data-types/nested-data-structures/index.md @@ -3,6 +3,6 @@ sidebar_label: Nested Data Structures sidebar_position: 54 --- -# Nested Data Structures {#nested-data-structures} +# Nested Data Structures [Original article](https://clickhouse.com/docs/en/data_types/nested_data_structures/) diff --git a/docs/en/sql-reference/data-types/nested-data-structures/nested.md b/docs/en/sql-reference/data-types/nested-data-structures/nested.md index 8258d8bd8e5..b14025cdfaf 100644 --- a/docs/en/sql-reference/data-types/nested-data-structures/nested.md +++ b/docs/en/sql-reference/data-types/nested-data-structures/nested.md @@ -3,9 +3,9 @@ sidebar_position: 57 sidebar_label: Nested(Name1 Type1, Name2 Type2, ...) --- -# Nested {#nested} +# Nested -## Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} +## Nested(name1 Type1, Name2 Type2, …) A nested data structure is like a table inside a cell. The parameters of a nested data structure – the column names and types – are specified the same way as in a [CREATE TABLE](../../../sql-reference/statements/create/table.md) query. Each table row can correspond to any number of rows in a nested data structure. diff --git a/docs/en/sql-reference/data-types/nullable.md b/docs/en/sql-reference/data-types/nullable.md index f3c3dcd2326..b9a75274c08 100644 --- a/docs/en/sql-reference/data-types/nullable.md +++ b/docs/en/sql-reference/data-types/nullable.md @@ -3,7 +3,7 @@ sidebar_position: 55 sidebar_label: Nullable --- -# Nullable(typename) {#data_type-nullable} +# Nullable(typename) Allows storing a special marker ([NULL](../../sql-reference/syntax.md)) that denotes “missing value” alongside normal values allowed by `TypeName`. For example, a `Nullable(Int8)` type column can store `Int8` type values, and the rows that do not have a value will store `NULL`.
@@ -13,7 +13,7 @@ A `Nullable` type field can’t be included in table indexes. `NULL` is the default value for any `Nullable` type, unless specified otherwise in the ClickHouse server configuration. -## Storage Features {#storage-features} +## Storage Features To store `Nullable` type values in a table column, ClickHouse uses a separate file with `NULL` masks in addition to a normal file with values. Entries in the masks file allow ClickHouse to distinguish between `NULL` and a default value of the corresponding data type for each table row. Because of the additional file, a `Nullable` column consumes additional storage space compared to a similar normal one. @@ -21,7 +21,7 @@ To store `Nullable` type values in a table column, ClickHouse uses a separate fi Using `Nullable` almost always negatively affects performance; keep this in mind when designing your databases. ::: -## Finding NULL {#finding-null} +## Finding NULL It is possible to find `NULL` values in a column by using the `null` subcolumn without reading the whole column. It returns `1` if the corresponding value is `NULL` and `0` otherwise. @@ -48,7 +48,7 @@ Result: └────────┘ ``` -## Usage Example {#usage-example} +## Usage Example ``` sql CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog diff --git a/docs/en/sql-reference/data-types/simpleaggregatefunction.md b/docs/en/sql-reference/data-types/simpleaggregatefunction.md index 1c04a71dedb..d0f604e8d8e 100644 --- a/docs/en/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/en/sql-reference/data-types/simpleaggregatefunction.md @@ -1,4 +1,4 @@ -# SimpleAggregateFunction {#data-type-simpleaggregatefunction} +# SimpleAggregateFunction `SimpleAggregateFunction(name, types_of_arguments…)` data type stores the current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data. diff --git a/docs/en/sql-reference/data-types/special-data-types/expression.md b/docs/en/sql-reference/data-types/special-data-types/expression.md index b6a2a2ebb9d..0b9265eaa6e 100644 --- a/docs/en/sql-reference/data-types/special-data-types/expression.md +++ b/docs/en/sql-reference/data-types/special-data-types/expression.md @@ -3,7 +3,7 @@ sidebar_position: 58 sidebar_label: Expression --- -# Expression {#expression} +# Expression Expressions are used for representing lambdas in higher-order functions. diff --git a/docs/en/sql-reference/data-types/special-data-types/index.md b/docs/en/sql-reference/data-types/special-data-types/index.md index 5455d34a2a2..b2e5251d748 100644 --- a/docs/en/sql-reference/data-types/special-data-types/index.md +++ b/docs/en/sql-reference/data-types/special-data-types/index.md @@ -3,7 +3,7 @@ sidebar_label: Special Data Types sidebar_position: 55 --- -# Special Data Types {#special-data-types} +# Special Data Types Special data type values can’t be serialized for saving in a table or output in query results, but can be used as an intermediate result during query execution.
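As an illustration of the `null` subcolumn lookup described for `Nullable` above, a minimal sketch; the table name is illustrative:

``` sql
CREATE TABLE nullable_demo (n Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO nullable_demo VALUES (1), (NULL), (2), (NULL);
-- Reads only the NULL mask, not the values: returns 0, 1, 0, 1.
SELECT n.null FROM nullable_demo;
```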
diff --git a/docs/en/sql-reference/data-types/special-data-types/interval.md b/docs/en/sql-reference/data-types/special-data-types/interval.md index 3ebeee01bf6..6c2349d492c 100644 --- a/docs/en/sql-reference/data-types/special-data-types/interval.md +++ b/docs/en/sql-reference/data-types/special-data-types/interval.md @@ -3,7 +3,7 @@ sidebar_position: 61 sidebar_label: Interval --- -# Interval {#data-type-interval} +# Interval The family of data types representing time and date intervals. The resulting types of the [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) operator. @@ -39,7 +39,7 @@ SELECT toTypeName(INTERVAL 4 DAY) └──────────────────────────────┘ ``` -## Usage Remarks {#data-type-interval-usage-remarks} +## Usage Remarks You can use `Interval`-type values in arithmetical operations with [Date](../../../sql-reference/data-types/date.md) and [DateTime](../../../sql-reference/data-types/datetime.md)-type values. For example, you can add 4 days to the current time: @@ -78,7 +78,7 @@ Received exception from server (version 19.14.1): Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime.. ``` -## See Also {#see-also} +## See Also - [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) operator - [toInterval](../../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions diff --git a/docs/en/sql-reference/data-types/special-data-types/nothing.md b/docs/en/sql-reference/data-types/special-data-types/nothing.md index f9f296f7dc4..d3164eab941 100644 --- a/docs/en/sql-reference/data-types/special-data-types/nothing.md +++ b/docs/en/sql-reference/data-types/special-data-types/nothing.md @@ -3,7 +3,7 @@ sidebar_position: 60 sidebar_label: Nothing --- -# Nothing {#nothing} +# Nothing The only purpose of this data type is to represent cases where a value is not expected. So you can’t create a `Nothing` type value. diff --git a/docs/en/sql-reference/data-types/special-data-types/set.md b/docs/en/sql-reference/data-types/special-data-types/set.md index 6d447b96f3b..1490fd311ea 100644 --- a/docs/en/sql-reference/data-types/special-data-types/set.md +++ b/docs/en/sql-reference/data-types/special-data-types/set.md @@ -3,7 +3,7 @@ sidebar_position: 59 sidebar_label: Set --- -# Set {#set} +# Set Used for the right half of an [IN](../../../sql-reference/operators/in.md#select-in-operators) expression. diff --git a/docs/en/sql-reference/data-types/string.md b/docs/en/sql-reference/data-types/string.md index e2903c7329d..db8a399df9d 100644 --- a/docs/en/sql-reference/data-types/string.md +++ b/docs/en/sql-reference/data-types/string.md @@ -3,7 +3,7 @@ sidebar_position: 44 sidebar_label: String --- -# String {#string} +# String Strings of an arbitrary length. The length is not limited. The value can contain an arbitrary set of bytes, including null bytes. The String type replaces the types VARCHAR, BLOB, CLOB, and others from other DBMSs. @@ -14,7 +14,7 @@ Aliases: - `String` — `LONGTEXT`, `MEDIUMTEXT`, `TINYTEXT`, `TEXT`, `LONGBLOB`, `MEDIUMBLOB`, `TINYBLOB`, `BLOB`, `VARCHAR`, `CHAR`. -## Encodings {#encodings} +## Encodings ClickHouse does not have the concept of encodings. Strings can contain an arbitrary set of bytes, which are stored and output as-is. If you need to store texts, we recommend using UTF-8 encoding. 
At the very least, if your terminal uses UTF-8 (as recommended), you can read and write your values without making conversions. diff --git a/docs/en/sql-reference/data-types/tuple.md b/docs/en/sql-reference/data-types/tuple.md index eea48ab37b4..159fe9b5ee4 100644 --- a/docs/en/sql-reference/data-types/tuple.md +++ b/docs/en/sql-reference/data-types/tuple.md @@ -3,7 +3,7 @@ sidebar_position: 54 sidebar_label: Tuple(T1, T2, ...) --- -# Tuple(t1, T2, …) {#tuplet1-t2} +# Tuple(t1, T2, …) A tuple of elements, each having an individual [type](../../sql-reference/data-types/index.md#data_types). @@ -11,7 +11,7 @@ Tuples are used for temporary column grouping. Columns can be grouped when an IN Tuples can be the result of a query. In this case, for text formats other than JSON, values are comma-separated in brackets. In JSON formats, tuples are output as arrays (in square brackets). -## Creating a Tuple {#creating-a-tuple} +## Creating a Tuple You can use a function to create a tuple: @@ -31,7 +31,7 @@ SELECT tuple(1,'a') AS x, toTypeName(x) └─────────┴───────────────────────────┘ ``` -## Working with Data Types {#working-with-data-types} +## Working with Data Types When creating a tuple on the fly, ClickHouse automatically detects the type of each argument as the minimum of the types which can store the argument value. If the argument is [NULL](../../sql-reference/syntax.md#null-literal), the type of the tuple element is [Nullable](../../sql-reference/data-types/nullable.md). @@ -47,7 +47,7 @@ SELECT tuple(1, NULL) AS x, toTypeName(x) └──────────┴─────────────────────────────────┘ ``` -## Addressing Tuple Elements {#addressing-tuple-elements} +## Addressing Tuple Elements It is possible to read elements of named tuples using indexes and names: diff --git a/docs/en/sql-reference/data-types/uuid.md b/docs/en/sql-reference/data-types/uuid.md index 010fc0b5cf5..75485561f96 100644 --- a/docs/en/sql-reference/data-types/uuid.md +++ b/docs/en/sql-reference/data-types/uuid.md @@ -3,7 +3,7 @@ sidebar_position: 46 sidebar_label: UUID --- -# UUID {#uuid-data-type} +# UUID A universally unique identifier (UUID) is a 16-byte number used to identify records. For detailed information about the UUID, see [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -19,11 +19,11 @@ If you do not specify the UUID column value when inserting a new record, the UUI 00000000-0000-0000-0000-000000000000 ``` -## How to Generate {#how-to-generate} +## How to Generate To generate the UUID value, ClickHouse provides the [generateUUIDv4](../../sql-reference/functions/uuid-functions.md) function. -## Usage Example {#usage-example} +## Usage Example **Example 1** @@ -66,7 +66,7 @@ SELECT * FROM t_uuid └──────────────────────────────────────┴───────────┘ ``` -## Restrictions {#restrictions} +## Restrictions The UUID data type only supports functions which [String](../../sql-reference/data-types/string.md) data type also supports (for example, [min](../../sql-reference/aggregate-functions/reference/min.md#agg_function-min), [max](../../sql-reference/aggregate-functions/reference/max.md#agg_function-max), and [count](../../sql-reference/aggregate-functions/reference/count.md#agg_function-count)). 
diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index cf95c83a154..887d9ee4612 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -77,7 +77,7 @@ Configuration example of a composite key (key has one element with [String](../. ... ``` -## Ways to Store Dictionaries in Memory {#ways-to-store-dictionaries-in-memory} +## Ways to Store Dictionaries in Memory - [flat](#flat) - [hashed](#dicts-external_dicts_dict_layout-hashed) @@ -96,7 +96,7 @@ Configuration example of a composite key (key has one element with [String](../. - [complex_key_direct](#complex-key-direct) - [ip_trie](#ip-trie) -### flat {#flat} +### flat The dictionary is completely stored in memory in the form of flat arrays. The amount of memory used is proportional to the size of the largest key (in space used). @@ -123,7 +123,7 @@ or LAYOUT(FLAT(INITIAL_ARRAY_SIZE 50000 MAX_ARRAY_SIZE 5000000)) ``` -### hashed {#dicts-external_dicts_dict_layout-hashed} +### hashed The dictionary is completely stored in memory in the form of a hash table. The dictionary can contain any number of elements with any identifiers. In practice, the number of keys can reach tens of millions of items. @@ -152,7 +152,7 @@ or LAYOUT(HASHED(PREALLOCATE 0)) ``` -### sparse_hashed {#dicts-external_dicts_dict_layout-sparse_hashed} +### sparse_hashed Similar to `hashed`, but uses less memory in favor of more CPU usage. @@ -174,7 +174,7 @@ or LAYOUT(SPARSE_HASHED([PREALLOCATE 0])) ``` -### complex_key_hashed {#complex-key-hashed} +### complex_key_hashed This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `hashed`. @@ -192,7 +192,7 @@ or LAYOUT(COMPLEX_KEY_HASHED()) ``` -### complex_key_sparse_hashed {#complex-key-sparse-hashed} +### complex_key_sparse_hashed This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to [sparse_hashed](#dicts-external_dicts_dict_layout-sparse_hashed). @@ -210,7 +210,7 @@ or LAYOUT(COMPLEX_KEY_SPARSE_HASHED()) ``` -### hashed_array {#dicts-external_dicts_dict_layout-hashed-array} +### hashed_array The dictionary is completely stored in memory. Each attribute is stored in an array. The key attribute is stored in the form of a hashed table where the value is an index in the attributes array. The dictionary can contain any number of elements with any identifiers. In practice, the number of keys can reach tens of millions of items. @@ -233,7 +233,7 @@ or LAYOUT(HASHED_ARRAY()) ``` -### complex_key_hashed_array {#complex-key-hashed-array} +### complex_key_hashed_array This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to [hashed_array](#dicts-external_dicts_dict_layout-hashed-array). @@ -251,7 +251,7 @@ or LAYOUT(COMPLEX_KEY_HASHED_ARRAY()) ``` -### range_hashed {#range-hashed} +### range_hashed The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values.
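A minimal DDL sketch tying the `LAYOUT` clauses above together with a source and lifetime; the dictionary, table, and column names are hypothetical, and depending on the server version you may need to specify `HOST`, `PORT`, and `USER` in the source explicitly:

``` sql
CREATE DICTIONARY hashed_dict_demo -- hypothetical name
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(DB 'default' TABLE 'source_table')) -- assumes a local table 'source_table'
LAYOUT(HASHED())
LIFETIME(MIN 300 MAX 360);
```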
@@ -370,7 +370,7 @@ PRIMARY KEY Abcdef RANGE(MIN StartTimeStamp MAX EndTimeStamp) ``` -### complex_key_range_hashed {#complex-key-range-hashed} +### complex_key_range_hashed The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values (see [range_hashed](#range-hashed)). This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). @@ -392,7 +392,7 @@ LAYOUT(COMPLEX_KEY_RANGE_HASHED()) RANGE(MIN StartDate MAX EndDate); ``` -### cache {#cache} +### cache The dictionary is stored in a cache that has a fixed number of cells. These cells contain frequently used elements. @@ -450,11 +450,11 @@ Set a large enough cache size. You need to experiment to select the number of ce Do not use ClickHouse as a source, because it is slow to process queries with random reads. ::: -### complex_key_cache {#complex-key-cache} +### complex_key_cache This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `cache`. -### ssd_cache {#ssd-cache} +### ssd_cache Similar to `cache`, but stores data on SSD and index in RAM. All cache dictionary settings related to update queue can also be applied to SSD cache dictionaries. @@ -484,11 +484,11 @@ LAYOUT(SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 16777216 READ_BUFFER_SIZE 1048576 PATH '/var/lib/clickhouse/user_files/test_dict')) ``` -### complex_key_ssd_cache {#complex-key-ssd-cache} +### complex_key_ssd_cache This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `ssd_cache`. -### direct {#direct} +### direct The dictionary is not stored in memory and directly goes to the source during the processing of a request. @@ -510,11 +510,11 @@ or LAYOUT(DIRECT()) ``` -### complex_key_direct {#complex-key-direct} +### complex_key_direct This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `direct`. -### ip_trie {#ip-trie} +### ip_trie This type of storage is for mapping network prefixes (IP addresses) to metadata such as ASN. diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index d4ea249c786..ec62205a36d 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -72,7 +72,7 @@ Types of sources (`source_type`): - [Cassandra](#dicts-external_dicts_dict_sources-cassandra) - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql) -## Local File {#dicts-external_dicts_dict_sources-local_file} +## Local File Example of settings: @@ -102,7 +102,7 @@ When dictionary with source `FILE` is created via DDL command (`CREATE DICTIONAR - [Dictionary function](../../../sql-reference/table-functions/dictionary.md#dictionary-function) -## Executable File {#dicts-external_dicts_dict_sources-executable} +## Executable File Working with executable files depends on [how the dictionary is stored in memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). 
If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts the executable file and treats its output as dictionary data. @@ -131,7 +131,7 @@ Setting fields: That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled; otherwise, the DB user would be able to execute an arbitrary binary on the ClickHouse node. -## Executable Pool {#dicts-external_dicts_dict_sources-executable_pool} +## Executable Pool Executable pool allows loading data from a pool of processes. This source does not work with dictionary layouts that need to load all data from the source. Executable pool works if the dictionary [is stored](external-dicts-dict-layout.md#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, `complex_key_direct` layouts. @@ -166,7 +166,7 @@ Setting fields: That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled; otherwise, the DB user would be able to execute an arbitrary binary on the ClickHouse node. -## Http(s) {#dicts-external_dicts_dict_sources-http} +## Http(s) Working with an HTTP(s) server depends on [how the dictionary is stored in memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method. @@ -218,7 +218,7 @@ Setting fields: When creating a dictionary using the DDL command (`CREATE DICTIONARY ...`), remote hosts for HTTP dictionaries are checked against the contents of the `remote_url_allow_hosts` section of the config to prevent database users from accessing an arbitrary HTTP server. -### Known Vulnerability of the ODBC Dictionary Functionality {#known-vulnerability-of-the-odbc-dictionary-functionality} +### Known Vulnerability of the ODBC Dictionary Functionality :::note When connecting to the database through the ODBC driver, the connection parameter `Servername` can be substituted. In this case, values of `USERNAME` and `PASSWORD` from `odbc.ini` are sent to the remote server and can be compromised. ::: @@ -247,7 +247,7 @@ SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); The ODBC driver will send values of `USERNAME` and `PASSWORD` from `odbc.ini` to `some-server.com`. -### Example of Connecting Postgresql {#example-of-connecting-postgresql} +### Example of Connecting Postgresql Ubuntu OS. @@ -328,7 +328,7 @@ LIFETIME(MIN 300 MAX 360) You may need to edit `odbc.ini` to specify the full path to the library with the driver `DRIVER=/usr/local/lib/psqlodbcw.so`. -### Example of Connecting MS SQL Server {#example-of-connecting-ms-sql-server} +### Example of Connecting MS SQL Server Ubuntu OS. @@ -432,9 +432,9 @@ LAYOUT(FLAT()) LIFETIME(MIN 300 MAX 360) ``` -## DBMS {#dbms} +## DBMS -### ODBC {#dicts-external_dicts_dict_sources-odbc} +### ODBC You can use this method to connect to any database that has an ODBC driver. @@ -480,7 +480,7 @@ ClickHouse receives quoting symbols from ODBC-driver and quote all settings in q If you have problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/integration/oracle-odbc.md) item.
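For the DDL route, a compact sketch of wiring an ODBC source into a dictionary; the DSN, table, and all names are placeholders:

``` sql
CREATE DICTIONARY odbc_dict_demo -- hypothetical name
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(ODBC(connection_string 'DSN=myconnection' table 'some_table')) -- placeholder DSN and table
LAYOUT(HASHED())
LIFETIME(MIN 300 MAX 360);
```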
-### Mysql {#dicts-external_dicts_dict_sources-mysql} +### Mysql Example of settings: @@ -593,7 +593,7 @@ SOURCE(MYSQL( )) ``` -### ClickHouse {#dicts-external_dicts_dict_sources-clickhouse} +### ClickHouse Example of settings: @@ -646,7 +646,7 @@ Setting fields: The `table` or `where` fields cannot be used together with the `query` field. Either one of the `table` or `query` fields must be declared. ::: -### Mongodb {#dicts-external_dicts_dict_sources-mongodb} +### Mongodb Example of settings: @@ -685,7 +685,7 @@ Setting fields: - `db` – Name of the database. - `collection` – Name of the collection. -### Redis {#dicts-external_dicts_dict_sources-redis} +### Redis Example of settings: @@ -718,7 +718,7 @@ Setting fields: - `storage_type` – The structure of internal Redis storage used for working with keys. `simple` is for simple sources and for hashed single key sources, `hash_map` is for hashed sources with two keys. Ranged sources and cache sources with complex key are unsupported. May be omitted, default value is `simple`. - `db_index` – The specific numeric index of the Redis logical database. May be omitted, default value is 0. -### Cassandra {#dicts-external_dicts_dict_sources-cassandra} +### Cassandra Example of settings: @@ -760,7 +760,7 @@ Setting fields: The `column_family` or `where` fields cannot be used together with the `query` field. Either one of the `column_family` or `query` fields must be declared. ::: -### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql} +### PostgreSQL Example of settings: diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index 8a38c249300..b159401ea45 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -47,7 +47,7 @@ Attributes are described in the query body: - `PRIMARY KEY` — [Key column](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) - `AttrName AttrType` — [Data column](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes). There can be multiple attributes. -## Key {#ext_dict_structure-key} +## Key ClickHouse supports the following types of keys: @@ -60,7 +60,7 @@ An xml structure can contain either `` or ``. DDL-query must contain si You must not describe the key as an attribute. ::: -### Numeric Key {#numeric-key} +### Numeric Key Type: `UInt64`. @@ -89,7 +89,7 @@ PRIMARY KEY Id - `PRIMARY KEY` – The name of the column with keys. -### Composite Key {#composite-key} +### Composite Key The key can be a `tuple` of fields of any types. The [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) in this case must be `complex_key_hashed` or `complex_key_cache`. @@ -129,7 +129,7 @@ PRIMARY KEY field1, field2 For a query to the `dictGet*` function, a tuple is passed as the key. Example: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`.
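To make the composite-key lookup above concrete, a sketch assuming a hypothetical dictionary `ext_dict` with a `(String, UInt32)` key and a `String` attribute `attr_name`:

``` sql
SELECT dictGetString('ext_dict', 'attr_name', tuple('string for field1', toUInt32(42))) AS value;
```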
-## Attributes {#ext_dict_structure-attributes} +## Attributes Configuration example: diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md index d816888f019..92c73ca5978 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md @@ -49,7 +49,7 @@ You can [configure](../../../sql-reference/dictionaries/external-dictionaries/ex You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../../sql-reference/functions/other-functions.md) function). This functionality is not related to external dictionaries. ::: -## See Also {#ext-dicts-see-also} +## See Also - [Configuring an External Dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) - [Storing Dictionaries in Memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md index 8e54b70eab0..2c96cc1916e 100644 --- a/docs/en/sql-reference/dictionaries/index.md +++ b/docs/en/sql-reference/dictionaries/index.md @@ -3,7 +3,7 @@ sidebar_label: Dictionaries sidebar_position: 35 --- -# Dictionaries {#dictionaries} +# Dictionaries A dictionary is a mapping (`key -> attributes`) that is convenient for various types of reference lists. diff --git a/docs/en/sql-reference/dictionaries/internal-dicts.md b/docs/en/sql-reference/dictionaries/internal-dicts.md index 1996c974412..3dd13631f08 100644 --- a/docs/en/sql-reference/dictionaries/internal-dicts.md +++ b/docs/en/sql-reference/dictionaries/internal-dicts.md @@ -3,7 +3,7 @@ sidebar_position: 39 sidebar_label: Internal Dictionaries --- -# Internal Dictionaries {#internal_dicts} +# Internal Dictionaries ClickHouse contains a built-in feature for working with a geobase. diff --git a/docs/en/sql-reference/distributed-ddl.md b/docs/en/sql-reference/distributed-ddl.md index 80197b628be..e12be4206a3 100644 --- a/docs/en/sql-reference/distributed-ddl.md +++ b/docs/en/sql-reference/distributed-ddl.md @@ -3,7 +3,7 @@ sidebar_position: 3 sidebar_label: Distributed DDL --- -# Distributed DDL Queries (ON CLUSTER Clause) {#distributed-ddl-queries-on-cluster-clause} +# Distributed DDL Queries (ON CLUSTER Clause) By default the `CREATE`, `DROP`, `ALTER`, and `RENAME` queries affect only the current server where they are executed. In a cluster setup, it is possible to run such queries in a distributed manner with the `ON CLUSTER` clause. diff --git a/docs/en/sql-reference/functions/arithmetic-functions.md b/docs/en/sql-reference/functions/arithmetic-functions.md index 63c481c9ae6..b8d2f171bc8 100644 --- a/docs/en/sql-reference/functions/arithmetic-functions.md +++ b/docs/en/sql-reference/functions/arithmetic-functions.md @@ -3,7 +3,7 @@ sidebar_position: 34 sidebar_label: Arithmetic --- -# Arithmetic Functions {#arithmetic-functions} +# Arithmetic Functions For all arithmetic functions, the result type is calculated as the smallest number type that the result fits in, if there is such a type. The minimum is taken simultaneously based on the number of bits, whether it is signed, and whether it floats. If there are not enough bits, the highest bit type is taken. 
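The promotion rule above can be checked directly with `toTypeName`; a small sketch (the result types noted in the comments are what the rule predicts):

``` sql
SELECT
    toTypeName(toUInt8(1) + toUInt8(1)) AS unsigned_sum, -- UInt16: the sum may need one extra bit
    toTypeName(toUInt8(1) + toInt8(1))  AS signed_sum;   -- Int16: one signed argument makes the result signed
```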
@@ -23,66 +23,66 @@ Arithmetic functions work for any pair of types from UInt8, UInt16, UInt32, UInt Overflow is produced the same way as in C++. -## plus(a, b), a + b operator {#plusa-b-a-b-operator} +## plus(a, b), a + b operator Calculates the sum of the numbers. You can also add integer numbers to a date or date with time. In the case of a date, adding an integer means adding the corresponding number of days. For a date with time, it means adding the corresponding number of seconds. -## minus(a, b), a - b operator {#minusa-b-a-b-operator} +## minus(a, b), a - b operator Calculates the difference. The result is always signed. You can also subtract integer numbers from a date or date with time. The idea is the same – see ‘plus’ above. -## multiply(a, b), a \* b operator {#multiplya-b-a-b-operator} +## multiply(a, b), a \* b operator Calculates the product of the numbers. -## divide(a, b), a / b operator {#dividea-b-a-b-operator} +## divide(a, b), a / b operator Calculates the quotient of the numbers. The result type is always a floating-point type. It is not integer division. For integer division, use the ‘intDiv’ function. When dividing by zero you get ‘inf’, ‘-inf’, or ‘nan’. -## intDiv(a, b) {#intdiva-b} +## intDiv(a, b) Calculates the quotient of the numbers. Divides into integers, rounding down (by the absolute value). An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one. -## intDivOrZero(a, b) {#intdivorzeroa-b} +## intDivOrZero(a, b) Differs from ‘intDiv’ in that it returns zero when dividing by zero or when dividing a minimal negative number by minus one. -## modulo(a, b), a % b operator {#modulo} +## modulo(a, b), a % b operator Calculates the remainder after division. If arguments are floating-point numbers, they are pre-converted to integers by dropping the decimal portion. The remainder is taken in the same sense as in C++. Truncated division is used for negative numbers. An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one. -## moduloOrZero(a, b) {#modulo-or-zero} +## moduloOrZero(a, b) Differs from [modulo](#modulo) in that it returns zero when the divisor is zero. -## negate(a), -a operator {#negatea-a-operator} +## negate(a), -a operator Calculates a number with the opposite sign. The result is always signed. -## abs(a) {#arithm_func-abs} +## abs(a) Calculates the absolute value of the number (a). That is, if a \< 0, it returns -a. For unsigned types it does not do anything. For signed integer types, it returns an unsigned number. -## gcd(a, b) {#gcda-b} +## gcd(a, b) Returns the greatest common divisor of the numbers. An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one. -## lcm(a, b) {#lcma-b} +## lcm(a, b) Returns the least common multiple of the numbers. An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one. -## max2 {#max2} +## max2 Compares two values and returns the maximum. The returned value is converted to [Float64](../../sql-reference/data-types/float.md). @@ -119,7 +119,7 @@ Result: └─────────────┘ ``` -## min2 {#min2} +## min2 Compares two values and returns the minimum. The returned value is converted to [Float64](../../sql-reference/data-types/float.md).
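A quick illustration of the difference between the division functions above:

``` sql
SELECT
    divide(10, 3) AS q,  -- 3.3333...: always floating-point
    intDiv(10, 3) AS iq, -- 3: integer division, rounded down
    modulo(10, 3) AS r;  -- 1: remainder, as in C++
```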
diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index 4fe7e44aad7..53c2e929d15 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -3,9 +3,9 @@ sidebar_position: 35 sidebar_label: Arrays --- -# Array Functions {#functions-for-working-with-arrays} +# Array Functions -## empty {#function-empty} +## empty Checks whether the input array is empty. @@ -49,7 +49,7 @@ Result: └────────────────┘ ``` -## notEmpty {#function-notempty} +## notEmpty Checks whether the input array is non-empty. @@ -93,7 +93,7 @@ Result: └──────────────────┘ ``` -## length {#array_functions-length} +## length Returns the number of items in the array. The result type is UInt64. @@ -101,24 +101,24 @@ The function also works for strings. Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [size0](../../sql-reference/data-types/array.md#array-size) subcolumn instead of reading and processing the whole array column. The query `SELECT length(arr) FROM table` transforms to `SELECT arr.size0 FROM TABLE`. -## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} +## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 -## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64} +## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 -## emptyArrayFloat32, emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64} +## emptyArrayFloat32, emptyArrayFloat64 -## emptyArrayDate, emptyArrayDateTime {#emptyarraydate-emptyarraydatetime} +## emptyArrayDate, emptyArrayDateTime -## emptyArrayString {#emptyarraystring} +## emptyArrayString Accepts zero arguments and returns an empty array of the appropriate type. -## emptyArrayToSingle {#emptyarraytosingle} +## emptyArrayToSingle Accepts an empty array and returns a one-element array that is equal to the default value. -## range(end), range(\[start, \] end \[, step\]) {#range} +## range(end), range(\[start, \] end \[, step\]) Returns an array of `UInt` numbers from `start` to `end - 1` by `step`. @@ -156,13 +156,13 @@ Result: └─────────────┴─────────────┴────────────────┘ ``` -## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1} +## array(x1, …), operator \[x1, …\] Creates an array from the function arguments. The arguments must be constants and have types that have the smallest common type. At least one argument must be passed, because otherwise it isn’t clear which type of array to create. That is, you can’t use this function to create an empty array (to do that, use the ‘emptyArray\*’ function described above). Returns an ‘Array(T)’ type result, where ‘T’ is the smallest common type out of the passed arguments. -## arrayConcat {#arrayconcat} +## arrayConcat Combines arrays passed as arguments. @@ -187,7 +187,7 @@ SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res └───────────────┘ ``` -## arrayElement(arr, n), operator arr\[n\] {#arrayelementarr-n-operator-arrn} +## arrayElement(arr, n), operator arr\[n\] Get the element with the index `n` from the array `arr`. `n` must be any integer type. Indexes in an array begin from one. @@ -195,7 +195,7 @@ Negative indexes are supported. 
In this case, it selects the corresponding element numbered from the end. If the index falls outside of the bounds of an array, it returns some default value (0 for numbers, an empty string for strings, etc.), except for the case with a non-constant array and a constant index 0 (in this case there will be an error `Array indices are 1-based`). -## has(arr, elem) {#hasarr-elem} +## has(arr, elem) Checks whether the ‘arr’ array has the ‘elem’ element. Returns 0 if the element is not in the array, or 1 if it is. @@ -212,7 +212,7 @@ SELECT has([1, 2, NULL], NULL) └─────────────────────────┘ ``` -## hasAll {#hasall} +## hasAll Checks whether one array is a subset of another. @@ -250,7 +250,7 @@ hasAll(set, subset) `SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` returns 0. -## hasAny {#hasany} +## hasAny Checks whether two arrays have any elements in common. @@ -285,7 +285,7 @@ hasAny(array1, array2) `SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]])` returns `1`. -## hasSubstr {#hassubstr} +## hasSubstr Checks whether all the elements of array2 appear in array1 in the same exact order. Therefore, the function will return 1, if and only if `array1 = prefix + array2 + suffix`. @@ -332,7 +332,7 @@ For Example: `SELECT hasSubstr([[1, 2], [3, 4], [5, 6]], [[1, 2], [3, 4]])` returns 1. -## indexOf(arr, x) {#indexofarr-x} +## indexOf(arr, x) Returns the index of the first ‘x’ element (starting from 1) if it is in the array, or 0 if it is not. @@ -350,13 +350,13 @@ SELECT indexOf([1, 3, NULL, NULL], NULL) Elements set to `NULL` are handled as normal values. -## arrayCount(\[func,\] arr1, …) {#array-count} +## arrayCount(\[func,\] arr1, …) -Returns the number of elements in the arr array for which func returns something other than 0. If ‘func’ is not specified, it returns the number of non-zero elements in the array. +Returns the number of elements for which `func(arr1[i], …, arrN[i])` returns something other than 0. If `func` is not specified, it returns the number of non-zero elements in the array. Note that the `arrayCount` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument. -## countEqual(arr, x) {#countequalarr-x} +## countEqual(arr, x) Returns the number of elements in the array equal to x. Equivalent to arrayCount (elem -\> elem = x, arr). @@ -374,7 +374,7 @@ SELECT countEqual([1, 2, NULL, NULL], NULL) └──────────────────────────────────────┘ ``` -## arrayEnumerate(arr) {#array_functions-arrayenumerate} +## arrayEnumerate(arr) Returns the array \[1, 2, 3, …, length (arr) \] @@ -416,7 +416,7 @@ WHERE (CounterID = 160656) AND notEmpty(GoalsReached) This function can also be used in higher-order functions. For example, you can use it to get array indexes for elements that match a condition. -## arrayEnumerateUniq(arr, …) {#arrayenumerateuniqarr} +## arrayEnumerateUniq(arr, …) Returns an array the same size as the source array, indicating for each element what its position is among elements with the same value. For example: arrayEnumerateUniq(\[10, 20, 10, 30\]) = \[1, 1, 2, 1\]. @@ -470,7 +470,7 @@ SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res This is necessary when using ARRAY JOIN with a nested data structure and further aggregation across multiple elements in this structure. -## arrayPopBack {#arraypopback} +## arrayPopBack Removes the last item from the array.
@@ -494,7 +494,7 @@ SELECT arrayPopBack([1, 2, 3]) AS res; └───────┘ ``` -## arrayPopFront {#arraypopfront} +## arrayPopFront Removes the first item from the array. @@ -518,7 +518,7 @@ SELECT arrayPopFront([1, 2, 3]) AS res; └───────┘ ``` -## arrayPushBack {#arraypushback} +## arrayPushBack Adds one item to the end of the array. @@ -543,7 +543,7 @@ SELECT arrayPushBack(['a'], 'b') AS res; └───────────┘ ``` -## arrayPushFront {#arraypushfront} +## arrayPushFront Adds one element to the beginning of the array. @@ -568,7 +568,7 @@ SELECT arrayPushFront(['b'], 'a') AS res; └───────────┘ ``` -## arrayResize {#arrayresize} +## arrayResize Changes the length of the array. @@ -610,7 +610,7 @@ SELECT arrayResize([1], 3, NULL); └───────────────────────────┘ ``` -## arraySlice {#arrayslice} +## arraySlice Returns a slice of the array. @@ -638,7 +638,7 @@ SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res; Array elements set to `NULL` are handled as normal values. -## arraySort(\[func,\] arr, …) {#array_functions-sort} +## arraySort(\[func,\] arr, …) Sorts the elements of the `arr` array in ascending order. If the `func` function is specified, sorting order is determined by the result of the `func` function applied to the elements of the array. If `func` accepts multiple arguments, the `arraySort` function is passed several arrays that the arguments of `func` will correspond to. Detailed examples are shown at the end of `arraySort` description. @@ -739,7 +739,7 @@ SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; To improve sorting efficiency, the [Schwartzian transform](https://en.wikipedia.org/wiki/Schwartzian_transform) is used. ::: -## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort} +## arrayReverseSort(\[func,\] arr, …) Sorts the elements of the `arr` array in descending order. If the `func` function is specified, `arr` is sorted according to the result of the `func` function applied to the elements of the array, and then the sorted array is reversed. If `func` accepts multiple arguments, the `arrayReverseSort` function is passed several arrays that the arguments of `func` will correspond to. Detailed examples are shown at the end of `arrayReverseSort` description. @@ -840,18 +840,18 @@ SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; └─────────┘ ``` -## arrayUniq(arr, …) {#arrayuniqarr} +## arrayUniq(arr, …) If one argument is passed, it counts the number of different elements in the array. If multiple arguments are passed, it counts the number of different tuples of elements at corresponding positions in multiple arrays. If you want to get a list of unique items in an array, you can use arrayReduce(‘groupUniqArray’, arr). -## arrayJoin(arr) {#array-functions-join} +## arrayJoin(arr) A special function. See the section [“ArrayJoin function”](../../sql-reference/functions/array-join.md#functions_arrayjoin). -## arrayDifference {#arraydifference} +## arrayDifference Calculates the difference between adjacent array elements. Returns an array where the first element will be 0, the second is the difference between `a[1] - a[0]`, etc. The type of elements in the resulting array is determined by the type inference rules for subtraction (e.g. `UInt8` - `UInt8` = `Int16`). @@ -903,7 +903,7 @@ Result: └────────────────────────────────────────────┘ ``` -## arrayDistinct {#arraydistinct} +## arrayDistinct Takes an array, returns an array containing the distinct elements only. 
@@ -937,7 +937,7 @@ Result: └────────────────────────────────┘ ``` -## arrayEnumerateDense(arr) {#array_functions-arrayenumeratedense} +## arrayEnumerateDense(arr) Returns an array of the same size as the source array, indicating where each element first appears in the source array. @@ -953,7 +953,7 @@ SELECT arrayEnumerateDense([10, 20, 10, 30]) └───────────────────────────────────────┘ ``` -## arrayIntersect(arr) {#array-functions-arrayintersect} +## arrayIntersect(arr) Takes multiple arrays, returns an array with elements that are present in all source arrays. @@ -971,7 +971,7 @@ SELECT └──────────────┴───────────┘ ``` -## arrayReduce {#arrayreduce} +## arrayReduce Applies an aggregate function to array elements and returns its result. The name of the aggregation function is passed as a string in single quotes `'max'`, `'sum'`. When using parametric aggregate functions, the parameter is indicated after the function name in parentheses `'uniqUpTo(6)'`. @@ -1036,7 +1036,7 @@ Result: └─────────────────────────────────────────────────────────────┘ ``` -## arrayReduceInRanges {#arrayreduceinranges} +## arrayReduceInRanges Applies an aggregate function to array elements in given ranges and returns an array containing the result corresponding to each range. The function will return the same result as multiple `arrayReduce(agg_func, arraySlice(arr1, index, length), ...)`. @@ -1078,7 +1078,7 @@ Result: └─────────────────────────────┘ ``` -## arrayReverse(arr) {#arrayreverse} +## arrayReverse(arr) Returns an array of the same size as the original array containing the elements in reverse order. @@ -1094,11 +1094,11 @@ SELECT arrayReverse([1, 2, 3]) └─────────────────────────┘ ``` -## reverse(arr) {#array-functions-reverse} +## reverse(arr) Synonym for [“arrayReverse”](#arrayreverse) -## arrayFlatten {#arrayflatten} +## arrayFlatten Converts an array of arrays to a flat array. @@ -1133,7 +1133,7 @@ SELECT flatten([[[1]], [[2], [3]]]); └─────────────────────────────────────────────┘ ``` -## arrayCompact {#arraycompact} +## arrayCompact Removes consecutive duplicate elements from an array. The order of result values is determined by the order in the source array. @@ -1169,7 +1169,7 @@ Result: └────────────────────────────────────────────┘ ``` -## arrayZip {#arrayzip} +## arrayZip Combines multiple arrays into a single array. The resulting array contains the corresponding elements of the source arrays grouped into tuples in the listed order of arguments. @@ -1207,7 +1207,7 @@ Result: └──────────────────────────────────────┘ ``` -## arrayAUC {#arrayauc} +## arrayAUC Calculate AUC (Area Under the Curve, which is a concept in machine learning, see more details: https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve). @@ -1242,9 +1242,9 @@ Result: └───────────────────────────────────────────────┘ ``` -## arrayMap(func, arr1, …) {#array-map} +## arrayMap(func, arr1, …) -Returns an array obtained from the original application of the `func` function to each element in the `arr` array. +Returns an array obtained from the original arrays by application of `func(arr1[i], …, arrN[i])` for each element. Arrays `arr1` … `arrN` must have the same number of elements. Examples: @@ -1272,9 +1272,9 @@ SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) AS res Note that the `arrayMap` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. 
-## arrayFilter(func, arr1, …) {#array-filter} +## arrayFilter(func, arr1, …) -Returns an array containing only the elements in `arr1` for which `func` returns something other than 0. +Returns an array containing only the elements in `arr1` for which `func(arr1[i], …, arrN[i])` returns something other than 0. Examples: @@ -1305,9 +1305,9 @@ SELECT Note that the `arrayFilter` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. -## arrayFill(func, arr1, …) {#array-fill} +## arrayFill(func, arr1, …) -Scan through `arr1` from the first element to the last element and replace `arr1[i]` by `arr1[i - 1]` if `func` returns 0. The first element of `arr1` will not be replaced. +Scan through `arr1` from the first element to the last element and replace `arr1[i]` by `arr1[i - 1]` if `func(arr1[i], …, arrN[i])` returns 0. The first element of `arr1` will not be replaced. Examples: @@ -1323,9 +1323,9 @@ SELECT arrayFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, Note that the `arrayFill` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. -## arrayReverseFill(func, arr1, …) {#array-reverse-fill} +## arrayReverseFill(func, arr1, …) -Scan through `arr1` from the last element to the first element and replace `arr1[i]` by `arr1[i + 1]` if `func` returns 0. The last element of `arr1` will not be replaced. +Scan through `arr1` from the last element to the first element and replace `arr1[i]` by `arr1[i + 1]` if `func(arr1[i], …, arrN[i])` returns 0. The last element of `arr1` will not be replaced. Examples: @@ -1341,9 +1341,9 @@ SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, Note that the `arrayReverseFill` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. -## arraySplit(func, arr1, …) {#array-split} +## arraySplit(func, arr1, …) -Split `arr1` into multiple arrays. When `func` returns something other than 0, the array will be split on the left hand side of the element. The array will not be split before the first element. +Split `arr1` into multiple arrays. When `func(arr1[i], …, arrN[i])` returns something other than 0, the array will be split on the left hand side of the element. The array will not be split before the first element. Examples: @@ -1359,9 +1359,9 @@ SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res Note that the `arraySplit` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. -## arrayReverseSplit(func, arr1, …) {#array-reverse-split} +## arrayReverseSplit(func, arr1, …) -Split `arr1` into multiple arrays. When `func` returns something other than 0, the array will be split on the right hand side of the element. The array will not be split after the last element. +Split `arr1` into multiple arrays. When `func(arr1[i], …, arrN[i])` returns something other than 0, the array will be split on the right hand side of the element. The array will not be split after the last element. 
Examples: @@ -1377,43 +1377,43 @@ SELECT arrayReverseSplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res Note that the `arrayReverseSplit` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. -## arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1} +## arrayExists(\[func,\] arr1, …) -Returns 1 if there is at least one element in `arr` for which `func` returns something other than 0. Otherwise, it returns 0. +Returns 1 if there is at least one element in `arr` for which `func(arr1[i], …, arrN[i])` returns something other than 0. Otherwise, it returns 0. Note that the `arrayExists` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument. -## arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1} +## arrayAll(\[func,\] arr1, …) -Returns 1 if `func` returns something other than 0 for all the elements in `arr`. Otherwise, it returns 0. +Returns 1 if `func(arr1[i], …, arrN[i])` returns something other than 0 for all the elements in arrays. Otherwise, it returns 0. Note that the `arrayAll` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument. -## arrayFirst(func, arr1, …) {#array-first} +## arrayFirst(func, arr1, …) -Returns the first element in the `arr1` array for which `func` returns something other than 0. +Returns the first element in the `arr1` array for which `func(arr1[i], …, arrN[i])` returns something other than 0. Note that the `arrayFirst` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. -## arrayLast(func, arr1, …) {#array-last} +## arrayLast(func, arr1, …) -Returns the last element in the `arr1` array for which `func` returns something other than 0. +Returns the last element in the `arr1` array for which `func(arr1[i], …, arrN[i])` returns something other than 0. Note that the `arrayLast` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. -## arrayFirstIndex(func, arr1, …) {#array-first-index} +## arrayFirstIndex(func, arr1, …) -Returns the index of the first element in the `arr1` array for which `func` returns something other than 0. +Returns the index of the first element in the `arr1` array for which `func(arr1[i], …, arrN[i])` returns something other than 0. Note that the `arrayFirstIndex` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. -## arrayLastIndex(func, arr1, …) {#array-last-index} +## arrayLastIndex(func, arr1, …) -Returns the index of the last element in the `arr1` array for which `func` returns something other than 0. +Returns the index of the last element in the `arr1` array for which `func(arr1[i], …, arrN[i])` returns something other than 0. Note that the `arrayLastIndex` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can’t be omitted. -## arrayMin {#array-min} +## arrayMin Returns the minimum of elements in the source array. 
@@ -1468,7 +1468,7 @@ Result: └─────┘ ``` -## arrayMax {#array-max} +## arrayMax Returns the maximum of elements in the source array. @@ -1523,7 +1523,7 @@ Result: └─────┘ ``` -## arraySum {#array-sum} +## arraySum Returns the sum of elements in the source array. @@ -1578,7 +1578,7 @@ Result: └─────┘ ``` -## arrayAvg {#array-avg} +## arrayAvg Returns the average of elements in the source array. @@ -1633,9 +1633,9 @@ Result: └─────┘ ``` -## arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1} +## arrayCumSum(\[func,\] arr1, …) -Returns an array of partial sums of elements in the source array (a running sum). If the `func` function is specified, then the values of the array elements are converted by this function before summing. +Returns an array of partial sums of elements in the source array (a running sum). If the `func` function is specified, then the values of the array elements are converted by `func(arr1[i], …, arrN[i])` before summing. Example: @@ -1651,7 +1651,7 @@ SELECT arrayCumSum([1, 1, 1, 1]) AS res Note that the `arrayCumSum` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument. -## arrayCumSumNonNegative(arr) {#arraycumsumnonnegativearr} +## arrayCumSumNonNegative(arr) Same as `arrayCumSum`, returns an array of partial sums of elements in the source array (a running sum). Unlike `arrayCumSum`, when the returned value contains a value less than zero, the value is replaced with zero and the subsequent calculation is performed with zero parameters. For example: @@ -1666,7 +1666,7 @@ SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res ``` Note that the `arrayCumSumNonNegative` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument. -## arrayProduct {#arrayproduct} +## arrayProduct Multiplies elements of an [array](../../sql-reference/data-types/array.md). diff --git a/docs/en/sql-reference/functions/array-join.md b/docs/en/sql-reference/functions/array-join.md index 24d9c2b08d8..ed1199a45d8 100644 --- a/docs/en/sql-reference/functions/array-join.md +++ b/docs/en/sql-reference/functions/array-join.md @@ -3,7 +3,7 @@ sidebar_position: 61 sidebar_label: arrayJoin --- -# arrayJoin function {#functions_arrayjoin} +# arrayJoin function This is a very unusual function. diff --git a/docs/en/sql-reference/functions/bit-functions.md b/docs/en/sql-reference/functions/bit-functions.md index 4a3c1b5ecd5..1fde738476d 100644 --- a/docs/en/sql-reference/functions/bit-functions.md +++ b/docs/en/sql-reference/functions/bit-functions.md @@ -3,21 +3,21 @@ sidebar_position: 48 sidebar_label: Bit --- -# Bit Functions {#bit-functions} +# Bit Functions Bit functions work for any pair of types from `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, `Float32`, or `Float64`. Some functions support `String` and `FixedString` types. The result type is an integer with bits equal to the maximum bits of its arguments. If at least one of the arguments is signed, the result is a signed number. If an argument is a floating-point number, it is cast to Int64. -## bitAnd(a, b) {#bitanda-b} +## bitAnd(a, b) -## bitOr(a, b) {#bitora-b} +## bitOr(a, b) -## bitXor(a, b) {#bitxora-b} +## bitXor(a, b) -## bitNot(a) {#bitnota} +## bitNot(a) -## bitShiftLeft(a, b) {#bitshiftlefta-b} +## bitShiftLeft(a, b) Shifts the binary representation of a value to the left by a specified number of bit positions.
@@ -66,7 +66,7 @@ Result: └─────┴──────────────────────────────┴───────────┴───────────────────────────────────────────────┘ ``` -## bitShiftRight(a, b) {#bitshiftrighta-b} +## bitShiftRight(a, b) Shifts the binary representation of a value to the right by a specified number of bit positions. @@ -113,9 +113,9 @@ Result: └─────┴──────────────────────────────┴───────────┴─────────────────────────────────────────────────┘ ``` -## bitRotateLeft(a, b) {#bitrotatelefta-b} +## bitRotateLeft(a, b) -## bitRotateRight(a, b) {#bitrotaterighta-b} +## bitRotateRight(a, b) ## bitSlice(s, offset, length) @@ -166,7 +166,7 @@ Result: └──────────────────────────────────────────┴───────────────────────────────┘ ``` -## bitTest {#bittest} +## bitTest Takes any integer and converts it into [binary form](https://en.wikipedia.org/wiki/Binary_number), then returns the value of the bit at the specified position. The countdown starts from 0 from the right to the left. @@ -221,7 +221,7 @@ Result: └────────────────┘ ``` -## bitTestAll {#bittestall} +## bitTestAll Returns the result of [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. The countdown starts from 0 from the right to the left. @@ -286,7 +286,7 @@ Result: └───────────────────────────────┘ ``` -## bitTestAny {#bittestany} +## bitTestAny Returns the result of [logical disjunction](https://en.wikipedia.org/wiki/Logical_disjunction) (OR operator) of all bits at given positions. The countdown starts from 0 from the right to the left. @@ -351,7 +351,7 @@ Result: └──────────────────────┘ ``` -## bitCount {#bitcount} +## bitCount Calculates the number of bits set to one in the binary representation of a number. @@ -391,7 +391,7 @@ Result: └───────────────┘ ``` -## bitHammingDistance {#bithammingdistance} +## bitHammingDistance Returns the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) between the bit representations of two integer values. Can be used with [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash) functions for detection of semi-duplicate strings. The smaller the distance, the more likely the strings are the same. diff --git a/docs/en/sql-reference/functions/bitmap-functions.md b/docs/en/sql-reference/functions/bitmap-functions.md index 68d1fc88a31..3250c10ff84 100644 --- a/docs/en/sql-reference/functions/bitmap-functions.md +++ b/docs/en/sql-reference/functions/bitmap-functions.md @@ -3,7 +3,7 @@ sidebar_position: 49 sidebar_label: Bitmap --- -# Bitmap Functions {#bitmap-functions} +# Bitmap Functions Bitmap functions operate on pairs of bitmap objects and return either a new bitmap or a cardinality, computed with formulas such as and, or, xor, and not. @@ -13,7 +13,7 @@ RoaringBitmap is wrapped into a data structure while actual storage of Bitmap ob For more information on RoaringBitmap, see: [CRoaring](https://github.com/RoaringBitmap/CRoaring). -## bitmapBuild {#bitmap_functions-bitmapbuild} +## bitmapBuild Builds a bitmap from an unsigned integer array. @@ -37,7 +37,7 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res); └─────┴──────────────────────────────────────────────┘ ``` -## bitmapToArray {#bitmaptoarray} +## bitmapToArray Converts a bitmap to an integer array. @@ -61,7 +61,7 @@ SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res; └─────────────┘ ``` -## bitmapSubsetInRange {#bitmap-functions-bitmapsubsetinrange} +## bitmapSubsetInRange Returns a subset in the specified range (`range_end` is not included).
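For illustration, a hand-checked sketch of `bitHammingDistance` (111 = 0b1101111 and 121 = 0b1111001 differ in three bit positions):

```sql
SELECT bitHammingDistance(111, 121) AS res; -- expected: 3
```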
@@ -87,7 +87,7 @@ SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11, └───────────────────┘ ``` -## bitmapSubsetLimit {#bitmapsubsetlimit} +## bitmapSubsetLimit Creates a subset of a bitmap with n elements taken between `range_start` and `cardinality_limit`. @@ -125,7 +125,7 @@ Result: └───────────────────────────┘ ``` -## subBitmap {#subbitmap} +## subBitmap Returns the bitmap elements, starting from the `offset` position. The number of returned elements is limited by the `cardinality_limit` parameter. Analog of the [substring](string-functions.md#substring) string function, but for bitmaps. @@ -163,7 +163,7 @@ Result: └─────────────────────────────────┘ ``` -## bitmapContains {#bitmap_functions-bitmapcontains} +## bitmapContains Checks whether the bitmap contains an element. @@ -195,7 +195,7 @@ SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res; └─────┘ ``` -## bitmapHasAny {#bitmaphasany} +## bitmapHasAny Checks whether two bitmaps intersect in at least one element. @@ -226,7 +226,7 @@ SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; └─────┘ ``` -## bitmapHasAll {#bitmaphasall} +## bitmapHasAll Analogous to `hasAll(array, array)`: returns 1 if the first bitmap contains all the elements of the second one, 0 otherwise. If the second argument is an empty bitmap then returns 1. @@ -251,7 +251,7 @@ SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; └─────┘ ``` -## bitmapCardinality {#bitmapcardinality} +## bitmapCardinality Returns the bitmap cardinality of type UInt64. @@ -275,7 +275,7 @@ SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res; └─────┘ ``` -## bitmapMin {#bitmapmin} +## bitmapMin Returns the smallest value of type UInt64 in the set, or UINT32_MAX if the set is empty. @@ -297,7 +297,7 @@ SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res; └─────┘ ``` -## bitmapMax {#bitmapmax} +## bitmapMax Returns the greatest value of type UInt64 in the set, or 0 if the set is empty. @@ -319,7 +319,7 @@ SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res; └─────┘ ``` -## bitmapTransform {#bitmaptransform} +## bitmapTransform Transforms an array of values in a bitmap into another array of values; the result is a new bitmap. @@ -343,7 +343,7 @@ SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10] └───────────────────────┘ ``` -## bitmapAnd {#bitmapand} +## bitmapAnd Computes the AND of two bitmaps; the result is a new bitmap. @@ -367,7 +367,7 @@ SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS re └─────┘ ``` -## bitmapOr {#bitmapor} +## bitmapOr Computes the OR of two bitmaps; the result is a new bitmap. @@ -391,7 +391,7 @@ SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res └─────────────┘ ``` -## bitmapXor {#bitmapxor} +## bitmapXor Computes the XOR of two bitmaps; the result is a new bitmap. @@ -415,7 +415,7 @@ SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS re └───────────┘ ``` -## bitmapAndnot {#bitmapandnot} +## bitmapAndnot Computes the ANDNOT of two bitmaps; the result is a new bitmap. @@ -439,7 +439,7 @@ SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS └───────┘ ``` -## bitmapAndCardinality {#bitmapandcardinality} +## bitmapAndCardinality Computes the AND of two bitmaps and returns the cardinality of type UInt64.
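The empty-set behavior of `bitmapMin` and `bitmapMax` described above can be checked directly; a sketch, assuming `bitmapBuild` accepts the empty array produced by `emptyArrayUInt32()`:

```sql
SELECT
    bitmapMin(bitmapBuild(emptyArrayUInt32())) AS min_empty, -- expected: 4294967295 (UINT32_MAX)
    bitmapMax(bitmapBuild(emptyArrayUInt32())) AS max_empty; -- expected: 0
```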
@@ -463,7 +463,7 @@ SELECT bitmapAndCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; └─────┘ ``` -## bitmapOrCardinality {#bitmaporcardinality} +## bitmapOrCardinality Computes the OR of two bitmaps and returns the cardinality of type UInt64. @@ -487,7 +487,7 @@ SELECT bitmapOrCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; └─────┘ ``` -## bitmapXorCardinality {#bitmapxorcardinality} +## bitmapXorCardinality Computes the XOR of two bitmaps and returns the cardinality of type UInt64. @@ -511,7 +511,7 @@ SELECT bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; └─────┘ ``` -## bitmapAndnotCardinality {#bitmapandnotcardinality} +## bitmapAndnotCardinality Computes the ANDNOT of two bitmaps and returns the cardinality of type UInt64. diff --git a/docs/en/sql-reference/functions/comparison-functions.md b/docs/en/sql-reference/functions/comparison-functions.md index b5e842ddcad..f3315fb08d9 100644 --- a/docs/en/sql-reference/functions/comparison-functions.md +++ b/docs/en/sql-reference/functions/comparison-functions.md @@ -3,7 +3,7 @@ sidebar_position: 36 sidebar_label: Comparison --- -# Comparison Functions {#comparison-functions} +# Comparison Functions Comparison functions always return 0 or 1 (UInt8). @@ -20,15 +20,15 @@ For example, you can’t compare a date with a string. You have to use a functio Strings are compared by bytes. A shorter string is smaller than all strings that start with it and that contain at least one more character. -## equals, a = b and a == b operator {#function-equals} +## equals, a = b and a == b operator -## notEquals, a != b and a \<\> b operator {#function-notequals} +## notEquals, a != b and a \<\> b operator -## less, \< operator {#function-less} +## less, \< operator -## greater, \> operator {#function-greater} +## greater, \> operator -## lessOrEquals, \<= operator {#function-lessorequals} +## lessOrEquals, \<= operator -## greaterOrEquals, \>= operator {#function-greaterorequals} +## greaterOrEquals, \>= operator diff --git a/docs/en/sql-reference/functions/conditional-functions.md b/docs/en/sql-reference/functions/conditional-functions.md index 21189bbb072..0e81a2159a3 100644 --- a/docs/en/sql-reference/functions/conditional-functions.md +++ b/docs/en/sql-reference/functions/conditional-functions.md @@ -3,9 +3,9 @@ sidebar_position: 43 sidebar_label: 'Conditional ' --- -# Conditional Functions {#conditional-functions} +# Conditional Functions -## if {#if} +## if Controls conditional branching. Unlike most systems, ClickHouse always evaluates both expressions `then` and `else`. @@ -97,7 +97,7 @@ WHERE isNotNull(left) AND isNotNull(right) Note: `NULL` values are not used in this example, check the [NULL values in conditionals](#null-values-in-conditionals) section. -## Ternary Operator {#ternary-operator} +## Ternary Operator It works the same as the `if` function. @@ -113,7 +113,7 @@ Returns `then` if the `cond` evaluates to be true (greater than zero), otherwise - [ifNotFinite](../../sql-reference/functions/other-functions.md#ifnotfinite). -## multiIf {#multiif} +## multiIf Allows you to write the [CASE](../../sql-reference/operators/index.md#operator_case) operator more compactly in the query. @@ -157,7 +157,7 @@ FROM LEFT_RIGHT └──────┴───────┴─────────────────┘ ``` -## Using Conditional Results Directly {#using-conditional-results-directly} +## Using Conditional Results Directly Conditionals always result in `0`, `1` or `NULL`.
So you can use conditional results directly like this: @@ -174,7 +174,7 @@ FROM LEFT_RIGHT └──────────┘ ``` -## NULL Values in Conditionals {#null-values-in-conditionals} +## NULL Values in Conditionals When `NULL` values are involved in conditionals, the result will also be `NULL`. diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index e37e86e99dc..621429fb02c 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -3,7 +3,7 @@ sidebar_position: 39 sidebar_label: Dates and Times --- -# Functions for Working with Dates and Times {#functions-for-working-with-dates-and-times} +# Functions for Working with Dates and Times Support for time zones. @@ -23,7 +23,7 @@ SELECT └─────────────────────┴────────────┴────────────┴─────────────────────┘ ``` -## timeZone {#timezone} +## timeZone Returns the timezone of the server. If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value. @@ -42,7 +42,7 @@ Alias: `timezone`. Type: [String](../../sql-reference/data-types/string.md). -## toTimeZone {#totimezone} +## toTimeZone Converts time or date and time to the specified time zone. The time zone is an attribute of the `Date` and `DateTime` data types. The internal value (number of seconds) of the table field or of the resultset's column does not change, the column's type changes and its string representation changes accordingly. @@ -100,7 +100,7 @@ int32samoa: 1546300800 `toTimeZone(time_utc, 'Asia/Yekaterinburg')` changes the `DateTime('UTC')` type to `DateTime('Asia/Yekaterinburg')`. The value (Unixtimestamp) 1546300800 stays the same, but the string representation (the result of the toString() function) changes from `time_utc: 2019-01-01 00:00:00` to `time_yekat: 2019-01-01 05:00:00`. -## timeZoneOf {#timezoneof} +## timeZoneOf Returns the timezone name of [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md) data types. @@ -136,7 +136,7 @@ Result: └───────────────────┘ ``` -## timeZoneOffset {#timezoneoffset} +## timeZoneOffset Returns a timezone offset in seconds from [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time). The function takes into account [daylight saving time](https://en.wikipedia.org/wiki/Daylight_saving_time) and historical timezone changes at the specified date and time. [IANA timezone database](https://www.iana.org/time-zones) is used to calculate the offset. @@ -176,63 +176,63 @@ Result: └─────────────────────┴──────────────────────────────┴───────────────────┴─────────────────┘ ``` -## toYear {#toyear} +## toYear Converts a date or date with time to a UInt16 number containing the year number (AD). Alias: `YEAR`. -## toQuarter {#toquarter} +## toQuarter Converts a date or date with time to a UInt8 number containing the quarter number. Alias: `QUARTER`. -## toMonth {#tomonth} +## toMonth Converts a date or date with time to a UInt8 number containing the month number (1-12). Alias: `MONTH`. -## toDayOfYear {#todayofyear} +## toDayOfYear Converts a date or date with time to a UInt16 number containing the number of the day of the year (1-366). Alias: `DAYOFYEAR`. -## toDayOfMonth {#todayofmonth} +## toDayOfMonth Converts a date or date with time to a UInt8 number containing the number of the day of the month (1-31). Aliases: `DAYOFMONTH`, `DAY`. 
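A single query illustrates the `toYear` … `toDayOfMonth` converters above; values are hand-computed for 2021-12-31 (not a leap year):

```sql
WITH toDateTime('2021-12-31 23:59:59') AS dt
SELECT
    toYear(dt)       AS y,   -- 2021
    toQuarter(dt)    AS q,   -- 4
    toMonth(dt)      AS m,   -- 12
    toDayOfYear(dt)  AS doy, -- 365
    toDayOfMonth(dt) AS dom; -- 31
```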
-## toDayOfWeek {#todayofweek} +## toDayOfWeek Converts a date or date with time to a UInt8 number containing the number of the day of the week (Monday is 1, and Sunday is 7). Alias: `DAYOFWEEK`. -## toHour {#tohour} +## toHour Converts a date with time to a UInt8 number containing the number of the hour in 24-hour time (0-23). This function assumes that if clocks are moved ahead, it is by one hour and occurs at 2 a.m., and if clocks are moved back, it is by one hour and occurs at 3 a.m. (which is not always true – even in Moscow the clocks were twice changed at a different time). Alias: `HOUR`. -## toMinute {#tominute} +## toMinute Converts a date with time to a UInt8 number containing the number of the minute of the hour (0-59). Alias: `MINUTE`. -## toSecond {#tosecond} +## toSecond Converts a date with time to a UInt8 number containing the number of the second in the minute (0-59). Leap seconds are not accounted for. Alias: `SECOND`. -## toUnixTimestamp {#to-unix-timestamp} +## toUnixTimestamp For DateTime argument: converts value to the number with type UInt32 -- Unix Timestamp (https://en.wikipedia.org/wiki/Unix_time). For String argument: converts the input string to the datetime according to the timezone (optional second argument, server timezone is used by default) and returns the corresponding unix timestamp. @@ -270,23 +270,23 @@ Result: The return type `toStartOf*` functions described below is `Date` or `DateTime`. Though these functions can take `DateTime64` as an argument, passing them a `DateTime64` that is out of the normal range (years 1925 - 2283) will give an incorrect result. ::: -## toStartOfYear {#tostartofyear} +## toStartOfYear Rounds down a date or date with time to the first day of the year. Returns the date. -## toStartOfISOYear {#tostartofisoyear} +## toStartOfISOYear Rounds down a date or date with time to the first day of ISO year. Returns the date. -## toStartOfQuarter {#tostartofquarter} +## toStartOfQuarter Rounds down a date or date with time to the first day of the quarter. The first day of the quarter is either 1 January, 1 April, 1 July, or 1 October. Returns the date. -## toStartOfMonth {#tostartofmonth} +## toStartOfMonth Rounds down a date or date with time to the first day of the month. Returns the date. @@ -295,30 +295,30 @@ Returns the date. The behavior of parsing incorrect dates is implementation specific. ClickHouse may return zero date, throw an exception or do “natural” overflow. ::: -## toMonday {#tomonday} +## toMonday Rounds down a date or date with time to the nearest Monday. Returns the date. -## toStartOfWeek(t\[,mode\]) {#tostartofweektmode} +## toStartOfWeek(t\[,mode\]) Rounds down a date or date with time to the nearest Sunday or Monday by mode. Returns the date. The mode argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used. -## toStartOfDay {#tostartofday} +## toStartOfDay Rounds down a date with time to the start of the day. -## toStartOfHour {#tostartofhour} +## toStartOfHour Rounds down a date with time to the start of the hour. -## toStartOfMinute {#tostartofminute} +## toStartOfMinute Rounds down a date with time to the start of the minute. -## toStartOfSecond {#tostartofsecond} +## toStartOfSecond Truncates sub-seconds. @@ -375,19 +375,19 @@ Result: - [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) server configuration parameter. 
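For illustration, the rounding of the `toStartOf*` family on one timestamp (2022-06-13 was a Monday, so `toMonday` is easy to verify by hand):

```sql
WITH toDateTime('2022-06-15 11:47:30') AS dt
SELECT
    toStartOfHour(dt)            AS hour,    -- 2022-06-15 11:00:00
    toStartOfDay(dt)             AS day,     -- 2022-06-15 00:00:00
    toMonday(toDate(dt))         AS monday,  -- 2022-06-13
    toStartOfMonth(toDate(dt))   AS month,   -- 2022-06-01
    toStartOfQuarter(toDate(dt)) AS quarter; -- 2022-04-01
```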
-## toStartOfFiveMinutes {#tostartoffiveminutes} +## toStartOfFiveMinutes Rounds down a date with time to the start of the five-minute interval. -## toStartOfTenMinutes {#tostartoftenminutes} +## toStartOfTenMinutes Rounds down a date with time to the start of the ten-minute interval. -## toStartOfFifteenMinutes {#tostartoffifteenminutes} +## toStartOfFifteenMinutes Rounds down the date with time to the start of the fifteen-minute interval. -## toStartOfInterval(time_or_data, INTERVAL x unit \[, time_zone\]) {#tostartofintervaltime-or-data-interval-x-unit-time-zone} +## toStartOfInterval(time_or_data, INTERVAL x unit \[, time_zone\]) This is a generalization of other functions named `toStartOf*`. For example, `toStartOfInterval(t, INTERVAL 1 year)` returns the same as `toStartOfYear(t)`, @@ -395,51 +395,51 @@ This is a generalization of other functions named `toStartOf*`. For example, `toStartOfInterval(t, INTERVAL 1 day)` returns the same as `toStartOfDay(t)`, `toStartOfInterval(t, INTERVAL 15 minute)` returns the same as `toStartOfFifteenMinutes(t)` etc. -## toTime {#totime} +## toTime Converts a date with time to a certain fixed date, while preserving the time. -## toRelativeYearNum {#torelativeyearnum} +## toRelativeYearNum Converts a date with time or date to the number of the year, starting from a certain fixed point in the past. -## toRelativeQuarterNum {#torelativequarternum} +## toRelativeQuarterNum Converts a date with time or date to the number of the quarter, starting from a certain fixed point in the past. -## toRelativeMonthNum {#torelativemonthnum} +## toRelativeMonthNum Converts a date with time or date to the number of the month, starting from a certain fixed point in the past. -## toRelativeWeekNum {#torelativeweeknum} +## toRelativeWeekNum Converts a date with time or date to the number of the week, starting from a certain fixed point in the past. -## toRelativeDayNum {#torelativedaynum} +## toRelativeDayNum Converts a date with time or date to the number of the day, starting from a certain fixed point in the past. -## toRelativeHourNum {#torelativehournum} +## toRelativeHourNum Converts a date with time or date to the number of the hour, starting from a certain fixed point in the past. -## toRelativeMinuteNum {#torelativeminutenum} +## toRelativeMinuteNum Converts a date with time or date to the number of the minute, starting from a certain fixed point in the past. -## toRelativeSecondNum {#torelativesecondnum} +## toRelativeSecondNum Converts a date with time or date to the number of the second, starting from a certain fixed point in the past. -## toISOYear {#toisoyear} +## toISOYear Converts a date or date with time to a UInt16 number containing the ISO Year number. -## toISOWeek {#toisoweek} +## toISOWeek Converts a date or date with time to a UInt8 number containing the ISO Week number. -## toWeek(date\[,mode\]) {#toweekdatemode} +## toWeek(date\[,mode\]) This function returns the week number for date or datetime. The two-argument form of toWeek() enables you to specify whether the week starts on Sunday or Monday and whether the return value should be in the range from 0 to 53 or from 1 to 53. If the mode argument is omitted, the default mode is 0. `toISOWeek()`is a compatibility function that is equivalent to `toWeek(date,3)`. 
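Since `toStartOfInterval` generalizes the whole family, one sketch may help; the expected values follow from the equivalences listed above:

```sql
SELECT
    toStartOfInterval(toDateTime('2022-06-15 11:47:30'), INTERVAL 15 minute) AS m15, -- 2022-06-15 11:45:00, same as toStartOfFifteenMinutes
    toStartOfInterval(toDate('2022-06-15'), INTERVAL 1 quarter)              AS q;   -- 2022-04-01, same as toStartOfQuarter
```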
@@ -488,7 +488,7 @@ SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS we └────────────┴───────┴───────┴───────┘ ``` -## toYearWeek(date\[,mode\]) {#toyearweekdatemode} +## toYearWeek(date\[,mode\]) Returns year and week for a date. The year in the result may be different from the year in the date argument for the first and the last week of the year. @@ -508,7 +508,7 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d └────────────┴───────────┴───────────┴───────────┘ ``` -## date\_trunc {#date_trunc} +## date\_trunc Truncates date and time data to the specified part of the date. @@ -577,7 +577,7 @@ Result: - [toStartOfInterval](#tostartofintervaltime-or-data-interval-x-unit-time-zone) -## date\_add {#date_add} +## date\_add Adds the time interval or date interval to the provided date or date with time. @@ -628,7 +628,7 @@ Result: └───────────────────────────────────────────────┘ ``` -## date\_diff {#date_diff} +## date\_diff Returns the difference between two dates or dates with time values. @@ -682,7 +682,7 @@ Result: └────────────────────────────────────────────────────────────────────────────────────────┘ ``` -## date\_sub {#date_sub} +## date\_sub Subtracts the time interval or date interval from the provided date or date with time. @@ -733,7 +733,7 @@ Result: └────────────────────────────────────────────────┘ ``` -## timestamp\_add {#timestamp_add} +## timestamp\_add Adds the specified time value to the provided date or date with time value. @@ -783,7 +783,7 @@ Result: └────────────────────────────────────────────────┘ ``` -## timestamp\_sub {#timestamp_sub} +## timestamp\_sub Subtracts the time interval from the provided date or date with time. @@ -834,7 +834,7 @@ Result: └──────────────────────────────────────────────────────────────┘ ``` -## now {#now} +## now Returns the current date and time. @@ -884,33 +884,33 @@ Result: └──────────────────────┘ ``` -## today {#today} +## today Accepts zero arguments and returns the current date at one of the moments of request execution. The same as ‘toDate(now())’. -## yesterday {#yesterday} +## yesterday Accepts zero arguments and returns yesterday’s date at one of the moments of request execution. The same as ‘today() - 1’. -## timeSlot {#timeslot} +## timeSlot Rounds the time to the half hour. -## toYYYYMM {#toyyyymm} +## toYYYYMM Converts a date or date with time to a UInt32 number containing the year and month number (YYYY \* 100 + MM). -## toYYYYMMDD {#toyyyymmdd} +## toYYYYMMDD Converts a date or date with time to a UInt32 number containing the year, month, and day numbers (YYYY \* 10000 + MM \* 100 + DD). -## toYYYYMMDDhhmmss {#toyyyymmddhhmmss} +## toYYYYMMDDhhmmss Converts a date or date with time to a UInt64 number containing the year, month, day, hour, minute, and second numbers (YYYY \* 10000000000 + MM \* 100000000 + DD \* 1000000 + hh \* 10000 + mm \* 100 + ss). -## addYears, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addQuarters {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters} +## addYears, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addQuarters Function adds a Date/DateTime interval to a Date/DateTime and then returns the Date/DateTime.
For example: @@ -929,7 +929,7 @@ SELECT └─────────────────────┴──────────────────────────┘ ``` -## subtractYears, subtractMonths, subtractWeeks, subtractDays, subtractHours, subtractMinutes, subtractSeconds, subtractQuarters {#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters} +## subtractYears, subtractMonths, subtractWeeks, subtractDays, subtractHours, subtractMinutes, subtractSeconds, subtractQuarters Function subtracts a Date/DateTime interval from a Date/DateTime and then returns the Date/DateTime. For example: @@ -948,13 +948,13 @@ SELECT └──────────────────────────┴───────────────────────────────┘ ``` -## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size} +## timeSlots(StartTime, Duration\[, Size\]) For a time interval starting at ‘StartTime’ and continuing for ‘Duration’ seconds, it returns an array of moments in time, consisting of points from this interval rounded down to the ‘Size’ in seconds. ‘Size’ is an optional parameter: a constant UInt32, set to 1800 by default. For example, `timeSlots(toDateTime('2012-01-01 12:20:00'), 600) = [toDateTime('2012-01-01 12:00:00'), toDateTime('2012-01-01 12:30:00')]`. This is necessary for searching for pageviews in the corresponding session. -## formatDateTime {#formatdatetime} +## formatDateTime Formats a Time according to the given Format string. Format is a constant expression, so you cannot have multiple formats for a single result column. @@ -1015,7 +1015,7 @@ Result: └────────────────────────────────────────────┘ ``` -## dateName {#dataname} +## dateName Returns the specified part of a date. @@ -1054,7 +1054,7 @@ Result: └──────────────────────────────┴───────────────────────────────┴───────────────────────────── ``` -## FROM\_UNIXTIME {#fromunixfime} +## FROM\_UNIXTIME Converts a Unix timestamp to a calendar date and a time of day. When there is only a single argument of [Integer](../../sql-reference/data-types/int-uint.md) type, it acts in the same way as [toDateTime](../../sql-reference/functions/type-conversion-functions.md#todatetime) and returns [DateTime](../../sql-reference/data-types/datetime.md) type. @@ -1088,7 +1088,7 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime; └─────────────────────┘ ``` -## toModifiedJulianDay {#tomodifiedjulianday} +## toModifiedJulianDay Converts a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD` to a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number in Int32. This function supports dates from `0000-01-01` to `9999-12-31`. It raises an exception if the argument cannot be parsed as a date, or the date is invalid. @@ -1124,7 +1124,7 @@ Result: └───────────────────────────────────┘ ``` -## toModifiedJulianDayOrNull {#tomodifiedjuliandayornull} +## toModifiedJulianDayOrNull Similar to [toModifiedJulianDay()](#tomodifiedjulianday), but instead of raising exceptions it returns `NULL`. @@ -1160,7 +1160,7 @@ Result: └─────────────────────────────────────────┘ ``` -## fromModifiedJulianDay {#frommodifiedjulianday} +## fromModifiedJulianDay Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day numbers from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31 respectively).
It raises an exception if the day number is outside of the supported range. @@ -1196,7 +1196,7 @@ Result: └──────────────────────────────┘ ``` -## fromModifiedJulianDayOrNull {#frommodifiedjuliandayornull} +## fromModifiedJulianDayOrNull Similar to [fromModifiedJulianDay()](#frommodifiedjulianday), but instead of raising exceptions it returns `NULL`. diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index 6e25befcbc7..4ee71267a09 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -3,9 +3,9 @@ sidebar_position: 52 sidebar_label: Encoding --- -# Encoding Functions {#encoding-functions} +# Encoding Functions -## char {#char} +## char Returns a string whose length equals the number of passed arguments and where each byte has the value of the corresponding argument. Accepts multiple arguments of numeric types. If the value of an argument is out of range of the UInt8 data type, it is converted to UInt8 with possible rounding and overflow. @@ -71,7 +71,7 @@ Result: └───────┘ ``` -## hex {#hex} +## hex Returns a string containing the argument’s hexadecimal representation. @@ -164,7 +164,7 @@ Result: ``` -## unhex {#unhexstr} +## unhex Performs the opposite operation of [hex](#hex). It interprets each pair of hexadecimal digits (in the argument) as a number and converts it to the byte represented by the number. The return value is a binary string (BLOB). @@ -222,7 +222,7 @@ Result: └──────┘ ``` -## bin {#bin} +## bin Returns a string containing the argument’s binary representation. @@ -315,7 +315,7 @@ Result: ``` -## unbin {#unbinstr} +## unbin Interprets each pair of binary digits (in the argument) as a number and converts it to the byte represented by the number. The function performs the opposite operation to [bin](#bin). @@ -375,23 +375,23 @@ Result: └─────┘ ``` -## UUIDStringToNum(str) {#uuidstringtonumstr} +## UUIDStringToNum(str) Accepts a string containing 36 characters in the format `123e4567-e89b-12d3-a456-426655440000`, and returns it as a set of bytes in a FixedString(16). -## UUIDNumToString(str) {#uuidnumtostringstr} +## UUIDNumToString(str) Accepts a FixedString(16) value. Returns a string containing 36 characters in text format. -## bitmaskToList(num) {#bitmasktolistnum} +## bitmaskToList(num) Accepts an integer. Returns a string containing the list of powers of two that total the source number when summed. They are comma-separated without spaces in text format, in ascending order. -## bitmaskToArray(num) {#bitmasktoarraynum} +## bitmaskToArray(num) Accepts an integer. Returns an array of UInt64 numbers containing the list of powers of two that total the source number when summed. Numbers in the array are in ascending order. -## bitPositionsToArray(num) {#bitpositionstoarraynum} +## bitPositionsToArray(num) Accepts an integer and converts it to an unsigned integer. Returns an array of `UInt64` numbers containing the list of positions of bits of `arg` that equal `1`, in ascending order.
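For illustration, `bitmaskToList`, `bitmaskToArray`, and `bitPositionsToArray` on 50 = 2 + 16 + 32 = 0b110010 (hand-checked):

```sql
SELECT
    bitmaskToList(50)       AS as_list,      -- '2,16,32'
    bitmaskToArray(50)      AS as_powers,    -- [2,16,32]
    bitPositionsToArray(50) AS as_positions; -- [1,4,5]
```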
diff --git a/docs/en/sql-reference/functions/encryption-functions.md b/docs/en/sql-reference/functions/encryption-functions.md index 942a63a48a8..fb821ca7783 100644 --- a/docs/en/sql-reference/functions/encryption-functions.md +++ b/docs/en/sql-reference/functions/encryption-functions.md @@ -3,7 +3,7 @@ sidebar_position: 67 sidebar_label: Encryption --- -# Encryption functions {#encryption-functions} +# Encryption functions These functions implement encryption and decryption of data with the AES (Advanced Encryption Standard) algorithm. @@ -13,7 +13,7 @@ Initialization vector length is always 16 bytes (bytes in excess of 16 are ignor Note that these functions work slowly in ClickHouse versions before 21.1. -## encrypt {#encrypt} +## encrypt This function encrypts data using these modes: @@ -106,7 +106,7 @@ Result: └──────────────────────┴──────────────────────────────────────────────┘ ``` -## aes_encrypt_mysql {#aes_encrypt_mysql} +## aes_encrypt_mysql Compatible with MySQL encryption; the resulting ciphertext can be decrypted with the [AES_DECRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-decrypt) function. @@ -218,7 +218,7 @@ mysql> SELECT aes_encrypt('Secret', '123456789101213141516171819202122', 'iviviv 1 row in set (0.00 sec) ``` -## decrypt {#decrypt} +## decrypt This function decrypts ciphertext into a plaintext using these modes: @@ -295,7 +295,7 @@ Result: Notice how only a portion of the data was properly decrypted, and the rest is gibberish since either `mode`, `key`, or `iv` were different upon encryption. -## aes_decrypt_mysql {#aes_decrypt_mysql} +## aes_decrypt_mysql Compatible with MySQL encryption; decrypts data encrypted with the [AES_ENCRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-encrypt) function. diff --git a/docs/en/sql-reference/functions/ext-dict-functions.md b/docs/en/sql-reference/functions/ext-dict-functions.md index 1d3f5952c98..a62445f82d1 100644 --- a/docs/en/sql-reference/functions/ext-dict-functions.md +++ b/docs/en/sql-reference/functions/ext-dict-functions.md @@ -7,11 +7,11 @@ sidebar_label: External Dictionaries For dictionaries created with [DDL queries](../../sql-reference/statements/create/dictionary.md), the `dict_name` parameter must be fully specified, like `<database>.<dict_name>`. Otherwise, the current database is used. ::: -# Functions for Working with External Dictionaries {#ext_dict_functions} +# Functions for Working with External Dictionaries For information on connecting and configuring external dictionaries, see [External dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). -## dictGet, dictGetOrDefault, dictGetOrNull {#dictget} +## dictGet, dictGetOrDefault, dictGetOrNull Retrieves values from an external dictionary. @@ -227,7 +227,7 @@ Result: - [External Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) -## dictHas {#dicthas} +## dictHas Checks whether a key is present in a dictionary. @@ -247,7 +247,7 @@ dictHas('dict_name', id_expr) Type: `UInt8`. -## dictGetHierarchy {#dictgethierarchy} +## dictGetHierarchy Creates an array containing all the parents of a key in the [hierarchical dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md). @@ -268,7 +268,7 @@ dictGetHierarchy('dict_name', key) Type: [Array(UInt64)](../../sql-reference/data-types/array.md). -## dictIsIn {#dictisin} +## dictIsIn Checks the ancestor of a key through the whole hierarchical chain in the dictionary.
@@ -289,7 +289,7 @@ dictIsIn('dict_name', child_id_expr, ancestor_id_expr) Type: `UInt8`. -## dictGetChildren {#dictgetchildren} +## dictGetChildren Returns first-level children as an array of indexes. It is the inverse transformation for [dictGetHierarchy](#dictgethierarchy). @@ -338,7 +338,7 @@ SELECT dictGetChildren('hierarchy_flat_dictionary', number) FROM system.numbers └──────────────────────────────────────────────────────┘ ``` -## dictGetDescendant {#dictgetdescendant} +## dictGetDescendant Returns all descendants as if [dictGetChildren](#dictgetchildren) function was applied `level` times recursively. @@ -402,7 +402,7 @@ SELECT dictGetDescendants('hierarchy_flat_dictionary', number, 1) FROM system.nu └────────────────────────────────────────────────────────────┘ ``` -## Other Functions {#ext_dict_functions-other} +## Other Functions ClickHouse supports specialized functions that convert dictionary attribute values to a specific data type regardless of the dictionary configuration. diff --git a/docs/en/sql-reference/functions/files.md b/docs/en/sql-reference/functions/files.md index 5bb77016039..fc2f8a0e050 100644 --- a/docs/en/sql-reference/functions/files.md +++ b/docs/en/sql-reference/functions/files.md @@ -3,9 +3,9 @@ sidebar_position: 43 sidebar_label: Files --- -# Functions for Working with Files {#functions-for-working-with-files} +# Functions for Working with Files -## file {#file} +## file Reads file as a String. The file content is not parsed, so any information is read as one string and placed into the specified column. diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md index 0ff93357208..2c86aa403cd 100644 --- a/docs/en/sql-reference/functions/functions-for-nulls.md +++ b/docs/en/sql-reference/functions/functions-for-nulls.md @@ -3,9 +3,9 @@ sidebar_position: 63 sidebar_label: Nullable --- -# Functions for Working with Nullable Values {#functions-for-working-with-nullable-aggregates} +# Functions for Working with Nullable Values -## isNull {#isnull} +## isNull Checks whether the argument is [NULL](../../sql-reference/syntax.md#null-literal). @@ -47,7 +47,7 @@ SELECT x FROM t_null WHERE isNull(y); └───┘ ``` -## isNotNull {#isnotnull} +## isNotNull Checks whether the argument is [NULL](../../sql-reference/syntax.md#null-literal). @@ -87,7 +87,7 @@ SELECT x FROM t_null WHERE isNotNull(y); └───┘ ``` -## coalesce {#coalesce} +## coalesce Checks from left to right whether `NULL` arguments were passed and returns the first non-`NULL` argument. @@ -130,7 +130,7 @@ SELECT name, coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook; └──────────┴──────────────────────────────────────────────────────┘ ``` -## ifNull {#ifnull} +## ifNull Returns an alternative value if the main argument is `NULL`. @@ -170,7 +170,7 @@ SELECT ifNull(NULL, 'b'); └───────────────────┘ ``` -## nullIf {#nullif} +## nullIf Returns `NULL` if the arguments are equal. @@ -209,7 +209,7 @@ SELECT nullIf(1, 2); └──────────────┘ ``` -## assumeNotNull {#assumenotnull} +## assumeNotNull Results in an equivalent non-`Nullable` value for a [Nullable](../../sql-reference/data-types/nullable.md) type. In case the original value is `NULL` the result is undetermined. See also `ifNull` and `coalesce` functions. @@ -271,7 +271,7 @@ SELECT toTypeName(assumeNotNull(y)) FROM t_null; └──────────────────────────────┘ ``` -## toNullable {#tonullable} +## toNullable Converts the argument type to `Nullable`. 
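A minimal sketch of `toNullable` together with its inverse `assumeNotNull`:

```sql
SELECT
    toTypeName(10)                            AS plain,    -- UInt8
    toTypeName(toNullable(10))                AS wrapped,  -- Nullable(UInt8)
    toTypeName(assumeNotNull(toNullable(10))) AS stripped; -- UInt8
```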
diff --git a/docs/en/sql-reference/functions/geo/coordinates.md b/docs/en/sql-reference/functions/geo/coordinates.md index 41ba409cbc1..6cc8137a2ff 100644 --- a/docs/en/sql-reference/functions/geo/coordinates.md +++ b/docs/en/sql-reference/functions/geo/coordinates.md @@ -4,9 +4,9 @@ sidebar_position: 62 --- -# Functions for Working with Geographical Coordinates {#geographical-coordinates} +# Functions for Working with Geographical Coordinates -## greatCircleDistance {#greatcircledistance} +## greatCircleDistance Calculates the distance between two points on the Earth’s surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance). @@ -48,7 +48,7 @@ The performance is the same as for `greatCircleDistance` (no performance drawbac Technical note: for close enough points we calculate the distance using planar approximation with the metric on the tangent plane at the midpoint of the coordinates. -## greatCircleAngle {#greatcircleangle} +## greatCircleAngle Calculates the central angle between two points on the Earth’s surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance). @@ -79,7 +79,7 @@ SELECT greatCircleAngle(0, 0, 45, 0) AS arc └─────┘ ``` -## pointInEllipses {#pointinellipses} +## pointInEllipses Checks whether the point belongs to at least one of the ellipses. Coordinates are geometric in the Cartesian coordinate system. @@ -112,7 +112,7 @@ SELECT pointInEllipses(10., 10., 10., 9.1, 1., 0.9999) └─────────────────────────────────────────────────┘ ``` -## pointInPolygon {#pointinpolygon} +## pointInPolygon Checks whether the point belongs to the polygon on the plane. diff --git a/docs/en/sql-reference/functions/geo/geohash.md b/docs/en/sql-reference/functions/geo/geohash.md index e65456d0c40..a4f0328d0d0 100644 --- a/docs/en/sql-reference/functions/geo/geohash.md +++ b/docs/en/sql-reference/functions/geo/geohash.md @@ -2,13 +2,13 @@ sidebar_label: Geohash --- -# Functions for Working with Geohash {#geohash} +# Functions for Working with Geohash [Geohash](https://en.wikipedia.org/wiki/Geohash) is the geocode system, which subdivides Earth’s surface into buckets of grid shape and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer is the geohash string, the more precise is the geographic location. If you need to manually convert geographic coordinates to geohash strings, you can use [geohash.org](http://geohash.org/). -## geohashEncode {#geohashencode} +## geohashEncode Encodes latitude and longitude as a [geohash](#geohash)-string. @@ -38,7 +38,7 @@ SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res; └──────────────┘ ``` -## geohashDecode {#geohashdecode} +## geohashDecode Decodes any [geohash](#geohash)-encoded string into longitude and latitude. @@ -62,7 +62,7 @@ SELECT geohashDecode('ezs42') AS res; └─────────────────────────────────┘ ``` -## geohashesInBox {#geohashesinbox} +## geohashesInBox Returns an array of [geohash](#geohash)-encoded strings of given precision that fall inside and intersect boundaries of given box, basically a 2D grid flattened into array. 
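The example for `geohashesInBox` does not survive into this patch; a sketch of the call shape, assuming the argument order `longitude_min, latitude_min, longitude_max, latitude_max, precision`:

```sql
-- Should return the precision-4 geohash cells covering the given bounding box.
SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS cells;
```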
diff --git a/docs/en/sql-reference/functions/geo/h3.md b/docs/en/sql-reference/functions/geo/h3.md index 25f668de245..8678e890917 100644 --- a/docs/en/sql-reference/functions/geo/h3.md +++ b/docs/en/sql-reference/functions/geo/h3.md @@ -2,7 +2,7 @@ sidebar_label: H3 Indexes --- -# Functions for Working with H3 Indexes {#h3index} +# Functions for Working with H3 Indexes [H3](https://eng.uber.com/h3/) is a geographical indexing system where the Earth’s surface is divided into a grid of even hexagonal cells. This system is hierarchical, i.e. each hexagon on the top level ("parent") can be split into seven even but smaller ones ("children"), and so on. @@ -14,7 +14,7 @@ The H3 index is used primarily for bucketing locations and other geospatial mani The full description of the H3 system is available at [the Uber Engineering site](https://eng.uber.com/h3/). -## h3IsValid {#h3isvalid} +## h3IsValid Verifies whether the number is a valid [H3](#h3index) index. @@ -51,7 +51,7 @@ Result: └───────────┘ ``` -## h3GetResolution {#h3getresolution} +## h3GetResolution Defines the resolution of the given [H3](#h3index) index. @@ -88,7 +88,7 @@ Result: └────────────┘ ``` -## h3EdgeAngle {#h3edgeangle} +## h3EdgeAngle Calculates the average length of the [H3](#h3index) hexagon edge in degrees. @@ -122,7 +122,7 @@ Result: └───────────────────────┘ ``` -## h3EdgeLengthM {#h3edgelengthm} +## h3EdgeLengthM Calculates the average length of the [H3](#h3index) hexagon edge in meters. @@ -156,7 +156,7 @@ Result: └─────────────┘ ``` -## h3EdgeLengthKm {#h3edgelengthkm} +## h3EdgeLengthKm Calculates the average length of the [H3](#h3index) hexagon edge in kilometers. @@ -190,7 +190,7 @@ Result: └──────────────┘ ``` -## geoToH3 {#geotoh3} +## geoToH3 Returns [H3](#h3index) point index `(lon, lat)` with specified resolution. @@ -229,7 +229,7 @@ Result: └────────────────────┘ ``` -## h3ToGeo {#h3togeo} +## h3ToGeo Returns the centroid longitude and latitude corresponding to the provided [H3](#h3index) index. @@ -263,7 +263,7 @@ Result: └───────────────────────────────────────┘ ``` -## h3ToGeoBoundary {#h3togeoboundary} +## h3ToGeoBoundary Returns array of pairs `(lon, lat)`, which corresponds to the boundary of the provided H3 index. @@ -299,7 +299,7 @@ Result: └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -## h3kRing {#h3kring} +## h3kRing Lists all the [H3](#h3index) hexagons in the radius of `k` from the given hexagon in random order. @@ -342,7 +342,7 @@ Result: └────────────────────┘ ``` -## h3GetBaseCell {#h3getbasecell} +## h3GetBaseCell Returns the base cell number of the [H3](#h3index) index. @@ -378,7 +378,7 @@ Result: └──────────┘ ``` -## h3HexAreaM2 {#h3hexaream2} +## h3HexAreaM2 Returns average hexagon area in square meters at the given resolution. @@ -414,7 +414,7 @@ Result: └──────┘ ``` -## h3HexAreaKm2 {#h3hexareakm2} +## h3HexAreaKm2 Returns average hexagon area in square kilometers at the given resolution. @@ -450,7 +450,7 @@ Result: └───────────┘ ``` -## h3IndexesAreNeighbors {#h3indexesareneighbors} +## h3IndexesAreNeighbors Returns whether or not the provided [H3](#h3index) indexes are neighbors. @@ -488,7 +488,7 @@ Result: └───┘ ``` -## h3ToChildren {#h3tochildren} +## h3ToChildren Returns an array of child indexes for the given [H3](#h3index) index.
@@ -525,7 +525,7 @@ Result: └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -## h3ToParent {#h3toparent} +## h3ToParent Returns the parent (coarser) index containing the given [H3](#h3index) index. @@ -562,7 +562,7 @@ Result: └────────────────────┘ ``` -## h3ToString {#h3tostring} +## h3ToString Converts the `H3Index` representation of the index to the string representation. @@ -596,7 +596,7 @@ Result: └─────────────────┘ ``` -## stringToH3 {#stringtoh3} +## stringToH3 Converts the string representation to the `H3Index` (UInt64) representation. @@ -630,7 +630,7 @@ Result: └────────────────────┘ ``` -## h3GetResolution {#h3getresolution} +## h3GetResolution Returns the resolution of the [H3](#h3index) index. @@ -664,7 +664,7 @@ Result: └─────┘ ``` -## h3IsResClassIII {#h3isresclassIII} +## h3IsResClassIII Returns whether [H3](#h3index) index has a resolution with Class III orientation. @@ -701,7 +701,7 @@ Result: └─────┘ ``` -## h3IsPentagon {#h3ispentagon} +## h3IsPentagon Returns whether this [H3](#h3index) index represents a pentagonal cell. @@ -738,7 +738,7 @@ Result: └──────────┘ ``` -## h3GetFaces {#h3getfaces} +## h3GetFaces Returns icosahedron faces intersected by a given [H3](#h3index) index. @@ -774,7 +774,7 @@ Result: └───────┘ ``` -## h3CellAreaM2 {#h3cellaream2} +## h3CellAreaM2 Returns the exact area of a specific cell in square meters corresponding to the given input H3 index. @@ -810,7 +810,7 @@ Result: └────────────────────┘ ``` -## h3CellAreaRads2 {#h3cellarearads2} +## h3CellAreaRads2 Returns the exact area of a specific cell in square radians corresponding to the given input H3 index. @@ -846,7 +846,7 @@ Result: └─────────────────────┘ ``` -## h3ToCenterChild {#h3tocenterchild} +## h3ToCenterChild Returns the center child (finer) [H3](#h3index) index contained by given [H3](#h3index) at the given resolution. @@ -883,7 +883,7 @@ Result: └────────────────────┘ ``` -## h3ExactEdgeLengthM {#h3exactedgelengthm} +## h3ExactEdgeLengthM Returns the exact edge length of the unidirectional edge represented by the input h3 index in meters. @@ -919,7 +919,7 @@ Result: └────────────────────┘ ``` -## h3ExactEdgeLengthKm {#h3exactedgelengthkm} +## h3ExactEdgeLengthKm Returns the exact edge length of the unidirectional edge represented by the input h3 index in kilometers. @@ -955,7 +955,7 @@ Result: └────────────────────┘ ``` -## h3ExactEdgeLengthRads {#h3exactedgelengthrads} +## h3ExactEdgeLengthRads Returns the exact edge length of the unidirectional edge represented by the input h3 index in radians. @@ -991,7 +991,7 @@ Result: └──────────────────────┘ ``` -## h3NumHexagons {#h3numhexagons} +## h3NumHexagons Returns the number of unique H3 indices at the given resolution. @@ -1027,7 +1027,7 @@ Result: └─────────────┘ ``` -## h3Line {#h3line} +## h3Line Returns the line of indices between the two indices that are provided. @@ -1064,7 +1064,7 @@ Result: └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -## h3Distance {#h3distance} +## h3Distance Returns the distance in grid cells between the two indices that are provided. @@ -1103,7 +1103,7 @@ Result: └──────────┘ ``` -## h3HexRing {#h3hexring} +## h3HexRing Returns the indexes of the hexagonal ring centered at the provided origin h3Index and length k. 
@@ -1142,7 +1142,7 @@ Result: └─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -## h3GetUnidirectionalEdge {#h3getunidirectionaledge} +## h3GetUnidirectionalEdge Returns a unidirectional edge H3 index based on the provided origin and destination and returns 0 on error. @@ -1179,7 +1179,7 @@ Result: └─────────────────────┘ ``` -## h3UnidirectionalEdgeIsValid {#h3unidirectionaledgeisvalid} +## h3UnidirectionalEdgeIsValid Determines if the provided H3Index is a valid unidirectional edge index. Returns 1 if it's a unidirectional edge and 0 otherwise. @@ -1216,7 +1216,7 @@ Result: └────────────┘ ``` -## h3GetOriginIndexFromUnidirectionalEdge {#h3getoriginindexfromunidirectionaledge} +## h3GetOriginIndexFromUnidirectionalEdge Returns the origin hexagon index from the unidirectional edge H3Index. @@ -1252,7 +1252,7 @@ Result: └────────────────────┘ ``` -## h3GetDestinationIndexFromUnidirectionalEdge {#h3getdestinationindexfromunidirectionaledge} +## h3GetDestinationIndexFromUnidirectionalEdge Returns the destination hexagon index from the unidirectional edge H3Index. @@ -1288,7 +1288,7 @@ Result: └────────────────────┘ ``` -## h3GetIndexesFromUnidirectionalEdge {#h3getindexesfromunidirectionaledge} +## h3GetIndexesFromUnidirectionalEdge Returns the origin and destination hexagon indexes from the given unidirectional edge H3Index. @@ -1327,7 +1327,7 @@ Result: └─────────────────────────────────────────┘ ``` -## h3GetUnidirectionalEdgesFromHexagon {#h3getunidirectionaledgesfromhexagon} +## h3GetUnidirectionalEdgesFromHexagon Provides all of the unidirectional edges from the provided H3Index. @@ -1363,7 +1363,7 @@ Result: └───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -## h3GetUnidirectionalEdgeBoundary {#h3getunidirectionaledgeboundary} +## h3GetUnidirectionalEdgeBoundary Returns the coordinates defining the unidirectional edge. diff --git a/docs/en/sql-reference/functions/geo/s2.md b/docs/en/sql-reference/functions/geo/s2.md index c3d95d2f0a9..00b75ad42a7 100644 --- a/docs/en/sql-reference/functions/geo/s2.md +++ b/docs/en/sql-reference/functions/geo/s2.md @@ -2,13 +2,13 @@ sidebar_label: S2 Geometry --- -# Functions for Working with S2 Index {#s2index} +# Functions for Working with S2 Index [S2](https://s2geometry.io/) is a geographical indexing system where all geographical data is represented on a three-dimensional sphere (similar to a globe). In the S2 library points are represented as the S2 Index - a specific number which encodes internally a point on the surface of a unit sphere, unlike traditional (latitude, longitude) pairs. To get the S2 point index for a given point specified in the format (latitude, longitude) use the [geoToS2](#geotos2) function. Also, you can use the [s2ToGeo](#s2togeo) function for getting geographical coordinates corresponding to the specified S2 point index. -## geoToS2 {#geotos2} +## geoToS2 Returns [S2](#s2index) point index corresponding to the provided coordinates `(longitude, latitude)`. @@ -45,7 +45,7 @@ Result: └─────────────────────┘ ``` -## s2ToGeo {#s2togeo} +## s2ToGeo Returns geo coordinates `(longitude, latitude)` corresponding to the provided [S2](#s2index) point index. @@ -81,7 +81,7 @@ Result: └──────────────────────────────────────┘ ``` -## s2GetNeighbors {#s2getneighbors} +## s2GetNeighbors Returns S2 neighbor indixes corresponding to the provided [S2](#s2index). 
Each cell in the S2 system is a quadrilateral bounded by four geodesics. So, each cell has 4 neighbors. @@ -117,7 +117,7 @@ Result: └───────────────────────────────────────────────────────────────────────────────────┘ ``` -## s2CellsIntersect {#s2cellsintersect} +## s2CellsIntersect Determines if the two provided [S2](#s2index) cells intersect or not. @@ -154,7 +154,7 @@ Result: └───────────┘ ``` -## s2CapContains {#s2capcontains} +## s2CapContains Determines if a cap contains a S2 point. A cap represents a part of the sphere that has been cut off by a plane. It is defined by a point on a sphere and a radius in degrees. @@ -193,7 +193,7 @@ Result: └─────────────┘ ``` -## s2CapUnion {#s2capunion} +## s2CapUnion Determines the smallest cap that contains the given two input caps. A cap represents a portion of the sphere that has been cut off by a plane. It is defined by a point on a sphere and a radius in degrees. @@ -229,7 +229,7 @@ Result: └────────────────────────────────────────┘ ``` -## s2RectAdd {#s2rectadd} +## s2RectAdd Increases the size of the bounding rectangle to include the given S2 point. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space. @@ -266,7 +266,7 @@ Result: └───────────────────────────────────────────┘ ``` -## s2RectContains {#s2rectcontains} +## s2RectContains Determines if a given rectangle contains a S2 point. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space. @@ -303,7 +303,7 @@ Result: └──────────────┘ ``` -## s2RectUinion {#s2rectunion} +## s2RectUinion Returns the smallest rectangle containing the union of this rectangle and the given rectangle. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space. @@ -339,7 +339,7 @@ Result: └───────────────────────────────────────────┘ ``` -## s2RectIntersection {#s2rectintersection} +## s2RectIntersection Returns the smallest rectangle containing the intersection of this rectangle and the given rectangle. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space. diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md index bd05f3b4ad2..78f9d1b8285 100644 --- a/docs/en/sql-reference/functions/hash-functions.md +++ b/docs/en/sql-reference/functions/hash-functions.md @@ -3,13 +3,13 @@ sidebar_position: 50 sidebar_label: Hash --- -# Hash Functions {#hash-functions} +# Hash Functions Hash functions can be used for the deterministic pseudo-random shuffling of elements. Simhash is a hash function, which returns close hash values for close (similar) arguments. -## halfMD5 {#hash-functions-halfmd5} +## halfMD5 [Interprets](../../sql-reference/functions/type-conversion-functions.md#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the [MD5](https://en.wikipedia.org/wiki/MD5) hash value for each of them. Then combines hashes, takes the first 8 bytes of the hash of the resulting string, and interprets them as `UInt64` in big-endian byte order. 
@@ -40,17 +40,17 @@ SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00') └────────────────────┴────────┘ ``` -## MD4 {#hash_functions-md4} +## MD4 Calculates the MD4 from a string and returns the resulting set of bytes as FixedString(16). -## MD5 {#hash_functions-md5} +## MD5 Calculates the MD5 from a string and returns the resulting set of bytes as FixedString(16). If you do not need MD5 in particular, but you need a decent cryptographic 128-bit hash, use the ‘sipHash128’ function instead. If you want to get the same result as output by the md5sum utility, use lower(hex(MD5(s))). -## sipHash64 {#hash_functions-siphash64} +## sipHash64 Produces a 64-bit [SipHash](https://131002.net/siphash/) hash value. @@ -87,7 +87,7 @@ SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00 └──────────────────────┴────────┘ ``` -## sipHash128 {#hash_functions-siphash128} +## sipHash128 Produces a 128-bit [SipHash](https://131002.net/siphash/) hash value. Differs from [sipHash64](#hash_functions-siphash64) in that the final xor-folding state is done up to 128 bits. @@ -123,7 +123,7 @@ Result: └──────────────────────────────────┘ ``` -## cityHash64 {#cityhash64} +## cityHash64 Produces a 64-bit [CityHash](https://github.com/google/cityhash) hash value. @@ -161,17 +161,17 @@ The following example shows how to compute the checksum of the entire table with SELECT groupBitXor(cityHash64(*)) FROM table ``` -## intHash32 {#inthash32} +## intHash32 Calculates a 32-bit hash code from any type of integer. This is a relatively fast non-cryptographic hash function of average quality for numbers. -## intHash64 {#inthash64} +## intHash64 Calculates a 64-bit hash code from any type of integer. It works faster than intHash32. Average quality. -## SHA1, SHA224, SHA256, SHA512 {#sha} +## SHA1, SHA224, SHA256, SHA512 Calculates SHA-1, SHA-224, SHA-256, SHA-512 hash from a string and returns the resulting set of bytes as [FixedString](../data-types/fixedstring.md). @@ -215,7 +215,7 @@ Result: └──────────────────────────────────────────┘ ``` -## BLAKE3 {#blake3} +## BLAKE3 Calculates BLAKE3 hash string and returns the resulting set of bytes as [FixedString](../data-types/fixedstring.md). @@ -253,16 +253,16 @@ Result: └──────────────────────────────────────────────────────────────────┘ ``` -## URLHash(url\[, N\]) {#urlhashurl-n} +## URLHash(url\[, N\]) A fast, decent-quality non-cryptographic hash function for a string obtained from a URL using some type of normalization. `URLHash(s)` – Calculates a hash from a string without one of the trailing symbols `/`,`?` or `#` at the end, if present. `URLHash(s, N)` – Calculates a hash from a string up to the N level in the URL hierarchy, without one of the trailing symbols `/`,`?` or `#` at the end, if present. Levels are the same as in URLHierarchy. -## farmFingerprint64 {#farmfingerprint64} +## farmFingerprint64 -## farmHash64 {#farmhash64} +## farmHash64 Produces a 64-bit [FarmHash](https://github.com/google/farmhash) or Fingerprint value. `farmFingerprint64` is preferred for a stable and portable value. @@ -293,7 +293,7 @@ SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:0 └──────────────────────┴────────┘ ``` -## javaHash {#hash_functions-javahash} +## javaHash Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) from a string. This hash function is neither fast nor having a good quality. 
The only reason to use it is when this algorithm is already used in another system and you have to calculate exactly the same result.

@@ -323,7 +323,7 @@ Result:
└───────────────────────────┘
```

-## javaHashUTF16LE {#javahashutf16le}
+## javaHashUTF16LE

Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) from a string, assuming it contains bytes representing a string in UTF-16LE encoding.

@@ -359,7 +359,7 @@ Result:
└──────────────────────────────────────────────────────────────┘
```

-## hiveHash {#hash-functions-hivehash}
+## hiveHash

Calculates `HiveHash` from a string.

@@ -391,7 +391,7 @@ Result:
└───────────────────────────┘
```

-## metroHash64 {#metrohash64}
+## metroHash64

Produces a 64-bit [MetroHash](http://www.jandrewrogers.com/2015/05/27/metrohash/) hash value.

@@ -419,13 +419,13 @@ SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:
└──────────────────────┴────────┘
```

-## jumpConsistentHash {#jumpconsistenthash}
+## jumpConsistentHash

Calculates JumpConsistentHash from a UInt64.
Accepts two arguments: a UInt64-type key and the number of buckets. Returns Int32.
For more information, see the link: [JumpConsistentHash](https://arxiv.org/pdf/1406.2294.pdf)

-## murmurHash2_32, murmurHash2_64 {#murmurhash2-32-murmurhash2-64}
+## murmurHash2_32, murmurHash2_64

Produces a [MurmurHash2](https://github.com/aappleby/smhasher) hash value.

@@ -455,7 +455,7 @@ SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:
└──────────────────────┴────────┘
```

-## gccMurmurHash {#gccmurmurhash}
+## gccMurmurHash

Calculates a 64-bit [MurmurHash2](https://github.com/aappleby/smhasher) hash value using the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191). It is portable between Clang and GCC builds.

@@ -493,7 +493,7 @@ Result:
└──────────────────────┴─────────────────────┘
```

-## murmurHash3_32, murmurHash3_64 {#murmurhash3-32-murmurhash3-64}
+## murmurHash3_32, murmurHash3_64

Produces a [MurmurHash3](https://github.com/aappleby/smhasher) hash value.

@@ -523,7 +523,7 @@ SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:
└─────────────┴────────┘
```

-## murmurHash3_128 {#murmurhash3-128}
+## murmurHash3_128

Produces a 128-bit [MurmurHash3](https://github.com/aappleby/smhasher) hash value.

@@ -559,7 +559,7 @@ Result:
└───────────────────────────────────────────┘
```

-## xxHash32, xxHash64 {#hash-functions-xxhash32}
+## xxHash32, xxHash64

Calculates `xxHash` from a string. It is proposed in two flavors, 32 and 64 bits.

@@ -597,7 +597,7 @@ Result:

- [xxHash](http://cyan4973.github.io/xxHash/).

-## ngramSimHash {#ngramsimhash}
+## ngramSimHash

Splits an ASCII string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. It is case sensitive.

@@ -636,7 +636,7 @@ Result:
└────────────┘
```

-## ngramSimHashCaseInsensitive {#ngramsimhashcaseinsensitive}
+## ngramSimHashCaseInsensitive

Splits an ASCII string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. It is case insensitive.

@@ -675,7 +675,7 @@ Result:
└───────────┘
```

-## ngramSimHashUTF8 {#ngramsimhashutf8}
+## ngramSimHashUTF8

Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. It is case sensitive.
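As an illustrative sketch of how the simhash family is typically used (the specific strings here are assumptions, not from the patched docs): similar inputs yield simhashes at a small Hamming distance, which the companion `bitHammingDistance` function can measure.

```sql
-- Illustrative sketch: near-duplicate detection with n-gram simhash.
-- Similar strings should produce a small Hamming distance between their simhashes.
SELECT bitHammingDistance(
    ngramSimHash('ClickHouse is a column-oriented DBMS'),
    ngramSimHash('ClickHouse is a column oriented DBMS')
) AS distance;
```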
@@ -714,7 +714,7 @@ Result:
└────────────┘
```

-## ngramSimHashCaseInsensitiveUTF8 {#ngramsimhashcaseinsensitiveutf8}
+## ngramSimHashCaseInsensitiveUTF8

Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. It is case insensitive.

@@ -753,7 +753,7 @@ Result:
└────────────┘
```

-## wordShingleSimHash {#wordshinglesimhash}
+## wordShingleSimHash

Splits an ASCII string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. It is case sensitive.

@@ -792,7 +792,7 @@ Result:
└────────────┘
```

-## wordShingleSimHashCaseInsensitive {#wordshinglesimhashcaseinsensitive}
+## wordShingleSimHashCaseInsensitive

Splits an ASCII string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. It is case insensitive.

@@ -831,7 +831,7 @@ Result:
└────────────┘
```

-## wordShingleSimHashUTF8 {#wordshinglesimhashutf8}
+## wordShingleSimHashUTF8

Splits a UTF-8 string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. It is case sensitive.

@@ -870,7 +870,7 @@ Result:
└────────────┘
```

-## wordShingleSimHashCaseInsensitiveUTF8 {#wordshinglesimhashcaseinsensitiveutf8}
+## wordShingleSimHashCaseInsensitiveUTF8

Splits a UTF-8 string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. It is case insensitive.

@@ -909,7 +909,7 @@ Result:
└────────────┘
```

-## ngramMinHash {#ngramminhash}
+## ngramMinHash

Splits an ASCII string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. It is case sensitive.

@@ -949,7 +949,7 @@ Result:
└────────────────────────────────────────────┘
```

-## ngramMinHashCaseInsensitive {#ngramminhashcaseinsensitive}
+## ngramMinHashCaseInsensitive

Splits an ASCII string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. It is case insensitive.

@@ -989,7 +989,7 @@ Result:
└────────────────────────────────────────────┘
```

-## ngramMinHashUTF8 {#ngramminhashutf8}
+## ngramMinHashUTF8

Splits a UTF-8 string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. It is case sensitive.

@@ -1029,7 +1029,7 @@ Result:
└────────────────────────────────────────────┘
```

-## ngramMinHashCaseInsensitiveUTF8 {#ngramminhashcaseinsensitiveutf8}
+## ngramMinHashCaseInsensitiveUTF8

Splits a UTF-8 string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. It is case insensitive.

@@ -1069,7 +1069,7 @@ Result:
└─────────────────────────────────────────────┘
```

-## ngramMinHashArg {#ngramminhasharg}
+## ngramMinHashArg

Splits an ASCII string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHash](#ngramminhash) function with the same input. It is case sensitive.
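As a minimal sketch of the minhash variants (the example strings are assumptions): since these functions return a tuple of two hashes, `tupleHammingDistance` can compare the tuples component-wise, giving a crude similarity signal of 0, 1 or 2.

```sql
-- Illustrative sketch: minhash tuples of similar strings tend to share components.
SELECT tupleHammingDistance(
    ngramMinHash('ClickHouse is a column-oriented DBMS'),
    ngramMinHash('ClickHouse is a column oriented DBMS')
) AS distance;
```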
@@ -1107,7 +1107,7 @@ Result:
└───────────────────────────────────────────────────────────────────────────────┘
```

-## ngramMinHashArgCaseInsensitive {#ngramminhashargcaseinsensitive}
+## ngramMinHashArgCaseInsensitive

Splits an ASCII string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHashCaseInsensitive](#ngramminhashcaseinsensitive) function with the same input. It is case insensitive.

@@ -1145,7 +1145,7 @@ Result:
└───────────────────────────────────────────────────────────────────────────────┘
```

-## ngramMinHashArgUTF8 {#ngramminhashargutf8}
+## ngramMinHashArgUTF8

Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHashUTF8](#ngramminhashutf8) function with the same input. It is case sensitive.

@@ -1183,7 +1183,7 @@ Result:
└───────────────────────────────────────────────────────────────────────────────┘
```

-## ngramMinHashArgCaseInsensitiveUTF8 {#ngramminhashargcaseinsensitiveutf8}
+## ngramMinHashArgCaseInsensitiveUTF8

Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHashCaseInsensitiveUTF8](#ngramminhashcaseinsensitiveutf8) function with the same input. It is case insensitive.

@@ -1221,7 +1221,7 @@ Result:
└───────────────────────────────────────────────────────────────────────────────┘
```

-## wordShingleMinHash {#wordshingleminhash}
+## wordShingleMinHash

Splits an ASCII string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. It is case sensitive.

@@ -1261,7 +1261,7 @@ Result:
└────────────────────────────────────────────┘
```

-## wordShingleMinHashCaseInsensitive {#wordshingleminhashcaseinsensitive}
+## wordShingleMinHashCaseInsensitive

Splits an ASCII string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. It is case insensitive.

@@ -1301,7 +1301,7 @@ Result:
└───────────────────────────────────────────┘
```

-## wordShingleMinHashUTF8 {#wordshingleminhashutf8}
+## wordShingleMinHashUTF8

Splits a UTF-8 string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. It is case sensitive.

@@ -1341,7 +1341,7 @@ Result:
└────────────────────────────────────────────┘
```

-## wordShingleMinHashCaseInsensitiveUTF8 {#wordshingleminhashcaseinsensitiveutf8}
+## wordShingleMinHashCaseInsensitiveUTF8

Splits a UTF-8 string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. It is case insensitive.
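A quick, illustrative sketch of the case-insensitive word-shingle variants (the strings are assumptions): case-differing inputs should hash identically.

```sql
-- Illustrative sketch: the case-insensitive variant ignores letter case,
-- so the comparison below is expected to return 1.
SELECT wordShingleMinHashCaseInsensitive('ClickHouse stores data in columns') =
       wordShingleMinHashCaseInsensitive('CLICKHOUSE STORES DATA IN COLUMNS') AS same;
```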
@@ -1381,7 +1381,7 @@ Result:
└───────────────────────────────────────────┘
```

-## wordShingleMinHashArg {#wordshingleminhasharg}
+## wordShingleMinHashArg

Splits an ASCII string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHash](#wordshingleminhash) function with the same input. It is case sensitive.

@@ -1419,7 +1419,7 @@ Result:
└───────────────────────────────────────────────────────────────────────┘
```

-## wordShingleMinHashArgCaseInsensitive {#wordshingleminhashargcaseinsensitive}
+## wordShingleMinHashArgCaseInsensitive

Splits an ASCII string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHashCaseInsensitive](#wordshingleminhashcaseinsensitive) function with the same input. It is case insensitive.

@@ -1457,7 +1457,7 @@ Result:
└────────────────────────────────────────────────────────────────────────┘
```

-## wordShingleMinHashArgUTF8 {#wordshingleminhashargutf8}
+## wordShingleMinHashArgUTF8

Splits a UTF-8 string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHashUTF8](#wordshingleminhashutf8) function with the same input. It is case sensitive.

@@ -1495,7 +1495,7 @@ Result:
└───────────────────────────────────────────────────────────────────────┘
```

-## wordShingleMinHashArgCaseInsensitiveUTF8 {#wordshingleminhashargcaseinsensitiveutf8}
+## wordShingleMinHashArgCaseInsensitiveUTF8

Splits a UTF-8 string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHashCaseInsensitiveUTF8](#wordshingleminhashcaseinsensitiveutf8) function with the same input. It is case insensitive.

diff --git a/docs/en/sql-reference/functions/in-functions.md b/docs/en/sql-reference/functions/in-functions.md
index ab8ba93daba..ffddf2c9009 100644
--- a/docs/en/sql-reference/functions/in-functions.md
+++ b/docs/en/sql-reference/functions/in-functions.md
@@ -3,9 +3,9 @@ sidebar_position: 60
sidebar_label: IN Operator
---

-# Functions for Implementing the IN Operator {#functions-for-implementing-the-in-operator}
+# Functions for Implementing the IN Operator

-## in, notIn, globalIn, globalNotIn {#in-functions}
+## in, notIn, globalIn, globalNotIn

See the section [IN operators](../../sql-reference/operators/in.md#select-in-operators).

diff --git a/docs/en/sql-reference/functions/index.md b/docs/en/sql-reference/functions/index.md
index 8bbb3edf67c..4574d78bc29 100644
--- a/docs/en/sql-reference/functions/index.md
+++ b/docs/en/sql-reference/functions/index.md
@@ -11,19 +11,19 @@ In this section we discuss regular functions. For aggregate functions, see the s

\* - There is a third type of function that the ‘arrayJoin’ function belongs to; table functions can also be mentioned separately.\*

-## Strong Typing {#strong-typing}
+## Strong Typing

In contrast to standard SQL, ClickHouse has strong typing. In other words, it does not make implicit conversions between types. Each function works for a specific set of types. This means that sometimes you need to use type conversion functions.

-## Common Subexpression Elimination {#common-subexpression-elimination}
+## Common Subexpression Elimination

All expressions in a query that have the same AST (the same record or same result of syntactic parsing) are considered to have identical values.
Such expressions are concatenated and executed once. Identical subqueries are also eliminated this way.

-## Types of Results {#types-of-results}
+## Types of Results

All functions return a single value as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.

-## Constants {#constants}
+## Constants

For simplicity, certain functions can only work with constants for some arguments. For example, the right argument of the LIKE operator must be a constant.
Almost all functions return a constant for constant arguments. The exception is functions that generate random numbers.

@@ -32,18 +32,18 @@ A constant expression is also considered a constant (for example, the right half

Functions can be implemented in different ways for constant and non-constant arguments (different code is executed). But the results for a constant and for a true column containing only the same value should match each other.

-## NULL Processing {#null-processing}
+## NULL Processing

Functions have the following behaviors:

- If at least one of the arguments of the function is `NULL`, the function result is also `NULL`.
- Special behavior that is specified individually in the description of each function. In the ClickHouse source code, these functions have `UseDefaultImplementationForNulls=false`.

-## Constancy {#constancy}
+## Constancy

Functions cannot change the values of their arguments; any changes are returned as the result. Thus, the result of calculating separate functions does not depend on the order in which the functions are written in the query.

-## Higher-order functions, `->` operator and lambda(params, expr) function {#higher-order-functions}
+## Higher-order functions, `->` operator and lambda(params, expr) function

Higher-order functions can only accept lambda functions as their functional argument. To pass a lambda function to a higher-order function, use the `->` operator. The left side of the arrow has a formal parameter, which is any ID, or multiple formal parameters (any IDs in a tuple). The right side of the arrow has an expression that can use these formal parameters, as well as any table columns.

@@ -58,11 +58,11 @@ A lambda function that accepts multiple arguments can also be passed to a higher

For some functions the first argument (the lambda function) can be omitted. In this case, identical mapping is assumed.

-## SQL User Defined Functions {#user-defined-functions}
+## SQL User Defined Functions

Custom functions from lambda expressions can be created using the [CREATE FUNCTION](../statements/create/function.md) statement. To delete these functions use the [DROP FUNCTION](../statements/drop.md#drop-function) statement.

-## Executable User Defined Functions {#executable-user-defined-functions}
+## Executable User Defined Functions

ClickHouse can call any external executable program or script to process data.
The configuration of executable user defined functions can be located in one or more XML files. The path to the configuration is specified in the [user_defined_executable_functions_config](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_defined_executable_functions_config) parameter.

@@ -224,16 +224,16 @@ Result:
└──────────────────────────────┘
```

-## Error Handling {#error-handling}
+## Error Handling

Some functions might throw an exception if the data is invalid.
In this case, the query is canceled and an error text is returned to the client. For distributed processing, when an exception occurs on one of the servers, the other servers also attempt to abort the query.

-## Evaluation of Argument Expressions {#evaluation-of-argument-expressions}
+## Evaluation of Argument Expressions

In almost all programming languages, one of the arguments might not be evaluated for certain operators. This is usually the operators `&&`, `||`, and `?:`.
But in ClickHouse, arguments of functions (operators) are always evaluated. This is because entire parts of columns are evaluated at once, instead of calculating each row separately.

-## Performing Functions for Distributed Query Processing {#performing-functions-for-distributed-query-processing}
+## Performing Functions for Distributed Query Processing

For distributed query processing, as many stages of query processing as possible are performed on remote servers, and the rest of the stages (merging intermediate results and everything after that) are performed on the requestor server.

diff --git a/docs/en/sql-reference/functions/introspection.md b/docs/en/sql-reference/functions/introspection.md
index 694d07f18dc..b885b50ce22 100644
--- a/docs/en/sql-reference/functions/introspection.md
+++ b/docs/en/sql-reference/functions/introspection.md
@@ -3,7 +3,7 @@ sidebar_position: 65
sidebar_label: Introspection
---

-# Introspection Functions {#introspection-functions}
+# Introspection Functions

You can use functions described in this chapter to introspect [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) and [DWARF](https://en.wikipedia.org/wiki/DWARF) for query profiling.

@@ -21,7 +21,7 @@ For proper operation of introspection functions:

ClickHouse saves profiler reports to the [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table. Make sure the table and profiler are configured properly.

-## addressToLine {#addresstoline}
+## addressToLine

Converts a virtual memory address inside the ClickHouse server process to the filename and the line number in the ClickHouse source code.

@@ -114,7 +114,7 @@ trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so
/build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97
```

-## addressToLineWithInlines {#addresstolinewithinlines}
+## addressToLineWithInlines

Similar to `addressToLine`, but returns an Array with all inline functions; as a trade-off, it is much slower.

@@ -219,7 +219,7 @@ The [arrayJoin](../../sql-reference/functions/array-functions.md#array-functions
```

-## addressToSymbol {#addresstosymbol}
+## addressToSymbol

Converts a virtual memory address inside the ClickHouse server process to the symbol from the ClickHouse object files.

@@ -316,7 +316,7 @@ start_thread
clone
```

-## demangle {#demangle}
+## demangle

Converts a symbol that you can get using the [addressToSymbol](#addresstosymbol) function to the C++ function name.

@@ -412,7 +412,7 @@ execute_native_thread_routine
start_thread
clone
```

-## tid {#tid}
+## tid

Returns the ID of the thread in which the current [Block](https://clickhouse.com/docs/en/development/architecture/#block) is processed.

@@ -442,7 +442,7 @@ Result:
└───────┘
```

-## logTrace {#logtrace}
+## logTrace

Emits a trace log message to the server log for each [Block](https://clickhouse.com/docs/en/development/architecture/#block).
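A minimal sketch of how this can be invoked (the message text is an assumption); the function always returns 0, and the message lands in the server log at trace level:

```sql
-- Illustrative sketch: one trace-level message is written per processed block.
SELECT logTrace('example trace message');
```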
diff --git a/docs/en/sql-reference/functions/ip-address-functions.md b/docs/en/sql-reference/functions/ip-address-functions.md
index a37398786af..9b34a4db440 100644
--- a/docs/en/sql-reference/functions/ip-address-functions.md
+++ b/docs/en/sql-reference/functions/ip-address-functions.md
@@ -3,29 +3,29 @@ sidebar_position: 55
sidebar_label: IP Addresses
---

-# Functions for Working with IPv4 and IPv6 Addresses {#functions-for-working-with-ip-addresses}
+# Functions for Working with IPv4 and IPv6 Addresses

-## IPv4NumToString(num) {#ipv4numtostringnum}
+## IPv4NumToString(num)

Takes a UInt32 number. Interprets it as an IPv4 address in big endian. Returns a string containing the corresponding IPv4 address in the format A.B.C.D (dot-separated numbers in decimal form).

Alias: `INET_NTOA`.

-## IPv4StringToNum(s) {#ipv4stringtonums}
+## IPv4StringToNum(s)

The reverse function of IPv4NumToString. If the IPv4 address has an invalid format, it throws an exception.

Alias: `INET_ATON`.

-## IPv4StringToNumOrDefault(s) {#ipv4stringtonums}
+## IPv4StringToNumOrDefault(s)

Same as `IPv4StringToNum`, but if the IPv4 address has an invalid format, it returns 0.

-## IPv4StringToNumOrNull(s) {#ipv4stringtonums}
+## IPv4StringToNumOrNull(s)

Same as `IPv4StringToNum`, but if the IPv4 address has an invalid format, it returns null.

-## IPv4NumToStringClassC(num) {#ipv4numtostringclasscnum}
+## IPv4NumToStringClassC(num)

Similar to IPv4NumToString, but using xxx instead of the last octet.

@@ -58,7 +58,7 @@ LIMIT 10

Since using ‘xxx’ is highly unusual, this may be changed in the future. We recommend that you do not rely on the exact format of this fragment.

-### IPv6NumToString(x) {#ipv6numtostringx}
+### IPv6NumToString(x)

Accepts a FixedString(16) value containing the IPv6 address in binary format. Returns a string containing this address in text format.
IPv6-mapped IPv4 addresses are output in the format ::ffff:111.222.33.44.

@@ -129,7 +129,7 @@ LIMIT 10
└────────────────────────────┴────────┘
```

-## IPv6StringToNum {#ipv6stringtonums}
+## IPv6StringToNum

The reverse function of [IPv6NumToString](#ipv6numtostringx). If the IPv6 address has an invalid format, it throws an exception.

@@ -176,15 +176,15 @@ Result:

- [cutIPv6](#cutipv6x-bytestocutforipv6-bytestocutforipv4).

-## IPv6StringToNumOrDefault(s) {#ipv6stringtonums}
+## IPv6StringToNumOrDefault(s)

Same as `IPv6StringToNum`, but if the IPv6 address has an invalid format, it returns 0.

-## IPv6StringToNumOrNull(s) {#ipv6stringtonums}
+## IPv6StringToNumOrNull(s)

Same as `IPv6StringToNum`, but if the IPv6 address has an invalid format, it returns null.

-## IPv4ToIPv6(x) {#ipv4toipv6x}
+## IPv4ToIPv6(x)

Takes a `UInt32` number. Interprets it as an IPv4 address in [big endian](https://en.wikipedia.org/wiki/Endianness). Returns a `FixedString(16)` value containing the IPv6 address in binary format. Examples:

@@ -198,7 +198,7 @@ SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr;
└────────────────────┘
```

-## cutIPv6(x, bytesToCutForIPv6, bytesToCutForIPv4) {#cutipv6x-bytestocutforipv6-bytestocutforipv4}
+## cutIPv6(x, bytesToCutForIPv6, bytesToCutForIPv4)

Accepts a FixedString(16) value containing the IPv6 address in binary format. Returns a string containing the address with the specified number of bytes removed, in text format.
For example:

@@ -217,7 +217,7 @@ SELECT
└─────────────────────────────────────┴─────────────────────┘
```

-## IPv4CIDRToRange(ipv4, Cidr), {#ipv4cidrtorangeipv4-cidr}
+## IPv4CIDRToRange(ipv4, Cidr)

Accepts an IPv4 address and a UInt8 value containing the [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). Returns a tuple with two IPv4 addresses containing the lower and higher ranges of the subnet.

@@ -231,7 +231,7 @@ SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16);
└────────────────────────────────────────────┘
```

-## IPv6CIDRToRange(ipv6, Cidr), {#ipv6cidrtorangeipv6-cidr}
+## IPv6CIDRToRange(ipv6, Cidr)

Accepts an IPv6 address and a UInt8 value containing the CIDR. Returns a tuple with two IPv6 addresses containing the lower and higher ranges of the subnet.

@@ -245,7 +245,7 @@ SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32);
└────────────────────────────────────────────────────────────────────────┘
```

-## toIPv4(string) {#toipv4string}
+## toIPv4(string)

An alias to `IPv4StringToNum()` that takes a string form of an IPv4 address and returns a value of [IPv4](../../sql-reference/data-types/domains/ipv4.md) type, which is binary equal to the value returned by `IPv4StringToNum()`.

@@ -277,15 +277,15 @@ SELECT
└───────────────────────────────────┴──────────────────────────┘
```

-## toIPv4OrDefault(string) {#toipv4ordefaultstring}
+## toIPv4OrDefault(string)

Same as `toIPv4`, but if the IPv4 address has an invalid format, it returns 0.

-## toIPv4OrNull(string) {#toipv4ornullstring}
+## toIPv4OrNull(string)

Same as `toIPv4`, but if the IPv4 address has an invalid format, it returns null.

-## toIPv6 {#toipv6string}
+## toIPv6

Converts a string form of an IPv6 address to [IPv6](../../sql-reference/data-types/domains/ipv6.md) type. If the IPv6 address has an invalid format, returns an empty value.
Similar to the [IPv6StringToNum](#ipv6stringtonums) function, which converts an IPv6 address to binary format.

@@ -341,15 +341,15 @@ Result:
└─────────────────────┘
```

-## IPv6StringToNumOrDefault(s) {#toipv6ordefaultstring}
+## IPv6StringToNumOrDefault(s)

Same as `toIPv6`, but if the IPv6 address has an invalid format, it returns 0.

-## IPv6StringToNumOrNull(s) {#toipv6ornullstring}
+## IPv6StringToNumOrNull(s)

Same as `toIPv6`, but if the IPv6 address has an invalid format, it returns null.

-## isIPv4String {#isipv4string}
+## isIPv4String

Determines whether the input string is an IPv4 address or not. If `string` is an IPv6 address, returns `0`.

@@ -387,7 +387,7 @@ Result:
└──────────────────┴────────────────────┘
```

-## isIPv6String {#isipv6string}
+## isIPv6String

Determines whether the input string is an IPv6 address or not. If `string` is an IPv4 address, returns `0`.

@@ -426,7 +426,7 @@ Result:
└──────────────────┴────────────────────┘
```

-## isIPAddressInRange {#isipaddressinrange}
+## isIPAddressInRange

Determines if an IP address is contained in a network represented in the [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation. Returns `1` if true, or `0` otherwise.

diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md
index a976a56a4fc..b71926f7b56 100644
--- a/docs/en/sql-reference/functions/json-functions.md
+++ b/docs/en/sql-reference/functions/json-functions.md
@@ -3,7 +3,7 @@ sidebar_position: 56
sidebar_label: JSON
---

-# Functions for Working with JSON {#functions-for-working-with-json}
+# Functions for Working with JSON

ClickHouse has special functions for working with JSON.
All the JSON functions are based on strong assumptions about what the JSON can be, but they try to do as little as possible to get the job done.

@@ -14,37 +14,37 @@ The following assumptions are made:
3. Fields are searched for on any nesting level, indiscriminately. If there are multiple matching fields, the first occurrence is used.
4. The JSON does not have space characters outside of string literals.

-## visitParamHas(params, name) {#visitparamhasparams-name}
+## visitParamHas(params, name)

Checks whether there is a field named `name`.

Alias: `simpleJSONHas`.

-## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name}
+## visitParamExtractUInt(params, name)

Parses UInt64 from the value of the field named `name`. If this is a string field, it tries to parse a number from the beginning of the string. If the field does not exist, or it exists but does not contain a number, it returns 0.

Alias: `simpleJSONExtractUInt`.

-## visitParamExtractInt(params, name) {#visitparamextractintparams-name}
+## visitParamExtractInt(params, name)

The same, but for Int64.

Alias: `simpleJSONExtractInt`.

-## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name}
+## visitParamExtractFloat(params, name)

The same, but for Float64.

Alias: `simpleJSONExtractFloat`.

-## visitParamExtractBool(params, name) {#visitparamextractboolparams-name}
+## visitParamExtractBool(params, name)

Parses a true/false value. The result is UInt8.

Alias: `simpleJSONExtractBool`.

-## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name}
+## visitParamExtractRaw(params, name)

Returns the value of a field, including separators.

@@ -57,7 +57,7 @@ visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"';
visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}';
```

-## visitParamExtractString(params, name) {#visitparamextractstringparams-name}
+## visitParamExtractString(params, name)

Parses the string in double quotes. The value is unescaped. If unescaping failed, it returns an empty string.

@@ -76,7 +76,7 @@ There is currently no support for code points in the format `\uXXXX\uYYYY` that

The following functions are based on [simdjson](https://github.com/lemire/simdjson) designed for more complex JSON parsing requirements. The assumption 2 mentioned above still applies.

-## isValidJSON(json) {#isvalidjsonjson}
+## isValidJSON(json)

Checks that the passed string is valid JSON.

@@ -87,7 +87,7 @@ SELECT isValidJSON('{"a": "hello", "b": [-100, 200.0, 300]}') = 1
SELECT isValidJSON('not a json') = 0
```

-## JSONHas(json\[, indices_or_keys\]…) {#jsonhasjson-indices-or-keys}
+## JSONHas(json\[, indices_or_keys\]…)

If the value exists in the JSON document, `1` will be returned.

@@ -120,7 +120,7 @@ SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2) = 'a'
SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'hello'
```

-## JSONLength(json\[, indices_or_keys\]…) {#jsonlengthjson-indices-or-keys}
+## JSONLength(json\[, indices_or_keys\]…)

Returns the length of a JSON array or a JSON object.

@@ -133,7 +133,7 @@ SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 3
SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}') = 2
```

-## JSONType(json\[, indices_or_keys\]…) {#jsontypejson-indices-or-keys}
+## JSONType(json\[, indices_or_keys\]…)

Returns the type of a JSON value.
@@ -147,13 +147,13 @@ SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'String'
SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 'Array'
```

-## JSONExtractUInt(json\[, indices_or_keys\]…) {#jsonextractuintjson-indices-or-keys}
+## JSONExtractUInt(json\[, indices_or_keys\]…)

-## JSONExtractInt(json\[, indices_or_keys\]…) {#jsonextractintjson-indices-or-keys}
+## JSONExtractInt(json\[, indices_or_keys\]…)

-## JSONExtractFloat(json\[, indices_or_keys\]…) {#jsonextractfloatjson-indices-or-keys}
+## JSONExtractFloat(json\[, indices_or_keys\]…)

-## JSONExtractBool(json\[, indices_or_keys\]…) {#jsonextractbooljson-indices-or-keys}
+## JSONExtractBool(json\[, indices_or_keys\]…)

Parses a JSON and extracts a value. These functions are similar to the `visitParam` functions.

@@ -167,7 +167,7 @@ SELECT JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2) = 200
SELECT JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1) = 300
```

-## JSONExtractString(json\[, indices_or_keys\]…) {#jsonextractstringjson-indices-or-keys}
+## JSONExtractString(json\[, indices_or_keys\]…)

Parses a JSON and extracts a string. This function is similar to the `visitParamExtractString` function.

@@ -185,7 +185,7 @@ SELECT JSONExtractString('{"abc":"\\u263"}', 'abc') = ''
SELECT JSONExtractString('{"abc":"hello}', 'abc') = ''
```

-## JSONExtract(json\[, indices_or_keys…\], Return_type) {#jsonextractjson-indices-or-keys-return-type}
+## JSONExtract(json\[, indices_or_keys…\], Return_type)

Parses a JSON and extracts a value of the given ClickHouse data type.

@@ -206,7 +206,7 @@ SELECT JSONExtract('{"day": "Thursday"}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday
SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Friday'
```

-## JSONExtractKeysAndValues(json\[, indices_or_keys…\], Value_type) {#jsonextractkeysandvaluesjson-indices-or-keys-value-type}
+## JSONExtractKeysAndValues(json\[, indices_or_keys…\], Value_type)

Parses key-value pairs from a JSON where the values are of the given ClickHouse data type.

@@ -216,7 +216,7 @@ Example:
SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)];
```

-## JSONExtractKeys {#jsonextractkeysjson-indices-or-keys}
+## JSONExtractKeys

Parses a JSON string and extracts the keys.

@@ -254,7 +254,7 @@ text
└────────────────────────────────────────────────────────────┘
```

-## JSONExtractRaw(json\[, indices_or_keys\]…) {#jsonextractrawjson-indices-or-keys}
+## JSONExtractRaw(json\[, indices_or_keys\]…)

Returns a part of JSON as an unparsed string.

@@ -266,7 +266,7 @@ Example:
SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]';
```

-## JSONExtractArrayRaw(json\[, indices_or_keys…\]) {#jsonextractarrayrawjson-indices-or-keys}
+## JSONExtractArrayRaw(json\[, indices_or_keys…\])

Returns an array with the elements of a JSON array, each represented as an unparsed string.

@@ -278,7 +278,7 @@ Example:
SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"'];
```

-## JSONExtractKeysAndValuesRaw {#json-extract-keys-and-values-raw}
+## JSONExtractKeysAndValuesRaw

Extracts raw data from a JSON object.
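A minimal, illustrative sketch (the JSON literal and the expected shape shown in the comment are assumptions): each top-level key is paired with its raw, unparsed value, including nested objects.

```sql
-- Illustrative sketch: keys with their raw values.
SELECT JSONExtractKeysAndValuesRaw('{"a": 1, "b": {"c": "d"}}') AS kv;
-- Expected shape (illustrative): [('a','1'), ('b','{"c": "d"}')]
```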
@@ -344,7 +344,7 @@ Result:
└───────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

-## JSON_EXISTS(json, path) {#json-exists}
+## JSON_EXISTS(json, path)

If the value exists in the JSON document, `1` will be returned.

@@ -363,7 +363,7 @@ SELECT JSON_EXISTS('{"hello":["world"]}', '$.hello[0]');

Before version 21.11 the order of arguments was wrong, i.e. JSON_EXISTS(path, json)
:::

-## JSON_QUERY(json, path) {#json-query}
+## JSON_QUERY(json, path)

Parses a JSON and extracts a value as a JSON array or JSON object.

@@ -390,7 +390,7 @@ String

Before version 21.11 the order of arguments was wrong, i.e. JSON_QUERY(path, json)
:::

-## JSON_VALUE(json, path) {#json-value}
+## JSON_VALUE(json, path)

Parses a JSON and extracts a value as a JSON scalar.

@@ -418,7 +418,7 @@ String

Before version 21.11 the order of arguments was wrong, i.e. JSON_VALUE(path, json)
:::

-## toJSONString {#tojsonstring}
+## toJSONString

Serializes a value to its JSON representation. Various data types and nested structures are supported.
64-bit [integers](../../sql-reference/data-types/int-uint.md) or bigger (like `UInt64` or `Int128`) are enclosed in quotes by default. [output_format_json_quote_64bit_integers](../../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers) controls this behavior.

diff --git a/docs/en/sql-reference/functions/logical-functions.md b/docs/en/sql-reference/functions/logical-functions.md
index 0055e253951..0dd0c8af146 100644
--- a/docs/en/sql-reference/functions/logical-functions.md
+++ b/docs/en/sql-reference/functions/logical-functions.md
@@ -3,13 +3,13 @@ sidebar_position: 37
sidebar_label: Logical
---

-# Logical Functions {#logical-functions}
+# Logical Functions

Performs logical operations on arguments of any numeric types, but returns a [UInt8](../../sql-reference/data-types/int-uint.md) number equal to 0, 1 or `NULL` in some cases.

Zero as an argument is considered `false`, while any non-zero value is considered `true`.

-## and {#logical-and-function}
+## and

Calculates the result of the logical conjunction between two or more values. Corresponds to [Logical AND Operator](../../sql-reference/operators/index.md#logical-and-operator).

@@ -63,7 +63,7 @@ Result:
└──────────────────────┘
```

-## or {#logical-or-function}
+## or

Calculates the result of the logical disjunction between two or more values. Corresponds to [Logical OR Operator](../../sql-reference/operators/index.md#logical-or-operator).

@@ -117,7 +117,7 @@ Result:
└─────────────┘
```

-## not {#logical-not-function}
+## not

Calculates the result of the logical negation of the value. Corresponds to [Logical Negation Operator](../../sql-reference/operators/index.md#logical-negation-operator).

@@ -155,7 +155,7 @@ Result:
└────────┘
```

-## xor {#logical-xor-function}
+## xor

Calculates the result of the logical exclusive disjunction between two or more values. For more than two values the function works as if it calculates `XOR` of the first two values and then uses the result with the next value to calculate `XOR` and so on.
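A short worked sketch of the left-fold behavior described above:

```sql
-- xor folds left over its arguments: (1 XOR 0) XOR 1 = 0.
SELECT xor(1, 0, 1) AS res;
```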
diff --git a/docs/en/sql-reference/functions/machine-learning-functions.md b/docs/en/sql-reference/functions/machine-learning-functions.md
index 5b3e8b87e34..92765a65849 100644
--- a/docs/en/sql-reference/functions/machine-learning-functions.md
+++ b/docs/en/sql-reference/functions/machine-learning-functions.md
@@ -3,16 +3,16 @@ sidebar_position: 64
sidebar_label: Machine Learning
---

-# Machine Learning Functions {#machine-learning-functions}
+# Machine Learning Functions

-## evalMLMethod {#machine_learning_methods-evalmlmethod}
+## evalMLMethod

Prediction using fitted regression models uses the `evalMLMethod` function. See the link in `linearRegression`.

-## stochasticLinearRegression {#stochastic-linear-regression}
+## stochasticLinearRegression

The [stochasticLinearRegression](../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md#agg_functions-stochasticlinearregression) aggregate function implements the stochastic gradient descent method using a linear model and the MSE loss function. Uses `evalMLMethod` to predict on new data.

-## stochasticLogisticRegression {#stochastic-logistic-regression}
+## stochasticLogisticRegression

The [stochasticLogisticRegression](../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md#agg_functions-stochasticlogisticregression) aggregate function implements the stochastic gradient descent method for the binary classification problem. Uses `evalMLMethod` to predict on new data.
\ No newline at end of file

diff --git a/docs/en/sql-reference/functions/math-functions.md b/docs/en/sql-reference/functions/math-functions.md
index 645587b4f5c..8ea2935ed5d 100644
--- a/docs/en/sql-reference/functions/math-functions.md
+++ b/docs/en/sql-reference/functions/math-functions.md
@@ -3,51 +3,51 @@ sidebar_position: 44
sidebar_label: Mathematical
---

-# Mathematical Functions {#mathematical-functions}
+# Mathematical Functions

All the functions return a Float64 number. The accuracy of the result is close to the maximum precision possible, but the result might not coincide with the machine representable number nearest to the corresponding real number.

-## e() {#e}
+## e()

Returns a Float64 number that is close to the number e.

-## pi() {#pi}
+## pi()

Returns a Float64 number that is close to the number π.

-## exp(x) {#expx}
+## exp(x)

Accepts a numeric argument and returns a Float64 number close to the exponent of the argument.

-## log(x), ln(x) {#logx-lnx}
+## log(x), ln(x)

Accepts a numeric argument and returns a Float64 number close to the natural logarithm of the argument.

-## exp2(x) {#exp2x}
+## exp2(x)

Accepts a numeric argument and returns a Float64 number close to 2 to the power of x.

-## log2(x) {#log2x}
+## log2(x)

Accepts a numeric argument and returns a Float64 number close to the binary logarithm of the argument.

-## exp10(x) {#exp10x}
+## exp10(x)

Accepts a numeric argument and returns a Float64 number close to 10 to the power of x.

-## log10(x) {#log10x}
+## log10(x)

Accepts a numeric argument and returns a Float64 number close to the decimal logarithm of the argument.

-## sqrt(x) {#sqrtx}
+## sqrt(x)

Accepts a numeric argument and returns a Float64 number close to the square root of the argument.

-## cbrt(x) {#cbrtx}
+## cbrt(x)

Accepts a numeric argument and returns a Float64 number close to the cubic root of the argument.
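A quick illustrative sketch of the exponential/logarithm pairs above (the expected values in the comments are hand-computed and hold up to Float64 rounding):

```sql
-- Each log function inverts the matching exp function.
SELECT exp2(10)    AS p2,   -- 1024
       log2(1024)  AS l2,   -- 10
       exp10(3)    AS p10,  -- 1000
       log10(1000) AS l10;  -- 3
```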
-## erf(x) {#erfx}
+## erf(x)

If ‘x’ is non-negative, then `erf(x / σ√2)` is the probability that a random variable having a normal distribution with standard deviation ‘σ’ takes the value that is separated from the expected value by more than ‘x’.

@@ -63,55 +63,55 @@ SELECT erf(3 / sqrt(2));
└─────────────────────────┘
```

-## erfc(x) {#erfcx}
+## erfc(x)

Accepts a numeric argument and returns a Float64 number close to 1 - erf(x), but without loss of precision for large ‘x’ values.

-## lgamma(x) {#lgammax}
+## lgamma(x)

The logarithm of the gamma function.

-## tgamma(x) {#tgammax}
+## tgamma(x)

Gamma function.

-## sin(x) {#sinx}
+## sin(x)

The sine.

-## cos(x) {#cosx}
+## cos(x)

The cosine.

-## tan(x) {#tanx}
+## tan(x)

The tangent.

-## asin(x) {#asinx}
+## asin(x)

The arc sine.

-## acos(x) {#acosx}
+## acos(x)

The arc cosine.

-## atan(x) {#atanx}
+## atan(x)

The arc tangent.

-## pow(x, y), power(x, y) {#powx-y-powerx-y}
+## pow(x, y), power(x, y)

Takes two numeric arguments x and y. Returns a Float64 number close to x to the power of y.

-## intExp2 {#intexp2}
+## intExp2

Accepts a numeric argument and returns a UInt64 number close to 2 to the power of x.

-## intExp10 {#intexp10}
+## intExp10

Accepts a numeric argument and returns a UInt64 number close to 10 to the power of x.

-## cosh(x) {#coshx}
+## cosh(x)

[Hyperbolic cosine](https://in.mathworks.com/help/matlab/ref/cosh.html).

@@ -147,7 +147,7 @@ Result:
└──────────┘
```

-## acosh(x) {#acoshx}
+## acosh(x)

[Inverse hyperbolic cosine](https://www.mathworks.com/help/matlab/ref/acosh.html).

@@ -187,7 +187,7 @@ Result:

- [cosh(x)](../../sql-reference/functions/math-functions.md#coshx)

-## sinh(x) {#sinhx}
+## sinh(x)

[Hyperbolic sine](https://www.mathworks.com/help/matlab/ref/sinh.html).

@@ -223,7 +223,7 @@ Result:
└──────────┘
```

-## asinh(x) {#asinhx}
+## asinh(x)

[Inverse hyperbolic sine](https://www.mathworks.com/help/matlab/ref/asinh.html).

@@ -263,7 +263,7 @@ Result:

- [sinh(x)](../../sql-reference/functions/math-functions.md#sinhx)

-## atanh(x) {#atanhx}
+## atanh(x)

[Inverse hyperbolic tangent](https://www.mathworks.com/help/matlab/ref/atanh.html).

@@ -299,7 +299,7 @@ Result:
└──────────┘
```

-## atan2(y, x) {#atan2yx}
+## atan2(y, x)

The [function](https://en.wikipedia.org/wiki/Atan2) calculates the angle in the Euclidean plane, given in radians, between the positive x axis and the ray to the point `(x, y) ≠ (0, 0)`.

@@ -336,7 +336,7 @@ Result:
└────────────────────┘
```

-## hypot(x, y) {#hypotxy}
+## hypot(x, y)

Calculates the length of the hypotenuse of a right-angle triangle. The [function](https://en.wikipedia.org/wiki/Hypot) avoids problems that occur when squaring very large or very small numbers.

@@ -373,7 +373,7 @@ Result:
└────────────────────┘
```

-## log1p(x) {#log1px}
+## log1p(x)

Calculates `log(1+x)`. The [function](https://en.wikipedia.org/wiki/Natural_logarithm#lnp1) `log1p(x)` is more accurate than `log(1+x)` for small values of x.

@@ -413,7 +413,7 @@ Result:

- [log(x)](../../sql-reference/functions/math-functions.md#logx-lnx)

-## sign(x) {#signx}
+## sign(x)

Returns the sign of a real number.

@@ -477,7 +477,7 @@ Result:
└──────────┘
```

-## degrees(x) {#degreesx}
+## degrees(x)

Converts the input value in radians to degrees.

@@ -513,7 +513,7 @@ Result:
└────────────────────────────┘
```

-## radians(x) {#radiansx}
+## radians(x)

Converts the input value in degrees to radians.
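A one-line sketch of the conversion pair (expected values hold up to Float64 rounding):

```sql
-- degrees and radians invert each other.
SELECT degrees(pi()) AS d,  -- 180
       radians(180)  AS r;  -- ~3.14159, close to pi()
```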
diff --git a/docs/en/sql-reference/functions/nlp-functions.md b/docs/en/sql-reference/functions/nlp-functions.md
index 5a00252f56c..6d1e894a456 100644
--- a/docs/en/sql-reference/functions/nlp-functions.md
+++ b/docs/en/sql-reference/functions/nlp-functions.md
@@ -3,13 +3,13 @@ sidebar_position: 67
sidebar_label: NLP
---

-# [experimental] Natural Language Processing functions {#nlp-functions}
+# [experimental] Natural Language Processing functions

:::warning
This is an experimental feature that is currently in development and is not ready for general use. It will change in unpredictable backwards-incompatible ways in future releases. Set `allow_experimental_nlp_functions = 1` to enable it.
:::

-## stem {#stem}
+## stem

Performs stemming on a given word.

@@ -40,7 +40,7 @@ Result:
└────────────────────────────────────────────────────┘
```

-## lemmatize {#lemmatize}
+## lemmatize

Performs lemmatization on a given word. Needs dictionaries to operate, which can be obtained [here](https://github.com/vpodpecan/lemmagen3/tree/master/src/lemmagen3/models).

@@ -81,7 +81,7 @@ Configuration:
```

-## synonyms {#synonyms}
+## synonyms

Finds synonyms to a given word. There are two types of synonym extensions: `plain` and `wordnet`.

diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index 45e9ef43c6a..9e6f0effcf9 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -3,14 +3,14 @@ sidebar_position: 67
sidebar_label: Other
---

-# Other Functions {#other-functions}
+# Other Functions

-## hostName() {#hostname}
+## hostName()

Returns a string with the name of the host that this function was performed on. For distributed processing, this is the name of the remote server host, if the function is performed on a remote server.
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.

-## getMacro {#getmacro}
+## getMacro

Gets a named value from the [macros](../../operations/server-configuration-parameters/settings.md#macros) section of the server configuration.

@@ -67,7 +67,7 @@ WHERE macro = 'test';
└───────┴──────────────┘
```

-## FQDN {#fqdn}
+## FQDN

Returns the fully qualified domain name.

@@ -101,7 +101,7 @@ Result:
└─────────────────────────────────┘
```

-## basename {#basename}
+## basename

Extracts the trailing part of a string after the last slash or backslash. This function is often used to extract the filename from a path.

@@ -155,7 +155,7 @@ SELECT 'some-file-name' AS a, basename(a)
└────────────────┴────────────────────────────┘
```

-## visibleWidth(x) {#visiblewidthx}
+## visibleWidth(x)

Calculates the approximate width when outputting values to the console in text format (tab-separated).
This function is used by the system for implementing Pretty formats.

@@ -172,18 +172,18 @@ SELECT visibleWidth(NULL)
└────────────────────┘
```

-## toTypeName(x) {#totypenamex}
+## toTypeName(x)

Returns a string containing the type name of the passed argument.

If `NULL` is passed to the function as input, then it returns the `Nullable(Nothing)` type, which corresponds to an internal `NULL` representation in ClickHouse.

-## blockSize() {#function-blocksize}
+## blockSize()

Gets the size of the block.
In ClickHouse, queries are always run on blocks (sets of column parts). This function allows getting the size of the block that you called it for.
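A minimal sketch (block sizes depend on the data source and settings, so the value is environment-dependent):

```sql
-- Every row of a block reports the same size; for numbers(5) this is
-- typically 5 if all rows land in a single block.
SELECT blockSize() AS rows_in_block
FROM numbers(5)
LIMIT 1;
```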
-## byteSize {#function-bytesize}
+## byteSize

Returns an estimate of the uncompressed byte size of its arguments in memory.

@@ -280,30 +280,30 @@ Result:
└────────────────────────────┘
```

-## materialize(x) {#materializex}
+## materialize(x)

Turns a constant into a full column containing just one value.
In ClickHouse, full columns and constants are represented differently in memory. Functions work differently for constant arguments and normal arguments (different code is executed), although the result is almost always the same. This function is for debugging this behavior.

-## ignore(…) {#ignore}
+## ignore(…)

Accepts any arguments, including `NULL`. Always returns 0.
However, the argument is still evaluated. This can be used for benchmarks.

-## sleep(seconds) {#sleepseconds}
+## sleep(seconds)

Sleeps ‘seconds’ seconds on each data block. You can specify an integer or a floating-point number.

-## sleepEachRow(seconds) {#sleepeachrowseconds}
+## sleepEachRow(seconds)

Sleeps ‘seconds’ seconds on each row. You can specify an integer or a floating-point number.

-## currentDatabase() {#currentdatabase}
+## currentDatabase()

Returns the name of the current database.
You can use this function in table engine parameters in a CREATE TABLE query where you need to specify the database.

-## currentUser() {#other-function-currentuser}
+## currentUser()

Returns the login of the current user. For a distributed query, the login of the user who initiated the query is returned.

@@ -336,7 +336,7 @@ Result:
└───────────────┘
```

-## isConstant {#is-constant}
+## isConstant

Checks whether the argument is a constant expression.

@@ -405,15 +405,15 @@ Result:
└────────────────────┘
```

-## isFinite(x) {#isfinitex}
+## isFinite(x)

Accepts Float32 and Float64 and returns UInt8 equal to 1 if the argument is not infinite and not a NaN, otherwise 0.

-## isInfinite(x) {#isinfinitex}
+## isInfinite(x)

Accepts Float32 and Float64 and returns UInt8 equal to 1 if the argument is infinite, otherwise 0. Note that 0 is returned for a NaN.

-## ifNotFinite {#ifnotfinite}
+## ifNotFinite

Checks whether a floating point value is finite.

@@ -445,17 +445,17 @@ Result:

You can get a similar result by using the [ternary operator](../../sql-reference/functions/conditional-functions.md#ternary-operator): `isFinite(x) ? x : y`.

-## isNaN(x) {#isnanx}
+## isNaN(x)

Accepts Float32 and Float64 and returns UInt8 equal to 1 if the argument is a NaN, otherwise 0.

-## hasColumnInTable(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’) {#hascolumnintablehostname-username-password-database-table-column}
+## hasColumnInTable(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’)

Accepts constant strings: database name, table name, and column name. Returns a UInt8 constant expression equal to 1 if there is a column, otherwise 0. If the hostname parameter is set, the test will run on a remote server.
The function throws an exception if the table does not exist.
For elements in a nested data structure, the function checks for the existence of a column. For the nested data structure itself, the function returns 0.

-## bar {#function-bar}
+## bar

Allows building a unicode-art diagram.

@@ -510,12 +510,12 @@ ORDER BY h ASC
└────┴────────┴────────────────────┘
```

-## transform {#transform}
+## transform

Transforms a value according to the explicitly defined mapping of some elements to other ones.
There are two variations of this function:

-### transform(x, array_from, array_to, default) {#transformx-array-from-array-to-default}
+### transform(x, array_from, array_to, default)

`x` – What to transform.

@@ -557,7 +557,7 @@ ORDER BY c DESC
└───────────┴────────┘
```

-### transform(x, array_from, array_to) {#transformx-array-from-array-to}
+### transform(x, array_from, array_to)

Differs from the first variation in that the ‘default’ argument is omitted.
If the ‘x’ value is equal to one of the elements in the ‘array_from’ array, it returns the matching element (that is numbered the same) from the ‘array_to’ array. Otherwise, it returns ‘x’.

@@ -592,7 +592,7 @@ LIMIT 10
└────────────────┴─────────┘
```

-## formatReadableSize(x) {#formatreadablesizex}
+## formatReadableSize(x)

Accepts the size (number of bytes). Returns a rounded size with a suffix (KiB, MiB, etc.) as a string.

@@ -613,7 +613,7 @@ SELECT
└────────────────┴────────────┘
```

-## formatReadableQuantity(x) {#formatreadablequantityx}
+## formatReadableQuantity(x)

Accepts the number. Returns a rounded number with a suffix (thousand, million, billion, etc.) as a string.

@@ -636,7 +636,7 @@ SELECT
└────────────────┴───────────────────┘
```

-## formatReadableTimeDelta {#formatreadabletimedelta}
+## formatReadableTimeDelta

Accepts the time delta in seconds. Returns a time delta with (year, month, day, hour, minute, second) as a string.

@@ -681,43 +681,43 @@ SELECT
└────────────┴─────────────────────────────────────────────────────────────────┘
```

-## least(a, b) {#leasta-b}
+## least(a, b)

Returns the smallest value from a and b.

-## greatest(a, b) {#greatesta-b}
+## greatest(a, b)

Returns the largest value of a and b.

-## uptime() {#uptime}
+## uptime()

Returns the server’s uptime in seconds.
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.

-## version() {#version}
+## version()

Returns the version of the server as a string.
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.

-## buildId() {#buildid}
+## buildId()

Returns the build ID generated by a compiler for the running ClickHouse server binary.
If it is executed in the context of a distributed table, then it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.

-## blockNumber {#blocknumber}
+## blockNumber

Returns the sequence number of the data block where the row is located.

-## rowNumberInBlock {#function-rownumberinblock}
+## rowNumberInBlock

Returns the ordinal number of the row in the data block. Different data blocks are always recalculated.

-## rowNumberInAllBlocks() {#rownumberinallblocks}
+## rowNumberInAllBlocks()

Returns the ordinal number of the row in the data block. This function only considers the affected data blocks.

-## neighbor {#neighbor}
+## neighbor

The window function that provides access to a row at a specified offset which comes before or after the current row of a given column.

@@ -834,7 +834,7 @@ Result:
└────────────┴───────┴───────────┴────────────────┘
```

-## runningDifference(x) {#other_functions-runningdifference}
+## runningDifference(x)

Calculates the difference between successive row values in the data block.
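A brief illustrative sketch (the expected deltas in the comment assume all five rows land in a single block, since the function resets per block):

```sql
-- The first row yields 0; each following row yields the delta to the previous row.
SELECT number, runningDifference(number * number) AS diff
FROM numbers(5);
-- diff (illustrative): 0, 1, 3, 5, 7
```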
@@ -912,11 +912,11 @@ WHERE diff != 1
└────────┴──────┘
```

-## runningDifferenceStartingWithFirstValue {#runningdifferencestartingwithfirstvalue}
+## runningDifferenceStartingWithFirstValue

Same as [runningDifference](./other-functions.md#other_functions-runningdifference), but returns the value of the first row for the first row; each subsequent row returns the difference from the previous row.

-## runningConcurrency {#runningconcurrency}
+## runningConcurrency

Calculates the number of concurrent events.
Each event has a start time and an end time. The start time is included in the event, while the end time is excluded. Columns with a start time and an end time must be of the same data type.

@@ -974,19 +974,19 @@ Result:
└────────────┴────────────────────────────────┘
```

-## MACNumToString(num) {#macnumtostringnum}
+## MACNumToString(num)

Accepts a UInt64 number. Interprets it as a MAC address in big endian. Returns a string containing the corresponding MAC address in the format AA:BB:CC:DD:EE:FF (colon-separated numbers in hexadecimal form).

-## MACStringToNum(s) {#macstringtonums}
+## MACStringToNum(s)

The inverse function of MACNumToString. If the MAC address has an invalid format, it returns 0.

-## MACStringToOUI(s) {#macstringtoouis}
+## MACStringToOUI(s)

Accepts a MAC address in the format AA:BB:CC:DD:EE:FF (colon-separated numbers in hexadecimal form). Returns the first three octets as a UInt64 number. If the MAC address has an invalid format, it returns 0.

-## getSizeOfEnumType {#getsizeofenumtype}
+## getSizeOfEnumType

Returns the number of fields in [Enum](../../sql-reference/data-types/enum.md).

@@ -1015,7 +1015,7 @@ SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x
└───┘
```

-## blockSerializedSize {#blockserializedsize}
+## blockSerializedSize

Returns the size on disk (without taking compression into account).

@@ -1047,7 +1047,7 @@ Result:
└───┘
```

-## toColumnTypeName {#tocolumntypename}
+## toColumnTypeName

Returns the name of the class that represents the data type of the column in RAM.

@@ -1087,7 +1087,7 @@ SELECT toColumnTypeName(CAST('2018-01-01 01:02:03' AS DateTime))

The example shows that the `DateTime` data type is stored in memory as `Const(UInt32)`.

-## dumpColumnStructure {#dumpcolumnstructure}
+## dumpColumnStructure

Outputs a detailed description of data structures in RAM.

@@ -1115,7 +1115,7 @@ SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime'))
└──────────────────────────────────────────────────────────────┘
```

-## defaultValueOfArgumentType {#defaultvalueofargumenttype}
+## defaultValueOfArgumentType

Outputs the default value for the data type.

@@ -1157,7 +1157,7 @@ SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) )
└───────────────────────────────────────────────────────┘
```

-## defaultValueOfTypeName {#defaultvalueoftypename}
+## defaultValueOfTypeName

Outputs the default value for the given type name.

@@ -1199,7 +1199,7 @@ SELECT defaultValueOfTypeName('Nullable(Int8)')
└──────────────────────────────────────────┘
```

-## indexHint {#indexhint}
+## indexHint

The function is intended for debugging and introspection purposes. It ignores its argument and always returns 1. Arguments are not even evaluated.
But for the purpose of index analysis, the argument of this function is analyzed as if it were present directly without being wrapped inside the `indexHint` function.
This allows selecting data that falls into the index ranges for the corresponding condition, but without further filtering by this condition. Since the index in ClickHouse is sparse, using `indexHint` will yield more data than specifying the same condition directly. @@ -1302,7 +1302,7 @@ Result: └────────────┴─────────┘ ``` -## replicate {#other-functions-replicate} +## replicate Creates an array with a single value. @@ -1339,7 +1339,7 @@ Result: └───────────────────────────────┘ ``` -## filesystemAvailable {#filesystemavailable} +## filesystemAvailable Returns the amount of remaining space on the filesystem where the files of the databases are located. It is always smaller than the total free space ([filesystemFree](#filesystemfree)) because some space is reserved for the OS. @@ -1371,7 +1371,7 @@ Result: └─────────────────┴────────┘ ``` -## filesystemFree {#filesystemfree} +## filesystemFree Returns the total amount of free space on the filesystem where the files of the databases are located. See also `filesystemAvailable`. @@ -1403,7 +1403,7 @@ Result: └────────────┴────────┘ ``` -## filesystemCapacity {#filesystemcapacity} +## filesystemCapacity Returns the capacity of the filesystem in bytes. For evaluation, the [path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) to the data directory must be configured. @@ -1435,7 +1435,7 @@ Result: └───────────┴────────┘ ``` -## initializeAggregation {#initializeaggregation} +## initializeAggregation Calculates the result of an aggregate function based on a single value. This function is intended for initializing aggregate functions with the combinator [-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state). You can create states of aggregate functions and insert them into columns of type [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction) or use initialized aggregates as default values. @@ -1507,7 +1507,7 @@ INSERT INTO metrics VALUES (0, initializeAggregation('sumState', toUInt64(42))) **See Also** - [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce) -## finalizeAggregation {#function-finalizeaggregation} +## finalizeAggregation Takes a state of an aggregate function. Returns the result of aggregation (or the finalized state when using the [-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) combinator). @@ -1607,7 +1607,7 @@ Result: - [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce) - [initializeAggregation](#initializeaggregation) -## runningAccumulate {#runningaccumulate} +## runningAccumulate Accumulates states of an aggregate function for each row of a data block. @@ -1713,7 +1713,7 @@ Result: As you can see, `runningAccumulate` merges states for each group of rows separately. -## joinGet {#joinget} +## joinGet The function lets you extract data from the table the same way as from a [dictionary](../../sql-reference/dictionaries/index.md). @@ -1776,12 +1776,12 @@ Result: └──────────────────────────────────────────────────┘ ``` -## modelEvaluate(model_name, …) {#function-modelevaluate} +## modelEvaluate(model_name, …) Evaluates an external model. Accepts a model name and model arguments. Returns Float64. -## throwIf(x\[, custom_message\]) {#throwifx-custom-message} +## throwIf(x\[, custom_message\]) Throws an exception if the argument is non-zero.
`custom_message` is an optional parameter: a constant string that provides an error message. @@ -1795,7 +1795,7 @@ SELECT throwIf(number = 3, 'Too many') FROM numbers(10); Code: 395. DB::Exception: Received from localhost:9000. DB::Exception: Too many. ``` -## identity {#identity} +## identity Returns the same value that was used as its argument. Used for debugging and testing; it allows cancelling the use of an index to get the query performance of a full scan. When a query is analyzed for possible use of an index, the analyzer does not look inside `identity` functions. Constant folding is not applied either. @@ -1821,7 +1821,7 @@ Result: └──────────────┘ ``` -## randomPrintableASCII {#randomascii} +## randomPrintableASCII Generates a string with a random set of [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) printable characters. @@ -1857,7 +1857,7 @@ SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers └────────┴────────────────────────────────┴──────────────────────────────────┘ -## randomString {#randomstring} +## randomString Generates a binary string of the specified length filled with random bytes (including zero bytes). @@ -1905,7 +1905,7 @@ len: 30 - [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii) -## randomFixedString {#randomfixedstring} +## randomFixedString Generates a binary string of the specified length filled with random bytes (including zero bytes). @@ -1942,7 +1942,7 @@ Result: ``` -## randomStringUTF8 {#randomstringutf8} +## randomStringUTF8 Generates a random string of a specified length. The resulting string contains valid UTF-8 code points. The values of the code points may be outside the range of assigned Unicode. @@ -1979,7 +1979,7 @@ Result: ``` -## getSetting {#getSetting} +## getSetting Returns the current value of a [custom setting](../../operations/settings/index.md#custom_settings). @@ -2014,7 +2014,7 @@ SELECT getSetting('custom_a'); - [Custom Settings](../../operations/settings/index.md#custom_settings) -## isDecimalOverflow {#is-decimal-overflow} +## isDecimalOverflow Checks whether the [Decimal](../../sql-reference/data-types/decimal.md) value is out of its (or specified) precision. @@ -2051,7 +2051,7 @@ Result: 1 1 1 1 ``` -## countDigits {#count-digits} +## countDigits Returns the number of decimal digits needed to represent the value. @@ -2091,7 +2091,7 @@ Result: 10 10 19 19 39 39 ``` -## errorCodeToName {#error-code-to-name} +## errorCodeToName **Returned value** @@ -2111,7 +2111,7 @@ Result: UNSUPPORTED_METHOD ``` -## tcpPort {#tcpPort} +## tcpPort Returns the [native interface](../../interfaces/tcp.md) TCP port number that this server listens on. If it is executed in the context of a distributed table, then it generates a normal column, otherwise it produces a constant value. @@ -2152,7 +2152,7 @@ Result: - [tcp_port](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port) -## currentProfiles {#current-profiles} +## currentProfiles Returns a list of the current [settings profiles](../../operations/access-rights.md#settings-profiles-management) for the current user. @@ -2170,7 +2170,7 @@ currentProfiles() Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). -## enabledProfiles {#enabled-profiles} +## enabledProfiles Returns the settings profiles assigned to the current user, both explicitly and implicitly.
Explicitly assigned profiles are the same as returned by the [currentProfiles](#current-profiles) function. Implicitly assigned profiles include parent profiles of other assigned profiles, profiles assigned via granted roles, profiles assigned via their own settings, and the main default profile (see the `default_profile` section in the main server configuration file). @@ -2186,7 +2186,7 @@ enabledProfiles() Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). -## defaultProfiles {#default-profiles} +## defaultProfiles Returns all the profiles specified in the current user's definition (see the [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement). @@ -2202,7 +2202,7 @@ defaultProfiles() Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). -## currentRoles {#current-roles} +## currentRoles Returns the names of the roles that are current for the current user. The current roles can be changed by the [SET ROLE](../../sql-reference/statements/set-role.md#set-role-statement) statement. If the `SET ROLE` statement was not used, the function `currentRoles` returns the same as `defaultRoles`. @@ -2218,7 +2218,7 @@ currentRoles() Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). -## enabledRoles {#enabled-roles} +## enabledRoles Returns the names of the current roles and the roles granted to some of the current roles. @@ -2234,7 +2234,7 @@ enabledRoles() Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). -## defaultRoles {#default-roles} +## defaultRoles Returns the names of the roles which are enabled by default for the current user when they log in. Initially, these are all roles granted to the current user (see [GRANT](../../sql-reference/statements/grant/#grant-select)), but that can be changed with the [SET DEFAULT ROLE](../../sql-reference/statements/set-role.md#set-default-role-statement) statement. @@ -2250,7 +2250,7 @@ defaultRoles() Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). -## getServerPort {#getserverport} +## getServerPort Returns the number of the server port. When the port is not used by the server, an exception is thrown. @@ -2297,7 +2297,7 @@ Result: └───────────────────────────┘ ``` -## queryID {#query-id} +## queryID Returns the ID of the current query. Other parameters of a query can be extracted from the [system.query_log](../../operations/system-tables/query_log.md) table via `query_id`. @@ -2333,7 +2333,7 @@ Result: └─────────┘ ``` -## initialQueryID {#initial-query-id} +## initialQueryID Returns the ID of the initial query (the query that started distributed query processing). Other parameters of a query can be extracted from the [system.query_log](../../operations/system-tables/query_log.md) table via `initial_query_id`. @@ -2369,7 +2369,7 @@ Result: └─────────┘ ``` -## shardNum {#shard-num} +## shardNum Returns the index of a shard which processes a part of data for a distributed query. Indices start from `1`. If a query is not distributed, then the constant value `0` is returned. @@ -2411,7 +2411,7 @@ Result: - [Distributed Table Engine](../../engines/table-engines/special/distributed.md) -## shardCount {#shard-count} +## shardCount Returns the total number of shards for a distributed query. If a query is not distributed, then the constant value `0` is returned.
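A quick way to see both shard functions in one query is to read through the `cluster` table function (a minimal sketch; `test_cluster_two_shards` is assumed to be a cluster defined in the server configuration):

```sql
-- One row per shard; with two shards this returns (1, 2) and (2, 2).
SELECT shardNum() AS shard, shardCount() AS total
FROM cluster('test_cluster_two_shards', system.one);
```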
@@ -2432,7 +2432,7 @@ Type: [UInt32](../../sql-reference/data-types/int-uint.md). - [shardNum()](#shard-num) function example also contains `shardCount()` function call. -## getOSKernelVersion {#getoskernelversion} +## getOSKernelVersion Returns a string with the current OS kernel version. @@ -2468,7 +2468,7 @@ Result: └─────────────────────────┘ ``` -## zookeeperSessionUptime {#zookeepersessionuptime} +## zookeeperSessionUptime Returns the uptime of the current ZooKeeper session in seconds. diff --git a/docs/en/sql-reference/functions/random-functions.md b/docs/en/sql-reference/functions/random-functions.md index 3931898f081..452be8a17b2 100644 --- a/docs/en/sql-reference/functions/random-functions.md +++ b/docs/en/sql-reference/functions/random-functions.md @@ -3,7 +3,7 @@ sidebar_position: 51 sidebar_label: Pseudo-Random Numbers --- -# Functions for Generating Pseudo-Random Numbers {#functions-for-generating-pseudo-random-numbers} +# Functions for Generating Pseudo-Random Numbers All the functions accept zero arguments or one argument. If an argument is passed, it can be any type, and its value is not used for anything. The only purpose of this argument is to prevent common subexpression elimination, so that two different instances of the same function return different columns with different random numbers. @@ -11,19 +11,19 @@ All the functions accept zero arguments or one argument. If an argument is passe Non-cryptographic generators of pseudo-random numbers are used. ::: -## rand, rand32 {#rand} +## rand, rand32 Returns a pseudo-random UInt32 number, evenly distributed among all UInt32-type numbers. Uses a linear congruential generator. -## rand64 {#rand64} +## rand64 Returns a pseudo-random UInt64 number, evenly distributed among all UInt64-type numbers. Uses a linear congruential generator. -## randConstant {#randconstant} +## randConstant Produces a constant column with a random value. @@ -62,17 +62,17 @@ Result: └────────────┴────────────┴──────────────┴────────────────┴─────────────────┴──────────────────────┘ ``` -# Random Functions for Working with Strings {#random-functions-for-working-with-strings} +# Random Functions for Working with Strings -## randomString {#random-string} +## randomString -## randomFixedString {#random-fixed-string} +## randomFixedString -## randomPrintableASCII {#random-printable-ascii} +## randomPrintableASCII -## randomStringUTF8 {#random-string-utf8} +## randomStringUTF8 -## fuzzBits {#fuzzbits} +## fuzzBits **Syntax** diff --git a/docs/en/sql-reference/functions/rounding-functions.md b/docs/en/sql-reference/functions/rounding-functions.md index a469318e623..37f1c35aa7e 100644 --- a/docs/en/sql-reference/functions/rounding-functions.md +++ b/docs/en/sql-reference/functions/rounding-functions.md @@ -3,9 +3,9 @@ sidebar_position: 45 sidebar_label: Rounding --- -# Rounding Functions {#rounding-functions} +# Rounding Functions -## floor(x\[, N\]) {#floorx-n} +## floor(x\[, N\]) Returns the largest round number that is less than or equal to `x`. A round number is a multiple of 1/10N, or the nearest number of the appropriate data type if 1 / 10N isn’t exact. ‘N’ is an integer constant, optional parameter. By default it is zero, which means to round to an integer. @@ -17,15 +17,15 @@ Examples: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.` For integer arguments, it makes sense to round with a negative `N` value (for non-negative `N`, the function does not do anything). 
If rounding causes overflow (for example, floor(-128, -1)), an implementation-specific result is returned. -## ceil(x\[, N\]), ceiling(x\[, N\]) {#ceilx-n-ceilingx-n} +## ceil(x\[, N\]), ceiling(x\[, N\]) Returns the smallest round number that is greater than or equal to `x`. In every other way, it is the same as the `floor` function (see above). -## trunc(x\[, N\]), truncate(x\[, N\]) {#truncx-n-truncatex-n} +## trunc(x\[, N\]), truncate(x\[, N\]) Returns the round number with largest absolute value that has an absolute value less than or equal to `x`‘s. In every other way, it is the same as the ’floor’ function (see above). -## round(x\[, N\]) {#rounding_functions-round} +## round(x\[, N\]) Rounds a value to a specified number of decimal places. @@ -47,7 +47,7 @@ round(expression [, decimal_places]) The rounded number of the same type as the input number. -### Examples {#examples} +### Examples **Example of use with Float** @@ -102,7 +102,7 @@ round(3.65, 1) = 3.6 - [roundBankers](#roundbankers) -## roundBankers {#roundbankers} +## roundBankers Rounds a number to a specified decimal position. @@ -140,7 +140,7 @@ roundBankers(expression [, decimal_places]) A value rounded by the banker’s rounding method. -### Examples {#examples-1} +### Examples **Example of use** @@ -183,19 +183,19 @@ roundBankers(10.755, 2) = 10.76 - [round](#rounding_functions-round) -## roundToExp2(num) {#roundtoexp2num} +## roundToExp2(num) Accepts a number. If the number is less than one, it returns 0. Otherwise, it rounds the number down to the nearest (whole non-negative) degree of two. -## roundDuration(num) {#rounddurationnum} +## roundDuration(num) Accepts a number. If the number is less than one, it returns 0. Otherwise, it rounds the number down to numbers from the set: 1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000. -## roundAge(num) {#roundagenum} +## roundAge(num) Accepts a number. If the number is less than 18, it returns 0. Otherwise, it rounds the number down to a number from the set: 18, 25, 35, 45, 55. -## roundDown(num, arr) {#rounddownnum-arr} +## roundDown(num, arr) Accepts a number and rounds it down to an element in the specified array. If the value is less than the lowest bound, the lowest bound is returned. diff --git a/docs/en/sql-reference/functions/splitting-merging-functions.md b/docs/en/sql-reference/functions/splitting-merging-functions.md index 7e94c225f6b..3c6db8cc19e 100644 --- a/docs/en/sql-reference/functions/splitting-merging-functions.md +++ b/docs/en/sql-reference/functions/splitting-merging-functions.md @@ -3,9 +3,9 @@ sidebar_position: 47 sidebar_label: Splitting and Merging Strings and Arrays --- -# Functions for Splitting and Merging Strings and Arrays {#functions-for-splitting-and-merging-strings-and-arrays} +# Functions for Splitting and Merging Strings and Arrays -## splitByChar(separator, s) {#splitbycharseparator-s} +## splitByChar(separator, s) Splits a string into substrings separated by a specified character. It uses a constant string `separator` which consisting of exactly one character. Returns an array of selected substrings. Empty substrings may be selected if the separator occurs at the beginning or end of the string, or if there are multiple consecutive separators. @@ -43,7 +43,7 @@ SELECT splitByChar(',', '1,2,3,abcde'); └─────────────────────────────────┘ ``` -## splitByString(separator, s) {#splitbystringseparator-s} +## splitByString(separator, s) Splits a string into substrings separated by a string. 
It uses a constant string `separator` of multiple characters as the separator. If the string `separator` is empty, it will split the string `s` into an array of single characters. @@ -90,7 +90,7 @@ SELECT splitByString('', 'abcde'); └────────────────────────────┘ ``` -## splitByRegexp(regexp, s) {#splitbyregexpseparator-s} +## splitByRegexp(regexp, s) Splits a string into substrings separated by a regular expression. It uses a regular expression string `regexp` as the separator. If the `regexp` is empty, it will split the string `s` into an array of single characters. If no match is found for this regular expression, the string `s` won't be split. @@ -145,7 +145,7 @@ Result: └────────────────────────────┘ ``` -## splitByWhitespace(s) {#splitbywhitespaceseparator-s} +## splitByWhitespace(s) Splits a string into substrings separated by whitespace characters. Returns an array of selected substrings. @@ -178,7 +178,7 @@ SELECT splitByWhitespace(' 1! a, b. '); └─────────────────────────────────────┘ ``` -## splitByNonAlpha(s) {#splitbynonalphaseparator-s} +## splitByNonAlpha(s) Splits a string into substrings separated by whitespace and punctuation characters. Returns an array of selected substrings. @@ -211,12 +211,12 @@ SELECT splitByNonAlpha(' 1! a, b. '); └───────────────────────────────────┘ ``` -## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator} +## arrayStringConcat(arr\[, separator\]) Concatenates string representations of values listed in the array with the separator. `separator` is an optional parameter: a constant string, set to an empty string by default. Returns the string. -## alphaTokens(s) {#alphatokenss} +## alphaTokens(s) Selects substrings of consecutive bytes from the ranges a-z and A-Z.Returns an array of substrings. @@ -232,7 +232,7 @@ SELECT alphaTokens('abca1abc'); └─────────────────────────┘ ``` -## extractAllGroups(text, regexp) {#extractallgroups} +## extractAllGroups(text, regexp) Extracts all groups from non-overlapping substrings matched by a regular expression. @@ -271,7 +271,7 @@ Result: └───────────────────────────────────────────────────────────────────────┘ ``` -## ngrams {#ngrams} +## ngrams Splits the UTF-8 string into n-grams of `ngramsize` symbols. @@ -308,7 +308,7 @@ Result: └───────────────────────────────────────────────────┘ ``` -## tokens {#tokens} +## tokens Splits a string into tokens using non-alphanumeric ASCII characters as separators. diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index 0c1c738f663..66e9aa98e67 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -3,13 +3,13 @@ sidebar_position: 40 sidebar_label: Strings --- -# Functions for Working with Strings {#functions-for-working-with-strings} +# Functions for Working with Strings :::note Functions for [searching](../../sql-reference/functions/string-search-functions.md) and [replacing](../../sql-reference/functions/string-replace-functions.md) in strings are described separately. ::: -## empty {#empty} +## empty Checks whether the input string is empty. @@ -49,7 +49,7 @@ Result: └───────────┘ ``` -## notEmpty {#notempty} +## notEmpty Checks whether the input string is non-empty. @@ -89,28 +89,28 @@ Result: └──────────────────┘ ``` -## length {#length} +## length Returns the length of a string in bytes (not in characters, and not in code points). The result type is UInt64. The function also works for arrays. 
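For example, a quick sketch showing both the string and the array form of `length`:

```sql
SELECT length('hello') AS string_bytes, length([1, 2, 3]) AS array_elements;
-- 5, 3
```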
-## lengthUTF8 {#lengthutf8} +## lengthUTF8 Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it does not throw an exception). The result type is UInt64. -## char_length, CHAR_LENGTH {#char-length} +## char_length, CHAR_LENGTH Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it does not throw an exception). The result type is UInt64. -## character_length, CHARACTER_LENGTH {#character-length} +## character_length, CHARACTER_LENGTH Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it does not throw an exception). The result type is UInt64. -## leftPad {#leftpad} +## leftPad Pads the current string from the left with spaces or a specified string (multiple times, if needed) until the resulting string reaches the given length. Similarly to the MySQL `LPAD` function. @@ -148,7 +148,7 @@ Result: └────────────────────────┴───────────────────┘ ``` -## leftPadUTF8 {#leftpadutf8} +## leftPadUTF8 Pads the current string from the left with spaces or a specified string (multiple times, if needed) until the resulting string reaches the given length. Similarly to the MySQL `LPAD` function. While in the [leftPad](#leftpad) function the length is measured in bytes, here in the `leftPadUTF8` function it is measured in code points. @@ -186,7 +186,7 @@ Result: └─────────────────────────────┴────────────────────────┘ ``` -## rightPad {#rightpad} +## rightPad Pads the current string from the right with spaces or a specified string (multiple times, if needed) until the resulting string reaches the given length. Similarly to the MySQL `RPAD` function. @@ -224,7 +224,7 @@ Result: └─────────────────────────┴────────────────────┘ ``` -## rightPadUTF8 {#rightpadutf8} +## rightPadUTF8 Pads the current string from the right with spaces or a specified string (multiple times, if needed) until the resulting string reaches the given length. Similarly to the MySQL `RPAD` function. While in the [rightPad](#rightpad) function the length is measured in bytes, here in the `rightPadUTF8` function it is measured in code points. @@ -262,33 +262,33 @@ Result: └──────────────────────────────┴─────────────────────────┘ ``` -## lower, lcase {#lower} +## lower, lcase Converts ASCII Latin symbols in a string to lowercase. -## upper, ucase {#upper} +## upper, ucase Converts ASCII Latin symbols in a string to uppercase. -## lowerUTF8 {#lowerutf8} +## lowerUTF8 Converts a string to lowercase, assuming the string contains a set of bytes that make up a UTF-8 encoded text. It does not detect the language. So for Turkish the result might not be exactly correct. If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point. If the string contains a set of bytes that is not UTF-8, then the behavior is undefined. -## upperUTF8 {#upperutf8} +## upperUTF8 Converts a string to uppercase, assuming the string contains a set of bytes that make up a UTF-8 encoded text. It does not detect the language. So for Turkish the result might not be exactly correct. 
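For example (a small sketch; the caveats below still apply):

```sql
SELECT lowerUTF8('MÜNCHEN') AS low, upperUTF8('münchen') AS up;
-- 'münchen', 'MÜNCHEN'
```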
If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point. If the string contains a set of bytes that is not UTF-8, then the behavior is undefined. -## isValidUTF8 {#isvalidutf8} +## isValidUTF8 Returns 1 if the set of bytes is valid UTF-8 encoded, otherwise 0. -## toValidUTF8 {#tovalidutf8} +## toValidUTF8 Replaces invalid UTF-8 characters by the `�` (U+FFFD) character. Any run of consecutive invalid characters is collapsed into a single replacement character. @@ -314,7 +314,7 @@ SELECT toValidUTF8('\x61\xF0\x80\x80\x80b'); └───────────────────────┘ ``` -## repeat {#repeat} +## repeat Repeats a string as many times as specified and concatenates the replicated values as a single string. @@ -353,15 +353,15 @@ Result: └────────────────────────────────┘ ``` -## reverse {#reverse} +## reverse Reverses the string (as a sequence of bytes). -## reverseUTF8 {#reverseutf8} +## reverseUTF8 Reverses a sequence of Unicode code points, assuming that the string contains a set of bytes representing a UTF-8 text. Otherwise, it does something else (it does not throw an exception). -## format(pattern, s0, s1, …) {#format} +## format(pattern, s0, s1, …) Formats a constant pattern with the strings listed in the arguments. `pattern` is a simplified Python format pattern. The format string contains “replacement fields” surrounded by curly braces `{}`. Anything that is not contained in braces is considered literal text, which is copied unchanged to the output. If you need to include a brace character in the literal text, it can be escaped by doubling: `{{ '{{' }}` and `{{ '}}' }}`. Field names can be numbers (starting from zero) or empty (then they are treated as consecutive numbers). @@ -385,7 +385,7 @@ SELECT format('{} {}', 'Hello', 'World') └───────────────────────────────────┘ ``` -## concat {#concat} +## concat Concatenates the strings listed in the arguments, without a separator. @@ -421,7 +421,7 @@ Result: └─────────────────────────────┘ ``` -## concatAssumeInjective {#concatassumeinjective} +## concatAssumeInjective Same as [concat](#concat), with the difference that you need to ensure that `concat(s1, s2, ...) → sn` is injective; this property is used for optimization of GROUP BY. @@ -478,43 +478,43 @@ Result: └────────────────────┴────────────┘ ``` -## substring(s, offset, length), mid(s, offset, length), substr(s, offset, length) {#substring} +## substring(s, offset, length), mid(s, offset, length), substr(s, offset, length) Returns a substring starting with the byte from the ‘offset’ index that is ‘length’ bytes long. Character indexing starts from one (as in standard SQL). -## substringUTF8(s, offset, length) {#substringutf8} +## substringUTF8(s, offset, length) The same as ‘substring’, but for Unicode code points. Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, it returns some result (it does not throw an exception). -## appendTrailingCharIfAbsent(s, c) {#appendtrailingcharifabsent} +## appendTrailingCharIfAbsent(s, c) If the ‘s’ string is non-empty and does not contain the ‘c’ character at the end, it appends the ‘c’ character to the end. -## convertCharset(s, from, to) {#convertcharset} +## convertCharset(s, from, to) Returns the string ‘s’ that was converted from the encoding in ‘from’ to the encoding in ‘to’. -## base64Encode(s) {#base64encode} +## base64Encode(s) Encodes the ‘s’ string into base64. Alias: `TO_BASE64`.
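For example, a minimal round-trip sketch (using `base64Decode`, described next):

```sql
SELECT base64Encode('clickhouse') AS encoded,
       base64Decode(base64Encode('clickhouse')) AS roundtrip;
-- 'Y2xpY2tob3VzZQ==', 'clickhouse'
```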
-## base64Decode(s) {#base64decode} +## base64Decode(s) Decodes the base64-encoded string ‘s’ into the original string. Raises an exception in case of failure. Alias: `FROM_BASE64`. -## tryBase64Decode(s) {#trybase64decode} +## tryBase64Decode(s) Similar to base64Decode, but returns an empty string in case of error. -## endsWith(s, suffix) {#endswith} +## endsWith(s, suffix) Returns whether the string ends with the specified suffix: 1 if it does, otherwise 0. -## startsWith(str, prefix) {#startswith} +## startsWith(str, prefix) Returns 1 if the string starts with the specified prefix, otherwise it returns 0. @@ -543,7 +543,7 @@ Result: └───────────────────────────────────┘ ``` -## trim {#trim} +## trim Removes all specified characters from the start or end of a string. By default removes all consecutive occurrences of common whitespace (ASCII character 32) from both ends of a string. @@ -581,7 +581,7 @@ Result: └───────────────────────────────────────────────┘ ``` -## trimLeft {#trimleft} +## trimLeft Removes all consecutive occurrences of common whitespace (ASCII character 32) from the beginning of a string. It does not remove other kinds of whitespace characters (tab, no-break space, etc.). @@ -619,7 +619,7 @@ Result: └─────────────────────────────────────┘ ``` -## trimRight {#trimright} +## trimRight Removes all consecutive occurrences of common whitespace (ASCII character 32) from the end of a string. It does not remove other kinds of whitespace characters (tab, no-break space, etc.). @@ -657,7 +657,7 @@ Result: └──────────────────────────────────────┘ ``` -## trimBoth {#trimboth} +## trimBoth Removes all consecutive occurrences of common whitespace (ASCII character 32) from both ends of a string. It does not remove other kinds of whitespace characters (tab, no-break space, etc.). @@ -695,25 +695,25 @@ Result: └─────────────────────────────────────┘ ``` -## CRC32(s) {#crc32} +## CRC32(s) Returns the CRC32 checksum of a string, using CRC-32-IEEE 802.3 polynomial and initial value `0xffffffff` (zlib implementation). The result type is UInt32. -## CRC32IEEE(s) {#crc32ieee} +## CRC32IEEE(s) Returns the CRC32 checksum of a string, using CRC-32-IEEE 802.3 polynomial. The result type is UInt32. -## CRC64(s) {#crc64} +## CRC64(s) Returns the CRC64 checksum of a string, using CRC-64-ECMA polynomial. The result type is UInt64. -## normalizeQuery {#normalized-query} +## normalizeQuery Replaces literals, sequences of literals and complex aliases with placeholders. @@ -749,7 +749,7 @@ Result: └──────────┘ ``` -## normalizedQueryHash {#normalized-query-hash} +## normalizedQueryHash Returns identical 64-bit hash values, computed without the values of literals, for similar queries. It helps to analyze the query log. @@ -785,7 +785,7 @@ Result: └─────┘ ``` -## normalizeUTF8NFC {#normalizeutf8nfc} +## normalizeUTF8NFC Converts a string to [NFC normalized form](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms), assuming the string contains a set of bytes that make up a UTF-8 encoded text. @@ -821,7 +821,7 @@ Result: └─────────────┴─────┴─────────┘ ``` -## normalizeUTF8NFD {#normalizeutf8nfd} +## normalizeUTF8NFD Converts a string to [NFD normalized form](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms), assuming the string contains a set of bytes that make up a UTF-8 encoded text.
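For instance, a sketch of how normalization changes the byte length while the rendered text stays the same:

```sql
-- NFC 'ê' is one 2-byte code point; NFD decomposes it into 'e' plus a combining accent (3 bytes).
SELECT length('ê') AS nfc_bytes, length(normalizeUTF8NFD('ê')) AS nfd_bytes;
-- 2, 3
```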
@@ -857,7 +857,7 @@ Result: └─────────────┴─────┴─────────┘ ``` -## normalizeUTF8NFKC {#normalizeutf8nfkc} +## normalizeUTF8NFKC Converts a string to [NFKC normalized form](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms), assuming the string contains a set of bytes that make up a UTF-8 encoded text. @@ -893,7 +893,7 @@ Result: └─────────────┴──────┴──────────┘ ``` -## normalizeUTF8NFKD {#normalizeutf8nfkd} +## normalizeUTF8NFKD Converts a string to [NFKD normalized form](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms), assuming the string contains a set of bytes that make up a UTF-8 encoded text. @@ -929,7 +929,7 @@ Result: └─────────────┴──────┴──────────┘ ``` -## encodeXMLComponent {#encode-xml-component} +## encodeXMLComponent Escapes characters to place string into XML text node or attribute. @@ -971,7 +971,7 @@ Hello, "world"! 'foo' ``` -## decodeXMLComponent {#decode-xml-component} +## decodeXMLComponent Replaces XML predefined entities with characters. Predefined entities are `"` `&` `'` `>` `<` This function also replaces numeric character references with Unicode characters. Both decimal (like `✓`) and hexadecimal (`✓`) forms are supported. @@ -1014,7 +1014,7 @@ Result: -## extractTextFromHTML {#extracttextfromhtml} +## extractTextFromHTML A function to extract text from HTML or XHTML. It does not necessarily 100% conform to any of the HTML, XML or XHTML standards, but the implementation is reasonably accurate and it is fast. The rules are the following: diff --git a/docs/en/sql-reference/functions/string-replace-functions.md b/docs/en/sql-reference/functions/string-replace-functions.md index 1df8bfd0c44..08fd979ce8f 100644 --- a/docs/en/sql-reference/functions/string-replace-functions.md +++ b/docs/en/sql-reference/functions/string-replace-functions.md @@ -3,22 +3,22 @@ sidebar_position: 42 sidebar_label: For Replacing in Strings --- -# Functions for Searching and Replacing in Strings {#functions-for-searching-and-replacing-in-strings} +# Functions for Searching and Replacing in Strings :::note Functions for [searching](../../sql-reference/functions/string-search-functions.md) and [other manipulations with strings](../../sql-reference/functions/string-functions.md) are described separately. ::: -## replaceOne(haystack, pattern, replacement) {#replaceonehaystack-pattern-replacement} +## replaceOne(haystack, pattern, replacement) Replaces the first occurrence, if it exists, of the ‘pattern’ substring in ‘haystack’ with the ‘replacement’ substring. Hereafter, ‘pattern’ and ‘replacement’ must be constants. -## replaceAll(haystack, pattern, replacement), replace(haystack, pattern, replacement) {#replaceallhaystack-pattern-replacement-replacehaystack-pattern-replacement} +## replaceAll(haystack, pattern, replacement), replace(haystack, pattern, replacement) Replaces all occurrences of the ‘pattern’ substring in ‘haystack’ with the ‘replacement’ substring. -## replaceRegexpOne(haystack, pattern, replacement) {#replaceregexponehaystack-pattern-replacement} +## replaceRegexpOne(haystack, pattern, replacement) Replacement using the ‘pattern’ regular expression. A re2 regular expression. Replaces only the first occurrence, if it exists. 
@@ -59,7 +59,7 @@ SELECT replaceRegexpOne('Hello, World!', '.*', '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0') └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -## replaceRegexpAll(haystack, pattern, replacement) {#replaceregexpallhaystack-pattern-replacement} +## replaceRegexpAll(haystack, pattern, replacement) This does the same thing, but replaces all the occurrences. Example: @@ -86,7 +86,7 @@ SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res └─────────────────────┘ ``` -## regexpQuoteMeta(s) {#regexpquotemetas} +## regexpQuoteMeta(s) The function adds a backslash before some predefined characters in the string. Predefined characters: `\0`, `\\`, `|`, `(`, `)`, `^`, `$`, `.`, `[`, `]`, `?`, `*`, `+`, `{`, `:`, `-`. diff --git a/docs/en/sql-reference/functions/string-search-functions.md b/docs/en/sql-reference/functions/string-search-functions.md index 985d9f1e63a..f4a13708770 100644 --- a/docs/en/sql-reference/functions/string-search-functions.md +++ b/docs/en/sql-reference/functions/string-search-functions.md @@ -3,7 +3,7 @@ sidebar_position: 41 sidebar_label: For Searching in Strings --- -# Functions for Searching in Strings {#functions-for-searching-strings} +# Functions for Searching in Strings The search is case-sensitive by default in all these functions. There are separate variants for case insensitive search. @@ -11,7 +11,7 @@ The search is case-sensitive by default in all these functions. There are separa Functions for [replacing](../../sql-reference/functions/string-replace-functions.md) and [other manipulations with strings](../../sql-reference/functions/string-functions.md) are described separately. ::: -## position(haystack, needle), locate(haystack, needle) {#position} +## position(haystack, needle), locate(haystack, needle) Searches for the substring `needle` in the string `haystack`. @@ -124,7 +124,7 @@ Result: └─────────────────────────────┘ ``` -## positionCaseInsensitive {#positioncaseinsensitive} +## positionCaseInsensitive The same as [position](#position) returns the position (in bytes) of the found substring in the string, starting from 1. Use the function for a case-insensitive search. @@ -165,7 +165,7 @@ Result: └───────────────────────────────────────────────────┘ ``` -## positionUTF8 {#positionutf8} +## positionUTF8 Returns the position (in Unicode points) of the found substring in the string, starting from 1. @@ -240,7 +240,7 @@ Result: └────────────────────────────────────────┘ ``` -## positionCaseInsensitiveUTF8 {#positioncaseinsensitiveutf8} +## positionCaseInsensitiveUTF8 The same as [positionUTF8](#positionutf8), but is case-insensitive. Returns the position (in Unicode points) of the found substring in the string, starting from 1. @@ -281,7 +281,7 @@ Result: └────────────────────────────────────────────────────┘ ``` -## multiSearchAllPositions {#multisearchallpositions} +## multiSearchAllPositions The same as [position](../../sql-reference/functions/string-search-functions.md#position) but returns `Array` of positions (in bytes) of the found corresponding substrings in the string. Positions are indexed starting from 1. @@ -322,23 +322,23 @@ Result: └───────────────────────────────────────────────────────────────────┘ ``` -## multiSearchAllPositionsUTF8 {#multiSearchAllPositionsUTF8} +## multiSearchAllPositionsUTF8 See `multiSearchAllPositions`. 
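For example, a minimal case-sensitive sketch of `multiSearchAllPositions`:

```sql
SELECT multiSearchAllPositions('Hello, World!', ['hello', '!', 'world']) AS res;
-- [0, 13, 0]  ('hello' and 'world' are not found case-sensitively; '!' is at byte 13)
```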
-## multiSearchFirstPosition(haystack, \[needle1, needle2, …, needlen\]) {#multisearchfirstposition} +## multiSearchFirstPosition(haystack, \[needle1, needle2, …, needlen\]) The same as `position`, but returns the leftmost offset in the string `haystack` that matches any of the needles. For a case-insensitive search and/or in UTF-8 format use functions `multiSearchFirstPositionCaseInsensitive, multiSearchFirstPositionUTF8, multiSearchFirstPositionCaseInsensitiveUTF8`. -## multiSearchFirstIndex(haystack, \[needle1, needle2, …, needlen\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen} +## multiSearchFirstIndex(haystack, \[needle1, needle2, …, needlen\]) Returns the index `i` (starting from 1) of the leftmost found needle_i in the string `haystack`, and 0 otherwise. For a case-insensitive search and/or in UTF-8 format use functions `multiSearchFirstIndexCaseInsensitive, multiSearchFirstIndexUTF8, multiSearchFirstIndexCaseInsensitiveUTF8`. -## multiSearchAny(haystack, \[needle1, needle2, …, needlen\]) {#function-multisearchany} +## multiSearchAny(haystack, \[needle1, needle2, …, needlen\]) Returns 1 if at least one string needle_i matches the string `haystack`, and 0 otherwise. @@ -348,7 +348,7 @@ For a case-insensitive search or/and in UTF-8 format use functions `multiSearchA In all `multiSearch*` functions the number of needles should be less than 2^8 because of implementation specifics. ::: -## match(haystack, pattern) {#matchhaystack-pattern} +## match(haystack, pattern) Checks whether the string matches the `pattern` regular expression. A `re2` regular expression. The [syntax](https://github.com/google/re2/wiki/Syntax) of the `re2` regular expressions is more limited than the syntax of the Perl regular expressions. @@ -357,7 +357,7 @@ Returns 0 if it does not match, or 1 if it matches. The regular expression works with the string as if it is a set of bytes. The regular expression can’t contain null bytes. For patterns to search for substrings in a string, it is better to use LIKE or ‘position’, since they work much faster. -## multiMatchAny(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchanyhaystack-pattern1-pattern2-patternn} +## multiMatchAny(haystack, \[pattern1, pattern2, …, patternn\]) The same as `match`, but returns 0 if none of the regular expressions are matched and 1 if any of the patterns matches. It uses the [hyperscan](https://github.com/intel/hyperscan) library. For patterns to search substrings in a string, it is better to use `multiSearchAny` since it works much faster. @@ -365,23 +365,23 @@ The same as `match`, but returns 0 if none of the regular expressions are matche The length of any `haystack` string must be less than 2^32 bytes, otherwise an exception is thrown. This restriction takes place because of the hyperscan API. ::: -## multiMatchAnyIndex(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn} +## multiMatchAnyIndex(haystack, \[pattern1, pattern2, …, patternn\]) The same as `multiMatchAny`, but returns any index that matches the haystack. -## multiMatchAllIndices(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchallindiceshaystack-pattern1-pattern2-patternn} +## multiMatchAllIndices(haystack, \[pattern1, pattern2, …, patternn\]) The same as `multiMatchAny`, but returns the array of all indices that match the haystack in any order.
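For example, a small sketch (assuming hyperscan support is enabled; see the note further down):

```sql
-- Patterns 1 ('Hello') and 3 ('World') match, pattern 2 does not.
SELECT multiMatchAllIndices('Hello, World!', ['Hello', 'Foo', 'World']) AS idx;
-- contains 1 and 3
```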
-## multiFuzzyMatchAny(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn} +## multiFuzzyMatchAny(haystack, distance, \[pattern1, pattern2, …, patternn\]) The same as `multiMatchAny`, but returns 1 if any pattern matches the haystack within a constant [edit distance](https://en.wikipedia.org/wiki/Edit_distance). This function relies on the experimental feature of [hyperscan](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching) library, and can be slow for some corner cases. The performance depends on the edit distance value and patterns used, but it's always more expensive compared to a non-fuzzy variants. -## multiFuzzyMatchAnyIndex(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn} +## multiFuzzyMatchAnyIndex(haystack, distance, \[pattern1, pattern2, …, patternn\]) The same as `multiFuzzyMatchAny`, but returns any index that matches the haystack within a constant edit distance. -## multiFuzzyMatchAllIndices(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchallindiceshaystack-distance-pattern1-pattern2-patternn} +## multiFuzzyMatchAllIndices(haystack, distance, \[pattern1, pattern2, …, patternn\]) The same as `multiFuzzyMatchAny`, but returns the array of all indices in any order that match the haystack within a constant edit distance. @@ -393,15 +393,15 @@ The same as `multiFuzzyMatchAny`, but returns the array of all indices in any or To turn off all functions that use hyperscan, use setting `SET allow_hyperscan = 0;`. ::: -## extract(haystack, pattern) {#extracthaystack-pattern} +## extract(haystack, pattern) Extracts a fragment of a string using a regular expression. If ‘haystack’ does not match the ‘pattern’ regex, an empty string is returned. If the regex does not contain subpatterns, it takes the fragment that matches the entire regex. Otherwise, it takes the fragment that matches the first subpattern. -## extractAll(haystack, pattern) {#extractallhaystack-pattern} +## extractAll(haystack, pattern) Extracts all the fragments of a string using a regular expression. If ‘haystack’ does not match the ‘pattern’ regex, an empty string is returned. Returns an array of strings consisting of all matches to the regex. In general, the behavior is the same as the ‘extract’ function (it takes the first subpattern, or the entire expression if there isn’t a subpattern). -## extractAllGroupsHorizontal {#extractallgroups-horizontal} +## extractAllGroupsHorizontal Matches all groups of the `haystack` string using the `pattern` regular expression. Returns an array of arrays, where the first array includes all fragments matching the first group, the second array - matching the second group, etc. @@ -446,7 +446,7 @@ Result: - [extractAllGroupsVertical](#extractallgroups-vertical) -## extractAllGroupsVertical {#extractallgroups-vertical} +## extractAllGroupsVertical Matches all groups of the `haystack` string using the `pattern` regular expression. Returns an array of arrays, where each array includes matching fragments from every group. Fragments are grouped in order of appearance in the `haystack`. @@ -487,7 +487,7 @@ Result: - [extractAllGroupsHorizontal](#extractallgroups-horizontal) -## like(haystack, pattern), haystack LIKE pattern operator {#function-like} +## like(haystack, pattern), haystack LIKE pattern operator Checks whether a string matches a simple regular expression. 
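For example, a minimal sketch of the group-per-array layout of `extractAllGroupsHorizontal`:

```sql
SELECT extractAllGroupsHorizontal('abc=111, def=222', '(\\w+)=(\\w+)') AS groups;
-- [['abc','def'], ['111','222']]
```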
The regular expression can contain the metasymbols `%` and `_`. @@ -501,11 +501,11 @@ Use the backslash (`\`) for escaping metasymbols. See the note on escaping in th For regular expressions like `%needle%`, the code is more optimal and works as fast as the `position` function. For other regular expressions, the code is the same as for the ‘match’ function. -## notLike(haystack, pattern), haystack NOT LIKE pattern operator {#function-notlike} +## notLike(haystack, pattern), haystack NOT LIKE pattern operator The same thing as ‘like’, but negative. -## ilike {#ilike} +## ilike Case insensitive variant of [like](https://clickhouse.com/docs/en/sql-reference/functions/string-search-functions/#function-like) function. You can use `ILIKE` operator instead of the `ilike` function. @@ -565,13 +565,13 @@ Result: - [like](https://clickhouse.com/docs/en/sql-reference/functions/string-search-functions/#function-like) -## ngramDistance(haystack, needle) {#ngramdistancehaystack-needle} +## ngramDistance(haystack, needle) Calculates the 4-gram distance between `haystack` and `needle`: counts the symmetric difference between two multisets of 4-grams and normalizes it by the sum of their cardinalities. Returns float number from 0 to 1 – the closer to zero, the more strings are similar to each other. If the constant `needle` or `haystack` is more than 32Kb, throws an exception. If some of the non-constant `haystack` or `needle` strings are more than 32Kb, the distance is always one. For case-insensitive search or/and in UTF-8 format use functions `ngramDistanceCaseInsensitive, ngramDistanceUTF8, ngramDistanceCaseInsensitiveUTF8`. -## ngramSearch(haystack, needle) {#ngramsearchhaystack-needle} +## ngramSearch(haystack, needle) Same as `ngramDistance` but calculates the non-symmetric difference between `needle` and `haystack` – the number of n-grams from needle minus the common number of n-grams normalized by the number of `needle` n-grams. The closer to one, the more likely `needle` is in the `haystack`. Can be useful for fuzzy string search. @@ -581,7 +581,7 @@ For case-insensitive search or/and in UTF-8 format use functions `ngramSearchCas For UTF-8 case we use 3-gram distance. All these are not perfectly fair n-gram distances. We use 2-byte hashes to hash n-grams and then calculate the (non-)symmetric difference between these hash tables – collisions may occur. With UTF-8 case-insensitive format we do not use fair `tolower` function – we zero the 5-th bit (starting from zero) of each codepoint byte and first bit of zeroth byte if bytes more than one – this works for Latin and mostly for all Cyrillic letters. ::: -## countSubstrings {#countSubstrings} +## countSubstrings Returns the number of substring occurrences. @@ -649,7 +649,7 @@ Result: └────────────────────────────────────────┘ ``` -## countSubstringsCaseInsensitive {#countSubstringsCaseInsensitive} +## countSubstringsCaseInsensitive Returns the number of substring occurrences case-insensitive. @@ -715,7 +715,7 @@ Result: └───────────────────────────────────────────────────────┘ ``` -## countSubstringsCaseInsensitiveUTF8 {#countSubstringsCaseInsensitiveUTF8} +## countSubstringsCaseInsensitiveUTF8 Returns the number of substring occurrences in `UTF-8` case-insensitive. @@ -767,7 +767,7 @@ Result: └────────────────────────────────────────────────────────────┘ ``` -## countMatches(haystack, pattern) {#countmatcheshaystack-pattern} +## countMatches(haystack, pattern) Returns the number of regular expression matches for a `pattern` in a `haystack`. 
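For example (a small sketch):

```sql
-- '\\.' is a regex matching a literal dot, which occurs twice.
SELECT countMatches('foo.bar.baz', '\\.') AS dots;
-- 2
```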
diff --git a/docs/en/sql-reference/functions/time-window-functions.md b/docs/en/sql-reference/functions/time-window-functions.md index dbe1f538a96..eea785e783e 100644 --- a/docs/en/sql-reference/functions/time-window-functions.md +++ b/docs/en/sql-reference/functions/time-window-functions.md @@ -3,11 +3,11 @@ sidebar_position: 68 sidebar_label: Time Window --- -# Time Window Functions {#time-window-functions} +# Time Window Functions Time window functions return the inclusive lower and exclusive upper bound of the corresponding window. The functions for working with WindowView are listed below: -## tumble {#time-window-functions-tumble} +## tumble A tumbling time window assigns records to non-overlapping, continuous windows with a fixed duration (`interval`). @@ -42,7 +42,7 @@ Result: └───────────────────────────────────────────────┘ ``` -## hop {#time-window-functions-hop} +## hop A hopping time window has a fixed duration (`window_interval`) and hops by a specified hop interval (`hop_interval`). If the `hop_interval` is smaller than the `window_interval`, hopping windows overlap. Thus, records can be assigned to multiple windows. @@ -79,7 +79,7 @@ Result: └───────────────────────────────────────────────────────────┘ ``` -## tumbleStart {#time-window-functions-tumblestart} +## tumbleStart Returns the inclusive lower bound of the corresponding tumbling window. @@ -88,7 +88,7 @@ tumbleStart(bounds_tuple); tumbleStart(time_attr, interval [, timezone]); ``` -## tumbleEnd {#time-window-functions-tumbleend} +## tumbleEnd Returns the exclusive upper bound of the corresponding tumbling window. @@ -97,7 +97,7 @@ tumbleEnd(bounds_tuple); tumbleEnd(time_attr, interval [, timezone]); ``` -## hopStart {#time-window-functions-hopstart} +## hopStart Returns the inclusive lower bound of the corresponding hopping window. @@ -106,7 +106,7 @@ hopStart(bounds_tuple); hopStart(time_attr, hop_interval, window_interval [, timezone]); ``` -## hopEnd {#time-window-functions-hopend} +## hopEnd Returns the exclusive upper bound of the corresponding hopping window. diff --git a/docs/en/sql-reference/functions/tuple-functions.md b/docs/en/sql-reference/functions/tuple-functions.md index cfce02f4d31..64bdf9b9f33 100644 --- a/docs/en/sql-reference/functions/tuple-functions.md +++ b/docs/en/sql-reference/functions/tuple-functions.md @@ -3,9 +3,9 @@ sidebar_position: 66 sidebar_label: Tuples --- -# Functions for Working with Tuples {#tuple-functions} +# Functions for Working with Tuples -## tuple {#tuple} +## tuple A function that allows grouping multiple columns. For columns with the types T1, T2, …, it returns a Tuple(T1, T2, …) type tuple containing these columns. There is no cost to execute the function. @@ -19,7 +19,7 @@ The function implements the operator `(x, y, …)`. tuple(x, y, …) ``` -## tupleElement {#tupleelement} +## tupleElement A function that allows getting a column from a tuple. ‘N’ is the column index, starting from 1. ‘N’ must be a constant, strictly positive integer no greater than the size of the tuple. @@ -33,7 +33,7 @@ The function implements the operator `x.N`. tupleElement(tuple, n) ``` -## untuple {#untuple} +## untuple Performs syntactic substitution of [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2) elements in the call location.
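For instance, a minimal sketch: the tuple's elements become separate columns in the result:

```sql
SELECT untuple((3, 'hello'));
-- returns two columns with values 3 and 'hello'
```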
@@ -113,7 +113,7 @@ Result: - [Tuple](../../sql-reference/data-types/tuple.md) -## tupleHammingDistance {#tuplehammingdistance} +## tupleHammingDistance Returns the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) between two tuples of the same size. @@ -182,7 +182,7 @@ Result: └─────────────────┘ ``` -## tupleToNameValuePairs {#tupletonamevaluepairs} +## tupleToNameValuePairs Turns a named tuple into an array of (name, value) pairs. For a `Tuple(a T, b T, ..., c T)` returns `Array(Tuple(String, T), ...)` in which the `Strings` represents the named fields of the tuple and `T` are the values associated with those names. All values in the tuple should be of the same type. @@ -258,7 +258,7 @@ Result: └───────────────────────────────────────┘ ``` -## tuplePlus {#tupleplus} +## tuplePlus Calculates the sum of corresponding values of two tuples of the same size. @@ -297,7 +297,7 @@ Result: └───────────────────────────┘ ``` -## tupleMinus {#tupleminus} +## tupleMinus Calculates the subtraction of corresponding values of two tuples of the same size. @@ -336,7 +336,7 @@ Result: └────────────────────────────┘ ``` -## tupleMultiply {#tuplemultiply} +## tupleMultiply Calculates the multiplication of corresponding values of two tuples of the same size. @@ -373,7 +373,7 @@ Result: └───────────────────────────────┘ ``` -## tupleDivide {#tupledivide} +## tupleDivide Calculates the division of corresponding values of two tuples of the same size. Note that division by zero will return `inf`. @@ -410,7 +410,7 @@ Result: └─────────────────────────────┘ ``` -## tupleNegate {#tuplenegate} +## tupleNegate Calculates the negation of the tuple values. @@ -446,7 +446,7 @@ Result: └─────────────────────┘ ``` -## tupleMultiplyByNumber {#tuplemultiplybynumber} +## tupleMultiplyByNumber Returns a tuple with all values multiplied by a number. @@ -483,7 +483,7 @@ Result: └─────────────────────────────────────┘ ``` -## tupleDivideByNumber {#tupledividebynumber} +## tupleDivideByNumber Returns a tuple with all values divided by a number. Note that division by zero will return `inf`. @@ -520,7 +520,7 @@ Result: └──────────────────────────────────┘ ``` -## dotProduct {#dotproduct} +## dotProduct Calculates the scalar product of two tuples of the same size. @@ -559,7 +559,7 @@ Result: └────────────────────────────┘ ``` -## L1Norm {#l1norm} +## L1Norm Calculates the sum of absolute values of a tuple. @@ -597,7 +597,7 @@ Result: └────────────────┘ ``` -## L2Norm {#l2norm} +## L2Norm Calculates the square root of the sum of the squares of the tuple values. @@ -635,7 +635,7 @@ Result: └──────────────────┘ ``` -## LinfNorm {#linfnorm} +## LinfNorm Calculates the maximum of absolute values of a tuple. @@ -673,7 +673,7 @@ Result: └───────────────────┘ ``` -## LpNorm {#lpnorm} +## LpNorm Calculates the root of `p`-th power of the sum of the absolute values of a tuple in the power of `p`. @@ -712,7 +712,7 @@ Result: └────────────────────┘ ``` -## L1Distance {#l1distance} +## L1Distance Calculates the distance between two points (the values of the tuples are the coordinates) in `L1` space (1-norm ([taxicab geometry](https://en.wikipedia.org/wiki/Taxicab_geometry) distance)). @@ -751,7 +751,7 @@ Result: └────────────────────────────┘ ``` -## L2Distance {#l2distance} +## L2Distance Calculates the distance between two points (the values of the tuples are the coordinates) in Euclidean space ([Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance)). 
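For example, the classic 3-4-5 right triangle (a quick sketch):

```sql
SELECT L2Distance((0, 0), (3, 4)) AS d;
-- 5
```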
@@ -790,7 +790,7 @@ Result: └────────────────────────────┘ ``` -## LinfDistance {#linfdistance} +## LinfDistance Calculates the distance between two points (the values of the tuples are the coordinates) in `L_{inf}` space ([maximum norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#Maximum_norm_(special_case_of:_infinity_norm,_uniform_norm,_or_supremum_norm))). @@ -829,7 +829,7 @@ Result: └──────────────────────────────┘ ``` -## LpDistance {#lpdistance} +## LpDistance Calculates the distance between two points (the values of the tuples are the coordinates) in `Lp` space ([p-norm distance](https://en.wikipedia.org/wiki/Norm_(mathematics)#p-norm)). @@ -869,7 +869,7 @@ Result: └───────────────────────────────┘ ``` -## L1Normalize {#l1normalize} +## L1Normalize Calculates the unit vector of a given vector (the values of the tuple are the coordinates) in `L1` space ([taxicab geometry](https://en.wikipedia.org/wiki/Taxicab_geometry)). @@ -907,7 +907,7 @@ Result: └─────────────────────────────────────────┘ ``` -## L2Normalize {#l2normalize} +## L2Normalize Calculates the unit vector of a given vector (the values of the tuple are the coordinates) in Euclidean space (using [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance)). @@ -945,7 +945,7 @@ Result: └─────────────────────┘ ``` -## LinfNormalize {#linfnormalize} +## LinfNormalize Calculates the unit vector of a given vector (the values of the tuple are the coordinates) in `L_{inf}` space (using [maximum norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#Maximum_norm_(special_case_of:_infinity_norm,_uniform_norm,_or_supremum_norm))). @@ -983,7 +983,7 @@ Result: └───────────────────────┘ ``` -## LpNormalize {#lpnormalize} +## LpNormalize Calculates the unit vector of a given vector (the values of the tuple are the coordinates) in `Lp` space (using [p-norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#p-norm)). @@ -1022,7 +1022,7 @@ Result: └─────────────────────────────────────────┘ ``` -## cosineDistance {#cosinedistance} +## cosineDistance Calculates the cosine distance between two vectors (the values of the tuples are the coordinates). The less the returned value is, the more similar are the vectors. diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md index a0d62ff5ecb..b6b7a057894 100644 --- a/docs/en/sql-reference/functions/tuple-map-functions.md +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -3,9 +3,9 @@ sidebar_position: 46 sidebar_label: Working with maps --- -# Functions for maps {#functions-for-working-with-tuple-maps} +# Functions for maps -## map {#function-map} +## map Arranges `key:value` pairs into [Map(key, value)](../../sql-reference/data-types/map.md) data type. @@ -17,8 +17,8 @@ map(key1, value1[, key2, value2, ...]) **Arguments** -- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md). -- `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) or [Array](../../sql-reference/data-types/array.md). +- `key` — The key part of the pair. 
[String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UUID](../../sql-reference/data-types/uuid.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), [Date32](../../sql-reference/data-types/date32.md), [Enum](../../sql-reference/data-types/enum.md). +- `value` — The value part of the pair. Arbitrary type, including [Map](../../sql-reference/data-types/map.md) and [Array](../../sql-reference/data-types/array.md). **Returned value** @@ -66,7 +66,7 @@ Result: - [Map(key, value)](../../sql-reference/data-types/map.md) data type -## mapAdd {#function-mapadd} +## mapAdd Collects all the keys and sums the corresponding values. @@ -114,7 +114,7 @@ Result: └──────────────────────────────┘ ``` -## mapSubtract {#function-mapsubtract} +## mapSubtract Collects all the keys and subtracts the corresponding values. @@ -162,7 +162,7 @@ Result: └───────────────────────────────────┘ ``` -## mapPopulateSeries {#function-mappopulateseries} +## mapPopulateSeries Fills missing keys in the maps (key and value array pair), where keys are integers. Also, it supports specifying the max key, which is used to extend the keys array. @@ -225,7 +225,7 @@ Result: └─────────────────────────────────────────┘ ``` -## mapContains {#mapcontains} +## mapContains Determines whether the `map` contains the `key` parameter. @@ -268,7 +268,7 @@ Result: └────────────────────────┘ ``` -## mapKeys {#mapkeys} +## mapKeys Returns all keys from the `map` parameter. @@ -311,7 +311,7 @@ Result: └───────────────────────┘ ``` -## mapValues {#mapvalues} +## mapValues Returns all values from the `map` parameter. @@ -354,7 +354,7 @@ Result: └──────────────────┘ ``` -## mapContainsKeyLike {#mapContainsKeyLike} +## mapContainsKeyLike **Syntax** @@ -392,7 +392,7 @@ Result: └─────────────────────────────┘ ``` -## mapExtractKeyLike {#mapExtractKeyLike} +## mapExtractKeyLike **Syntax** diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index de6ca769589..09cade0125d 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -3,15 +3,15 @@ sidebar_position: 38 sidebar_label: Type Conversion --- -# Type Conversion Functions {#type-conversion-functions} +# Type Conversion Functions -## Common Issues of Numeric Conversions {#numeric-conversion-issues} +## Common Issues of Numeric Conversions When you convert a value from one data type to another, you should remember that in the common case it is an unsafe operation that can lead to data loss. Data loss can occur if you try to fit a value from a larger data type into a smaller one, or if you convert values between different data types. ClickHouse has the [same behavior as C++ programs](https://en.cppreference.com/w/cpp/language/implicit_conversion). -## toInt(8\|16\|32\|64\|128\|256) {#toint8163264128256} +## toInt(8\|16\|32\|64\|128\|256) Converts an input value to the [Int](../../sql-reference/data-types/int-uint.md) data type. 
This function family includes: @@ -50,7 +50,7 @@ Result: └──────────────────────┴─────────────┴───────────────┴─────────────┘ ``` -## toInt(8\|16\|32\|64\|128\|256)OrZero {#toint8163264orzero} +## toInt(8\|16\|32\|64\|128\|256)OrZero It takes an argument of type String and tries to parse it into Int (8 \| 16 \| 32 \| 64 \| 128 \| 256). If it fails, it returns 0. @@ -70,7 +70,7 @@ Result: └─────────────────────────┴───────────────────────────┘ ``` -## toInt(8\|16\|32\|64\|128\|256)OrNull {#toint8163264128256ornull} +## toInt(8\|16\|32\|64\|128\|256)OrNull It takes an argument of type String and tries to parse it into Int (8 \| 16 \| 32 \| 64 \| 128 \| 256). If it fails, it returns NULL. @@ -90,7 +90,7 @@ Result: └─────────────────────────┴───────────────────────────┘ ``` -## toInt(8\|16\|32\|64\|128\|256)OrDefault {#toint8163264128256orDefault} +## toInt(8\|16\|32\|64\|128\|256)OrDefault It takes an argument of type String and tries to parse it into Int (8 \| 16 \| 32 \| 64 \| 128 \| 256). If it fails, it returns the default value of the type. @@ -111,7 +111,7 @@ Result: ``` -## toUInt(8\|16\|32\|64\|256) {#touint8163264256} +## toUInt(8\|16\|32\|64\|256) Converts an input value to the [UInt](../../sql-reference/data-types/int-uint.md) data type. This function family includes: @@ -149,39 +149,39 @@ Result: └─────────────────────┴───────────────┴────────────────┴──────────────┘ ``` -## toUInt(8\|16\|32\|64\|256)OrZero {#touint8163264256orzero} +## toUInt(8\|16\|32\|64\|256)OrZero -## toUInt(8\|16\|32\|64\|256)OrNull {#touint8163264256ornull} +## toUInt(8\|16\|32\|64\|256)OrNull -## toUInt(8\|16\|32\|64\|256)OrDefault {#touint8163264256ordefault} +## toUInt(8\|16\|32\|64\|256)OrDefault -## toFloat(32\|64) {#tofloat3264} +## toFloat(32\|64) -## toFloat(32\|64)OrZero {#tofloat3264orzero} +## toFloat(32\|64)OrZero -## toFloat(32\|64)OrNull {#tofloat3264ornull} +## toFloat(32\|64)OrNull -## toFloat(32\|64)OrDefault {#tofloat3264ordefault} +## toFloat(32\|64)OrDefault -## toDate {#todate} +## toDate Alias: `DATE`. -## toDateOrZero {#todateorzero} +## toDateOrZero -## toDateOrNull {#todateornull} +## toDateOrNull -## toDateOrDefault {#todateordefault} +## toDateOrDefault -## toDateTime {#todatetime} +## toDateTime -## toDateTimeOrZero {#todatetimeorzero} +## toDateTimeOrZero -## toDateTimeOrNull {#todatetimeornull} +## toDateTimeOrNull -## toDateTimeOrDefault {#todatetimeordefault} +## toDateTimeOrDefault -## toDate32 {#todate32} +## toDate32 Converts the argument to the [Date32](../../sql-reference/data-types/date32.md) data type. If the value is outside the range, it returns the border values supported by `Date32`. If the argument has the [Date](../../sql-reference/data-types/date.md) type, the borders of `Date` are taken into account. @@ -239,7 +239,7 @@ SELECT toDate32(toDate('1924-01-01')) AS value, toTypeName(value); └────────────┴────────────────────────────────────────────┘ ``` -## toDate32OrZero {#todate32-or-zero} +## toDate32OrZero The same as [toDate32](#todate32) but returns the min value of [Date32](../../sql-reference/data-types/date32.md) if an invalid argument is received. @@ -259,7 +259,7 @@ Result: └──────────────────────────────┴────────────────────┘ ``` -## toDate32OrNull {#todate32-or-null} +## toDate32OrNull The same as [toDate32](#todate32) but returns `NULL` if an invalid argument is received. 
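As a quick illustrative sketch of the `OrNull` behaviour described above (expected results shown as comments):

``` sql
SELECT
    toDate32OrNull('1955-01-01') AS valid,   -- 1955-01-01
    toDate32OrNull('not a date') AS invalid; -- NULL
```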
@@ -279,7 +279,7 @@ Result: └──────────────────────────────┴────────────────────┘ ``` -## toDate32OrDefault {#todate32-or-default} +## toDate32OrDefault Converts the argument to the [Date32](../../sql-reference/data-types/date32.md) data type. If the value is outside the range, it returns the lower border value supported by `Date32`. If the argument has the [Date](../../sql-reference/data-types/date.md) type, the borders of `Date` are taken into account. Returns the default value if an invalid argument is received. @@ -301,7 +301,7 @@ Result: └─────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘ ``` -## toDecimal(32\|64\|128\|256) {#todecimal3264128256} +## toDecimal(32\|64\|128\|256) Converts `value` to the [Decimal](../../sql-reference/data-types/decimal.md) data type with precision of `S`. The `value` can be a number or a string. The `S` (scale) parameter specifies the number of decimal places. @@ -310,7 +310,7 @@ Converts `value` to the [Decimal](../../sql-reference/data-types/decimal.md) dat - `toDecimal128(value, S)` - `toDecimal256(value, S)` -## toDecimal(32\|64\|128\|256)OrNull {#todecimal3264128256ornull} +## toDecimal(32\|64\|128\|256)OrNull Converts an input string to a [Nullable(Decimal(P,S))](../../sql-reference/data-types/decimal.md) data type value. This family of functions includes: @@ -364,7 +364,7 @@ Result: ``` -## toDecimal(32\|64\|128\|256)OrDefault {#todecimal3264128256ordefault} +## toDecimal(32\|64\|128\|256)OrDefault Converts an input string to a [Decimal(P,S)](../../sql-reference/data-types/decimal.md) data type value. This family of functions includes: @@ -417,7 +417,7 @@ Result: └─────┴───────────────────────────────────────────────────────┘ ``` -## toDecimal(32\|64\|128\|256)OrZero {#todecimal3264128256orzero} +## toDecimal(32\|64\|128\|256)OrZero Converts an input value to the [Decimal(P,S)](../../sql-reference/data-types/decimal.md) data type. This family of functions includes: @@ -470,7 +470,7 @@ Result: └──────┴────────────────────────────────────────────────────┘ ``` -## toString {#tostring} +## toString Functions for converting between numbers, strings (but not fixed strings), dates, and dates with times. All these functions accept one argument. @@ -515,12 +515,12 @@ Result: Also see the `toUnixTimestamp` function. -## toFixedString(s, N) {#tofixedstrings-n} +## toFixedString(s, N) Converts a String type argument to a FixedString(N) type (a string with fixed length N). N must be a constant. If the string has fewer bytes than N, it is padded with null bytes to the right. If the string has more bytes than N, an exception is thrown. -## toStringCutToZero(s) {#tostringcuttozeros} +## toStringCutToZero(s) Accepts a String or FixedString argument. Returns the String with the content truncated at the first zero byte found. @@ -554,27 +554,27 @@ Result: └────────────┴───────┘ ``` -## reinterpretAsUInt(8\|16\|32\|64) {#reinterpretasuint8163264} +## reinterpretAsUInt(8\|16\|32\|64) -## reinterpretAsInt(8\|16\|32\|64) {#reinterpretasint8163264} +## reinterpretAsInt(8\|16\|32\|64) -## reinterpretAsFloat(32\|64) {#reinterpretasfloat3264} +## reinterpretAsFloat(32\|64) -## reinterpretAsDate {#reinterpretasdate} +## reinterpretAsDate -## reinterpretAsDateTime {#reinterpretasdatetime} +## reinterpretAsDateTime These functions accept a string and interpret the bytes placed at the beginning of the string as a number in host order (little endian). 
If the string isn’t long enough, the functions work as if the string is padded with the necessary number of null bytes. If the string is longer than needed, the extra bytes are ignored. A date is interpreted as the number of days since the beginning of the Unix Epoch, and a date with time is interpreted as the number of seconds since the beginning of the Unix Epoch. -## reinterpretAsString {#type_conversion_functions-reinterpretAsString} +## reinterpretAsString This function accepts a number, date, or date with time, and returns a string containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a string that is one byte long. -## reinterpretAsFixedString {#reinterpretasfixedstring} +## reinterpretAsFixedString This function accepts a number, date, or date with time, and returns a FixedString containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a FixedString that is one byte long. -## reinterpretAsUUID {#reinterpretasuuid} +## reinterpretAsUUID Accepts a 16-byte string and returns a UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the function works as if the string is padded with the necessary number of null bytes to the end. If the string is longer than 16 bytes, the extra bytes at the end are ignored. @@ -630,7 +630,7 @@ Result: └─────────────────────┘ ``` -## reinterpret(x, T) {#type_conversion_function-reinterpret} +## reinterpret(x, T) Uses the same source in-memory byte sequence for the `x` value and reinterprets it as the destination type. @@ -666,7 +666,7 @@ Result: └─────────────┴──────────────┴───────────────┘ ``` -## CAST(x, T) {#type_conversion_function-cast} +## CAST(x, T) Converts an input value to the specified data type. Unlike the [reinterpret](#type_conversion_function-reinterpret) function, `CAST` tries to present the same value using the new data type. If the conversion cannot be done, an exception is raised. Several syntax variants are supported. @@ -771,7 +771,7 @@ Result: - [cast_keep_nullable](../../operations/settings/settings.md#cast_keep_nullable) setting -## accurateCast(x, T) {#type_conversion_function-accurate-cast} +## accurateCast(x, T) Converts `x` to the `T` data type. @@ -805,7 +805,7 @@ Result: Code: 70. DB::Exception: Received from localhost:9000. DB::Exception: Value in column Int8 cannot be safely converted into type UInt8: While processing accurateCast(-1, 'UInt8') AS uint8. ``` -## accurateCastOrNull(x, T) {#type_conversion_function-accurate-cast_or_null} +## accurateCastOrNull(x, T) Converts the input value `x` to the specified data type `T`. Always returns a [Nullable](../../sql-reference/data-types/nullable.md) type and returns [NULL](../../sql-reference/syntax.md#null-literal) if the cast value is not representable in the target type. @@ -858,7 +858,7 @@ Result: ``` -## accurateCastOrDefault(x, T[, default_value]) {#type_conversion_function-accurate-cast_or_default} +## accurateCastOrDefault(x, T[, default_value]) Converts the input value `x` to the specified data type `T`. Returns the default type value, or `default_value` if specified, when the cast value is not representable in the target type. 
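A minimal sketch of the difference between the implicit default and an explicit `default_value` (expected results shown as comments):

``` sql
SELECT
    accurateCastOrDefault(-1, 'UInt8')    AS implicit, -- 0: the default value of UInt8
    accurateCastOrDefault(-1, 'UInt8', 5) AS explicit; -- 5: the supplied default_value
```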
@@ -914,7 +914,7 @@ Result: └───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘ ``` -## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) {#function-tointerval} +## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) Converts a Number type argument to an [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type. @@ -961,8 +961,8 @@ Result: └───────────────────────────┴──────────────────────────────┘ ``` -## parseDateTimeBestEffort {#parsedatetimebesteffort} -## parseDateTime32BestEffort {#parsedatetime32besteffort} +## parseDateTimeBestEffort +## parseDateTime32BestEffort Converts a date and time in the [String](../../sql-reference/data-types/string.md) representation to the [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) data type. @@ -1076,7 +1076,7 @@ Result: - [toDate](#todate) - [toDateTime](#todatetime) -## parseDateTimeBestEffortUS {#parsedatetimebesteffortUS} +## parseDateTimeBestEffortUS This function is similar to [parseDateTimeBestEffort](#parsedatetimebesteffort); the only difference is that it prefers the US date format (`MM/DD/YYYY` etc.) in case of ambiguity. @@ -1150,17 +1150,17 @@ Result: └───────────────────────────┘ ``` -## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} -## parseDateTime32BestEffortOrNull {#parsedatetime32besteffortornull} +## parseDateTimeBestEffortOrNull +## parseDateTime32BestEffortOrNull Same as [parseDateTimeBestEffort](#parsedatetimebesteffort), except that it returns `NULL` when it encounters a date format that cannot be processed. -## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} -## parseDateTime32BestEffortOrZero {#parsedatetime32besteffortorzero} +## parseDateTimeBestEffortOrZero +## parseDateTime32BestEffortOrZero Same as [parseDateTimeBestEffort](#parsedatetimebesteffort), except that it returns a zero date or zero date-time when it encounters a date format that cannot be processed. -## parseDateTimeBestEffortUSOrNull {#parsedatetimebesteffortusornull} +## parseDateTimeBestEffortUSOrNull Same as the [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function, except that it returns `NULL` when it encounters a date format that cannot be processed. @@ -1246,7 +1246,7 @@ Result: └─────────────────────────────────┘ ``` -## parseDateTimeBestEffortUSOrZero {#parsedatetimebesteffortusorzero} +## parseDateTimeBestEffortUSOrZero Same as the [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function, except that it returns a zero date (`1970-01-01`) or zero date with time (`1970-01-01 00:00:00`) when it encounters a date format that cannot be processed. @@ -1332,7 +1332,7 @@ Result: └─────────────────────────────────┘ ``` -## parseDateTime64BestEffort {#parsedatetime64besteffort} +## parseDateTime64BestEffort Same as the [parseDateTimeBestEffort](#parsedatetimebesteffort) function, but it also parses milliseconds and microseconds and returns the [DateTime64](../../sql-reference/data-types/datetime64.md) data type. @@ -1378,16 +1378,16 @@ Result: └────────────────────────────┴────────────────────────────────┘ ``` -## parseDateTime64BestEffortOrNull {#parsedatetime32besteffortornull} +## parseDateTime64BestEffortOrNull Same as [parseDateTime64BestEffort](#parsedatetime64besteffort), except that it returns `NULL` when it encounters a date format that cannot be processed. 
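For example, a minimal sketch contrasting a parsable and an unparsable input (expected results shown as comments; the fractional precision depends on the input):

``` sql
SELECT
    parseDateTime64BestEffortOrNull('2021-01-01 01:01:00.123') AS ok,  -- 2021-01-01 01:01:00.123
    parseDateTime64BestEffortOrNull('not a date')               AS bad; -- NULL
```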
-## parseDateTime64BestEffortOrZero {#parsedatetime64besteffortorzero} +## parseDateTime64BestEffortOrZero Same as [parseDateTime64BestEffort](#parsedatetime64besteffort), except that it returns a zero date or zero date-time when it encounters a date format that cannot be processed. -## toLowCardinality {#tolowcardinality} +## toLowCardinality Converts the input parameter to the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) version of the same data type. @@ -1425,11 +1425,11 @@ Result: └───────────────────────┘ ``` -## toUnixTimestamp64Milli {#tounixtimestamp64milli} +## toUnixTimestamp64Milli -## toUnixTimestamp64Micro {#tounixtimestamp64micro} +## toUnixTimestamp64Micro -## toUnixTimestamp64Nano {#tounixtimestamp64nano} +## toUnixTimestamp64Nano Converts a `DateTime64` to an `Int64` value with fixed sub-second precision. The input value is scaled up or down appropriately depending on its precision. @@ -1483,11 +1483,11 @@ Result: └─────────────────────────────┘ ``` -## fromUnixTimestamp64Milli {#fromunixtimestamp64milli} +## fromUnixTimestamp64Milli -## fromUnixTimestamp64Micro {#fromunixtimestamp64micro} +## fromUnixTimestamp64Micro -## fromUnixTimestamp64Nano {#fromunixtimestamp64nano} +## fromUnixTimestamp64Nano Converts an `Int64` to a `DateTime64` value with fixed sub-second precision and optional timezone. The input value is scaled up or down appropriately depending on its precision. Please note that the input value is treated as a UTC timestamp, not a timestamp at the given (or implicit) timezone. @@ -1523,7 +1523,7 @@ Result: └──────────────────────────────────────┘ ``` -## formatRow {#formatrow} +## formatRow Converts arbitrary expressions into a string via the given format. @@ -1564,7 +1564,7 @@ Result: └──────────────────────────────────┘ ``` -## formatRowNoNewline {#formatrownonewline} +## formatRowNoNewline Converts arbitrary expressions into a string via the given format. The function trims the last `\n` if any. @@ -1602,7 +1602,7 @@ Result: └───────────────────────────────────────────┘ ``` -## snowflakeToDateTime {#snowflaketodatetime} +## snowflakeToDateTime Extracts the time from a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) in [DateTime](../data-types/datetime.md) format. @@ -1638,7 +1638,7 @@ Result: └──────────────────────────────────────────────────────────────────┘ ``` -## snowflakeToDateTime64 {#snowflaketodatetime64} +## snowflakeToDateTime64 Extracts the time from a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) in [DateTime64](../data-types/datetime64.md) format. @@ -1674,7 +1674,7 @@ Result: └────────────────────────────────────────────────────────────────────┘ ``` -## dateTimeToSnowflake {#datetimetosnowflake} +## dateTimeToSnowflake Converts a [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time. @@ -1708,7 +1708,7 @@ Result: └─────────────────────────┘ ``` -## dateTime64ToSnowflake {#datetime64tosnowflake} +## dateTime64ToSnowflake Converts a [DateTime64](../data-types/datetime64.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time. 
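To illustrate, a minimal sketch (the expected ID shown in the comment assumes the standard Snowflake epoch and this exact input timestamp):

``` sql
WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64
SELECT dateTime64ToSnowflake(dt64); -- 1426860704886947840
```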
diff --git a/docs/en/sql-reference/functions/url-functions.md b/docs/en/sql-reference/functions/url-functions.md index c91029c4fce..a46dda8269c 100644 --- a/docs/en/sql-reference/functions/url-functions.md +++ b/docs/en/sql-reference/functions/url-functions.md @@ -3,21 +3,21 @@ sidebar_position: 54 sidebar_label: URLs --- -# Functions for Working with URLs {#functions-for-working-with-urls} +# Functions for Working with URLs None of these functions follow the RFC. They are maximally simplified for improved performance. -## Functions that Extract Parts of a URL {#functions-that-extract-parts-of-a-url} +## Functions that Extract Parts of a URL If the relevant part isn’t present in a URL, an empty string is returned. -### protocol {#protocol} +### protocol Extracts the protocol from a URL. Examples of typical returned values: http, https, ftp, mailto, tel, magnet… -### domain {#domain} +### domain Extracts the hostname from a URL. @@ -64,11 +64,11 @@ SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk'); └────────────────────────────────────────────────────────┘ ``` -### domainWithoutWWW {#domainwithoutwww} +### domainWithoutWWW Returns the domain and removes no more than one ‘www.’ from the beginning of it, if present. -### topLevelDomain {#topleveldomain} +### topLevelDomain Extracts the top-level domain from a URL. @@ -107,11 +107,11 @@ SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk'); └────────────────────────────────────────────────────────────────────┘ ``` -### firstSignificantSubdomain {#firstsignificantsubdomain} +### firstSignificantSubdomain Returns the “first significant subdomain”. The first significant subdomain is a second-level domain if it is ‘com’, ‘net’, ‘org’, or ‘co’. Otherwise, it is a third-level domain. For example, `firstSignificantSubdomain('https://news.clickhouse.com/') = 'clickhouse', firstSignificantSubdomain('https://news.clickhouse.com.tr/') = 'clickhouse'`. The list of “insignificant” second-level domains and other implementation details may change in the future. -### cutToFirstSignificantSubdomain {#cuttofirstsignificantsubdomain} +### cutToFirstSignificantSubdomain Returns the part of the domain that includes top-level subdomains up to the “first significant subdomain” (see the explanation above). @@ -121,7 +121,7 @@ For example: - `cutToFirstSignificantSubdomain('www.tr') = 'tr'`. - `cutToFirstSignificantSubdomain('tr') = ''`. -### cutToFirstSignificantSubdomainWithWWW {#cuttofirstsignificantsubdomainwithwww} +### cutToFirstSignificantSubdomainWithWWW Returns the part of the domain that includes top-level subdomains up to the “first significant subdomain”, without stripping "www". @@ -131,7 +131,7 @@ For example: - `cutToFirstSignificantSubdomainWithWWW('www.tr') = 'www.tr'`. - `cutToFirstSignificantSubdomainWithWWW('tr') = ''`. -### cutToFirstSignificantSubdomainCustom {#cuttofirstsignificantsubdomaincustom} +### cutToFirstSignificantSubdomainCustom Returns the part of the domain that includes top-level subdomains up to the first significant subdomain. Accepts a custom [TLD list](https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains) name. @@ -185,7 +185,7 @@ Result: - [firstSignificantSubdomain](#firstsignificantsubdomain). -### cutToFirstSignificantSubdomainCustomWithWWW {#cuttofirstsignificantsubdomaincustomwithwww} +### cutToFirstSignificantSubdomainCustomWithWWW Returns the part of the domain that includes top-level subdomains up to the first significant subdomain without stripping `www`. 
Accepts a custom TLD list name. @@ -239,7 +239,7 @@ Result: - [firstSignificantSubdomain](#firstsignificantsubdomain). -### firstSignificantSubdomainCustom {#firstsignificantsubdomaincustom} +### firstSignificantSubdomainCustom Returns the first significant subdomain. Accepts a custom TLD list name. @@ -293,47 +293,47 @@ Result: - [firstSignificantSubdomain](#firstsignificantsubdomain). -### port(URL\[, default_port = 0\]) {#port} +### port(URL\[, default_port = 0\]) Returns the port or `default_port` if there is no port in the URL (or in case of a validation error). -### path {#path} +### path Returns the path. Example: `/top/news.html`. The path does not include the query string. -### pathFull {#pathfull} +### pathFull The same as above, but including the query string and fragment. Example: /top/news.html?page=2#comments -### queryString {#querystring} +### queryString Returns the query string. Example: `page=1&lr=213`. The query string does not include the initial question mark, nor `#` and everything after `#`. -### fragment {#fragment} +### fragment Returns the fragment identifier. The fragment does not include the initial hash symbol. -### queryStringAndFragment {#querystringandfragment} +### queryStringAndFragment Returns the query string and fragment identifier. Example: `page=1#29390`. -### extractURLParameter(URL, name) {#extracturlparameterurl-name} +### extractURLParameter(URL, name) Returns the value of the ‘name’ parameter in the URL, if present. Otherwise, it returns an empty string. If there are many parameters with this name, it returns the first occurrence. This function works under the assumption that the parameter name is encoded in the URL exactly the same way as in the passed argument. -### extractURLParameters(URL) {#extracturlparametersurl} +### extractURLParameters(URL) Returns an array of name=value strings corresponding to the URL parameters. The values are not decoded in any way. -### extractURLParameterNames(URL) {#extracturlparameternamesurl} +### extractURLParameterNames(URL) Returns an array of name strings corresponding to the names of URL parameters. The values are not decoded in any way. -### URLHierarchy(URL) {#urlhierarchyurl} +### URLHierarchy(URL) Returns an array containing the URL, truncated at the end by the symbols `/` and `?` in the path and query string. Consecutive separator characters are counted as one. The cut is made in the position after all the consecutive separator characters. -### URLPathHierarchy(URL) {#urlpathhierarchyurl} +### URLPathHierarchy(URL) The same as above, but without the protocol and host in the result. The `/` element (root) is not included. @@ -345,7 +345,7 @@ URLPathHierarchy('https://example.com/browse/CONV-6788') = ] ``` -### encodeURLComponent(URL) {#encodeurlcomponenturl} +### encodeURLComponent(URL) Returns the encoded URL. Example: @@ -360,7 +360,7 @@ SELECT encodeURLComponent('http://127.0.0.1:8123/?query=SELECT 1;') AS EncodedUR └──────────────────────────────────────────────────────────┘ ``` -### decodeURLComponent(URL) {#decodeurlcomponenturl} +### decodeURLComponent(URL) Returns the decoded URL. Example: @@ -375,7 +375,7 @@ SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS Decod └────────────────────────────────────────┘ ``` -### encodeURLFormComponent(URL) {#encodeurlformcomponenturl} +### encodeURLFormComponent(URL) Returns the encoded URL. Follows RFC 1866: space (` `) is encoded as plus (`+`). 
Example: @@ -390,7 +390,7 @@ SELECT encodeURLFormComponent('http://127.0.0.1:8123/?query=SELECT 1 2+3') AS En └───────────────────────────────────────────────────────────┘ ``` -### decodeURLFormComponent(URL) {#decodeurlformcomponenturl} +### decodeURLFormComponent(URL) Returns the decoded URL. Follows RFC 1866: plain plus (`+`) is decoded as space (` `). Example: @@ -405,7 +405,7 @@ SELECT decodeURLFormComponent('http://127.0.0.1:8123/?query=SELECT%201+2%2B3') A └───────────────────────────────────────────┘ ``` -### netloc {#netloc} +### netloc Extracts the network locality (`username:password@host:port`) from a URL. @@ -441,27 +441,27 @@ Result: └───────────────────────────────────────────┘ ``` -## Functions that Remove Part of a URL {#functions-that-remove-part-of-a-url} +## Functions that Remove Part of a URL If the URL does not contain the relevant part, it remains unchanged. -### cutWWW {#cutwww} +### cutWWW Removes no more than one ‘www.’ from the beginning of the URL’s domain, if present. -### cutQueryString {#cutquerystring} +### cutQueryString Removes the query string. The question mark is also removed. -### cutFragment {#cutfragment} +### cutFragment Removes the fragment identifier. The number sign is also removed. -### cutQueryStringAndFragment {#cutquerystringandfragment} +### cutQueryStringAndFragment Removes the query string and fragment identifier. The question mark and number sign are also removed. -### cutURLParameter(URL, name) {#cuturlparameterurl-name} +### cutURLParameter(URL, name) Removes the ‘name’ URL parameter, if present. This function works under the assumption that the parameter name is encoded in the URL exactly the same way as in the passed argument. diff --git a/docs/en/sql-reference/functions/uuid-functions.md b/docs/en/sql-reference/functions/uuid-functions.md index 08f281ba281..78a5ffa36a1 100644 --- a/docs/en/sql-reference/functions/uuid-functions.md +++ b/docs/en/sql-reference/functions/uuid-functions.md @@ -3,11 +3,11 @@ sidebar_position: 53 sidebar_label: UUID --- -# Functions for Working with UUID {#functions-for-working-with-uuid} +# Functions for Working with UUID The functions for working with UUID are listed below. -## generateUUIDv4 {#uuid-function-generate} +## generateUUIDv4 Generates a [UUID](../data-types/uuid.md) of [version 4](https://tools.ietf.org/html/rfc4122#section-4.4). @@ -52,7 +52,7 @@ SELECT generateUUIDv4(1), generateUUIDv4(2) └──────────────────────────────────────┴──────────────────────────────────────┘ ``` -## empty {#empty} +## empty Checks whether the input UUID is empty. @@ -94,7 +94,7 @@ Result: └─────────────────────────┘ ``` -## notEmpty {#notempty} +## notEmpty Checks whether the input UUID is non-empty. @@ -136,7 +136,7 @@ Result: └────────────────────────────┘ ``` -## toUUID (x) {#touuid-x} +## toUUID (x) Converts a String type value to the UUID type. @@ -160,7 +160,7 @@ SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid └──────────────────────────────────────┘ ``` -## toUUIDOrNull (x) {#touuidornull-x} +## toUUIDOrNull (x) It takes an argument of type String and tries to parse it into UUID. If it fails, it returns NULL. @@ -184,7 +184,7 @@ SELECT toUUIDOrNull('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid └──────┘ ``` -## toUUIDOrZero (x) {#touuidorzero-x} +## toUUIDOrZero (x) It takes an argument of type String and tries to parse it into UUID. If it fails, it returns the zero UUID. 
@@ -208,7 +208,7 @@ SELECT toUUIDOrZero('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid └──────────────────────────────────────┘ ``` -## UUIDStringToNum {#uuidstringtonum} +## UUIDStringToNum Accepts a string containing 36 characters in the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, and returns it as a set of bytes in a [FixedString(16)](../../sql-reference/data-types/fixedstring.md). @@ -234,7 +234,7 @@ SELECT └──────────────────────────────────────┴──────────────────┘ ``` -## UUIDNumToString {#uuidnumtostring} +## UUIDNumToString Accepts a [FixedString(16)](../../sql-reference/data-types/fixedstring.md) value, and returns a string containing 36 characters in text format. @@ -260,7 +260,7 @@ SELECT └──────────────────┴──────────────────────────────────────┘ ``` -## serverUUID() {#server-uuid} +## serverUUID() Returns a random and unique UUID, which is generated when the server is first started and stored forever. The result is written to the file `uuid` in the ClickHouse server directory `/var/lib/clickhouse/`. @@ -276,6 +276,6 @@ serverUUID() Type: [UUID](../data-types/uuid.md). -## See Also {#see-also} +## See Also - [dictGetUUID](../../sql-reference/functions/ext-dict-functions.md#ext_dict_functions-other) diff --git a/docs/en/sql-reference/functions/ym-dict-functions.md b/docs/en/sql-reference/functions/ym-dict-functions.md index 4fc727844e7..06f278c6abc 100644 --- a/docs/en/sql-reference/functions/ym-dict-functions.md +++ b/docs/en/sql-reference/functions/ym-dict-functions.md @@ -9,7 +9,7 @@ In order for the functions below to work, the server config must specify the pat For information about creating reference lists, see the section “Dictionaries”. -## Multiple Geobases {#multiple-geobases} +## Multiple Geobases ClickHouse supports working with multiple alternative geobases (regional hierarchies) simultaneously, in order to support various perspectives on which countries certain regions belong to. @@ -31,11 +31,11 @@ regionToCountry(RegionID, '') – Uses the default dictionary: /opt/geo/regions_ regionToCountry(RegionID, 'ua') – Uses the dictionary for the 'ua' key: /opt/geo/regions_hierarchy_ua.txt ``` -### regionToCity(id\[, geobase\]) {#regiontocityid-geobase} +### regionToCity(id\[, geobase\]) Accepts a UInt32 number – the region ID from the geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, it returns 0. -### regionToArea(id\[, geobase\]) {#regiontoareaid-geobase} +### regionToArea(id\[, geobase\]) Converts a region to an area (type 5 in the geobase). In every other way, this function is the same as ‘regionToCity’. @@ -65,7 +65,7 @@ LIMIT 15 └──────────────────────────────────────────────────────┘ ``` -### regionToDistrict(id\[, geobase\]) {#regiontodistrictid-geobase} +### regionToDistrict(id\[, geobase\]) Converts a region to a federal district (type 4 in the geobase). In every other way, this function is the same as ‘regionToCity’. @@ -95,17 +95,17 @@ LIMIT 15 └──────────────────────────────────────────────────────────┘ ``` -### regionToCountry(id\[, geobase\]) {#regiontocountryid-geobase} +### regionToCountry(id\[, geobase\]) Converts a region to a country. In every other way, this function is the same as ‘regionToCity’. Example: `regionToCountry(toUInt32(213)) = 225` converts Moscow (213) to Russia (225). -### regionToContinent(id\[, geobase\]) {#regiontocontinentid-geobase} +### regionToContinent(id\[, geobase\]) Converts a region to a continent. 
In every other way, this function is the same as ‘regionToCity’. Example: `regionToContinent(toUInt32(213)) = 10001` converts Moscow (213) to Eurasia (10001). -### regionToTopContinent(id\[, geobase\]) {#regiontotopcontinentid-geobase} +### regionToTopContinent(id\[, geobase\]) Finds the highest continent in the hierarchy for the region. @@ -127,24 +127,24 @@ regionToTopContinent(id[, geobase]) Type: `UInt32`. -### regionToPopulation(id\[, geobase\]) {#regiontopopulationid-geobase} +### regionToPopulation(id\[, geobase\]) Gets the population for a region. The population can be recorded in files with the geobase. See the section “External dictionaries”. If the population is not recorded for the region, it returns 0. In the geobase, the population might be recorded for child regions, but not for parent regions. -### regionIn(lhs, rhs\[, geobase\]) {#regioninlhs-rhs-geobase} +### regionIn(lhs, rhs\[, geobase\]) Checks whether a ‘lhs’ region belongs to a ‘rhs’ region. Returns a UInt8 number equal to 1 if it belongs, or 0 if it does not belong. The relationship is reflexive – any region also belongs to itself. -### regionHierarchy(id\[, geobase\]) {#regionhierarchyid-geobase} +### regionHierarchy(id\[, geobase\]) Accepts a UInt32 number – the region ID from the geobase. Returns an array of region IDs consisting of the passed region and all parents along the chain. Example: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`. -### regionToName(id\[, lang\]) {#regiontonameid-lang} +### regionToName(id\[, lang\]) Accepts a UInt32 number – the region ID from the geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID does not exist, an empty string is returned. diff --git a/docs/en/sql-reference/operators/exists.md b/docs/en/sql-reference/operators/exists.md index 25413790801..2e9f6f58df5 100644 --- a/docs/en/sql-reference/operators/exists.md +++ b/docs/en/sql-reference/operators/exists.md @@ -1,4 +1,4 @@ -# EXISTS {#exists-operator} +# EXISTS The `EXISTS` operator checks how many records are in the result of a subquery. If it is empty, then the operator returns `0`. Otherwise, it returns `1`. diff --git a/docs/en/sql-reference/operators/in.md b/docs/en/sql-reference/operators/in.md index 5dda097e799..709570eac2d 100644 --- a/docs/en/sql-reference/operators/in.md +++ b/docs/en/sql-reference/operators/in.md @@ -1,4 +1,4 @@ -# IN Operators {#select-in-operators} +# IN Operators The `IN`, `NOT IN`, `GLOBAL IN`, and `GLOBAL NOT IN` operators are covered separately, since their functionality is quite rich. @@ -80,7 +80,7 @@ ORDER BY EventDate ASC For each day after March 17th, count the percentage of pageviews made by users who visited the site on March 17th. A subquery in the IN clause is always run just one time on a single server. There are no dependent subqueries. -## NULL Processing {#in-null-processing} +## NULL Processing During request processing, the `IN` operator assumes that the result of an operation with [NULL](../../sql-reference/syntax.md#null-literal) always equals `0`, regardless of whether `NULL` is on the right or left side of the operator. 
`NULL` values are not included in any dataset, do not correspond to each other and cannot be compared if [transform_null_in = 0](../../operations/settings/settings.md#transform_null_in). @@ -115,7 +115,7 @@ FROM t_null └───────────────────────┘ ``` -## Distributed Subqueries {#select-distributed-subqueries} +## Distributed Subqueries There are two options for `IN` operators with subqueries (similar to JOINs): normal `IN` / `JOIN` and `GLOBAL IN` / `GLOBAL JOIN`. They differ in how they are run for distributed query processing. @@ -228,7 +228,7 @@ select * from table1 where col1 global in (select col1 from table2 where c`. -## Operators for Working with Data Sets {#operators-for-working-with-data-sets} +## Operators for Working with Data Sets See [IN operators](../../sql-reference/operators/in.md) and the [EXISTS](../../sql-reference/operators/exists.md) operator. @@ -128,9 +128,9 @@ Result: └───┘ ``` -## Operators for Working with Dates and Times {#operators-datetime} +## Operators for Working with Dates and Times -### EXTRACT {#operator-extract} +### EXTRACT ``` sql EXTRACT(part FROM date); @@ -194,7 +194,7 @@ FROM test.Orders; You can see more examples in [tests](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). -### INTERVAL {#operator-interval} +### INTERVAL Creates an [Interval](../../sql-reference/data-types/special-data-types/interval.md)-type value that should be used in arithmetical operations with [Date](../../sql-reference/data-types/date.md) and [DateTime](../../sql-reference/data-types/datetime.md)-type values. @@ -269,19 +269,19 @@ SELECT toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul') AS time, time + 60 * 6 - [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type - [toInterval](../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions -## Logical AND Operator {#logical-and-operator} +## Logical AND Operator Syntax `SELECT a AND b` — calculates logical conjunction of `a` and `b` with the function [and](../../sql-reference/functions/logical-functions.md#logical-and-function). -## Logical OR Operator {#logical-or-operator} +## Logical OR Operator Syntax `SELECT a OR b` — calculates logical disjunction of `a` and `b` with the function [or](../../sql-reference/functions/logical-functions.md#logical-or-function). -## Logical Negation Operator {#logical-negation-operator} +## Logical Negation Operator Syntax `SELECT NOT a` — calculates logical negation of `a` with the function [not](../../sql-reference/functions/logical-functions.md#logical-not-function). -## Conditional Operator {#conditional-operator} +## Conditional Operator `a ? b : c` – The `if(a, b, c)` function. @@ -289,7 +289,7 @@ Note: The conditional operator calculates the values of `b` and `c`, then checks whether condition `a` is met, and then returns the corresponding value. If `b` or `c` is an [arrayJoin()](../../sql-reference/functions/array-join.md#functions_arrayjoin) function, each row will be replicated regardless of the `a` condition. -## Conditional Expression {#operator_case} +## Conditional Expression ``` sql CASE [x] WHEN a THEN b [WHEN ... THEN ...] [ELSE c] END ``` @@ -305,36 +305,36 @@ If there is no `ELSE c` clause in the expression, the default value is `NULL`. The `transform` function does not work with `NULL`. 
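For instance, a small illustrative sketch of the `CASE` form:

``` sql
SELECT
    number,
    CASE
        WHEN number % 2 = 0 THEN 'even'
        ELSE 'odd'
    END AS parity
FROM numbers(4);
```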
-## Concatenation Operator {#concatenation-operator} +## Concatenation Operator `s1 || s2` – The `concat(s1, s2)` function. -## Lambda Creation Operator {#lambda-creation-operator} +## Lambda Creation Operator `x -> expr` – The `lambda(x, expr)` function. The following operators do not have a priority since they are brackets: -## Array Creation Operator {#array-creation-operator} +## Array Creation Operator `[x1, ...]` – The `array(x1, ...)` function. -## Tuple Creation Operator {#tuple-creation-operator} +## Tuple Creation Operator `(x1, x2, ...)` – The `tuple(x1, x2, ...)` function. -## Associativity {#associativity} +## Associativity All binary operators have left associativity. For example, `1 + 2 + 3` is transformed to `plus(plus(1, 2), 3)`. Sometimes this does not work the way you expect. For example, `SELECT 4 > 2 > 3` will result in 0. For efficiency, the `and` and `or` functions accept any number of arguments. The corresponding chains of `AND` and `OR` operators are transformed into a single call of these functions. -## Checking for `NULL` {#checking-for-null} +## Checking for `NULL` ClickHouse supports the `IS NULL` and `IS NOT NULL` operators. -### IS NULL {#operator-is-null} +### IS NULL - For [Nullable](../../sql-reference/data-types/nullable.md) type values, the `IS NULL` operator returns: - `1`, if the value is `NULL`. @@ -355,7 +355,7 @@ SELECT x+100 FROM t_null WHERE y IS NULL └──────────────┘ ``` -### IS NOT NULL {#is-not-null} +### IS NOT NULL - For [Nullable](../../sql-reference/data-types/nullable.md) type values, the `IS NOT NULL` operator returns: - `0`, if the value is `NULL`. diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index 3d22146a56b..07266eb09a3 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -3,7 +3,7 @@ sidebar_position: 37 sidebar_label: COLUMN --- -# Column Manipulations {#column-manipulations} +# Column Manipulations A set of queries that allow changing the table structure. @@ -29,7 +29,7 @@ The following actions are supported: These actions are described in detail below. -## ADD COLUMN {#alter_add-column} +## ADD COLUMN ``` sql ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after | FIRST] @@ -65,7 +65,7 @@ ToDrop UInt32 Added3 UInt32 ``` -## DROP COLUMN {#alter_drop-column} +## DROP COLUMN ``` sql DROP COLUMN [IF EXISTS] name @@ -85,7 +85,7 @@ Example: ALTER TABLE visits DROP COLUMN browser ``` -## RENAME COLUMN {#alter_rename-column} +## RENAME COLUMN ``` sql RENAME COLUMN [IF EXISTS] name to new_name @@ -101,7 +101,7 @@ Example: ALTER TABLE visits RENAME COLUMN webBrowser TO browser ``` -## CLEAR COLUMN {#alter_clear-column} +## CLEAR COLUMN ``` sql CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name @@ -117,7 +117,7 @@ Example: ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() ``` -## COMMENT COLUMN {#alter_comment-column} +## COMMENT COLUMN ``` sql COMMENT COLUMN [IF EXISTS] name 'Text comment' @@ -135,7 +135,7 @@ Example: ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.' ``` -## MODIFY COLUMN {#alter_modify-column} +## MODIFY COLUMN ``` sql MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [codec] [TTL] [AFTER name_after | FIRST] @@ -174,7 +174,7 @@ The `ALTER` query is atomic. For MergeTree tables it is also lock-free. The `ALTER` query for changing columns is replicated. 
The instructions are saved in ZooKeeper, then each replica applies them. All `ALTER` queries are run in the same order. The query waits for the appropriate actions to be completed on the other replicas. However, a query to change columns in a replicated table can be interrupted, and all actions will be performed asynchronously. -## MODIFY COLUMN REMOVE {#modify-remove} +## MODIFY COLUMN REMOVE Removes one of the column properties: `DEFAULT`, `ALIAS`, `MATERIALIZED`, `CODEC`, `COMMENT`, `TTL`. @@ -196,7 +196,7 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; - [REMOVE TTL](ttl.md). -## MATERIALIZE COLUMN {#materialize-column} +## MATERIALIZE COLUMN Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`). It is used if it is necessary to add or update a column with a complicated expression, because evaluating such an expression directly on `SELECT` execution turns out to be expensive. @@ -247,7 +247,7 @@ SELECT groupArray(x), groupArray(s) FROM tmp; - [MATERIALIZED](../../statements/create/table.md#materialized). -## Limitations {#alter-query-limitations} +## Limitations The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot. diff --git a/docs/en/sql-reference/statements/alter/comment.md b/docs/en/sql-reference/statements/alter/comment.md index af57adcf31c..a6ec72221ff 100644 --- a/docs/en/sql-reference/statements/alter/comment.md +++ b/docs/en/sql-reference/statements/alter/comment.md @@ -3,7 +3,7 @@ sidebar_position: 51 sidebar_label: COMMENT --- -# ALTER TABLE … MODIFY COMMENT {#alter-modify-comment} +# ALTER TABLE … MODIFY COMMENT Adds, modifies, or removes a comment on the table, regardless of whether it was set before. The comment change is reflected in both [system.tables](../../../operations/system-tables/tables.md) and the `SHOW CREATE TABLE` query. diff --git a/docs/en/sql-reference/statements/alter/constraint.md b/docs/en/sql-reference/statements/alter/constraint.md index c9517981ae7..9f89a010a44 100644 --- a/docs/en/sql-reference/statements/alter/constraint.md +++ b/docs/en/sql-reference/statements/alter/constraint.md @@ -3,7 +3,7 @@ sidebar_position: 43 sidebar_label: CONSTRAINT --- -# Manipulating Constraints {#manipulations-with-constraints} +# Manipulating Constraints Constraints can be added or deleted using the following syntax: diff --git a/docs/en/sql-reference/statements/alter/delete.md b/docs/en/sql-reference/statements/alter/delete.md index 21ae091f9e7..88ecf26961c 100644 --- a/docs/en/sql-reference/statements/alter/delete.md +++ b/docs/en/sql-reference/statements/alter/delete.md @@ -3,7 +3,7 @@ sidebar_position: 39 sidebar_label: DELETE --- -# ALTER TABLE … DELETE Statement {#alter-mutations} +# ALTER TABLE … DELETE Statement ``` sql ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE WHERE filter_expr diff --git a/docs/en/sql-reference/statements/alter/index.md b/docs/en/sql-reference/statements/alter/index.md index 536da948218..e18e9e21a31 100644 --- a/docs/en/sql-reference/statements/alter/index.md +++ b/docs/en/sql-reference/statements/alter/index.md @@ -35,7 +35,7 @@ These `ALTER` statements modify entities related to role-based access control: [ALTER TABLE ... 
MODIFY COMMENT](../../../sql-reference/statements/alter/comment.md) statement adds, modifies, or removes comments on the table, regardless of whether it was set before. -## Mutations {#mutations} +## Mutations `ALTER` queries that are intended to manipulate table data are implemented with a mechanism called “mutations”, most notably [ALTER TABLE … DELETE](../../../sql-reference/statements/alter/delete.md) and [ALTER TABLE … UPDATE](../../../sql-reference/statements/alter/update.md). They are asynchronous background processes similar to merges in [MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables that produce new “mutated” versions of parts. @@ -47,7 +47,7 @@ A mutation query returns immediately after the mutation entry is added (in case Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted. -## Synchronicity of ALTER Queries {#synchronicity-of-alter-queries} +## Synchronicity of ALTER Queries For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas. diff --git a/docs/en/sql-reference/statements/alter/index/index.md b/docs/en/sql-reference/statements/alter/index/index.md index 90317f1ccdf..2c2f1c9b2f2 100644 --- a/docs/en/sql-reference/statements/alter/index/index.md +++ b/docs/en/sql-reference/statements/alter/index/index.md @@ -4,7 +4,7 @@ sidebar_position: 42 sidebar_label: INDEX --- -# Manipulating Data Skipping Indices {#manipulations-with-data-skipping-indices} +# Manipulating Data Skipping Indices The following operations are available: diff --git a/docs/en/sql-reference/statements/alter/order-by.md b/docs/en/sql-reference/statements/alter/order-by.md index 84d29ae8e11..1ffb6a3bbb3 100644 --- a/docs/en/sql-reference/statements/alter/order-by.md +++ b/docs/en/sql-reference/statements/alter/order-by.md @@ -3,7 +3,7 @@ sidebar_position: 41 sidebar_label: ORDER BY --- -# Manipulating Key Expressions {#manipulations-with-key-expressions} +# Manipulating Key Expressions ``` sql ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY ORDER BY new_expression diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md index 453d1bd7bf6..75c80add9b7 100644 --- a/docs/en/sql-reference/statements/alter/partition.md +++ b/docs/en/sql-reference/statements/alter/partition.md @@ -3,7 +3,7 @@ sidebar_position: 38 sidebar_label: PARTITION --- -# Manipulating Partitions and Parts {#alter_manipulations-with-partitions} +# Manipulating Partitions and Parts The following operations with [partitions](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) are available: @@ -24,7 +24,7 @@ The following operations with [partitions](../../../engines/table-engines/merget -## DETACH PARTITION\|PART {#alter_detach-partition} +## DETACH PARTITION\|PART ``` sql ALTER TABLE table_name DETACH PARTITION|PART partition_expr @@ -45,7 +45,7 @@ After the query is executed, you can do whatever you want with the data in the ` This query is replicated – it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. 
To find out if a replica is a leader, run a `SELECT` query against the [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas) table. Alternatively, it is easier to run a `DETACH` query on all replicas – all the replicas throw an exception, except the leader replicas (as multiple leaders are allowed). -## DROP PARTITION\|PART {#alter_drop-partition} +## DROP PARTITION\|PART ``` sql ALTER TABLE table_name DROP PARTITION|PART partition_expr @@ -64,7 +64,7 @@ ALTER TABLE mt DROP PARTITION '2020-11-21'; ALTER TABLE mt DROP PART 'all_4_4_0'; ``` -## DROP DETACHED PARTITION\|PART {#alter_drop-detached} +## DROP DETACHED PARTITION\|PART ``` sql ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr ``` Removes the specified part or all parts of the specified partition from `detached`. Read more about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr). -## ATTACH PARTITION\|PART {#alter_attach-partition} +## ATTACH PARTITION\|PART ``` sql ALTER TABLE table_name ATTACH PARTITION|PART partition_expr @@ -96,7 +96,7 @@ If there is no part with the correct checksums, the data is downloaded from any You can put data to the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the table on all replicas. -## ATTACH PARTITION FROM {#alter_attach-partition-from} +## ATTACH PARTITION FROM ``` sql ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 @@ -110,7 +110,7 @@ For the query to run successfully, the following conditions must be met: - Both tables must have the same structure. - Both tables must have the same partition key. -## REPLACE PARTITION {#alter_replace-partition} +## REPLACE PARTITION ``` sql ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1 @@ -123,7 +123,7 @@ For the query to run successfully, the following conditions must be met: - Both tables must have the same structure. - Both tables must have the same partition key. -## MOVE PARTITION TO TABLE {#alter_move_to_table-partition} +## MOVE PARTITION TO TABLE ``` sql ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest @@ -138,7 +138,7 @@ For the query to run successfully, the following conditions must be met: - Both tables must be the same engine family (replicated or non-replicated). - Both tables must have the same storage policy. -## CLEAR COLUMN IN PARTITION {#alter_clear-column-partition} +## CLEAR COLUMN IN PARTITION ``` sql ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr @@ -152,7 +152,7 @@ Example: ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902 ``` -## FREEZE PARTITION {#alter_freeze-partition} +## FREEZE PARTITION ``` sql ALTER TABLE table_name FREEZE [PARTITION partition_expr] [WITH NAME 'backup_name'] @@ -194,7 +194,7 @@ Restoring from a backup does not require stopping the server. For more information about backups and restoring data, see the [Data Backup](../../../operations/backup.md) section. -## UNFREEZE PARTITION {#alter_unfreeze-partition} +## UNFREEZE PARTITION ``` sql ALTER TABLE 'table_name' UNFREEZE [PARTITION 'part_expr'] WITH NAME 'backup_name' ``` Removes frozen partitions with the specified name from the disk. If the `PARTITION` clause is omitted, the query removes the backup of all partitions at once. 
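For example, a minimal sketch, assuming a backup created earlier with `FREEZE ... WITH NAME` (the table, partition, and backup names here are hypothetical):

``` sql
-- remove the on-disk backup named 'backup_name' for one partition of the hypothetical table 'visits'
ALTER TABLE visits UNFREEZE PARTITION '2019-09-01' WITH NAME 'backup_name';
```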
-## CLEAR INDEX IN PARTITION {#alter_clear-index-partition} +## CLEAR INDEX IN PARTITION ``` sql ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr ``` The query works similarly to `CLEAR COLUMN`, but it resets an index instead of column data. -## FETCH PARTITION|PART {#alter_fetch-partition} +## FETCH PARTITION|PART ``` sql ALTER TABLE table_name FETCH PARTITION|PART partition_expr FROM 'path-in-zookeeper' @@ -245,7 +245,7 @@ Before downloading, the system checks if the partition exists and the table stru Although the query is called `ALTER TABLE`, it does not change the table structure and does not immediately change the data available in the table. -## MOVE PARTITION\|PART {#alter_move-partition} +## MOVE PARTITION\|PART Moves partitions or data parts to another volume or disk for `MergeTree`-engine tables. See [Using Multiple Block Devices for Data Storage](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes). @@ -266,7 +266,7 @@ ALTER TABLE hits MOVE PART '20190301_14343_16206_438' TO VOLUME 'slow' ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd' ``` -## UPDATE IN PARTITION {#update-in-partition} +## UPDATE IN PARTITION Manipulates data in the specified partition matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). @@ -286,7 +286,7 @@ ALTER TABLE mt UPDATE x = x + 1 IN PARTITION 2 WHERE p = 2; - [UPDATE](../../../sql-reference/statements/alter/update.md#alter-table-update-statements) -## DELETE IN PARTITION {#delete-in-partition} +## DELETE IN PARTITION Deletes data in the specified partition matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). @@ -306,7 +306,7 @@ ALTER TABLE mt DELETE IN PARTITION 2 WHERE p = 2; - [DELETE](../../../sql-reference/statements/alter/delete.md#alter-mutations) -## How to Set Partition Expression {#alter-how-to-specify-part-expr} +## How to Set Partition Expression You can specify the partition expression in `ALTER ... PARTITION` queries in different ways: diff --git a/docs/en/sql-reference/statements/alter/projection.md b/docs/en/sql-reference/statements/alter/projection.md index 5ccf33d2d2f..72a4b792fa4 100644 --- a/docs/en/sql-reference/statements/alter/projection.md +++ b/docs/en/sql-reference/statements/alter/projection.md @@ -3,7 +3,7 @@ sidebar_position: 49 sidebar_label: PROJECTION --- -# Manipulating Projections {#manipulations-with-projections} +# Manipulating Projections The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available: diff --git a/docs/en/sql-reference/statements/alter/quota.md b/docs/en/sql-reference/statements/alter/quota.md index 2398a57502c..c5f1bac0666 100644 --- a/docs/en/sql-reference/statements/alter/quota.md +++ b/docs/en/sql-reference/statements/alter/quota.md @@ -3,7 +3,7 @@ sidebar_position: 46 sidebar_label: QUOTA --- -# ALTER QUOTA {#alter-quota-statement} +# ALTER QUOTA Changes quotas. 
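For instance, a hypothetical sketch (the quota name, interval, and limits are illustrative, not from the original):

``` sql
-- raise the query limit of the hypothetical quota 'qA' for the current user
ALTER QUOTA IF EXISTS qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER;
```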
diff --git a/docs/en/sql-reference/statements/alter/role.md b/docs/en/sql-reference/statements/alter/role.md index d3cb28a1705..62a80ccaf50 100644 --- a/docs/en/sql-reference/statements/alter/role.md +++ b/docs/en/sql-reference/statements/alter/role.md @@ -3,7 +3,7 @@ sidebar_position: 46 sidebar_label: ROLE --- -## ALTER ROLE {#alter-role-statement} +## ALTER ROLE Changes roles. diff --git a/docs/en/sql-reference/statements/alter/row-policy.md b/docs/en/sql-reference/statements/alter/row-policy.md index 47207d29287..0851c5b052d 100644 --- a/docs/en/sql-reference/statements/alter/row-policy.md +++ b/docs/en/sql-reference/statements/alter/row-policy.md @@ -3,7 +3,7 @@ sidebar_position: 47 sidebar_label: ROW POLICY --- -# ALTER ROW POLICY {#alter-row-policy-statement} +# ALTER ROW POLICY Changes row policy. diff --git a/docs/en/sql-reference/statements/alter/sample-by.md b/docs/en/sql-reference/statements/alter/sample-by.md index 08e4fe1066b..d3490916b26 100644 --- a/docs/en/sql-reference/statements/alter/sample-by.md +++ b/docs/en/sql-reference/statements/alter/sample-by.md @@ -3,7 +3,7 @@ sidebar_position: 41 sidebar_label: SAMPLE BY --- -# Manipulating Sampling-Key Expressions {#manipulations-with-sampling-key-expressions} +# Manipulating Sampling-Key Expressions Syntax: diff --git a/docs/en/sql-reference/statements/alter/setting.md b/docs/en/sql-reference/statements/alter/setting.md index bb361e2ee6f..da31da0cf53 100644 --- a/docs/en/sql-reference/statements/alter/setting.md +++ b/docs/en/sql-reference/statements/alter/setting.md @@ -3,7 +3,7 @@ sidebar_position: 38 sidebar_label: SETTING --- -# Table Settings Manipulations {#table_settings_manipulations} +# Table Settings Manipulations There is a set of queries to change table settings. You can modify settings or reset them to default values. A single query can change several settings at once. If a setting with the specified name does not exist, then the query raises an exception. @@ -18,7 +18,7 @@ ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY|RESET SETTING ... These queries can be applied to [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) tables only. ::: -## MODIFY SETTING {#alter_modify_setting} +## MODIFY SETTING Changes table settings. @@ -36,7 +36,7 @@ CREATE TABLE example_table (id UInt32, data String) ENGINE=MergeTree() ORDER BY ALTER TABLE example_table MODIFY SETTING max_part_loading_threads=8, max_parts_in_total=50000; ``` -## RESET SETTING {#alter_reset_setting} +## RESET SETTING Resets table settings to their default values. If a setting is in a default state, then no action is taken. diff --git a/docs/en/sql-reference/statements/alter/settings-profile.md b/docs/en/sql-reference/statements/alter/settings-profile.md index b1728f21c08..902f3854a12 100644 --- a/docs/en/sql-reference/statements/alter/settings-profile.md +++ b/docs/en/sql-reference/statements/alter/settings-profile.md @@ -3,7 +3,7 @@ sidebar_position: 48 sidebar_label: SETTINGS PROFILE --- -## ALTER SETTINGS PROFILE {#alter-settings-profile-statement} +## ALTER SETTINGS PROFILE Changes settings profiles. 
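As an illustration, a hypothetical profile change that pins `max_memory_usage` with constraints (profile name and values are assumptions):

``` sql
-- Assign a value plus MIN/MAX constraints for one setting in an existing profile.
ALTER SETTINGS PROFILE max_memory_usage_profile
    SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000;
```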
diff --git a/docs/en/sql-reference/statements/alter/ttl.md b/docs/en/sql-reference/statements/alter/ttl.md index f2cf8724197..37a171d4969 100644 --- a/docs/en/sql-reference/statements/alter/ttl.md +++ b/docs/en/sql-reference/statements/alter/ttl.md @@ -3,9 +3,9 @@ sidebar_position: 44 sidebar_label: TTL --- -# Manipulations with Table TTL {#manipulations-with-table-ttl} +# Manipulations with Table TTL -## MODIFY TTL {#modify-ttl} +## MODIFY TTL You can change [table TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) with a request of the following form: @@ -13,7 +13,7 @@ You can change [table TTL](../../../engines/table-engines/mergetree-family/merge ALTER TABLE table_name MODIFY TTL ttl_expression; ``` -## REMOVE TTL {#remove-ttl} +## REMOVE TTL TTL-property can be removed from table with the following query: diff --git a/docs/en/sql-reference/statements/alter/update.md b/docs/en/sql-reference/statements/alter/update.md index aeff7cfa1b2..5937f2e8f79 100644 --- a/docs/en/sql-reference/statements/alter/update.md +++ b/docs/en/sql-reference/statements/alter/update.md @@ -3,7 +3,7 @@ sidebar_position: 40 sidebar_label: UPDATE --- -# ALTER TABLE … UPDATE Statements {#alter-table-update-statements} +# ALTER TABLE … UPDATE Statements ``` sql ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr diff --git a/docs/en/sql-reference/statements/alter/user.md b/docs/en/sql-reference/statements/alter/user.md index f9b90349dab..30b024227be 100644 --- a/docs/en/sql-reference/statements/alter/user.md +++ b/docs/en/sql-reference/statements/alter/user.md @@ -3,7 +3,7 @@ sidebar_position: 45 sidebar_label: USER --- -# ALTER USER {#alter-user-statement} +# ALTER USER Changes ClickHouse user accounts. @@ -21,7 +21,7 @@ ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1] To use `ALTER USER` you must have the [ALTER USER](../../../sql-reference/statements/grant.md#grant-access-management) privilege. -## GRANTEES Clause {#grantees} +## GRANTEES Clause Specifies users or roles which are allowed to receive [privileges](../../../sql-reference/statements/grant.md#grant-privileges) from this user on the condition this user has also all required access granted with [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Options of the `GRANTEES` clause: @@ -32,7 +32,7 @@ Specifies users or roles which are allowed to receive [privileges](../../../sql- You can exclude any user or role by using the `EXCEPT` expression. For example, `ALTER USER user1 GRANTEES ANY EXCEPT user2`. It means if `user1` has some privileges granted with `GRANT OPTION` it will be able to grant those privileges to anyone except `user2`. -## Examples {#alter-user-examples} +## Examples Set assigned roles as default: diff --git a/docs/en/sql-reference/statements/alter/view.md b/docs/en/sql-reference/statements/alter/view.md index 71e89aaefe8..45bc2ab155a 100644 --- a/docs/en/sql-reference/statements/alter/view.md +++ b/docs/en/sql-reference/statements/alter/view.md @@ -3,7 +3,7 @@ sidebar_position: 50 sidebar_label: VIEW --- -# ALTER TABLE … MODIFY QUERY Statement {#alter-modify-query} +# ALTER TABLE … MODIFY QUERY Statement You can modify `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created with the `ALTER TABLE … MODIFY QUERY` statement. Use it when the materialized view was created without the `TO [db.]name` clause. 
The `allow_experimental_alter_materialized_view_structure` setting must be enabled. @@ -39,6 +39,6 @@ SELECT * FROM mv; └───┘ ``` -## ALTER LIVE VIEW Statement {#alter-live-view} +## ALTER LIVE VIEW Statement The `ALTER LIVE VIEW ... REFRESH` statement refreshes a [Live view](../create/view.md#live-view). See [Force Live View Refresh](../create/view.md#live-view-alter-refresh). diff --git a/docs/en/sql-reference/statements/attach.md b/docs/en/sql-reference/statements/attach.md index bc7b2be333f..e298df52409 100644 --- a/docs/en/sql-reference/statements/attach.md +++ b/docs/en/sql-reference/statements/attach.md @@ -3,7 +3,7 @@ sidebar_position: 40 sidebar_label: ATTACH --- -# ATTACH Statement {#attach} +# ATTACH Statement Attaches a table or a dictionary, for example, when moving a database to another server. @@ -17,7 +17,7 @@ The query does not create data on the disk, but assumes that data is already in If a table was previously detached ([DETACH](../../sql-reference/statements/detach.md) query), meaning that its structure is known, you can use a shorthand without defining the structure. -## Attach Existing Table {#attach-existing-table} +## Attach Existing Table **Syntax** @@ -29,9 +29,9 @@ This query is used when starting the server. The server stores table metadata as If the table was detached permanently, it won't be reattached at the server start, so you need to use the `ATTACH` query explicitly. -## Create New Table And Attach Data {#create-new-table-and-attach-data} +## Create New Table And Attach Data -### With Specified Path to Table Data {#attach-with-specified-path} +### With Specified Path to Table Data The query creates a new table with the provided structure and attaches table data from the provided directory in `user_files`. @@ -59,7 +59,7 @@ Result: └──────┴────┘ ``` -### With Specified Table UUID {#attach-with-specified-uuid} +### With Specified Table UUID This query creates a new table with the provided structure and attaches data from the table with the specified UUID. It is supported by the [Atomic](../../engines/database-engines/atomic.md) database engine. @@ -70,7 +70,7 @@ It is supported by the [Atomic](../../engines/database-engines/atomic.md) databa ATTACH TABLE name UUID '' (col1 Type1, ...) ``` -## Attach Existing Dictionary {#attach-existing-dictionary} +## Attach Existing Dictionary Attaches a previously detached dictionary. diff --git a/docs/en/sql-reference/statements/check-table.md b/docs/en/sql-reference/statements/check-table.md index 1164a8b8be6..2e1a6c5b366 100644 --- a/docs/en/sql-reference/statements/check-table.md +++ b/docs/en/sql-reference/statements/check-table.md @@ -3,7 +3,7 @@ sidebar_position: 41 sidebar_label: CHECK --- -# CHECK TABLE Statement {#check-table} +# CHECK TABLE Statement Checks if the data in the table is corrupted. @@ -30,7 +30,7 @@ Performing it over tables with other table engines causes an exception. Engines from the `*Log` family do not provide automatic data recovery on failure. Use the `CHECK TABLE` query to track data loss in a timely manner. -## Checking the MergeTree Family Tables {#checking-mergetree-tables} +## Checking the MergeTree Family Tables For `MergeTree` family engines, if [check_query_single_value_result](../../operations/settings/settings.md#check_query_single_value_result) = 0, the `CHECK TABLE` query shows a check status for every individual data part of a table on the local server.
@@ -59,7 +59,7 @@ CHECK TABLE test_table; └────────┘ ``` -## If the Data Is Corrupted {#if-data-is-corrupted} +## If the Data Is Corrupted If the table is corrupted, you can copy the non-corrupted data to another table. To do this: diff --git a/docs/en/sql-reference/statements/create/database.md b/docs/en/sql-reference/statements/create/database.md index 18ed94bef79..5ce9d55a6b1 100644 --- a/docs/en/sql-reference/statements/create/database.md +++ b/docs/en/sql-reference/statements/create/database.md @@ -3,7 +3,7 @@ sidebar_position: 35 sidebar_label: DATABASE --- -# CREATE DATABASE {#query-language-create-database} +# CREATE DATABASE Creates a new database. @@ -11,24 +11,24 @@ Creates a new database. CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(...)] [COMMENT 'Comment'] ``` -## Clauses {#clauses} +## Clauses -### IF NOT EXISTS {#if-not-exists} +### IF NOT EXISTS If the `db_name` database already exists, then ClickHouse does not create a new database and: - Doesn’t throw an exception if the clause is specified. - Throws an exception if the clause isn’t specified. -### ON CLUSTER {#on-cluster} +### ON CLUSTER ClickHouse creates the `db_name` database on all the servers of a specified cluster. More details are in the [Distributed DDL](../../../sql-reference/distributed-ddl.md) article. -### ENGINE {#engine} +### ENGINE By default, ClickHouse uses its own [Atomic](../../../engines/database-engines/atomic.md) database engine. There are also [Lazy](../../../engines/database-engines/lazy.md), [MySQL](../../../engines/database-engines/mysql.md), [PostgreSQL](../../../engines/database-engines/postgresql.md), [MaterializedMySQL](../../../engines/database-engines/materialized-mysql.md), [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md), [Replicated](../../../engines/database-engines/replicated.md), [SQLite](../../../engines/database-engines/sqlite.md). -### COMMENT {#comment} +### COMMENT You can add a comment to the database when creating it. diff --git a/docs/en/sql-reference/statements/create/dictionary.md b/docs/en/sql-reference/statements/create/dictionary.md index 246625cc901..442d7bd8afd 100644 --- a/docs/en/sql-reference/statements/create/dictionary.md +++ b/docs/en/sql-reference/statements/create/dictionary.md @@ -3,7 +3,7 @@ sidebar_position: 38 sidebar_label: DICTIONARY --- -# CREATE DICTIONARY {#create-dictionary-query} +# CREATE DICTIONARY Creates a new [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with the given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). diff --git a/docs/en/sql-reference/statements/create/function.md b/docs/en/sql-reference/statements/create/function.md index a87d3d70e54..7d9a727a70d 100644 --- a/docs/en/sql-reference/statements/create/function.md +++ b/docs/en/sql-reference/statements/create/function.md @@ -3,7 +3,7 @@ sidebar_position: 38 sidebar_label: FUNCTION --- -# CREATE FUNCTION {#create-function} +# CREATE FUNCTION Creates a user defined function from a lambda expression. The expression must consist of function parameters, constants, operators, or other function calls.
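A minimal sketch (the function name and body are illustrative):

``` sql
-- Define a reusable lambda and call it like a built-in function.
CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;
SELECT number, linear_equation(number, 2, 1) FROM numbers(3);
```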
diff --git a/docs/en/sql-reference/statements/create/quota.md b/docs/en/sql-reference/statements/create/quota.md index 931da165a73..da6ce01aafd 100644 --- a/docs/en/sql-reference/statements/create/quota.md +++ b/docs/en/sql-reference/statements/create/quota.md @@ -3,7 +3,7 @@ sidebar_position: 42 sidebar_label: QUOTA --- -# CREATE QUOTA {#create-quota-statement} +# CREATE QUOTA Creates a [quota](../../../operations/access-rights.md#quotas-management) that can be assigned to a user or a role. diff --git a/docs/en/sql-reference/statements/create/role.md b/docs/en/sql-reference/statements/create/role.md index aa0e92031f5..d69aeb0976c 100644 --- a/docs/en/sql-reference/statements/create/role.md +++ b/docs/en/sql-reference/statements/create/role.md @@ -3,7 +3,7 @@ sidebar_position: 40 sidebar_label: ROLE --- -# CREATE ROLE {#create-role-statement} +# CREATE ROLE Creates new [roles](../../../operations/access-rights.md#role-management). A role is a set of [privileges](../../../sql-reference/statements/grant.md#grant-privileges). A [user](../../../sql-reference/statements/create/user.md) assigned a role gets all the privileges of this role. @@ -14,7 +14,7 @@ CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [, name2 ...] [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] ``` -## Managing Roles {#managing-roles} +## Managing Roles A user can be assigned multiple roles. Users can apply their assigned roles in arbitrary combinations by the [SET ROLE](../../../sql-reference/statements/set-role.md) statement. The final scope of privileges is a combined set of all the privileges of all the applied roles. If a user has privileges granted directly to its user account, they are also combined with the privileges granted by roles. @@ -24,7 +24,7 @@ To revoke a role, use the [REVOKE](../../../sql-reference/statements/revoke.md) To delete a role, use the [DROP ROLE](../../../sql-reference/statements/drop#drop-role-statement) statement. The deleted role is automatically revoked from all the users and roles to which it was assigned. -## Examples {#create-role-examples} +## Examples ``` sql CREATE ROLE accountant; diff --git a/docs/en/sql-reference/statements/create/row-policy.md b/docs/en/sql-reference/statements/create/row-policy.md index 58b7b1e2cb9..c84c67f97f6 100644 --- a/docs/en/sql-reference/statements/create/row-policy.md +++ b/docs/en/sql-reference/statements/create/row-policy.md @@ -3,7 +3,7 @@ sidebar_position: 41 sidebar_label: ROW POLICY --- -# CREATE ROW POLICY {#create-row-policy-statement} +# CREATE ROW POLICY Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table. @@ -21,11 +21,11 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}] ``` -## USING Clause {#create-row-policy-using} +## USING Clause Allows specifying a condition to filter rows. A user will see a row if the condition evaluates to non-zero for the row. -## TO Clause {#create-row-policy-to} +## TO Clause In the section `TO` you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.
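Putting `USING` and `TO` together, a hypothetical policy might look like this (table, column, and grantee names are assumptions):

``` sql
-- Only rows with b = 1 are visible to the listed grantees.
CREATE ROW POLICY pol1 ON mydb.table1 USING b = 1 TO accountant, john@localhost;
```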
@@ -43,7 +43,7 @@ If that's not desirable it can't be fixed by adding one more row policy, like th `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter` ::: -## AS Clause {#create-row-policy-as} +## AS Clause It's allowed to have more than one policy enabled on the same table for the same user at the one time. So we need a way to combine the conditions from multiple policies. @@ -76,7 +76,7 @@ CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio enables the user `peter` to see rows only if both `b=1` AND `c=2`. -## ON CLUSTER Clause {#create-row-policy-on-cluster} +## ON CLUSTER Clause Allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md). diff --git a/docs/en/sql-reference/statements/create/settings-profile.md b/docs/en/sql-reference/statements/create/settings-profile.md index 0cc633d9770..0a3e1c0daf1 100644 --- a/docs/en/sql-reference/statements/create/settings-profile.md +++ b/docs/en/sql-reference/statements/create/settings-profile.md @@ -3,7 +3,7 @@ sidebar_position: 43 sidebar_label: SETTINGS PROFILE --- -# CREATE SETTINGS PROFILE {#create-settings-profile-statement} +# CREATE SETTINGS PROFILE Creates [settings profiles](../../../operations/access-rights.md#settings-profiles-management) that can be assigned to a user or a role. @@ -17,7 +17,7 @@ CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] TO name1 [ON CLUSTER cluste `ON CLUSTER` clause allows creating settings profiles on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md). -## Example {#create-settings-profile-syntax} +## Example Create the `max_memory_usage_profile` settings profile with value and constraints for the `max_memory_usage` setting and assign it to user `robin`: diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index c477e41ba02..d8bd741c2cf 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -3,15 +3,15 @@ sidebar_position: 36 sidebar_label: TABLE --- -# CREATE TABLE {#create-table-query} +# CREATE TABLE Creates a new table. This query can have various syntax forms depending on a use case. By default, tables are created only on the current server. Distributed DDL queries are implemented as `ON CLUSTER` clause, which is [described separately](../../../sql-reference/distributed-ddl.md). -## Syntax Forms {#syntax-forms} +## Syntax Forms -### With Explicit Schema {#with-explicit-schema} +### With Explicit Schema ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] @@ -31,7 +31,7 @@ Expressions can also be defined for default values (see below). If necessary, primary key can be specified, with one or more key expressions. -### With a Schema Similar to Other Table {#with-a-schema-similar-to-other-table} +### With a Schema Similar to Other Table ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name AS [db2.]name2 [ENGINE = engine] @@ -39,7 +39,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name AS [db2.]name2 [ENGINE = engine] Creates a table with the same structure as another table. You can specify a different engine for the table. If the engine is not specified, the same engine will be used as for the `db2.name2` table. 
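For example (hypothetical table names):

``` sql
-- t2 copies t1's column definitions but stores data in a different engine.
CREATE TABLE t2 AS t1 ENGINE = TinyLog;
```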
-### From a Table Function {#from-a-table-function} +### From a Table Function ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function() @@ -47,7 +47,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function() Creates a table with the same result as that of the [table function](../../../sql-reference/table-functions/index.md#table-functions) specified. The created table will also work in the same way as the corresponding table function that was specified. -### From SELECT query {#from-select-query} +### From SELECT query ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name[(name1 [type1], name2 [type2], ...)] ENGINE = engine AS SELECT ... @@ -76,7 +76,7 @@ Result: └───┴───────────────┘ ``` -## NULL Or NOT NULL Modifiers {#null-modifiers} +## NULL Or NOT NULL Modifiers `NULL` and `NOT NULL` modifiers after data type in column definition allow or do not allow it to be [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable). @@ -84,7 +84,7 @@ If the type is not `Nullable` and if `NULL` is specified, it will be treated as See also [data_type_default_nullable](../../../operations/settings/settings.md#data_type_default_nullable) setting. -## Default Values {#create-default-values} +## Default Values The column description can specify an expression for a default value, in one of the following ways: `DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`. @@ -98,13 +98,13 @@ If the data type and default expression are defined explicitly, this expression Default expressions may be defined as an arbitrary expression from table constants and columns. When creating and changing the table structure, it checks that expressions do not contain loops. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed. -### DEFAULT {#default} +### DEFAULT `DEFAULT expr` Normal default value. If the INSERT query does not specify the corresponding column, it will be filled in by computing the corresponding expression. -### MATERIALIZED {#materialized} +### MATERIALIZED `MATERIALIZED expr` @@ -112,14 +112,14 @@ Materialized expression. Such a column can’t be specified for INSERT, because For an INSERT without a list of columns, these columns are not considered. In addition, this column is not substituted when using an asterisk in a SELECT query. This is to preserve the invariant that the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns. -### EPHEMERAL {#ephemeral} +### EPHEMERAL `EPHEMERAL [expr]` Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but can be referenced in the defaults of CREATE statement. If `expr` is omitted type for column is required. INSERT without list of columns will skip such column, so SELECT/INSERT invariant is preserved - the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns. -### ALIAS {#alias} +### ALIAS `ALIAS expr` @@ -133,7 +133,7 @@ If you add a new column to a table but later change its default expression, the It is not possible to set default values for elements in nested data structures. -## Primary Key {#primary-key} +## Primary Key You can define a [primary key](../../../engines/table-engines/mergetree-family/mergetree.md#primary-keys-and-indexes-in-queries) when creating a table. Primary key can be specified in two ways: @@ -163,7 +163,7 @@ PRIMARY KEY(expr1[, expr2,...]); You can't combine both ways in one query. 
::: -## Constraints {#constraints} +## Constraints Along with columns descriptions constraints could be defined: @@ -181,11 +181,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Adding large amount of constraints can negatively affect performance of big `INSERT` queries. -## TTL Expression {#ttl-expression} +## TTL Expression Defines storage time for values. Can be specified only for MergeTree-family tables. For the detailed description, see [TTL for columns and tables](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl). -## Column Compression Codecs {#codecs} +## Column Compression Codecs By default, ClickHouse applies the `lz4` compression method. For `MergeTree`-engine family you can change the default compression method in the [compression](../../../operations/server-configuration-parameters/settings.md#server-settings-compression) section of a server configuration. @@ -228,7 +228,7 @@ Compression is supported for the following table engines: ClickHouse supports general purpose codecs and specialized codecs. -### General Purpose Codecs {#create-query-general-purpose-codecs} +### General Purpose Codecs Codecs: @@ -239,7 +239,7 @@ Codecs: High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage. -### Specialized Codecs {#specialized-codecs} +### Specialized Codecs These codecs are designed to make compression more effective by using specific features of data. Some of these codecs do not compress data themself. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation. @@ -261,7 +261,7 @@ CREATE TABLE codec_example ENGINE = MergeTree() ``` -### Encryption Codecs {#create-query-encryption-codecs} +### Encryption Codecs These codecs don't actually compress data, but instead encrypt data on disk. These are only available when an encryption key is specified by [encryption](../../../operations/server-configuration-parameters/settings.md#server-settings-encryption) settings. Note that encryption only makes sense at the end of codec pipelines, because encrypted data usually can't be compressed in any meaningful way. @@ -305,7 +305,7 @@ CREATE TABLE mytable ENGINE = MergeTree ORDER BY x; ``` -## Temporary Tables {#temporary-tables} +## Temporary Tables ClickHouse supports temporary tables which have the following characteristics: @@ -331,7 +331,7 @@ In most cases, temporary tables are not created manually, but when using externa It’s possible to use tables with [ENGINE = Memory](../../../engines/table-engines/special/memory.md) instead of temporary tables. -## REPLACE TABLE {#replace-table-query} +## REPLACE TABLE 'REPLACE' query allows you to update the table atomically. @@ -407,7 +407,7 @@ SELECT * FROM base.t1; └───┘ ``` -## COMMENT Clause {#comment-table} +## COMMENT Clause You can add a comment to the table when you creating it. diff --git a/docs/en/sql-reference/statements/create/user.md b/docs/en/sql-reference/statements/create/user.md index 34f0a13147c..feda8459104 100644 --- a/docs/en/sql-reference/statements/create/user.md +++ b/docs/en/sql-reference/statements/create/user.md @@ -3,7 +3,7 @@ sidebar_position: 39 sidebar_label: USER --- -# CREATE USER {#create-user-statement} +# CREATE USER Creates [user accounts](../../../operations/access-rights.md#user-account-management). 
@@ -22,7 +22,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] `ON CLUSTER` clause allows creating users on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md). -## Identification {#identification} +## Identification There are multiple ways of user identification: @@ -37,7 +37,7 @@ There are multiple ways of user identification: For identification with sha256_hash using `SALT`, the hash must be calculated from the concatenation of 'password' and 'salt'. -## User Host {#user-host} +## User Host A user host is a host from which a connection to the ClickHouse server can be established. The host can be specified in the `HOST` query section in the following ways: @@ -58,7 +58,7 @@ Another way of specifying host is to use `@` syntax following the username. Exam ClickHouse treats `user_name@'address'` as a username as a whole. Thus, technically you can create multiple users with the same `user_name` and different constructions after `@`. However, we do not recommend doing so. ::: -## GRANTEES Clause {#grantees} +## GRANTEES Clause Specifies users or roles which are allowed to receive [privileges](../../../sql-reference/statements/grant.md#grant-privileges) from this user on the condition that this user also has all the required access granted with [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Options of the `GRANTEES` clause: @@ -69,7 +69,7 @@ Specifies users or roles which are allowed to receive [privileges](../../../sql- You can exclude any user or role by using the `EXCEPT` expression. For example, `CREATE USER user1 GRANTEES ANY EXCEPT user2`. It means that if `user1` has some privileges granted with `GRANT OPTION`, it will be able to grant those privileges to anyone except `user2`. -## Examples {#create-user-examples} +## Examples Create the user account `mira` protected by the password `qwerty`: diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index e31d1b4473f..54ea4f7cd2a 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -3,11 +3,11 @@ sidebar_position: 37 sidebar_label: VIEW --- -# CREATE VIEW {#create-view} +# CREATE VIEW Creates a new view. Views can be [normal](#normal), [materialized](#materialized), [live](#live-view), and [window](#window-view) (live view and window view are experimental features). -## Normal View {#normal} +## Normal View Syntax: @@ -35,7 +35,7 @@ This query is fully equivalent to using the subquery: SELECT a, b, c FROM (SELECT ...) ``` -## Materialized View {#materialized} +## Materialized View ``` sql CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ... @@ -67,7 +67,7 @@ Views look the same as normal tables. For example, they are listed in the result To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view). Although `DROP TABLE` works for VIEWs as well. -## Live View [Experimental] {#live-view} +## Live View [Experimental] :::note This is an experimental feature that may change in backwards-incompatible ways in future releases. Enable usage of live views and the `WATCH` query using the [allow_experimental_live_view](../../../operations/settings/settings.md#allow-experimental-live-view) setting. Input the command `set allow_experimental_live_view = 1`. @@ -93,7 +93,7 @@ Live views work similarly to how a query in a distributed table works.
But inste See [WITH REFRESH](#live-view-with-refresh) to force periodic updates of a live view that in some cases can be used as a workaround. ::: -### Monitoring Live View Changes {#live-view-monitoring} +### Monitoring Live View Changes You can monitor changes in the `LIVE VIEW` query result using [WATCH](../../../sql-reference/statements/watch.md) query. @@ -161,11 +161,11 @@ You can execute [SELECT](../../../sql-reference/statements/select/index.md) quer SELECT * FROM [db.]live_view WHERE ... ``` -### Force Live View Refresh {#live-view-alter-refresh} +### Force Live View Refresh You can force live view refresh using the `ALTER LIVE VIEW [db.]table_name REFRESH` statement. -### WITH TIMEOUT Clause {#live-view-with-timeout} +### WITH TIMEOUT Clause When a live view is created with a `WITH TIMEOUT` clause then the live view will be dropped automatically after the specified number of seconds elapse since the end of the last [WATCH](../../../sql-reference/statements/watch.md) query that was watching the live view. @@ -182,7 +182,7 @@ CREATE TABLE mt (x Int8) Engine = MergeTree ORDER BY x; CREATE LIVE VIEW lv WITH TIMEOUT 15 AS SELECT sum(x) FROM mt; ``` -### WITH REFRESH Clause {#live-view-with-refresh} +### WITH REFRESH Clause When a live view is created with a `WITH REFRESH` clause then it will be automatically refreshed after the specified number of seconds elapse since the last refresh or trigger. @@ -233,7 +233,7 @@ WATCH lv Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.lv does not exist.. ``` -### Live View Usage {#live-view-usage} +### Live View Usage Most common uses of live view tables include: @@ -245,7 +245,7 @@ Most common uses of live view tables include: **See Also** - [ALTER LIVE VIEW](../alter/view.md#alter-live-view) -## Window View [Experimental] {#window-view} +## Window View [Experimental] :::info This is an experimental feature that may change in backwards-incompatible ways in the future releases. Enable usage of window views and `WATCH` query using [allow_experimental_window_view](../../../operations/settings/settings.md#allow-experimental-window-view) setting. Input the command `set allow_experimental_window_view = 1`. @@ -259,11 +259,11 @@ Window view can aggregate data by time window and output the results when the wi Creating a window view is similar to creating `MATERIALIZED VIEW`. Window view needs an inner storage engine to store intermediate data. The inner storage will use `AggregatingMergeTree` as the default engine. -### Time Window Functions {#window-view-timewindowfunctions} +### Time Window Functions [Time window functions](../../functions/time-window-functions.md) are used to get the lower and upper window bound of records. The window view needs to be used with a time window function. -### TIME ATTRIBUTES {#window-view-timeattributes} +### TIME ATTRIBUTES Window view supports **processing time** and **event time** process. @@ -297,7 +297,7 @@ CREATE WINDOW VIEW test.wv TO test.dst WATERMARK=ASCENDING ALLOWED_LATENESS=INTE Note that elements emitted by a late firing should be treated as updated results of a previous computation. Instead of firing at the end of windows, the window view will fire immediately when the late event arrives. Thus, it will result in multiple outputs for the same window. Users need to take these duplicated results into account or deduplicate them. 
-### Monitoring New Windows {#window-view-monitoring} +### Monitoring New Windows Window view supports the [WATCH](../../../sql-reference/statements/watch.md) query to monitoring changes, or use `TO` syntax to output the results to a table. @@ -310,12 +310,12 @@ WATCH [db.]window_view `WATCH` query acts similar as in `LIVE VIEW`. A `LIMIT` can be specified to set the number of updates to receive before terminating the query. The `EVENTS` clause can be used to obtain a short form of the `WATCH` query where instead of the query result you will just get the latest query watermark. -### Settings {#window-view-settings} +### Settings - `window_view_clean_interval`: The clean interval of window view in seconds to free outdated data. The system will retain the windows that have not been fully triggered according to the system time or `WATERMARK` configuration, and the other data will be deleted. - `window_view_heartbeat_interval`: The heartbeat interval in seconds to indicate the watch query is alive. -### Example {#window-view-example} +### Example Suppose we need to count the number of click logs per 10 seconds in a log table called `data`, and its table structure is: @@ -357,7 +357,7 @@ CREATE WINDOW VIEW wv TO dst AS SELECT count(id), tumbleStart(w_id) as window_st Additional examples can be found among stateful tests of ClickHouse (they are named `*window_view*` there). -### Window View Usage {#window-view-usage} +### Window View Usage The window view is useful in the following scenarios: diff --git a/docs/en/sql-reference/statements/describe-table.md b/docs/en/sql-reference/statements/describe-table.md index 7fbe5bd2790..bc15e0e3062 100644 --- a/docs/en/sql-reference/statements/describe-table.md +++ b/docs/en/sql-reference/statements/describe-table.md @@ -3,7 +3,7 @@ sidebar_position: 42 sidebar_label: DESCRIBE --- -# DESCRIBE TABLE {#misc-describe-table} +# DESCRIBE TABLE Returns information about table columns. diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index 780de73842d..0265cb49f7e 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -3,7 +3,7 @@ sidebar_position: 43 sidebar_label: DETACH --- -# DETACH Statement {#detach} +# DETACH Statement Makes the server "forget" about the existence of a table, a materialized view, or a dictionary. diff --git a/docs/en/sql-reference/statements/drop.md b/docs/en/sql-reference/statements/drop.md index 0d3e1f7860d..49862cbfc02 100644 --- a/docs/en/sql-reference/statements/drop.md +++ b/docs/en/sql-reference/statements/drop.md @@ -3,11 +3,11 @@ sidebar_position: 44 sidebar_label: DROP --- -# DROP Statements {#drop} +# DROP Statements Deletes existing entity. If the `IF EXISTS` clause is specified, these queries do not return an error if the entity does not exist. -## DROP DATABASE {#drop-database} +## DROP DATABASE Deletes all tables inside the `db` database, then deletes the `db` database itself. @@ -17,7 +17,7 @@ Syntax: DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster] ``` -## DROP TABLE {#drop-table} +## DROP TABLE Deletes the table. @@ -27,7 +27,7 @@ Syntax: DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] ``` -## DROP DICTIONARY {#drop-dictionary} +## DROP DICTIONARY Deletes the dictionary. @@ -37,7 +37,7 @@ Syntax: DROP DICTIONARY [IF EXISTS] [db.]name ``` -## DROP USER {#drop-user-statement} +## DROP USER Deletes a user. @@ -47,7 +47,7 @@ Syntax: DROP USER [IF EXISTS] name [,...] 
[ON CLUSTER cluster_name] ``` -## DROP ROLE {#drop-role-statement} +## DROP ROLE Deletes a role. The deleted role is revoked from all the entities where it was assigned. @@ -57,7 +57,7 @@ Syntax: DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` -## DROP ROW POLICY {#drop-row-policy-statement} +## DROP ROW POLICY Deletes a row policy. Deleted row policy is revoked from all the entities where it was assigned. @@ -67,7 +67,7 @@ Syntax: DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name] ``` -## DROP QUOTA {#drop-quota-statement} +## DROP QUOTA Deletes a quota. The deleted quota is revoked from all the entities where it was assigned. @@ -77,7 +77,7 @@ Syntax: DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` -## DROP SETTINGS PROFILE {#drop-settings-profile-statement} +## DROP SETTINGS PROFILE Deletes a settings profile. The deleted settings profile is revoked from all the entities where it was assigned. @@ -87,7 +87,7 @@ Syntax: DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` -## DROP VIEW {#drop-view} +## DROP VIEW Deletes a view. Views can be deleted by a `DROP TABLE` command as well but `DROP VIEW` checks that `[db.]name` is a view. @@ -97,7 +97,7 @@ Syntax: DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster] ``` -## DROP FUNCTION {#drop-function} +## DROP FUNCTION Deletes a user defined function created by [CREATE FUNCTION](./create/function.md). System functions can not be dropped. diff --git a/docs/en/sql-reference/statements/exchange.md b/docs/en/sql-reference/statements/exchange.md index abe3d40950e..cf1be28fe7c 100644 --- a/docs/en/sql-reference/statements/exchange.md +++ b/docs/en/sql-reference/statements/exchange.md @@ -3,7 +3,7 @@ sidebar_position: 49 sidebar_label: EXCHANGE --- -# EXCHANGE Statement {#exchange} +# EXCHANGE Statement Exchanges the names of two tables or dictionaries atomically. This task can also be accomplished with a [RENAME](./rename.md) query using a temporary name, but the operation is not atomic in that case. @@ -18,7 +18,7 @@ The `EXCHANGE` query is supported by the [Atomic](../../engines/database-engines EXCHANGE TABLES|DICTIONARIES [db0.]name_A AND [db1.]name_B ``` -## EXCHANGE TABLES {#exchange_tables} +## EXCHANGE TABLES Exchanges the names of two tables. @@ -28,7 +28,7 @@ Exchanges the names of two tables. EXCHANGE TABLES [db0.]table_A AND [db1.]table_B ``` -## EXCHANGE DICTIONARIES {#exchange_dictionaries} +## EXCHANGE DICTIONARIES Exchanges the names of two dictionaries. diff --git a/docs/en/sql-reference/statements/exists.md b/docs/en/sql-reference/statements/exists.md index 7c6cc812665..044bfb9a4b3 100644 --- a/docs/en/sql-reference/statements/exists.md +++ b/docs/en/sql-reference/statements/exists.md @@ -3,7 +3,7 @@ sidebar_position: 45 sidebar_label: EXISTS --- -# EXISTS Statement {#exists-statement} +# EXISTS Statement ``` sql EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format] diff --git a/docs/en/sql-reference/statements/explain.md b/docs/en/sql-reference/statements/explain.md index 81045a699be..bc6d967e71a 100644 --- a/docs/en/sql-reference/statements/explain.md +++ b/docs/en/sql-reference/statements/explain.md @@ -3,7 +3,7 @@ sidebar_position: 39 sidebar_label: EXPLAIN --- -# EXPLAIN Statement {#explain} +# EXPLAIN Statement Shows the execution plan of a statement. 
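For example, the bare form (a sketch; the exact plan output depends on the query and server version):

``` sql
-- Show the default (PLAN) explanation for a simple aggregation.
EXPLAIN SELECT sum(number) FROM numbers(10) GROUP BY number % 3;
```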
@@ -43,14 +43,14 @@ Union ReadFromStorage (SystemNumbers) ``` -## EXPLAIN Types {#explain-types} +## EXPLAIN Types - `AST` — Abstract syntax tree. - `SYNTAX` — Query text after AST-level optimizations. - `PLAN` — Query execution plan. - `PIPELINE` — Query execution pipeline. -### EXPLAIN AST {#explain-ast} +### EXPLAIN AST Dumps the query AST. Supports all types of queries, not only `SELECT`. @@ -84,7 +84,7 @@ EXPLAIN AST ALTER TABLE t1 DELETE WHERE date = today(); ExpressionList ``` -### EXPLAIN SYNTAX {#explain-syntax} +### EXPLAIN SYNTAX Returns the query after syntax optimizations. @@ -110,7 +110,7 @@ FROM CROSS JOIN system.numbers AS c ``` -### EXPLAIN PLAN {#explain-plan} +### EXPLAIN PLAN Dumps query plan steps. @@ -361,7 +361,7 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw; ] ``` -### EXPLAIN PIPELINE {#explain-pipeline} +### EXPLAIN PIPELINE Settings: @@ -390,7 +390,7 @@ ExpressionTransform (ReadFromStorage) NumbersMt × 2 0 → 1 ``` -### EXPLAIN ESTIMATE {#explain-estimate} +### EXPLAIN ESTIMATE Shows the estimated number of rows, marks and parts to be read from the tables while processing the query. Works with tables in the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree) family. @@ -418,7 +418,7 @@ Result: └──────────┴───────┴───────┴──────┴───────┘ ``` -### EXPLAIN TABLE OVERRIDE {#explain-table-override} +### EXPLAIN TABLE OVERRIDE Shows the result of a table override on a table schema accessed through a table function. Also does some validation, throwing an exception if the override would have caused some kind of failure. diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index b60114e10c5..c2395e83b7f 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -3,14 +3,14 @@ sidebar_position: 38 sidebar_label: GRANT --- -# GRANT Statement {#grant} +# GRANT Statement - Grants [privileges](#grant-privileges) to ClickHouse user accounts or roles. - Assigns roles to user accounts or to other roles. To revoke privileges, use the [REVOKE](../../sql-reference/statements/revoke.md) statement. You can also list granted privileges with the [SHOW GRANTS](../../sql-reference/statements/show.md#show-grants-statement) statement. -## Granting Privilege Syntax {#grant-privigele-syntax} +## Granting Privilege Syntax ``` sql GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION] [WITH REPLACE OPTION] @@ -23,7 +23,7 @@ GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.ta The `WITH GRANT OPTION` clause grants `user` or `role` permission to execute the `GRANT` query. Users can grant privileges of the same or narrower scope than they have. The `WITH REPLACE OPTION` clause replaces old privileges with new privileges for the `user` or `role`; if it is not specified, privileges are appended. -## Assigning Role Syntax {#assign-role-syntax} +## Assigning Role Syntax ``` sql GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION] [WITH REPLACE OPTION] @@ -35,7 +35,7 @@ GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_US The `WITH ADMIN OPTION` clause grants [ADMIN OPTION](#admin-option-privilege) privilege to `user` or `role`.
The `WITH REPLACE OPTION` clause replaces old roles with new roles for the `user` or `role`; if it is not specified, roles are appended. -## Usage {#grant-usage} +## Usage To use `GRANT`, your account must have the `GRANT OPTION` privilege. You can grant privileges only inside the scope of your account privileges. @@ -61,7 +61,7 @@ Access to the `system` database is always allowed (since this database is used f You can grant multiple privileges to multiple accounts in one query. The query `GRANT SELECT, INSERT ON *.* TO john, robin` allows accounts `john` and `robin` to execute the `INSERT` and `SELECT` queries over all the tables in all the databases on the server. -## Privileges {#grant-privileges} +## Privileges A privilege is a permission to execute a specific kind of query. @@ -222,7 +222,7 @@ If a user or a role has no privileges, it is displayed as [NONE](#grant-none) pr Some queries by their implementation require a set of privileges. For example, to execute the [RENAME](../../sql-reference/statements/misc.md#misc_operations-rename) query you need the following privileges: `SELECT`, `CREATE TABLE`, `INSERT` and `DROP TABLE`. -### SELECT {#grant-select} +### SELECT Allows executing [SELECT](../../sql-reference/statements/select/index.md) queries. @@ -240,7 +240,7 @@ GRANT SELECT(x,y) ON db.table TO john This privilege allows `john` to execute any `SELECT` query that involves data from the `x` and/or `y` columns in `db.table`, for example, `SELECT x FROM db.table`. `john` can’t execute `SELECT z FROM db.table`. `SELECT * FROM db.table` is also not available. When processing this query, ClickHouse does not return any data, not even `x` and `y`. The only exception is if a table contains only the `x` and `y` columns; in this case ClickHouse returns all the data. -### INSERT {#grant-insert} +### INSERT Allows executing [INSERT](../../sql-reference/statements/insert-into.md) queries. @@ -258,7 +258,7 @@ GRANT INSERT(x,y) ON db.table TO john The granted privilege allows `john` to insert data into the `x` and/or `y` columns in `db.table`. -### ALTER {#grant-alter} +### ALTER Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries according to the following hierarchy of privileges: @@ -305,7 +305,7 @@ Examples of how this hierarchy is treated: - The `DETACH` operation needs the [DROP](#grant-drop) privilege. - To stop a mutation with the [KILL MUTATION](../../sql-reference/statements/misc.md#kill-mutation) query, you need to have the privilege to start this mutation. For example, if you want to stop the `ALTER UPDATE` query, you need the `ALTER UPDATE`, `ALTER TABLE`, or `ALTER` privilege. -### CREATE {#grant-create} +### CREATE Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [ATTACH](../../sql-reference/statements/misc.md#attach) DDL-queries according to the following hierarchy of privileges: @@ -320,7 +320,7 @@ Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [A - To delete the created table, a user needs [DROP](#grant-drop). -### DROP {#grant-drop} +### DROP Allows executing [DROP](../../sql-reference/statements/misc.md#drop) and [DETACH](../../sql-reference/statements/misc.md#detach) queries according to the following hierarchy of privileges: @@ -330,19 +330,19 @@ Allows executing [DROP](../../sql-reference/statements/misc.md#drop) and [DETACH - `DROP VIEW`. Level: `VIEW` - `DROP DICTIONARY`.
Level: `DICTIONARY` -### TRUNCATE {#grant-truncate} +### TRUNCATE Allows executing [TRUNCATE](../../sql-reference/statements/misc.md#truncate-statement) queries. Privilege level: `TABLE`. -### OPTIMIZE {#grant-optimize} +### OPTIMIZE Allows executing [OPTIMIZE TABLE](../../sql-reference/statements/misc.md#misc_operations-optimize) queries. Privilege level: `TABLE`. -### SHOW {#grant-show} +### SHOW Allows executing `SHOW`, `DESCRIBE`, `USE`, and `EXISTS` queries according to the following hierarchy of privileges: @@ -356,7 +356,7 @@ Allows executing `SHOW`, `DESCRIBE`, `USE`, and `EXISTS` queries according to th A user has the `SHOW` privilege if it has any other privilege concerning the specified table, dictionary or database. -### KILL QUERY {#grant-kill-query} +### KILL QUERY Allows executing [KILL](../../sql-reference/statements/misc.md#kill-query-statement) queries according to the following hierarchy of privileges: @@ -366,7 +366,7 @@ Privilege level: `GLOBAL`. `KILL QUERY` privilege allows one user to kill queries of other users. -### ACCESS MANAGEMENT {#grant-access-management} +### ACCESS MANAGEMENT Allows a user to execute queries that manage users, roles and row policies. @@ -396,7 +396,7 @@ Allows a user to execute queries that manage users, roles and row policies. The `ROLE ADMIN` privilege allows a user to assign and revoke any roles including those which are not assigned to the user with the admin option. -### SYSTEM {#grant-system} +### SYSTEM Allows a user to execute [SYSTEM](../../sql-reference/statements/system.md) queries according to the following hierarchy of privileges. @@ -426,7 +426,7 @@ Allows a user to execute [SYSTEM](../../sql-reference/statements/system.md) quer The `SYSTEM RELOAD EMBEDDED DICTIONARIES` privilege implicitly granted by the `SYSTEM RELOAD DICTIONARY ON *.*` privilege. -### INTROSPECTION {#grant-introspection} +### INTROSPECTION Allows using [introspection](../../operations/optimizing-performance/sampling-query-profiler.md) functions. @@ -436,7 +436,7 @@ Allows using [introspection](../../operations/optimizing-performance/sampling-qu - `addressToSymbol`. Level: `GLOBAL` - `demangle`. Level: `GLOBAL` -### SOURCES {#grant-sources} +### SOURCES Allows using external data sources. Applies to [table engines](../../engines/table-engines/index.md) and [table functions](../../sql-reference/table-functions/index.md#table-functions). @@ -457,7 +457,7 @@ Examples: - To create a table with the [MySQL table engine](../../engines/table-engines/integrations/mysql.md), you need `CREATE TABLE (ON db.table_name)` and `MYSQL` privileges. - To use the [mysql table function](../../sql-reference/table-functions/mysql.md), you need `CREATE TEMPORARY TABLE` and `MYSQL` privileges. -### dictGet {#grant-dictget} +### dictGet - `dictGet`. Aliases: `dictHas`, `dictGetHierarchy`, `dictIsIn` @@ -470,15 +470,15 @@ Privilege level: `DICTIONARY`. - `GRANT dictGet ON mydb.mydictionary TO john` - `GRANT dictGet ON mydictionary TO john` -### ALL {#grant-all} +### ALL Grants all the privileges on regulated entity to a user account or a role. -### NONE {#grant-none} +### NONE Doesn’t grant any privileges. -### ADMIN OPTION {#admin-option-privilege} +### ADMIN OPTION The `ADMIN OPTION` privilege allows a user to grant their role to another user. 
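For example, a hypothetical grant that passes a role on with admin rights (role and user names are assumptions):

``` sql
-- john can now grant or revoke the accountant role himself.
GRANT accountant TO john WITH ADMIN OPTION;
```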
diff --git a/docs/en/sql-reference/statements/insert-into.md b/docs/en/sql-reference/statements/insert-into.md index 45e6e87cf12..194e5f1ea91 100644 --- a/docs/en/sql-reference/statements/insert-into.md +++ b/docs/en/sql-reference/statements/insert-into.md @@ -90,11 +90,11 @@ INSERT INTO t FORMAT TabSeparated You can insert data separately from the query by using the command-line client or the HTTP interface. For more information, see the section “[Interfaces](../../interfaces)”. -### Constraints {#constraints} +### Constraints If table has [constraints](../../sql-reference/statements/create/table.md#constraints), their expressions will be checked for each row of inserted data. If any of those constraints is not satisfied — server will raise an exception containing constraint name and expression, the query will be stopped. -### Inserting the Results of `SELECT` {#insert_query_insert-select} +### Inserting the Results of `SELECT` **Syntax** @@ -113,7 +113,7 @@ However, you can delete old data using `ALTER TABLE ... DROP PARTITION`. To insert a default value instead of `NULL` into a column with not nullable data type, enable [insert_null_as_default](../../operations/settings/settings.md#insert_null_as_default) setting. -### Inserting Data from a File {#inserting-data-from-a-file} +### Inserting Data from a File **Syntax** @@ -147,7 +147,7 @@ Result: └────┴──────┘ ``` -### Inserting into Table Function {#inserting-into-table-function} +### Inserting into Table Function Data can be inserted into tables referenced by [table functions](../../sql-reference/table-functions/index.md). @@ -175,7 +175,7 @@ Result: └─────┴───────────────────────┘ ``` -### Performance Considerations {#performance-considerations} +### Performance Considerations `INSERT` sorts the input data by primary key and splits them into partitions by a partition key. If you insert data into several partitions at once, it can significantly reduce the performance of the `INSERT` query. To avoid this: diff --git a/docs/en/sql-reference/statements/kill.md b/docs/en/sql-reference/statements/kill.md index 9fe207f24b2..d7b32680abf 100644 --- a/docs/en/sql-reference/statements/kill.md +++ b/docs/en/sql-reference/statements/kill.md @@ -3,11 +3,11 @@ sidebar_position: 46 sidebar_label: KILL --- -# KILL Statements {#kill-statements} +# KILL Statements There are two kinds of kill statements: to kill a query and to kill a mutation -## KILL QUERY {#kill-query-statement} +## KILL QUERY ``` sql KILL QUERY [ON CLUSTER cluster] @@ -42,7 +42,7 @@ The response contains the `kill_status` column, which can take the following val A test query (`TEST`) only checks the user’s rights and displays a list of queries to stop. 
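Typical invocations look like this (the `query_id` and user name are placeholders):

``` sql
-- Forcibly terminate all queries with the given query_id:
KILL QUERY WHERE query_id = '2-857d-4a57-9ee0-327da5d60a90';
-- Synchronously terminate all queries run by a particular user:
KILL QUERY WHERE user = 'username' SYNC;
```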
-## KILL MUTATION {#kill-mutation} +## KILL MUTATION ``` sql KILL MUTATION [ON CLUSTER cluster] diff --git a/docs/en/sql-reference/statements/misc.md b/docs/en/sql-reference/statements/misc.md index 2751c5296c2..6b239fff75f 100644 --- a/docs/en/sql-reference/statements/misc.md +++ b/docs/en/sql-reference/statements/misc.md @@ -3,7 +3,7 @@ toc_hidden: true sidebar_position: 70 --- -# Miscellaneous Statements {#miscellaneous-queries} +# Miscellaneous Statements - [ATTACH](../../sql-reference/statements/attach.md) - [CHECK TABLE](../../sql-reference/statements/check-table.md) diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md index 773284a1b30..969289b8070 100644 --- a/docs/en/sql-reference/statements/optimize.md +++ b/docs/en/sql-reference/statements/optimize.md @@ -3,7 +3,7 @@ sidebar_position: 47 sidebar_label: OPTIMIZE --- -# OPTIMIZE Statement {#misc_operations-optimize} +# OPTIMIZE Statement This query tries to initialize an unscheduled merge of data parts for tables. @@ -32,7 +32,7 @@ You can specify how long (in seconds) to wait for inactive replicas to execute ` If `replication_alter_partitions_sync` is set to `2` and some replicas are not active for more than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown. ::: -## BY expression {#by-expression} +## BY expression If you want to perform deduplication on a custom set of columns rather than on all of them, you can specify the list of columns explicitly or use any combination of [`*`](../../sql-reference/statements/select/index.md#asterisk), [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) or [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier) expressions. The explicitly written or implicitly expanded list of columns must include all columns specified in the row ordering expression (both primary and sorting keys) and the partitioning expression (partitioning key). diff --git a/docs/en/sql-reference/statements/rename.md b/docs/en/sql-reference/statements/rename.md index b3bea3e3c37..a7f766efb08 100644 --- a/docs/en/sql-reference/statements/rename.md +++ b/docs/en/sql-reference/statements/rename.md @@ -3,7 +3,7 @@ sidebar_position: 48 sidebar_label: RENAME --- -# RENAME Statement {#misc_operations-rename} +# RENAME Statement Renames databases, tables, or dictionaries. Several entities can be renamed in a single query. Note that the `RENAME` query with several entities is a non-atomic operation. To swap entity names atomically, use the [EXCHANGE](./exchange.md) statement. @@ -18,7 +18,7 @@ The `RENAME` query is supported by the [Atomic](../../engines/database-engines/a RENAME DATABASE|TABLE|DICTIONARY name TO new_name [,...] [ON CLUSTER cluster] ``` -## RENAME DATABASE {#misc_operations-rename_database} +## RENAME DATABASE Renames databases. @@ -28,7 +28,7 @@ Renames databases. RENAME DATABASE atomic_database1 TO atomic_database2 [,...] [ON CLUSTER cluster] ``` -## RENAME TABLE {#misc_operations-rename_table} +## RENAME TABLE Renames one or more tables. @@ -47,7 +47,7 @@ RENAME TABLE [db1.]name1 TO [db2.]name2 [,...] [ON CLUSTER cluster] RENAME TABLE table_A TO table_A_bak, table_B TO table_B_bak; ``` -## RENAME DICTIONARY {#rename_dictionary} +## RENAME DICTIONARY Renames one or several dictionaries. This query can be used to move dictionaries between databases.
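For example (hypothetical names):

``` sql
-- Move a dictionary from one database to another by renaming it.
RENAME DICTIONARY db1.dict TO db2.dict;
```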
diff --git a/docs/en/sql-reference/statements/revoke.md b/docs/en/sql-reference/statements/revoke.md index 1d2e86340f3..f3b13c2664a 100644 --- a/docs/en/sql-reference/statements/revoke.md +++ b/docs/en/sql-reference/statements/revoke.md @@ -3,11 +3,11 @@ sidebar_position: 39 sidebar_label: REVOKE --- -# REVOKE Statement {#revoke} +# REVOKE Statement Revokes privileges from users or roles. -## Syntax {#revoke-syntax} +## Syntax **Revoking privileges from users** @@ -21,15 +21,15 @@ REVOKE [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.t REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR] role [,...] FROM {user | role | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...] ``` -## Description {#revoke-description} +## Description To revoke a privilege, you can use a privilege of a wider scope than the one you plan to revoke. For example, if a user has the `SELECT (x,y)` privilege, an administrator can execute the `REVOKE SELECT(x,y) ...`, `REVOKE SELECT * ...`, or even `REVOKE ALL PRIVILEGES ...` query to revoke this privilege. -### Partial Revokes {#partial-revokes-dscr} +### Partial Revokes You can revoke a part of a privilege. For example, if a user has the `SELECT *.*` privilege, you can revoke the privilege to read data from some table or database. -## Examples {#revoke-example} +## Examples Grant the `john` user account the privilege to select from all databases except `accounts`: diff --git a/docs/en/sql-reference/statements/select/all.md b/docs/en/sql-reference/statements/select/all.md index 6b35678fd92..06a7bbff16a 100644 --- a/docs/en/sql-reference/statements/select/all.md +++ b/docs/en/sql-reference/statements/select/all.md @@ -2,7 +2,7 @@ sidebar_label: ALL --- -# ALL Clause {#select-all} +# ALL Clause If there are multiple matching rows in the table, then `ALL` returns all of them. `SELECT ALL` is identical to `SELECT` without `DISTINCT`. If both `ALL` and `DISTINCT` are specified, an exception will be thrown. diff --git a/docs/en/sql-reference/statements/select/array-join.md b/docs/en/sql-reference/statements/select/array-join.md index ea7a824ac35..d168f421609 100644 --- a/docs/en/sql-reference/statements/select/array-join.md +++ b/docs/en/sql-reference/statements/select/array-join.md @@ -2,7 +2,7 @@ sidebar_label: ARRAY JOIN --- -# ARRAY JOIN Clause {#select-array-join-clause} +# ARRAY JOIN Clause It is a common operation for tables that contain an array column to produce a new table that has a column with each individual array element of that initial column, while values of other columns are duplicated. This is the basic case of what the `ARRAY JOIN` clause does. @@ -25,7 +25,7 @@ Supported types of `ARRAY JOIN` are listed below: - `ARRAY JOIN` - In the base case, empty arrays are not included in the result of `JOIN`. - `LEFT ARRAY JOIN` - The result of `JOIN` contains rows with empty arrays. The value for an empty array is set to the default value for the array element type (usually 0, an empty string, or NULL). -## Basic ARRAY JOIN Examples {#basic-array-join-examples} +## Basic ARRAY JOIN Examples The examples below demonstrate the usage of the `ARRAY JOIN` and `LEFT ARRAY JOIN` clauses. Let’s create a table with an [Array](../../../sql-reference/data-types/array.md) type column and insert values into it: @@ -85,7 +85,7 @@ LEFT ARRAY JOIN arr; └─────────────┴─────┘ ``` -## Using Aliases {#using-aliases} +## Using Aliases An alias can be specified for an array in the `ARRAY JOIN` clause.
In this case, an array item can be accessed by this alias, but the array itself is accessed by the original name. Example: @@ -182,7 +182,7 @@ SETTINGS enable_unaligned_array_join = 1; └─────────┴─────────┴───┴───────────┘ ``` -## ARRAY JOIN with Nested Data Structure {#array-join-with-nested-data-structure} +## ARRAY JOIN with Nested Data Structure `ARRAY JOIN` also works with [nested data structures](../../../sql-reference/data-types/nested-data-structures/nested.md): @@ -295,6 +295,6 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; └───────┴─────┴─────┴─────────┴────────────┴─────┘ ``` -## Implementation Details {#implementation-details} +## Implementation Details The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the [WHERE](../../../sql-reference/statements/select/where.md)/[PREWHERE](../../../sql-reference/statements/select/prewhere.md) clause in a query, technically they can be performed in any order, unless result of `ARRAY JOIN` is used for filtering. The processing order is controlled by the query optimizer. diff --git a/docs/en/sql-reference/statements/select/distinct.md b/docs/en/sql-reference/statements/select/distinct.md index 898de4730ae..bb429dfbf4b 100644 --- a/docs/en/sql-reference/statements/select/distinct.md +++ b/docs/en/sql-reference/statements/select/distinct.md @@ -2,7 +2,7 @@ sidebar_label: DISTINCT --- -# DISTINCT Clause {#select-distinct} +# DISTINCT Clause If `SELECT DISTINCT` is specified, only unique rows will remain in a query result. Thus only a single row will remain out of all the sets of fully matching rows in the result. @@ -50,7 +50,7 @@ SELECT DISTINCT ON (a,b) * FROM t1; └───┴───┴───┘ ``` -## DISTINCT and ORDER BY {#distinct-orderby} +## DISTINCT and ORDER BY ClickHouse supports using the `DISTINCT` and `ORDER BY` clauses for different columns in one query. The `DISTINCT` clause is executed before the `ORDER BY` clause. @@ -96,11 +96,11 @@ Row `2, 4` was cut before sorting. Take this implementation specificity into account when programming queries. -## Null Processing {#null-processing} +## Null Processing `DISTINCT` works with [NULL](../../../sql-reference/syntax.md#null-literal) as if `NULL` were a specific value, and `NULL==NULL`. In other words, in the `DISTINCT` results, different combinations with `NULL` occur only once. It differs from `NULL` processing in most other contexts. -## Alternatives {#alternatives} +## Alternatives It is possible to obtain the same result by applying [GROUP BY](../../../sql-reference/statements/select/group-by.md) across the same set of values as specified as `SELECT` clause, without using any aggregate functions. But there are few differences from `GROUP BY` approach: diff --git a/docs/en/sql-reference/statements/select/except.md b/docs/en/sql-reference/statements/select/except.md index dcaefd67ca9..e8cf4283b47 100644 --- a/docs/en/sql-reference/statements/select/except.md +++ b/docs/en/sql-reference/statements/select/except.md @@ -2,7 +2,7 @@ sidebar_label: EXCEPT --- -# EXCEPT Clause {#except-clause} +# EXCEPT Clause The `EXCEPT` clause returns only those rows that result from the first query without the second. The queries must match the number of columns, order, and type. The result of `EXCEPT` can contain duplicate rows. 
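For instance, a small sketch with the `numbers` table function; the overlapping values produced by the second query are removed from the result of the first:

``` sql
SELECT number FROM numbers(1, 10)
EXCEPT
SELECT number FROM numbers(3, 6);
-- Returns 1, 2, 9, 10: the values 3..8 from the second query are excluded
```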
diff --git a/docs/en/sql-reference/statements/select/format.md b/docs/en/sql-reference/statements/select/format.md index a7936509ad5..d32770f04ce 100644 --- a/docs/en/sql-reference/statements/select/format.md +++ b/docs/en/sql-reference/statements/select/format.md @@ -2,16 +2,16 @@ sidebar_label: FORMAT --- -# FORMAT Clause {#format-clause} +# FORMAT Clause ClickHouse supports a wide range of [serialization formats](../../../interfaces/formats.md) that can be used on query results among other things. There are multiple ways to choose a format for `SELECT` output; one of them is to specify `FORMAT format` at the end of the query to get the resulting data in a specific format. A specific format might be used for convenience, for integration with other systems, or for a performance gain. -## Default Format {#default-format} +## Default Format If the `FORMAT` clause is omitted, the default format is used, which depends on both the settings and the interface used for accessing the ClickHouse server. For the [HTTP interface](../../../interfaces/http.md) and the [command-line client](../../../interfaces/cli.md) in batch mode, the default format is `TabSeparated`. For the command-line client in interactive mode, the default format is `PrettyCompact` (it produces compact human-readable tables). -## Implementation Details {#implementation-details} +## Implementation Details When using the command-line client, data is always passed over the network in an internal efficient format (`Native`). The client independently interprets the `FORMAT` clause of the query and formats the data itself (thus relieving the network and the server from the extra load). diff --git a/docs/en/sql-reference/statements/select/from.md b/docs/en/sql-reference/statements/select/from.md index 93f47c6c65b..f1fb04f6818 100644 --- a/docs/en/sql-reference/statements/select/from.md +++ b/docs/en/sql-reference/statements/select/from.md @@ -2,7 +2,7 @@ sidebar_label: FROM --- -# FROM Clause {#select-from} +# FROM Clause The `FROM` clause specifies the source to read data from: @@ -16,7 +16,7 @@ Subquery is another `SELECT` query that may be specified in parenthesis inside ` The `FROM` clause can contain multiple data sources, separated by commas, which is the equivalent of performing a [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them. -## FINAL Modifier {#select-from-final} +## FINAL Modifier When `FINAL` is specified, ClickHouse fully merges the data before returning the result and thus performs all data transformations that happen during merges for the given table engine. @@ -27,7 +27,7 @@ It is applicable when selecting data from tables that use the [MergeTree](../../ Now `SELECT` queries with `FINAL` are executed in parallel and slightly faster. But there are drawbacks (see below). The [max_final_threads](../../../operations/settings/settings.md#max-final-threads) setting limits the number of threads used. -### Drawbacks {#drawbacks} +### Drawbacks Queries that use `FINAL` are executed slightly slower than similar queries that do not, because: @@ -36,7 +36,7 @@ Queries that use `FINAL` are executed slightly slower than similar queries that **In most cases, avoid using `FINAL`.** The common approach is to use different queries that assume the background processes of the `MergeTree` engine haven’t happened yet and deal with it by applying aggregation (for example, to discard duplicates).
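As an illustration of that approach (the `events` table and its columns are hypothetical), duplicates left behind by not-yet-finished merges can be collapsed at read time with aggregation instead of `FINAL`:

``` sql
-- Assumes a hypothetical ReplacingMergeTree-style table events(id, value, version)
-- where several versions of the same id may not have been merged yet
SELECT
    id,
    argMax(value, version) AS latest_value
FROM events
GROUP BY id;
```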
-## Implementation Details {#implementation-details} +## Implementation Details If the `FROM` clause is omitted, data will be read from the `system.one` table. The `system.one` table contains exactly one row (this table fulfills the same purpose as the DUAL table found in other DBMSs). diff --git a/docs/en/sql-reference/statements/select/group-by.md b/docs/en/sql-reference/statements/select/group-by.md index 281920a1ea9..45230d0b3b1 100644 --- a/docs/en/sql-reference/statements/select/group-by.md +++ b/docs/en/sql-reference/statements/select/group-by.md @@ -2,7 +2,7 @@ sidebar_label: GROUP BY --- -# GROUP BY Clause {#select-group-by-clause} +# GROUP BY Clause `GROUP BY` clause switches the `SELECT` query into an aggregation mode, which works as follows: @@ -16,7 +16,7 @@ When you want to group data in the table by column numbers instead of column nam There’s an additional way to run aggregation over a table. If a query contains table columns only inside aggregate functions, the `GROUP BY clause` can be omitted, and aggregation by an empty set of keys is assumed. Such queries always return exactly one row. ::: -## NULL Processing {#null-processing} +## NULL Processing For grouping, ClickHouse interprets [NULL](../../../sql-reference/syntax.md#null-literal) as a value, and `NULL==NULL`. It differs from `NULL` processing in most other contexts. @@ -48,7 +48,7 @@ You can see that `GROUP BY` for `y = NULL` summed up `x`, as if `NULL` is this v If you pass several keys to `GROUP BY`, the result will give you all the combinations of the selection, as if `NULL` were a specific value. -## WITH ROLLUP Modifier {#with-rollup-modifier} +## WITH ROLLUP Modifier `WITH ROLLUP` modifier is used to calculate subtotals for the key expressions, based on their order in the `GROUP BY` list. The subtotals rows are added after the result table. @@ -110,7 +110,7 @@ As `GROUP BY` section has three key expressions, the result contains four tables └──────┴───────┴─────┴─────────┘ ``` -## WITH CUBE Modifier {#with-cube-modifier} +## WITH CUBE Modifier `WITH CUBE` modifier is used to calculate subtotals for every combination of the key expressions in the `GROUP BY` list. The subtotals rows are added after the result table. @@ -198,7 +198,7 @@ Columns, excluded from `GROUP BY`, are filled with zeros. ``` -## WITH TOTALS Modifier {#with-totals-modifier} +## WITH TOTALS Modifier If the `WITH TOTALS` modifier is specified, another row will be calculated. This row will have key columns containing default values (zeros or empty lines), and columns of aggregate functions with the values calculated across all the rows (the “total” values). @@ -215,7 +215,7 @@ totals is output in the results of `SELECT` queries, and is not output in `INSER `WITH TOTALS` can be run in different ways when [HAVING](../../../sql-reference/statements/select/having) is present. The behavior depends on the `totals_mode` setting. -### Configuring Totals Processing {#configuring-totals-processing} +### Configuring Totals Processing By default, `totals_mode = 'before_having'`. In this case, ‘totals’ is calculated across all rows, including the ones that do not pass through HAVING and `max_rows_to_group_by`. @@ -233,7 +233,7 @@ If `max_rows_to_group_by` and `group_by_overflow_mode = 'any'` are not used, all You can use `WITH TOTALS` in subqueries, including subqueries in the [JOIN](../../../sql-reference/statements/select/join.md) clause (in this case, the respective total values are combined). 
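A small sketch of the modifier (the `hits` table is hypothetical); the extra totals row aggregates across all rows, independent of the grouping:

``` sql
SELECT
    domain,
    count() AS hits_count
FROM hits
GROUP BY domain
WITH TOTALS;
```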
-## Examples {#examples} +## Examples Example: @@ -260,15 +260,15 @@ GROUP BY domain For every different key value encountered, `GROUP BY` calculates a set of aggregate function values. -## Implementation Details {#implementation-details} +## Implementation Details Aggregation is one of the most important features of a column-oriented DBMS, and thus its implementation is one of the most heavily optimized parts of ClickHouse. By default, aggregation is done in memory using a hash-table. It has 40+ specializations that are chosen automatically depending on “grouping key” data types. -### GROUP BY Optimization Depending on Table Sorting Key {#aggregation-in-order} +### GROUP BY Optimization Depending on Table Sorting Key Aggregation can be performed more effectively if a table is sorted by some key and the `GROUP BY` expression contains at least a prefix of the sorting key or injective functions. In this case, when a new key is read from the table, the intermediate result of aggregation can be finalized and sent to the client. This behaviour is switched on by the [optimize_aggregation_in_order](../../../operations/settings/settings.md#optimize_aggregation_in_order) setting. Such optimization reduces memory usage during aggregation, but in some cases may slow down the query execution. -### GROUP BY in External Memory {#select-group-by-in-external-memory} +### GROUP BY in External Memory You can enable dumping temporary data to the disk to restrict memory usage during `GROUP BY`. The [max_bytes_before_external_group_by](../../../operations/settings/settings.md#settings-max_bytes_before_external_group_by) setting determines the threshold RAM consumption for dumping `GROUP BY` temporary data to the file system. If set to 0 (the default), it is disabled. diff --git a/docs/en/sql-reference/statements/select/having.md b/docs/en/sql-reference/statements/select/having.md index 9aee0cf4d63..a5226d6ccab 100644 --- a/docs/en/sql-reference/statements/select/having.md +++ b/docs/en/sql-reference/statements/select/having.md @@ -2,12 +2,12 @@ sidebar_label: HAVING --- -# HAVING Clause {#having-clause} +# HAVING Clause Allows filtering the aggregation results produced by [GROUP BY](../../../sql-reference/statements/select/group-by.md). It is similar to the [WHERE](../../../sql-reference/statements/select/where.md) clause, but the difference is that `WHERE` is performed before aggregation, while `HAVING` is performed after it. It is possible to reference aggregation results from the `SELECT` clause in the `HAVING` clause by their alias. Alternatively, the `HAVING` clause can filter on the results of additional aggregates that are not returned in the query results. -## Limitations {#limitations} +## Limitations `HAVING` can’t be used if aggregation is not performed. Use `WHERE` instead. diff --git a/docs/en/sql-reference/statements/select/index.md b/docs/en/sql-reference/statements/select/index.md index 70e381e2c51..b05fb2ba334 100644 --- a/docs/en/sql-reference/statements/select/index.md +++ b/docs/en/sql-reference/statements/select/index.md @@ -7,7 +7,7 @@ sidebar_label: SELECT `SELECT` queries perform data retrieval. By default, the requested data is returned to the client, while in conjunction with [INSERT INTO](../../../sql-reference/statements/insert-into.md) it can be forwarded to a different table.
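For example (with hypothetical table names), the same `SELECT` can either return rows to the client or feed another table:

``` sql
-- Returned to the client
SELECT id, value FROM source_table;

-- Forwarded to a different table instead
INSERT INTO target_table SELECT id, value FROM source_table;
```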
-## Syntax {#syntax} +## Syntax ``` sql [WITH expr_list|(subquery)] @@ -52,14 +52,14 @@ Specifics of each optional clause are covered in separate sections, which are li - [INTO OUTFILE clause](../../../sql-reference/statements/select/into-outfile.md) - [FORMAT clause](../../../sql-reference/statements/select/format.md) -## SELECT Clause {#select-clause} +## SELECT Clause [Expressions](../../../sql-reference/syntax.md#syntax-expressions) specified in the `SELECT` clause are calculated after all the operations in the clauses described above are finished. These expressions work as if they apply to separate rows in the result. If expressions in the `SELECT` clause contain aggregate functions, then ClickHouse processes aggregate functions and expressions used as their arguments during the [GROUP BY](../../../sql-reference/statements/select/group-by.md) aggregation. If you want to include all columns in the result, use the asterisk (`*`) symbol. For example, `SELECT * FROM ...`. -### COLUMNS expression {#columns-expression} +### COLUMNS expression To match some columns in the result with a [re2](https://en.wikipedia.org/wiki/RE2_(software)) regular expression, you can use the `COLUMNS` expression. @@ -118,7 +118,7 @@ In this example, `COLUMNS('a')` returns two columns: `aa` and `ab`. `COLUMNS('c' Columns that matched the `COLUMNS` expression can have different data types. If `COLUMNS` does not match any columns and is the only expression in `SELECT`, ClickHouse throws an exception. -### Asterisk {#asterisk} +### Asterisk You can put an asterisk in any part of a query instead of an expression. When the query is analyzed, the asterisk is expanded to a list of all table columns (excluding the `MATERIALIZED` and `ALIAS` columns). There are only a few cases when using an asterisk is justified: @@ -130,7 +130,7 @@ You can put an asterisk in any part of a query instead of an expression. When th In all other cases, we do not recommend using the asterisk, since it only gives you the drawbacks of a columnar DBMS instead of the advantages. In other words using the asterisk is not recommended. -### Extreme Values {#extreme-values} +### Extreme Values In addition to results, you can also get minimum and maximum values for the results columns. To do this, set the **extremes** setting to 1. Minimums and maximums are calculated for numeric types, dates, and dates with times. For other columns, the default values are output. @@ -140,13 +140,13 @@ In `JSON*` formats, the extreme values are output in a separate ‘extremes’ f Extreme values are calculated for rows before `LIMIT`, but after `LIMIT BY`. However, when using `LIMIT offset, size`, the rows before `offset` are included in `extremes`. In stream requests, the result may also include a small number of rows that passed through `LIMIT`. -### Notes {#notes} +### Notes You can use synonyms (`AS` aliases) in any part of a query. The `GROUP BY`, `ORDER BY`, and `LIMIT BY` clauses can support positional arguments. To enable this, switch on the [enable_positional_arguments](../../../operations/settings/settings.md#enable-positional-arguments) setting. Then, for example, `ORDER BY 1,2` will be sorting rows in the table on the first and then the second column. -## Implementation Details {#implementation-details} +## Implementation Details If the query omits the `DISTINCT`, `GROUP BY` and `ORDER BY` clauses and the `IN` and `JOIN` subqueries, the query will be completely stream processed, using O(1) amount of RAM. 
Otherwise, the query might consume a lot of RAM if the appropriate restrictions are not specified: @@ -164,11 +164,11 @@ If the query omits the `DISTINCT`, `GROUP BY` and `ORDER BY` clauses and the `IN For more information, see the section “Settings”. It is possible to use external sorting (saving temporary tables to a disk) and external aggregation. -## SELECT modifiers {#select-modifiers} +## SELECT modifiers You can use the following modifiers in `SELECT` queries. -### APPLY {#apply-modifier} +### APPLY Allows you to invoke some function for each row returned by an outer table expression of a query. @@ -192,7 +192,7 @@ SELECT * APPLY(sum) FROM columns_transformers; └────────┴────────┴────────┘ ``` -### EXCEPT {#except-modifier} +### EXCEPT Specifies the names of one or more columns to exclude from the result. All matching column names are omitted from the output. @@ -215,7 +215,7 @@ SELECT * EXCEPT (i) from columns_transformers; └────┴─────┘ ``` -### REPLACE {#replace-modifier} +### REPLACE Specifies one or more [expression aliases](../../../sql-reference/syntax.md#syntax-expression_aliases). Each alias must match a column name from the `SELECT *` statement. In the output column list, the column that matches the alias is replaced by the expression in that `REPLACE`. @@ -240,7 +240,7 @@ SELECT * REPLACE(i + 1 AS i) from columns_transformers; └─────┴────┴─────┘ ``` -### Modifier Combinations {#modifier-combinations} +### Modifier Combinations You can use each modifier separately or combine them. @@ -270,7 +270,7 @@ SELECT * REPLACE(i + 1 AS i) EXCEPT (j) APPLY(sum) from columns_transformers; └─────────────────┴────────┘ ``` -## SETTINGS in SELECT Query {#settings-in-select} +## SETTINGS in SELECT Query You can specify the necessary settings right in the `SELECT` query. The setting value is applied only to this query and is reset to default or previous value after the query is executed. diff --git a/docs/en/sql-reference/statements/select/intersect.md b/docs/en/sql-reference/statements/select/intersect.md index ef9868daebb..55204b7b0d8 100644 --- a/docs/en/sql-reference/statements/select/intersect.md +++ b/docs/en/sql-reference/statements/select/intersect.md @@ -2,7 +2,7 @@ sidebar_label: INTERSECT --- -# INTERSECT Clause {#intersect-clause} +# INTERSECT Clause The `INTERSECT` clause returns only those rows that result from both the first and the second queries. The queries must match the number of columns, order, and type. The result of `INTERSECT` can contain duplicate rows. diff --git a/docs/en/sql-reference/statements/select/into-outfile.md b/docs/en/sql-reference/statements/select/into-outfile.md index b37285cb0cc..db1ed2551a7 100644 --- a/docs/en/sql-reference/statements/select/into-outfile.md +++ b/docs/en/sql-reference/statements/select/into-outfile.md @@ -16,7 +16,7 @@ SELECT INTO OUTFILE file_name [COMPRESSION type] `file_name` and `type` are string literals. Supported compression types are: `'none'`, `'gzip'`, `'deflate'`, `'br'`, `'xz'`, `'zstd'`, `'lz4'`, `'bz2'`. -## Implementation Details {#implementation-details} +## Implementation Details - This functionality is available in the [command-line client](../../../interfaces/cli.md) and [clickhouse-local](../../../operations/utilities/clickhouse-local.md). Thus a query sent via [HTTP interface](../../../interfaces/http.md) will fail. - The query will fail if a file with the same file name already exists. 
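A minimal sketch combining both clauses (the output file name is arbitrary), runnable from the command-line client:

``` sql
SELECT number
FROM numbers(5)
INTO OUTFILE 'numbers.tsv.gz' COMPRESSION 'gzip'
FORMAT TabSeparated;
```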
diff --git a/docs/en/sql-reference/statements/select/join.md b/docs/en/sql-reference/statements/select/join.md index 0cf58d0b90f..b029cf4bac8 100644 --- a/docs/en/sql-reference/statements/select/join.md +++ b/docs/en/sql-reference/statements/select/join.md @@ -2,7 +2,7 @@ sidebar_label: JOIN --- -# JOIN Clause {#select-join} +# JOIN Clause Join produces a new table by combining columns from one or multiple tables by using values common to each. It is a common operation in databases with SQL support, which corresponds to [relational algebra](https://en.wikipedia.org/wiki/Relational_algebra#Joins_and_join-like_operators) join. The special case of a single-table join is often referred to as a “self-join”. @@ -17,7 +17,7 @@ FROM Expressions from the `ON` clause and columns from the `USING` clause are called “join keys”. Unless otherwise stated, join produces a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from rows with matching “join keys”, which might produce results with many more rows than the source tables. -## Supported Types of JOIN {#select-join-types} +## Supported Types of JOIN All standard [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) types are supported: @@ -40,7 +40,7 @@ Additional join types available in ClickHouse: When [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) is set to `partial_merge`, `RIGHT JOIN` and `FULL JOIN` are supported only with `ALL` strictness (`SEMI`, `ANTI`, `ANY`, and `ASOF` are not supported). ::: -## Settings {#join-settings} +## Settings The default join type can be overridden using the [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting. @@ -56,7 +56,7 @@ The behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_ - [join_on_disk_max_files_to_merge](../../../operations/settings/settings.md#join_on_disk_max_files_to_merge) - [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) -## ON Section Conditions {#on-section-conditions} +## ON Section Conditions An `ON` section can contain several conditions combined using the `AND` and `OR` operators. Conditions specifying join keys must refer to both the left and right tables and must use the equality operator. Other conditions may use other logical operators, but they must refer to either the left or the right table of a query. @@ -152,7 +152,7 @@ Result: │ 4 │ -4 │ 4 │ └───┴────┴─────┘ ``` -## ASOF JOIN Usage {#asof-join-usage} +## ASOF JOIN Usage `ASOF JOIN` is useful when you need to join records that have no exact match. @@ -203,7 +203,7 @@ For example, consider the following tables: `ASOF` join is **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine. ::: -## Distributed JOIN {#global-join} +## Distributed JOIN There are two ways to execute a join involving distributed tables: @@ -212,7 +212,7 @@ There are two ways to execute join involving distributed tables: Be careful when using `GLOBAL`. For more information, see the [Distributed subqueries](../../../sql-reference/operators/in.md#select-distributed-subqueries) section. -## Implicit Type Conversion {#implicit-type-conversion} +## Implicit Type Conversion `INNER JOIN`, `LEFT JOIN`, `RIGHT JOIN`, and `FULL JOIN` queries support implicit type conversion for "join keys".
However, the query cannot be executed if join keys from the left and the right tables cannot be converted to a single type (for example, there is no data type that can hold all values from both `UInt64` and `Int64`, or `String` and `Int32`). @@ -248,21 +248,21 @@ returns the set: └────┴──────┴───────────────┴─────────────────┘ ``` -## Usage Recommendations {#usage-recommendations} +## Usage Recommendations -### Processing of Empty or NULL Cells {#processing-of-empty-or-null-cells} +### Processing of Empty or NULL Cells While joining tables, empty cells may appear. The [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls) setting defines how ClickHouse fills these cells. If the `JOIN` keys are [Nullable](../../../sql-reference/data-types/nullable.md) fields, the rows where at least one of the keys has the value [NULL](../../../sql-reference/syntax.md#null-literal) are not joined. -### Syntax {#syntax} +### Syntax The columns specified in `USING` must have the same names in both subqueries, and the other columns must be named differently. You can use aliases to change the names of columns in subqueries. The `USING` clause specifies one or more columns to join, which establishes the equality of these columns. The list of columns is set without brackets. More complex join conditions are not supported. -### Syntax Limitations {#syntax-limitations} +### Syntax Limitations For multiple `JOIN` clauses in a single `SELECT` query: @@ -273,7 +273,7 @@ For `ON`, `WHERE`, and `GROUP BY` clauses: - Arbitrary expressions cannot be used in `ON`, `WHERE`, and `GROUP BY` clauses, but you can define an expression in a `SELECT` clause and then use it in these clauses via an alias. -### Performance {#performance} +### Performance When running a `JOIN`, there is no optimization of the order of execution in relation to other stages of the query. The join (a search in the right table) is run before filtering in `WHERE` and before aggregation. @@ -283,7 +283,7 @@ In some cases, it is more efficient to use [IN](../../../sql-reference/operators If you need a `JOIN` for joining with dimension tables (these are relatively small tables that contain dimension properties, such as names for advertising campaigns), a `JOIN` might not be very convenient due to the fact that the right table is re-accessed for every query. For such cases, there is an “external dictionaries” feature that you should use instead of `JOIN`. For more information, see the [External dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section. -### Memory Limitations {#memory-limitations} +### Memory Limitations By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the right table and creates a hash table for it in RAM. If `join_algorithm = 'auto'` is enabled, then after some threshold of memory consumption, ClickHouse falls back to the [merge](https://en.wikipedia.org/wiki/Sort-merge_join) join algorithm. For a description of the `JOIN` algorithms, see the [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) setting. @@ -294,7 +294,7 @@ If you need to restrict `JOIN` operation memory consumption use the following se When any of these limits is reached, ClickHouse acts as the [join_overflow_mode](../../../operations/settings/query-complexity.md#settings-join_overflow_mode) setting instructs.
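For instance, a sketch of capping the hash table built for the right table and choosing the fallback behavior (the tables and the threshold value are illustrative):

``` sql
SELECT t1.id, t2.value
FROM t1
INNER JOIN t2 ON t1.id = t2.id
-- 'break' returns a partial result once the limit is reached instead of raising an error
SETTINGS max_rows_in_join = 1000000, join_overflow_mode = 'break';
```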
-## Examples {#examples} +## Examples Example: diff --git a/docs/en/sql-reference/statements/select/limit-by.md b/docs/en/sql-reference/statements/select/limit-by.md index 913b7b40338..0433ea946cc 100644 --- a/docs/en/sql-reference/statements/select/limit-by.md +++ b/docs/en/sql-reference/statements/select/limit-by.md @@ -2,7 +2,7 @@ sidebar_label: LIMIT BY --- -# LIMIT BY Clause {#limit-by-clause} +# LIMIT BY Clause A query with the `LIMIT n BY expressions` clause selects the first `n` rows for each distinct value of `expressions`. The key for `LIMIT BY` can contain any number of [expressions](../../../sql-reference/syntax.md#syntax-expressions). @@ -20,7 +20,7 @@ During query processing, ClickHouse selects data ordered by sorting key. The sor If you want to use column numbers instead of column names in the `LIMIT BY` clause, enable the setting [enable_positional_arguments](../../../operations/settings/settings.md#enable-positional-arguments). -## Examples {#examples} +## Examples Sample table: diff --git a/docs/en/sql-reference/statements/select/limit.md b/docs/en/sql-reference/statements/select/limit.md index 6b1c90041fe..d7eb97c23f2 100644 --- a/docs/en/sql-reference/statements/select/limit.md +++ b/docs/en/sql-reference/statements/select/limit.md @@ -2,7 +2,7 @@ sidebar_label: LIMIT --- -# LIMIT Clause {#limit-clause} +# LIMIT Clause `LIMIT m` allows selecting the first `m` rows from the result. @@ -16,7 +16,7 @@ If there is no [ORDER BY](../../../sql-reference/statements/select/order-by.md) The number of rows in the result set can also depend on the [limit](../../../operations/settings/settings.md#limit) setting. ::: -## LIMIT … WITH TIES Modifier {#limit-with-ties} +## LIMIT … WITH TIES Modifier When you set the `WITH TIES` modifier for `LIMIT n[,m]` and specify `ORDER BY expr_list`, the result contains the first `n` (or `n,m`) rows and all rows whose `ORDER BY` field values equal those of the row at position `n` (for `LIMIT n`) or `m` (for `LIMIT n,m`). diff --git a/docs/en/sql-reference/statements/select/offset.md b/docs/en/sql-reference/statements/select/offset.md index e120845dbc6..ca9a438ec1f 100644 --- a/docs/en/sql-reference/statements/select/offset.md +++ b/docs/en/sql-reference/statements/select/offset.md @@ -2,7 +2,7 @@ sidebar_label: OFFSET --- -# OFFSET FETCH Clause {#offset-fetch} +# OFFSET FETCH Clause `OFFSET` and `FETCH` allow you to retrieve data in portions. They specify a block of rows that you want to get with a single query. @@ -38,7 +38,7 @@ According to the standard, the `OFFSET` clause must come before the `FETCH` clau The real offset can also depend on the [offset](../../../operations/settings/settings.md#offset) setting. ::: -## Examples {#examples} +## Examples Input table: diff --git a/docs/en/sql-reference/statements/select/order-by.md b/docs/en/sql-reference/statements/select/order-by.md index d69cf7f5aaa..0411147f18c 100644 --- a/docs/en/sql-reference/statements/select/order-by.md +++ b/docs/en/sql-reference/statements/select/order-by.md @@ -2,7 +2,7 @@ sidebar_label: ORDER BY --- -# ORDER BY Clause {#select-order-by} +# ORDER BY Clause The `ORDER BY` clause contains a list of expressions, each of which can be attributed with a `DESC` (descending) or `ASC` (ascending) modifier that determines the sorting direction. If the direction is not specified, `ASC` is assumed, so it’s usually omitted. The sorting direction applies to a single expression, not to the entire list. Example: `ORDER BY Visits DESC, SearchPhrase`.
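As a sketch of that example (the `hits` table is hypothetical), the per-expression direction looks like this:

``` sql
-- Visits is sorted descending; ties are broken by SearchPhrase ascending (the default)
SELECT Visits, SearchPhrase
FROM hits
ORDER BY Visits DESC, SearchPhrase;
```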
@@ -11,14 +11,14 @@ If you want to sort by column numbers instead of column names, enable the settin Rows that have identical values for the list of sorting expressions are output in an arbitrary order, which can also be non-deterministic (different each time). If the ORDER BY clause is omitted, the order of the rows is also undefined, and may be non-deterministic as well. -## Sorting of Special Values {#sorting-of-special-values} +## Sorting of Special Values There are two approaches to `NaN` and `NULL` sorting order: - By default or with the `NULLS LAST` modifier: first the values, then `NaN`, then `NULL`. - With the `NULLS FIRST` modifier: first `NULL`, then `NaN`, then other values. -### Example {#example} +### Example For the table @@ -56,7 +56,7 @@ Run the query `SELECT * FROM t_null_nan ORDER BY y NULLS FIRST` to get: When floating point numbers are sorted, NaNs are separate from the other values. Regardless of the sorting order, NaNs come at the end. In other words, for ascending sorting they are placed as if they are larger than all the other numbers, while for descending sorting they are placed as if they are smaller than the rest. -## Collation Support {#collation-support} +## Collation Support For sorting by [String](../../../sql-reference/data-types/string.md) values, you can specify collation (comparison). Example: `ORDER BY SearchPhrase COLLATE 'tr'` - for sorting by keyword in ascending order, using the Turkish alphabet, case insensitive, assuming that strings are UTF-8 encoded. `COLLATE` can be specified or not for each expression in ORDER BY independently. If `ASC` or `DESC` is specified, `COLLATE` is specified after it. When using `COLLATE`, sorting is always case-insensitive. @@ -64,7 +64,7 @@ Collate is supported in [LowCardinality](../../../sql-reference/data-types/lowca We only recommend using `COLLATE` for final sorting of a small number of rows, since sorting with `COLLATE` is less efficient than normal sorting by bytes. -## Collation Examples {#collation-examples} +## Collation Examples Example only with [String](../../../sql-reference/data-types/string.md) values: @@ -240,7 +240,7 @@ Result: └───┴─────────┘ ``` -## Implementation Details {#implementation-details} +## Implementation Details Less RAM is used if a small enough [LIMIT](../../../sql-reference/statements/select/limit.md) is specified in addition to `ORDER BY`. Otherwise, the amount of memory spent is proportional to the volume of data for sorting. For distributed query processing, if [GROUP BY](../../../sql-reference/statements/select/group-by.md) is omitted, sorting is partially done on remote servers, and the results are merged on the requestor server. This means that for distributed sorting, the volume of data to sort can be greater than the amount of memory on a single server. @@ -250,7 +250,7 @@ Running a query may use more memory than `max_bytes_before_external_sort`. For t External sorting works much less effectively than sorting in RAM. -## Optimization of Data Reading {#optimize_read_in_order} +## Optimization of Data Reading If `ORDER BY` expression has a prefix that coincides with the table sorting key, you can optimize the query by using the [optimize_read_in_order](../../../operations/settings/settings.md#optimize_read_in_order) setting. @@ -269,7 +269,7 @@ Optimization is supported in the following table engines: In `MaterializedView`-engine tables the optimization works with views like `SELECT ... FROM merge_tree_table ORDER BY pk`. But it is not supported in the queries like `SELECT ... 
FROM view ORDER BY pk` if the view query does not have the `ORDER BY` clause. -## ORDER BY Expr WITH FILL Modifier {#orderby-with-fill} +## ORDER BY Expr WITH FILL Modifier This modifier can also be combined with the [LIMIT … WITH TIES modifier](../../../sql-reference/statements/select/limit.md#limit-with-ties). diff --git a/docs/en/sql-reference/statements/select/prewhere.md b/docs/en/sql-reference/statements/select/prewhere.md index c3aa2e14384..49aa6ea894e 100644 --- a/docs/en/sql-reference/statements/select/prewhere.md +++ b/docs/en/sql-reference/statements/select/prewhere.md @@ -2,13 +2,13 @@ sidebar_label: PREWHERE --- -# PREWHERE Clause {#prewhere-clause} +# PREWHERE Clause Prewhere is an optimization to apply filtering more efficiently. It is enabled by default even if the `PREWHERE` clause is not specified explicitly. It works by automatically moving part of the [WHERE](../../../sql-reference/statements/select/where.md) condition to the prewhere stage. The role of the `PREWHERE` clause is only to control this optimization if you think you know how to do it better than it happens by default. With the prewhere optimization, at first only the columns necessary for executing the prewhere expression are read. Then the other columns are read that are needed for running the rest of the query, but only those blocks where the prewhere expression is `true` at least for some rows. If there are many blocks where the prewhere expression is `false` for all rows, and prewhere needs fewer columns than other parts of the query, this often allows reading much less data from disk for query execution. -## Controlling Prewhere Manually {#controlling-prewhere-manually} +## Controlling Prewhere Manually The clause has the same meaning as the `WHERE` clause. The difference is in which data is read from the table. Manually controlling `PREWHERE` makes sense for filtering conditions that are used by a minority of the columns in the query but provide strong data filtration. This reduces the volume of data to read. @@ -22,6 +22,6 @@ If query has [FINAL](from.md#select-from-final) modifier, the `PREWHERE` optimiz The `PREWHERE` section is executed before `FINAL`, so the results of `FROM ... FINAL` queries may be skewed when using `PREWHERE` with fields not in the `ORDER BY` section of a table. ::: -## Limitations {#limitations} +## Limitations `PREWHERE` is only supported by tables from the [*MergeTree](../../../engines/table-engines/mergetree-family/index.md) family. diff --git a/docs/en/sql-reference/statements/select/sample.md b/docs/en/sql-reference/statements/select/sample.md index 3673a49a9e9..85c21f5e271 100644 --- a/docs/en/sql-reference/statements/select/sample.md +++ b/docs/en/sql-reference/statements/select/sample.md @@ -2,7 +2,7 @@ sidebar_label: SAMPLE --- -# SAMPLE Clause {#select-sample-clause} +# SAMPLE Clause The `SAMPLE` clause allows for approximated `SELECT` query processing. @@ -33,7 +33,7 @@ For the `SAMPLE` clause the following syntax is supported: | `SAMPLE k OFFSET m` | Here `k` and `m` are numbers from 0 to 1. The query is executed on a sample of `k` fraction of the data. The data used for the sample is offset by `m` fraction. [Read more](#select-sample-offset) | -## SAMPLE K {#select-sample-k} +## SAMPLE K Here `k` is a number from 0 to 1 (both fractional and decimal notations are supported). For example, `SAMPLE 1/2` or `SAMPLE 0.5`. @@ -53,7 +53,7 @@ ORDER BY PageViews DESC LIMIT 1000 In this example, the query is executed on a sample from 0.1 (10%) of data.
Values of aggregate functions are not corrected automatically, so to get an approximate result, the value `count()` is manually multiplied by 10. -## SAMPLE N {#select-sample-n} +## SAMPLE N Here `n` is a sufficiently large integer. For example, `SAMPLE 10000000`. @@ -89,7 +89,7 @@ FROM visits SAMPLE 10000000 ``` -## SAMPLE K OFFSET M {#select-sample-offset} +## SAMPLE K OFFSET M Here `k` and `m` are numbers from 0 to 1. Examples are shown below. diff --git a/docs/en/sql-reference/statements/select/union.md b/docs/en/sql-reference/statements/select/union.md index 8a1c7a770c9..ea8c8bcb1e6 100644 --- a/docs/en/sql-reference/statements/select/union.md +++ b/docs/en/sql-reference/statements/select/union.md @@ -2,7 +2,7 @@ sidebar_label: UNION --- -# UNION Clause {#union-clause} +# UNION Clause You can use `UNION` by explicitly specifying `UNION ALL` or `UNION DISTINCT`. diff --git a/docs/en/sql-reference/statements/select/where.md b/docs/en/sql-reference/statements/select/where.md index c68f9d39d09..e010c2dc913 100644 --- a/docs/en/sql-reference/statements/select/where.md +++ b/docs/en/sql-reference/statements/select/where.md @@ -2,7 +2,7 @@ sidebar_label: WHERE --- -# WHERE Clause {#select-where} +# WHERE Clause The `WHERE` clause allows filtering the data that comes from the [FROM](../../../sql-reference/statements/select/from.md) clause of `SELECT`. diff --git a/docs/en/sql-reference/statements/select/with.md b/docs/en/sql-reference/statements/select/with.md index 39fcb752980..4630e46cdec 100644 --- a/docs/en/sql-reference/statements/select/with.md +++ b/docs/en/sql-reference/statements/select/with.md @@ -2,7 +2,7 @@ sidebar_label: WITH --- -# WITH Clause {#with-clause} +# WITH Clause ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)); that is, it allows you to use the results of a `WITH` clause in the rest of the `SELECT` query. Named subqueries can be included in the current and child query contexts in places where table objects are allowed. Recursion is prevented by hiding the current-level CTEs from the `WITH` expression. @@ -16,7 +16,7 @@ or WITH <identifier> AS <subquery expression> ``` -## Examples {#examples} +## Examples **Example 1:** Using a constant expression as a “variable” diff --git a/docs/en/sql-reference/statements/set-role.md b/docs/en/sql-reference/statements/set-role.md index cac7ca28b92..67b85fee9a2 100644 --- a/docs/en/sql-reference/statements/set-role.md +++ b/docs/en/sql-reference/statements/set-role.md @@ -3,7 +3,7 @@ sidebar_position: 51 sidebar_label: SET ROLE --- -# SET ROLE Statement {#set-role-statement} +# SET ROLE Statement Activates roles for the current user. @@ -11,7 +11,7 @@ SET ROLE {DEFAULT | NONE | role [,...] | ALL | ALL EXCEPT role [,...]} ``` -## SET DEFAULT ROLE {#set-default-role-statement} +## SET DEFAULT ROLE Sets default roles for a user. @@ -21,7 +21,7 @@ Default roles are automatically activated at user login. You can set as default SET DEFAULT ROLE {NONE | role [,...] | ALL | ALL EXCEPT role [,...]} TO {user|CURRENT_USER} [,...]
``` -## Examples {#set-default-role-examples} +## Examples Set multiple default roles to a user: diff --git a/docs/en/sql-reference/statements/set.md b/docs/en/sql-reference/statements/set.md index d2a1d30c797..aa95eacd071 100644 --- a/docs/en/sql-reference/statements/set.md +++ b/docs/en/sql-reference/statements/set.md @@ -3,7 +3,7 @@ sidebar_position: 50 sidebar_label: SET --- -# SET Statement {#query-set} +# SET Statement ``` sql SET param = value diff --git a/docs/en/sql-reference/statements/show.md b/docs/en/sql-reference/statements/show.md index 75c5c121946..6071c129c97 100644 --- a/docs/en/sql-reference/statements/show.md +++ b/docs/en/sql-reference/statements/show.md @@ -3,9 +3,9 @@ sidebar_position: 37 sidebar_label: SHOW --- -# SHOW Statements {#show-queries} +# SHOW Statements -## SHOW CREATE TABLE {#show-create-table} +## SHOW CREATE TABLE ``` sql SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY|VIEW] [db.]table|view [INTO OUTFILE filename] [FORMAT format] @@ -15,7 +15,7 @@ Returns a single `String`-type ‘statement’ column, which contains a single v Note that if you use this statement to get `CREATE` query of system tables, you will get a *fake* query, which only declares table structure, but cannot be used to create table. -## SHOW DATABASES {#show-databases} +## SHOW DATABASES Prints a list of all databases. @@ -29,7 +29,7 @@ This statement is identical to the query: SELECT name FROM system.databases [WHERE name LIKE | ILIKE | NOT LIKE ''] [LIMIT ] [INTO OUTFILE filename] [FORMAT format] ``` -### Examples {#examples} +### Examples Getting database names, containing the symbols sequence 'de' in their names: @@ -91,11 +91,11 @@ Result: └────────────────────────────────┘ ``` -### See Also {#see-also} +### See Also - [CREATE DATABASE](https://clickhouse.com/docs/en/sql-reference/statements/create/database/#query-language-create-database) -## SHOW PROCESSLIST {#show-processlist} +## SHOW PROCESSLIST ``` sql SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format] @@ -111,7 +111,7 @@ Tip (execute in the console): $ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'" ``` -## SHOW TABLES {#show-tables} +## SHOW TABLES Displays a list of tables. @@ -127,7 +127,7 @@ This statement is identical to the query: SELECT name FROM system.tables [WHERE name LIKE | ILIKE | NOT LIKE ''] [LIMIT ] [INTO OUTFILE ] [FORMAT ] ``` -### Examples {#examples} +### Examples Getting table names, containing the symbols sequence 'user' in their names: @@ -190,12 +190,12 @@ Result: └────────────────────────────────┘ ``` -### See Also {#see-also} +### See Also - [Create Tables](https://clickhouse.com/docs/en/getting-started/tutorial/#create-tables) - [SHOW CREATE TABLE](https://clickhouse.com/docs/en/sql-reference/statements/show/#show-create-table) -## SHOW DICTIONARIES {#show-dictionaries} +## SHOW DICTIONARIES Displays a list of [external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). @@ -226,11 +226,11 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2 └──────────────┘ ``` -## SHOW GRANTS {#show-grants-statement} +## SHOW GRANTS Shows privileges for a user. -### Syntax {#show-grants-syntax} +### Syntax ``` sql SHOW GRANTS [FOR user1 [, user2 ...]] @@ -238,126 +238,126 @@ SHOW GRANTS [FOR user1 [, user2 ...]] If user is not specified, the query returns privileges for the current user. -## SHOW CREATE USER {#show-create-user-statement} +## SHOW CREATE USER Shows parameters that were used at a [user creation](../../sql-reference/statements/create/user.md). 
`SHOW CREATE USER` does not output user passwords. -### Syntax {#show-create-user-syntax} +### Syntax ``` sql SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER] ``` -## SHOW CREATE ROLE {#show-create-role-statement} +## SHOW CREATE ROLE Shows the parameters that were used at [role creation](../../sql-reference/statements/create/role.md). -### Syntax {#show-create-role-syntax} +### Syntax ``` sql SHOW CREATE ROLE name1 [, name2 ...] ``` -## SHOW CREATE ROW POLICY {#show-create-row-policy-statement} +## SHOW CREATE ROW POLICY Shows the parameters that were used at [row policy creation](../../sql-reference/statements/create/row-policy.md). -### Syntax {#show-create-row-policy-syntax} +### Syntax ``` sql SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...] ``` -## SHOW CREATE QUOTA {#show-create-quota-statement} +## SHOW CREATE QUOTA Shows the parameters that were used at [quota creation](../../sql-reference/statements/create/quota.md). -### Syntax {#show-create-quota-syntax} +### Syntax ``` sql SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT] ``` -## SHOW CREATE SETTINGS PROFILE {#show-create-settings-profile-statement} +## SHOW CREATE SETTINGS PROFILE Shows the parameters that were used at [settings profile creation](../../sql-reference/statements/create/settings-profile.md). -### Syntax {#show-create-settings-profile-syntax} +### Syntax ``` sql SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...] ``` -## SHOW USERS {#show-users-statement} +## SHOW USERS Returns a list of [user account](../../operations/access-rights.md#user-account-management) names. To view user account parameters, see the system table [system.users](../../operations/system-tables/users.md#system_tables-users). -### Syntax {#show-users-syntax} +### Syntax ``` sql SHOW USERS ``` -## SHOW ROLES {#show-roles-statement} +## SHOW ROLES Returns a list of [roles](../../operations/access-rights.md#role-management). To view other parameters, see the system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role-grants](../../operations/system-tables/role-grants.md#system_tables-role_grants). -### Syntax {#show-roles-syntax} +### Syntax ``` sql SHOW [CURRENT|ENABLED] ROLES ``` -## SHOW PROFILES {#show-profiles-statement} +## SHOW PROFILES Returns a list of [setting profiles](../../operations/access-rights.md#settings-profiles-management). To view profile parameters, see the system table [settings_profiles](../../operations/system-tables/settings_profiles.md#system_tables-settings_profiles). -### Syntax {#show-profiles-syntax} +### Syntax ``` sql SHOW [SETTINGS] PROFILES ``` -## SHOW POLICIES {#show-policies-statement} +## SHOW POLICIES Returns a list of [row policies](../../operations/access-rights.md#row-policy-management) for the specified table. To view policy parameters, see the system table [system.row_policies](../../operations/system-tables/row_policies.md#system_tables-row_policies). -### Syntax {#show-policies-syntax} +### Syntax ``` sql SHOW [ROW] POLICIES [ON [db.]table] ``` -## SHOW QUOTAS {#show-quotas-statement} +## SHOW QUOTAS Returns a list of [quotas](../../operations/access-rights.md#quotas-management). To view quota parameters, see the system table [system.quotas](../../operations/system-tables/quotas.md#system_tables-quotas).
-### Syntax {#show-quotas-syntax} +### Syntax ``` sql SHOW QUOTAS ``` -## SHOW QUOTA {#show-quota-statement} +## SHOW QUOTA Returns the [quota](../../operations/quotas.md) consumption for all users or for the current user. To view other parameters, see the system tables [system.quotas_usage](../../operations/system-tables/quotas_usage.md#system_tables-quotas_usage) and [system.quota_usage](../../operations/system-tables/quota_usage.md#system_tables-quota_usage). -### Syntax {#show-quota-syntax} +### Syntax ``` sql SHOW [CURRENT] QUOTA ``` -## SHOW ACCESS {#show-access-statement} +## SHOW ACCESS Shows all [users](../../operations/access-rights.md#user-account-management), [roles](../../operations/access-rights.md#role-management), [profiles](../../operations/access-rights.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#grant-privileges). -### Syntax {#show-access-syntax} +### Syntax ``` sql SHOW ACCESS ``` -## SHOW CLUSTER(s) {#show-cluster-statement} +## SHOW CLUSTER(s) Returns a list of clusters. All available clusters are listed in the [system.clusters](../../operations/system-tables/clusters.md) table. @@ -365,13 +365,13 @@ Returns a list of clusters. All available clusters are listed in the [system.clu The `SHOW CLUSTER name` query displays the contents of the system.clusters table for this cluster. ::: -### Syntax {#show-cluster-syntax} +### Syntax ``` sql SHOW CLUSTER '<name>' SHOW CLUSTERS [LIKE|NOT LIKE '<pattern>'] [LIMIT <N>] ``` -### Examples {#show-cluster-examples} +### Examples Query: @@ -431,7 +431,7 @@ errors_count: 0 estimated_recovery_time: 0 ``` -## SHOW SETTINGS {#show-settings} +## SHOW SETTINGS Returns a list of system settings and their values. Selects data from the [system.settings](../../operations/system-tables/settings.md) table. diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index a3b323760f4..e5a5608622d 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -3,7 +3,7 @@ sidebar_position: 36 sidebar_label: SYSTEM --- -# SYSTEM Statements {#query-language-system} +# SYSTEM Statements The list of available `SYSTEM` statements: @@ -43,19 +43,19 @@ The list of available `SYSTEM` statements: - [RESTORE REPLICA](#query_language-system-restore-replica) - [RESTART REPLICAS](#query_language-system-restart-replicas) -## RELOAD EMBEDDED DICTIONARIES {#query_language-system-reload-emdedded-dictionaries} +## RELOAD EMBEDDED DICTIONARIES Reload all [Internal dictionaries](../../sql-reference/dictionaries/internal-dicts.md). By default, internal dictionaries are disabled. Always returns `Ok.` regardless of the result of the internal dictionary update. -## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} +## RELOAD DICTIONARIES Reloads all dictionaries that have been successfully loaded before. By default, dictionaries are loaded lazily (see [dictionaries_lazy_load](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), so instead of being loaded automatically at startup, they are initialized on first access through the dictGet function or a SELECT from tables with ENGINE = Dictionary. The `SYSTEM RELOAD DICTIONARIES` query reloads such dictionaries (LOADED). Always returns `Ok.` regardless of the result of the dictionary update.
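A minimal sketch of forcing a reload, for example after editing a dictionary configuration, and then checking the outcome:

``` sql
SYSTEM RELOAD DICTIONARIES;

SELECT name, status FROM system.dictionaries;
```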
-## RELOAD DICTIONARY {#query_language-system-reload-dictionary} +## RELOAD DICTIONARY Completely reloads a dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT_LOADED / FAILED). Always returns `Ok.` regardless of the result of updating the dictionary. @@ -65,7 +65,7 @@ The status of the dictionary can be checked by querying the `system.dictionaries SELECT name, status FROM system.dictionaries; ``` -## RELOAD MODELS {#query_language-system-reload-models} +## RELOAD MODELS Reloads all [CatBoost](../../guides/developer/apply-catboost-model.md) models if the configuration was updated without restarting the server. @@ -75,7 +75,7 @@ Reloads all [CatBoost](../../guides/developer/apply-catboost-model.md) models if SYSTEM RELOAD MODELS [ON CLUSTER cluster_name] ``` -## RELOAD MODEL {#query_language-system-reload-model} +## RELOAD MODEL Completely reloads a CatBoost model `model_name` if the configuration was updated without restarting the server. @@ -85,7 +85,7 @@ Completely reloads a CatBoost model `model_name` if the configuration was update SYSTEM RELOAD MODEL [ON CLUSTER cluster_name] <model_name> ``` -## RELOAD FUNCTIONS {#query_language-system-reload-functions} +## RELOAD FUNCTIONS Reloads all registered [executable user defined functions](../functions/index.md#executable-user-defined-functions) or one of them from a configuration file. @@ -96,17 +96,17 @@ RELOAD FUNCTIONS [ON CLUSTER cluster_name] RELOAD FUNCTION [ON CLUSTER cluster_name] function_name ``` -## DROP DNS CACHE {#query_language-system-drop-dns-cache} +## DROP DNS CACHE Resets ClickHouse’s internal DNS cache. Sometimes (for old ClickHouse versions) it is necessary to use this command when changing the infrastructure (changing the IP address of another ClickHouse server or the server used by dictionaries). For more convenient (automatic) cache management, see the disable_internal_dns_cache and dns_cache_update_period parameters. -## DROP MARK CACHE {#query_language-system-drop-mark-cache} +## DROP MARK CACHE Resets the mark cache. Used in development of ClickHouse and performance tests. -## DROP REPLICA {#query_language-system-drop-replica} +## DROP REPLICA Dead replicas can be dropped using the following syntax: @@ -124,38 +124,38 @@ The second one does the same for all replicated tables in the database. The third one does the same for all replicated tables on the local server. The fourth one is useful for removing the metadata of a dead replica when all other replicas of a table have been dropped. It requires the table path to be specified explicitly. It must be the same path as was passed to the first argument of the `ReplicatedMergeTree` engine on table creation. -## DROP UNCOMPRESSED CACHE {#query_language-system-drop-uncompressed-cache} +## DROP UNCOMPRESSED CACHE Resets the uncompressed data cache. Used in development of ClickHouse and performance tests. To manage the uncompressed data cache parameters, use the server-level setting [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) and the query/user/profile-level setting [use_uncompressed_cache](../../operations/settings/settings.md#setting-use_uncompressed_cache) -## DROP COMPILED EXPRESSION CACHE {#query_language-system-drop-compiled-expression-cache} +## DROP COMPILED EXPRESSION CACHE Resets the compiled expression cache. Used in development of ClickHouse and performance tests.
The compiled expression cache is used when the query/user/profile option [compile-expressions](../../operations/settings/settings.md#compile-expressions) is enabled. -## FLUSH LOGS {#query_language-system-flush_logs} +## FLUSH LOGS Flushes buffers of log messages to system tables (e.g. system.query_log). Allows you to not wait 7.5 seconds when debugging. This will also create system tables even if the message queue is empty. -## RELOAD CONFIG {#query_language-system-reload-config} +## RELOAD CONFIG Reloads the ClickHouse configuration. Used when the configuration is stored in ZooKeeper. -## SHUTDOWN {#query_language-system-shutdown} +## SHUTDOWN Normally shuts down ClickHouse (like `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`) -## KILL {#query_language-system-kill} +## KILL Aborts the ClickHouse process (like `kill -9 {$pid_clickhouse-server}`) -## Managing Distributed Tables {#query-language-system-distributed} +## Managing Distributed Tables ClickHouse can manage [distributed](../../engines/table-engines/special/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the [insert_distributed_sync](../../operations/settings/settings.md#insert_distributed_sync) setting. -### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends} +### STOP DISTRIBUTED SENDS Disables background data distribution when inserting data into distributed tables. @@ -163,7 +163,7 @@ Disables background data distribution when inserting data into distributed table SYSTEM STOP DISTRIBUTED SENDS [db.] ``` -### FLUSH DISTRIBUTED {#query_language-system-flush-distributed} +### FLUSH DISTRIBUTED Forces ClickHouse to send data to cluster nodes synchronously. If any nodes are unavailable, ClickHouse throws an exception and stops query execution. You can retry the query until it succeeds, which will happen when all nodes are back online. @@ -171,7 +171,7 @@ Forces ClickHouse to send data to cluster nodes synchronously. If any nodes are SYSTEM FLUSH DISTRIBUTED [db.] ``` -### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends} +### START DISTRIBUTED SENDS Enables background data distribution when inserting data into distributed tables. @@ -179,11 +179,11 @@ Enables background data distribution when inserting data into distributed tables SYSTEM START DISTRIBUTED SENDS [db.] ``` -## Managing MergeTree Tables {#query-language-system-mergetree} +## Managing MergeTree Tables ClickHouse can manage background processes in [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. -### STOP MERGES {#query_language-system-stop-merges} +### STOP MERGES Provides the possibility to stop background merges for tables in the MergeTree family: @@ -195,7 +195,7 @@ SYSTEM STOP MERGES [ON VOLUME | [db.]merge_tree_family_table_name] `DETACH / ATTACH` of a table will start background merges for the table even if merges have been stopped for all MergeTree tables before.
::: -### START MERGES {#query_language-system-start-merges} +### START MERGES Provides the possibility to start background merges for tables in the MergeTree family: @@ -203,7 +203,7 @@ Provides possibility to start background merges for tables in the MergeTree fami SYSTEM START MERGES [ON VOLUME | [db.]merge_tree_family_table_name] ``` -### STOP TTL MERGES {#query_language-stop-ttl-merges} +### STOP TTL MERGES Provides the possibility to stop background deletion of old data according to the [TTL expression](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) for tables in the MergeTree family: Returns `Ok.` even if the table does not exist or does not have a MergeTree engine. Returns an error when the database does not exist: @@ -212,7 +212,7 @@ Returns `Ok.` even if table does not exist or table has not MergeTree engine. Re SYSTEM STOP TTL MERGES [[db.]merge_tree_family_table_name] ``` -### START TTL MERGES {#query_language-start-ttl-merges} +### START TTL MERGES Provides the possibility to start background deletion of old data according to the [TTL expression](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) for tables in the MergeTree family: Returns `Ok.` even if the table does not exist. Returns an error when the database does not exist: @@ -221,7 +221,7 @@ Returns `Ok.` even if table does not exist. Returns error when database does not SYSTEM START TTL MERGES [[db.]merge_tree_family_table_name] ``` -### STOP MOVES {#query_language-stop-moves} +### STOP MOVES Provides the possibility to stop background moving of data according to the [TTL table expression with TO VOLUME or TO DISK clause](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) for tables in the MergeTree family: Returns `Ok.` even if the table does not exist. Returns an error when the database does not exist: @@ -230,7 +230,7 @@ Returns `Ok.` even if table does not exist. Returns error when database does not SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] ``` -### START MOVES {#query_language-start-moves} +### START MOVES Provides the possibility to start background moving of data according to the [TTL table expression with TO VOLUME and TO DISK clause](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) for tables in the MergeTree family: Returns `Ok.` even if the table does not exist. Returns an error when the database does not exist: @@ -239,11 +239,11 @@ Returns `Ok.` even if table does not exist. Returns error when database does not SYSTEM START MOVES [[db.]merge_tree_family_table_name] ``` -## Managing ReplicatedMergeTree Tables {#query-language-system-replicated} +## Managing ReplicatedMergeTree Tables ClickHouse can manage background replication-related processes in [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md#table_engines-replication) tables. -### STOP FETCHES {#query_language-system-stop-fetches} +### STOP FETCHES Provides the possibility to stop background fetches of inserted parts for tables in the `ReplicatedMergeTree` family: Always returns `Ok.` regardless of the table engine and even if the table or database does not exist.
@@ -252,7 +252,7 @@ Always returns `Ok.` regardless of the table engine and even if table or databas SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name] ``` -### START FETCHES {#query_language-system-start-fetches} +### START FETCHES Provides the possibility to start background fetches of inserted parts for tables in the `ReplicatedMergeTree` family: Always returns `Ok.` regardless of the table engine and even if the table or database does not exist. @@ -261,7 +261,7 @@ Always returns `Ok.` regardless of the table engine and even if table or databas SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name] ``` -### STOP REPLICATED SENDS {#query_language-system-start-replicated-sends} +### STOP REPLICATED SENDS Provides the possibility to stop background sends of newly inserted parts to other replicas in the cluster for tables in the `ReplicatedMergeTree` family: @@ -269,7 +269,7 @@ Provides possibility to stop background sends to other replicas in cluster for n SYSTEM STOP REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name] ``` -### START REPLICATED SENDS {#query_language-system-start-replicated-sends} +### START REPLICATED SENDS Provides the possibility to start background sends of newly inserted parts to other replicas in the cluster for tables in the `ReplicatedMergeTree` family: @@ -277,7 +277,7 @@ Provides possibility to start background sends to other replicas in cluster for SYSTEM START REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name] ``` -### STOP REPLICATION QUEUES {#query_language-system-stop-replication-queues} +### STOP REPLICATION QUEUES Provides the possibility to stop background fetch tasks from replication queues which are stored in ZooKeeper for tables in the `ReplicatedMergeTree` family. Possible background task types are merges, fetches, mutations, and DDL statements with the ON CLUSTER clause: @@ -285,7 +285,7 @@ Provides possibility to stop background fetch tasks from replication queues whic SYSTEM STOP REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name] ``` -### START REPLICATION QUEUES {#query_language-system-start-replication-queues} +### START REPLICATION QUEUES Provides the possibility to start background fetch tasks from replication queues which are stored in ZooKeeper for tables in the `ReplicatedMergeTree` family. Possible background task types are merges, fetches, mutations, and DDL statements with the ON CLUSTER clause: @@ -293,7 +293,7 @@ Provides possibility to start background fetch tasks from replication queues whi SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name] ``` -### SYNC REPLICA {#query_language-system-sync-replica} +### SYNC REPLICA Waits until a `ReplicatedMergeTree` table is synced with the other replicas in the cluster. Runs until `receive_timeout` if fetches are currently disabled for the table. @@ -303,7 +303,7 @@ SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits until the replica processes all of the fetched commands. -### RESTART REPLICA {#query_language-system-restart-replica} +### RESTART REPLICA Provides the possibility to reinitialize the ZooKeeper session state for a `ReplicatedMergeTree` table: the current state is compared with ZooKeeper as the source of truth, and tasks are added to the ZooKeeper queue if needed. Initialization of the replication queue based on ZooKeeper data happens in the same way as for the `ATTACH TABLE` statement.
For a short time the table will be unavailable for any operations. @@ -312,7 +312,7 @@ Initialization replication queue based on ZooKeeper date happens in the same way SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name ``` -### RESTORE REPLICA {#query_language-system-restore-replica} +### RESTORE REPLICA Restores a replica if data is [possibly] present but ZooKeeper metadata is lost. @@ -366,6 +366,6 @@ Another way: SYSTEM RESTORE REPLICA test ON CLUSTER cluster; ``` -### RESTART REPLICAS {#query_language-system-restart-replicas} +### RESTART REPLICAS Provides the possibility to reinitialize the ZooKeeper session state for all `ReplicatedMergeTree` tables: the current state is compared with ZooKeeper as the source of truth, and tasks are added to the ZooKeeper queue if needed
diff --git a/docs/en/sql-reference/statements/truncate.md b/docs/en/sql-reference/statements/truncate.md index 393ba82b3cd..7aff5f392bf 100644 --- a/docs/en/sql-reference/statements/truncate.md +++ b/docs/en/sql-reference/statements/truncate.md @@ -3,7 +3,7 @@ sidebar_position: 52 sidebar_label: TRUNCATE --- -# TRUNCATE Statement {#truncate-statement} +# TRUNCATE Statement ``` sql TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
diff --git a/docs/en/sql-reference/statements/use.md b/docs/en/sql-reference/statements/use.md index 869bf44fdeb..508e1269537 100644 --- a/docs/en/sql-reference/statements/use.md +++ b/docs/en/sql-reference/statements/use.md @@ -3,7 +3,7 @@ sidebar_position: 53 sidebar_label: USE --- -# USE Statement {#use} +# USE Statement ``` sql USE db
diff --git a/docs/en/sql-reference/statements/watch.md b/docs/en/sql-reference/statements/watch.md index 688cf21e23c..2db2c019f18 100644 --- a/docs/en/sql-reference/statements/watch.md +++ b/docs/en/sql-reference/statements/watch.md @@ -3,7 +3,7 @@ sidebar_position: 53 sidebar_label: WATCH --- -# WATCH Statement (Experimental) {#watch} +# WATCH Statement (Experimental) :::warning This is an experimental feature that may change in backwards-incompatible ways in future releases. Enable live views and the `WATCH` query using `set allow_experimental_live_view = 1`. @@ -22,7 +22,7 @@ The `WATCH` query performs continuous data retrieval from a [LIVE VIEW](./create WATCH [db.]live_view [EVENTS] [LIMIT n] [FORMAT format] ``` -## Virtual columns {#watch-virtual-columns} +## Virtual columns The virtual `_version` column in the query result indicates the current result version. @@ -54,7 +54,7 @@ By default, the requested data is returned to the client, while in conjunction w INSERT INTO [db.]table WATCH [db.]live_view ... ``` -## EVENTS Clause {#events-clause} +## EVENTS Clause The `EVENTS` clause can be used to obtain a short form of the `WATCH` query, where instead of the query result you just get the latest query result version. @@ -79,7 +79,7 @@ WATCH lv EVENTS; ... ``` -## LIMIT Clause {#limit-clause} +## LIMIT Clause The `LIMIT n` clause specifies the number of updates the `WATCH` query should wait for before terminating. By default there is no limit on the number of updates, and therefore the query will not terminate. The value of `0` indicates that the `WATCH` query should not wait for any new query results and therefore will return immediately once the query result is evaluated. @@ -100,7 +100,7 @@ WATCH lv EVENTS LIMIT 1; └─────────┘ ``` -## FORMAT Clause {#format-clause} +## FORMAT Clause The `FORMAT` clause works the same way as for the [SELECT](../../sql-reference/statements/select/format.md#format-clause).
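Putting the clauses together, a minimal sketch (assuming a live view `lv` exists and `allow_experimental_live_view = 1` is set):

``` sql
-- Wait for exactly one update, return it as JSON, then terminate.
WATCH lv EVENTS LIMIT 1 FORMAT JSONEachRow;
```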
diff --git a/docs/en/sql-reference/syntax.md b/docs/en/sql-reference/syntax.md index f73b9a17c3d..0b403ae2789 100644 --- a/docs/en/sql-reference/syntax.md +++ b/docs/en/sql-reference/syntax.md @@ -3,7 +3,7 @@ sidebar_position: 2 sidebar_label: Syntax --- -# Syntax {#syntax} +# Syntax There are two types of parsers in the system: the full SQL parser (a recursive descent parser), and the data format parser (a fast stream parser). In all cases except the `INSERT` query, only the full SQL parser is used. @@ -22,18 +22,18 @@ When using the `Values` format in an `INSERT` query, it may seem that data is pa The rest of this article covers the full parser. For more information about format parsers, see the [Formats](../interfaces/formats.md) section. -## Spaces {#spaces} +## Spaces There may be any number of space symbols between syntactical constructions (including the beginning and end of a query). Space symbols include the space, tab, line feed, CR, and form feed. -## Comments {#comments} +## Comments ClickHouse supports both SQL-style and C-style comments: - SQL-style comments start with `--`, `#!` or `# ` and continue to the end of the line; the space after `--` and `#!` can be omitted. - C-style comments span from `/*` to `*/` and can be multiline; spaces are not required either. -## Keywords {#syntax-keywords} +## Keywords Keywords are case-insensitive when they correspond to: @@ -46,7 +46,7 @@ In contrast to standard SQL, all other keywords (including functions names) are Keywords are not reserved; they are treated as such only in the corresponding context. If you use [identifiers](#syntax-identifiers) with the same name as keywords, enclose them in double quotes or backticks. For example, the query `SELECT "FROM" FROM table_name` is valid if the table `table_name` has a column with the name `"FROM"`. -## Identifiers {#syntax-identifiers} +## Identifiers Identifiers are: @@ -61,11 +61,11 @@ Non-quoted identifiers must match the regex `^[a-zA-Z_][0-9a-zA-Z_]*$` and can n If you want to use identifiers that are the same as keywords, or to use other symbols in identifiers, quote them using double quotes or backticks, for example, `"id"`, `` `id` ``. -## Literals {#literals} +## Literals There are numeric, string, compound, and `NULL` literals. -### Numeric {#numeric} +### Numeric A numeric literal is parsed as follows: @@ -79,20 +79,20 @@ For example, 1 is parsed as `UInt8`, but 256 is parsed as `UInt16`. For more inf Examples: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. -### String {#syntax-string-literal} +### String Only string literals in single quotes are supported. The enclosed characters can be backslash-escaped. The following escape sequences have a corresponding special value: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. In all other cases, escape sequences in the format `\c`, where `c` is any character, are converted to `c`. This means that you can use the sequences `\'` and `\\`. The value will have the [String](../sql-reference/data-types/string.md) type. In string literals, you need to escape at least `'` and `\`. A single quote can be escaped with another single quote; the literals `'It\'s'` and `'It''s'` are equal. -### Compound {#compound} +### Compound Arrays are constructed with square brackets `[1, 2, 3]`. Tuples are constructed with round brackets `(1, 'Hello, world!', 2)`. Technically these are not literals, but expressions with the array creation operator and the tuple creation operator, respectively.
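For instance, both constructors can be combined in one query:

``` sql
SELECT [1, 2, 3] AS arr, (1, 'Hello, world!', 2) AS tup;
```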
An array must consist of at least one item, and a tuple must have at least two items. There’s a separate case when tuples appear in the `IN` clause of a `SELECT` query. Query results can include tuples, but tuples can’t be saved to a database (except for tables with the [Memory](../engines/table-engines/special/memory.md) engine). -### NULL {#null-literal} +### NULL Indicates that the value is missing. @@ -104,7 +104,7 @@ There are many nuances to processing `NULL`. For example, if at least one of the In queries, you can check `NULL` using the [IS NULL](../sql-reference/operators/index.md#operator-is-null) and [IS NOT NULL](../sql-reference/operators/index.md) operators and the related functions `isNull` and `isNotNull`. -### Heredoc {#heredoc} +### Heredoc A [heredoc](https://en.wikipedia.org/wiki/Here_document) is a way to define a string (often multiline), while maintaining the original formatting. A heredoc is defined as a custom string literal, placed between two `$` symbols, for example `$heredoc$`. A value between two heredocs is processed "as-is". @@ -126,21 +126,21 @@ Result: └────────────────────────────┘ ``` -## Functions {#functions} +## Functions Function calls are written like an identifier with a list of arguments (possibly empty) in round brackets. In contrast to standard SQL, the brackets are required, even for an empty argument list. Example: `now()`. There are regular and aggregate functions (see the section “Aggregate functions”). Some aggregate functions can contain two lists of arguments in brackets. Example: `quantile (0.9) (x)`. These aggregate functions are called “parametric” functions, and the arguments in the first list are called “parameters”. The syntax of aggregate functions without parameters is the same as for regular functions. -## Operators {#operators} +## Operators Operators are converted to their corresponding functions during query parsing, taking their priority and associativity into account. For example, the expression `1 + 2 * 3 + 4` is transformed to `plus(plus(1, multiply(2, 3)), 4)`. -## Data Types and Database Table Engines {#data_types-and-database-table-engines} +## Data Types and Database Table Engines Data types and table engines in the `CREATE` query are written the same way as identifiers or functions. In other words, they may or may not contain an argument list in brackets. For more information, see the sections “Data types,” “Table engines,” and “CREATE”. -## Expression Aliases {#syntax-expression_aliases} +## Expression Aliases An alias is a user-defined name for an expression in a query. @@ -162,7 +162,7 @@ expr AS alias For example, `SELECT "table t".column_name FROM table_name AS "table t"`. -### Notes on Usage {#notes-on-usage} +### Notes on Usage Aliases are global for a query or subquery, and you can define an alias in any part of a query for any expression. For example, `SELECT (1 AS n) + 2, n`. @@ -195,11 +195,11 @@ Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception In this example, we declared the table `t` with the column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception. You can change this default behavior by setting [prefer_column_name_to_alias](../operations/settings/settings.md#prefer_column_name_to_alias) to `1`. -## Asterisk {#asterisk} +## Asterisk In a `SELECT` query, an asterisk can replace the expression.
For more information, see the section “SELECT”. -## Expressions {#syntax-expressions} +## Expressions An expression is a function, identifier, literal, application of an operator, expression in brackets, subquery, or asterisk. It can also contain an alias. A list of expressions is one or more expressions separated by commas.
diff --git a/docs/en/sql-reference/table-functions/cluster.md b/docs/en/sql-reference/table-functions/cluster.md index 5954ed1b439..11dd63e7f65 100644 --- a/docs/en/sql-reference/table-functions/cluster.md +++ b/docs/en/sql-reference/table-functions/cluster.md @@ -3,7 +3,7 @@ sidebar_position: 50 sidebar_label: cluster --- -# cluster, clusterAllReplicas {#cluster-clusterallreplicas} +# cluster, clusterAllReplicas Allows accessing all shards of an existing cluster which is configured in the `remote_servers` section without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. One replica of each shard is queried.
diff --git a/docs/en/sql-reference/table-functions/dictionary.md b/docs/en/sql-reference/table-functions/dictionary.md index f04a4b6eb24..b192498af66 100644 --- a/docs/en/sql-reference/table-functions/dictionary.md +++ b/docs/en/sql-reference/table-functions/dictionary.md @@ -3,7 +3,7 @@ sidebar_position: 54 sidebar_label: dictionary function --- -# dictionary {#dictionary-function} +# dictionary Displays the [dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) data as a ClickHouse table. Works the same way as the [Dictionary](../../engines/table-engines/special/dictionary.md) engine.
diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index 4b72b0d84f5..e1d9eb73b73 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -3,7 +3,7 @@ sidebar_position: 37 sidebar_label: file --- -# file {#file} +# file Creates a table from a file. This table function is similar to the [url](../../sql-reference/table-functions/url.md) and [hdfs](../../sql-reference/table-functions/hdfs.md) ones. @@ -72,7 +72,7 @@ SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 U └─────────┴─────────┴─────────┘ ``` -## Globs in Path {#globs-in-path} +## Globs in Path Multiple path components can have globs. To be processed, a file must exist and match the whole path pattern (not only the suffix or prefix). @@ -118,7 +118,7 @@ Query the data from files named `file000`, `file001`, … , `file999`: SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32'); ``` -## Virtual Columns {#virtual-columns} +## Virtual Columns - `_path` — Path to the file. - `_file` — Name of the file.
diff --git a/docs/en/sql-reference/table-functions/generate.md b/docs/en/sql-reference/table-functions/generate.md index bb9ad3f7551..8459d47899b 100644 --- a/docs/en/sql-reference/table-functions/generate.md +++ b/docs/en/sql-reference/table-functions/generate.md @@ -3,7 +3,7 @@ sidebar_position: 47 sidebar_label: generateRandom --- -# generateRandom {#generaterandom} +# generateRandom Generates random data with the given schema. Allows populating test tables with data. @@ -25,7 +25,7 @@ generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_stri A table object with the requested schema.
-## Usage Example {#usage-example} +## Usage Example ``` sql SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3), UUID)', 1, 10, 2) LIMIT 3;
diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 7f7dc53d27e..49cc65cb87a 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -3,7 +3,7 @@ sidebar_position: 45 sidebar_label: hdfs --- -# hdfs {#hdfs} +# hdfs Creates a table from files in HDFS. This table function is similar to the [url](../../sql-reference/table-functions/url.md) and [file](../../sql-reference/table-functions/file.md) ones. @@ -91,7 +91,7 @@ SELECT count(*) FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') ``` -## Virtual Columns {#virtual-columns} +## Virtual Columns - `_path` — Path to the file. - `_file` — Name of the file.
diff --git a/docs/en/sql-reference/table-functions/hdfsCluster.md b/docs/en/sql-reference/table-functions/hdfsCluster.md index b46b8e64a1a..f8511d74bff 100644 --- a/docs/en/sql-reference/table-functions/hdfsCluster.md +++ b/docs/en/sql-reference/table-functions/hdfsCluster.md @@ -3,7 +3,7 @@ sidebar_position: 55 sidebar_label: hdfsCluster --- -# hdfsCluster Table Function {#hdfsCluster-table-function} +# hdfsCluster Table Function Allows processing files from HDFS in parallel from many nodes in a specified cluster. On the initiator it creates a connection to all nodes in the cluster, expands asterisks in the HDFS file path, and dispatches each file dynamically. On the worker node it asks the initiator about the next task to process and processes it. This is repeated until all tasks are finished.
diff --git a/docs/en/sql-reference/table-functions/input.md b/docs/en/sql-reference/table-functions/input.md index 916abb890ff..bf9da0091a3 100644 --- a/docs/en/sql-reference/table-functions/input.md +++ b/docs/en/sql-reference/table-functions/input.md @@ -3,7 +3,7 @@ sidebar_position: 46 sidebar_label: input --- -# input {#input} +# input `input(structure)` - a table function that allows effectively converting and inserting data sent to the server with the given structure into a table with another structure.
diff --git a/docs/en/sql-reference/table-functions/jdbc.md b/docs/en/sql-reference/table-functions/jdbc.md index 57128f7d146..d0111246d96 100644 --- a/docs/en/sql-reference/table-functions/jdbc.md +++ b/docs/en/sql-reference/table-functions/jdbc.md @@ -3,7 +3,7 @@ sidebar_position: 43 sidebar_label: jdbc --- -# jdbc {#table-function-jdbc} +# jdbc `jdbc(datasource, schema, table)` - returns a table that is connected via a JDBC driver.
diff --git a/docs/en/sql-reference/table-functions/merge.md b/docs/en/sql-reference/table-functions/merge.md index 301f0a69caf..1597b7be98f 100644 --- a/docs/en/sql-reference/table-functions/merge.md +++ b/docs/en/sql-reference/table-functions/merge.md @@ -3,7 +3,7 @@ sidebar_position: 38 sidebar_label: merge --- -# merge {#merge} +# merge Creates a temporary [Merge](../../engines/table-engines/special/merge.md) table. The table structure is taken from the first table encountered that matches the regular expression.
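A minimal usage sketch (the `^hits` name pattern is hypothetical):

``` sql
-- Read from every table in the current database whose name matches the regexp.
SELECT * FROM merge(currentDatabase(), '^hits') LIMIT 10;
```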
diff --git a/docs/en/sql-reference/table-functions/mysql.md b/docs/en/sql-reference/table-functions/mysql.md index c6983d8fba1..33ddaa89435 100644 --- a/docs/en/sql-reference/table-functions/mysql.md +++ b/docs/en/sql-reference/table-functions/mysql.md @@ -3,7 +3,7 @@ sidebar_position: 42 sidebar_label: mysql --- -# mysql {#mysql} +# mysql Allows `SELECT` and `INSERT` queries to be performed on data that is stored on a remote MySQL server.
diff --git a/docs/en/sql-reference/table-functions/null.md b/docs/en/sql-reference/table-functions/null.md index 48df12bfece..ae84705cb66 100644 --- a/docs/en/sql-reference/table-functions/null.md +++ b/docs/en/sql-reference/table-functions/null.md @@ -3,7 +3,7 @@ sidebar_position: 53 sidebar_label: null function --- -# null {#null-function} +# null Creates a temporary table of the specified structure with the [Null](../../engines/table-engines/special/null.md) table engine. According to the `Null`-engine properties, the table data is ignored and the table itself is immediately dropped right after the query execution. The function is used for the convenience of test writing and demonstrations.
diff --git a/docs/en/sql-reference/table-functions/numbers.md b/docs/en/sql-reference/table-functions/numbers.md index c15c47cf725..a069afc3b58 100644 --- a/docs/en/sql-reference/table-functions/numbers.md +++ b/docs/en/sql-reference/table-functions/numbers.md @@ -3,7 +3,7 @@ sidebar_position: 39 sidebar_label: numbers --- -# numbers {#numbers} +# numbers `numbers(N)` – Returns a table with the single ‘number’ column (UInt64) that contains integers from 0 to N-1. `numbers(N, M)` - Returns a table with the single ‘number’ column (UInt64) that contains integers from N to (N + M - 1).
diff --git a/docs/en/sql-reference/table-functions/odbc.md b/docs/en/sql-reference/table-functions/odbc.md index d2614337cdd..71f36a3da1a 100644 --- a/docs/en/sql-reference/table-functions/odbc.md +++ b/docs/en/sql-reference/table-functions/odbc.md @@ -3,7 +3,7 @@ sidebar_position: 44 sidebar_label: odbc --- -# odbc {#table-functions-odbc} +# odbc Returns a table that is connected via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). @@ -21,7 +21,7 @@ To safely implement ODBC connections, ClickHouse uses a separate program `clickh Fields with `NULL` values from the external table are converted into the default values for the base data type. For example, if a remote MySQL table field has the `INT NULL` type, it is converted to 0 (the default value for the ClickHouse `Int32` data type). -## Usage Example {#usage-example} +## Usage Example **Getting data from the local MySQL installation via ODBC** @@ -98,7 +98,7 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') └────────┴──────────────┴───────┴────────────────┘ ``` -## See Also {#see-also} +## See Also - [ODBC external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc) - [ODBC table engine](../../engines/table-engines/integrations/odbc.md).
diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index 6a30b1f3f0c..b955b946d4e 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -3,7 +3,7 @@ sidebar_position: 42 sidebar_label: postgresql --- -# postgresql {#postgresql} +# postgresql Allows `SELECT` and `INSERT` queries to be performed on data that is stored on a remote PostgreSQL server.
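A hedged usage sketch (host, database, table, and credentials are placeholders):

``` sql
SELECT * FROM postgresql('localhost:5432', 'test', 'test_table', 'postgres_user', 'password');
```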
@@ -30,7 +30,7 @@ A table object with the same columns as the original PostgreSQL table. In an `INSERT` query, to distinguish the table function `postgresql(...)` from a table name with a list of column names, you must use the keywords `FUNCTION` or `TABLE FUNCTION`. See the examples below. ::: -## Implementation Details {#implementation-details} +## Implementation Details `SELECT` queries on the PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside a read-only PostgreSQL transaction, with a commit after each `SELECT` query.
diff --git a/docs/en/sql-reference/table-functions/remote.md b/docs/en/sql-reference/table-functions/remote.md index 0eae00564ba..f9e6860b081 100644 --- a/docs/en/sql-reference/table-functions/remote.md +++ b/docs/en/sql-reference/table-functions/remote.md @@ -3,7 +3,7 @@ sidebar_position: 40 sidebar_label: remote --- -# remote, remoteSecure {#remote-remotesecure} +# remote, remoteSecure Allows accessing remote servers without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. `remoteSecure` - same as `remote` but with a secured connection.
diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 61dda209ee6..39446dbd512 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -3,7 +3,7 @@ sidebar_position: 45 sidebar_label: s3 --- -# s3 Table Function {#s3-table-function} +# s3 Table Function Provides a table-like interface to select/insert files in [Amazon S3](https://aws.amazon.com/s3/). This table function is similar to [hdfs](../../sql-reference/table-functions/hdfs.md), but provides S3-specific features. @@ -56,7 +56,7 @@ LIMIT 2; └─────────┴─────────┴─────────┘ ``` -## Usage {#usage-examples} +## Usage Suppose that we have several files with the following URIs on S3: @@ -126,7 +126,7 @@ INSERT INTO FUNCTION s3('https://clickhouse-public-datasets.s3.amazonaws.com/my- SELECT name, value FROM existing_table; ``` -## Partitioned Write {#partitioned-write} +## Partitioned Write If you specify a `PARTITION BY` expression when inserting data into an `S3` table, a separate file is created for each partition value. Splitting the data into separate files helps to improve the efficiency of read operations.
diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index dbd3538c692..939aface0d7 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -3,7 +3,7 @@ sidebar_position: 55 sidebar_label: s3Cluster --- -# s3Cluster Table Function {#s3Cluster-table-function} +# s3Cluster Table Function Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On the initiator it creates a connection to all nodes in the cluster, expands asterisks in the S3 file path, and dispatches each file dynamically. On the worker node it asks the initiator about the next task to process and processes it. This is repeated until all tasks are finished.
diff --git a/docs/en/sql-reference/table-functions/sqlite.md b/docs/en/sql-reference/table-functions/sqlite.md index 6058843ae61..ff6ac64b382 100644 --- a/docs/en/sql-reference/table-functions/sqlite.md +++ b/docs/en/sql-reference/table-functions/sqlite.md @@ -3,7 +3,7 @@ sidebar_position: 55 sidebar_label: sqlite --- -## sqlite {#sqlite} +## sqlite Allows performing queries on data stored in an [SQLite](../../engines/database-engines/sqlite.md) database.
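A hedged usage sketch (the database path and table name are placeholders):

``` sql
SELECT * FROM sqlite('/var/lib/sqlite/db1.sqlite', 'table_name');
```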
diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md index 3f2f9c6a710..421d03bf346 100644 --- a/docs/en/sql-reference/table-functions/url.md +++ b/docs/en/sql-reference/table-functions/url.md @@ -3,7 +3,7 @@ sidebar_position: 41 sidebar_label: url --- -# url {#url} +# url The `url` function creates a table from the `URL` with the given `format` and `structure`.
diff --git a/docs/en/sql-reference/table-functions/view.md b/docs/en/sql-reference/table-functions/view.md index 727cc04e5a2..e3b63cf5588 100644 --- a/docs/en/sql-reference/table-functions/view.md +++ b/docs/en/sql-reference/table-functions/view.md @@ -3,7 +3,7 @@ sidebar_position: 51 sidebar_label: view --- -## view {#view} +## view Turns a subquery into a table. The function implements views (see [CREATE VIEW](https://clickhouse.com/docs/en/sql-reference/statements/create/view/#create-view)). The resulting table does not store data, but only stores the specified `SELECT` query. When reading from the table, ClickHouse executes the query and deletes all unnecessary columns from the result.
diff --git a/docs/ru/development/developer-instruction.md b/docs/ru/development/developer-instruction.md index 6ff1d053865..4c1981198a0 100644 --- a/docs/ru/development/developer-instruction.md +++ b/docs/ru/development/developer-instruction.md @@ -200,14 +200,6 @@ cmake -DUSE_DEBUG_HELPERS=1 -DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 During the build, `libprotobuf WARNING` messages about protobuf files in the libhdfs2 library may appear. They do not matter. -If you get errors like `error: variable 'y' set but not used [-Werror,-Wunused-but-set-variable]`, you can try using a different version of the clang compiler. For example, at the time of writing, the clang installation command for Ubuntu 20.04 described above installs clang-13 by default, which triggers this error. To work around the problem, you can install clang-12 with the commands: -```bash -wget https://apt.llvm.org/llvm.sh -chmod +x llvm.sh -sudo ./llvm.sh 12 -``` -Then use exactly that version, specifying it when setting the CC and CXX environment variables before calling cmake. - On a successful build, you get a ready-to-use executable `ClickHouse/build/programs/clickhouse`: ls -l programs/clickhouse
diff --git a/docs/ru/sql-reference/data-types/map.md b/docs/ru/sql-reference/data-types/map.md index eb1b372c7d4..010e27d8477 100644 --- a/docs/ru/sql-reference/data-types/map.md +++ b/docs/ru/sql-reference/data-types/map.md @@ -9,8 +9,8 @@ sidebar_label: Map(key, value) **Parameters** -- `key` — The key. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md) or [FixedString](../../sql-reference/data-types/fixedstring.md). -- `value` — The value. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [Array](../../sql-reference/data-types/array.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md) or [FixedString](../../sql-reference/data-types/fixedstring.md). +- `key` — The key.
[String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UUID](../../sql-reference/data-types/uuid.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), [Date32](../../sql-reference/data-types/date32.md), [Enum](../../sql-reference/data-types/enum.md). +- `value` — The value. Any type, including [Map](../../sql-reference/data-types/map.md) and [Array](../../sql-reference/data-types/array.md). To get a value from the column `a Map('key', 'value')`, use the `a['key']` syntax. Currently, this lookup works with an algorithm of linear complexity.
diff --git a/docs/ru/sql-reference/functions/tuple-map-functions.md b/docs/ru/sql-reference/functions/tuple-map-functions.md index 1eedd3b158b..c4099c00f93 100644 --- a/docs/ru/sql-reference/functions/tuple-map-functions.md +++ b/docs/ru/sql-reference/functions/tuple-map-functions.md @@ -17,8 +17,8 @@ map(key1, value1[, key2, value2, ...]) **Arguments** -- `key` — The key. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md). -- `value` — The value. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) or [Array](../../sql-reference/data-types/array.md). +- `key` — The key. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UUID](../../sql-reference/data-types/uuid.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), [Date32](../../sql-reference/data-types/date32.md), [Enum](../../sql-reference/data-types/enum.md). +- `value` — The value. Any type, including [Map](../../sql-reference/data-types/map.md) and [Array](../../sql-reference/data-types/array.md). **Returned value**
diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index 9adb3b737f3..f3609902fcb 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -15,6 +16,9 @@ #include #include #include +#include +#include +#include namespace DB { @@ -1451,7 +1455,14 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( local_context->setSettings(task_cluster->settings_pull); local_context->setSetting("skip_unavailable_shards", true); - Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_select_ast, local_context)->execute().pipeline); + InterpreterSelectWithUnionQuery select(query_select_ast, local_context, SelectQueryOptions{}); + QueryPlan plan; + select.buildQueryPlan(plan); + auto builder = std::move(*plan.buildQueryPipeline( + QueryPlanOptimizationSettings::fromContext(local_context), + BuildQueryPipelineSettings::fromContext(local_context))); + + Block block = getBlockWithAllStreamData(std::move(builder)); count = (block) ?
block.safeGetByPosition(0).column->getUInt(0) : 0; } @@ -1532,22 +1543,27 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( QueryPipeline input; QueryPipeline output; { - BlockIO io_select = InterpreterFactory::get(query_select_ast, context_select)->execute(); BlockIO io_insert = InterpreterFactory::get(query_insert_ast, context_insert)->execute(); + InterpreterSelectWithUnionQuery select(query_select_ast, context_select, SelectQueryOptions{}); + QueryPlan plan; + select.buildQueryPlan(plan); + auto builder = std::move(*plan.buildQueryPipeline( + QueryPlanOptimizationSettings::fromContext(context_select), + BuildQueryPipelineSettings::fromContext(context_select))); + output = std::move(io_insert.pipeline); /// Add converting actions to make it possible to copy blocks with slightly different schema - const auto & select_block = io_select.pipeline.getHeader(); + const auto & select_block = builder.getHeader(); const auto & insert_block = output.getHeader(); auto actions_dag = ActionsDAG::makeConvertingActions( select_block.getColumnsWithTypeAndName(), insert_block.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Position); + auto actions = std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(getContext())); - QueryPipelineBuilder builder; - builder.init(std::move(io_select.pipeline)); builder.addSimpleTransform([&](const Block & header) { return std::make_shared(header, actions); @@ -1743,10 +1759,11 @@ String ClusterCopier::getRemoteCreateTable( remote_context->setSettings(settings); String query = "SHOW CREATE TABLE " + getQuotedTable(table); - Block block = getBlockWithAllStreamData( - QueryPipeline(std::make_shared( - std::make_shared(connection, query, InterpreterShowCreateQuery::getSampleBlock(), remote_context), false, false))); + QueryPipelineBuilder builder; + builder.init(Pipe(std::make_shared( + std::make_shared(connection, query, InterpreterShowCreateQuery::getSampleBlock(), remote_context), false, false))); + Block block = getBlockWithAllStreamData(std::move(builder)); return typeid_cast(*block.safeGetByPosition(0).column).getDataAt(0).toString(); } @@ -1858,7 +1875,14 @@ std::set ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti auto local_context = Context::createCopy(context); local_context->setSettings(task_cluster->settings_pull); - Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_ast, local_context)->execute().pipeline); + InterpreterSelectWithUnionQuery select(query_ast, local_context, SelectQueryOptions{}); + QueryPlan plan; + select.buildQueryPlan(plan); + auto builder = std::move(*plan.buildQueryPipeline( + QueryPlanOptimizationSettings::fromContext(local_context), + BuildQueryPipelineSettings::fromContext(local_context))); + + Block block = getBlockWithAllStreamData(std::move(builder)); if (block) { diff --git a/programs/copier/Internals.cpp b/programs/copier/Internals.cpp index 61bba214d5b..128194b401c 100644 --- a/programs/copier/Internals.cpp +++ b/programs/copier/Internals.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { @@ -59,10 +60,8 @@ std::shared_ptr createASTStorageDistributed( } -Block getBlockWithAllStreamData(QueryPipeline pipeline) +Block getBlockWithAllStreamData(QueryPipelineBuilder builder) { - QueryPipelineBuilder builder; - builder.init(std::move(pipeline)); builder.addTransform(std::make_shared( builder.getHeader(), std::numeric_limits::max(), diff --git a/programs/copier/Internals.h b/programs/copier/Internals.h index dcde79cd6e5..64ab0019d05 100644 --- 
a/programs/copier/Internals.h +++ b/programs/copier/Internals.h @@ -161,7 +161,7 @@ std::shared_ptr createASTStorageDistributed( const String & cluster_name, const String & database, const String & table, const ASTPtr & sharding_key_ast = nullptr); -Block getBlockWithAllStreamData(QueryPipeline pipeline); +Block getBlockWithAllStreamData(QueryPipelineBuilder builder); bool isExtendedDefinitionStorage(const ASTPtr & storage_ast); diff --git a/programs/odbc-bridge/ColumnInfoHandler.cpp b/programs/odbc-bridge/ColumnInfoHandler.cpp index b029bdda5a5..f043c832aa4 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.cpp +++ b/programs/odbc-bridge/ColumnInfoHandler.cpp @@ -17,7 +17,7 @@ #include #include "getIdentifierQuote.h" #include "validateODBCConnectionString.h" -#include "ODBCConnectionFactory.h" +#include "ODBCPooledConnectionFactory.h" #include #include @@ -105,7 +105,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ { const bool external_table_functions_use_nulls = Poco::NumberParser::parseBool(params.get("external_table_functions_use_nulls", "false")); - auto connection_holder = ODBCConnectionFactory::instance().get( + auto connection_holder = ODBCPooledConnectionFactory::instance().get( validateODBCConnectionString(connection_string), getContext()->getSettingsRef().odbc_bridge_connection_pool_size); diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.cpp b/programs/odbc-bridge/IdentifierQuoteHandler.cpp index 03298bcae74..c3a88804a51 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.cpp +++ b/programs/odbc-bridge/IdentifierQuoteHandler.cpp @@ -14,7 +14,7 @@ #include #include "getIdentifierQuote.h" #include "validateODBCConnectionString.h" -#include "ODBCConnectionFactory.h" +#include "ODBCPooledConnectionFactory.h" namespace DB @@ -42,7 +42,7 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ { std::string connection_string = params.get("connection_string"); - auto connection = ODBCConnectionFactory::instance().get( + auto connection = ODBCPooledConnectionFactory::instance().get( validateODBCConnectionString(connection_string), getContext()->getSettingsRef().odbc_bridge_connection_pool_size); diff --git a/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp index d96b2132020..bb1cf53205d 100644 --- a/programs/odbc-bridge/MainHandler.cpp +++ b/programs/odbc-bridge/MainHandler.cpp @@ -110,9 +110,12 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse try { - auto connection_handler = ODBCConnectionFactory::instance().get( - validateODBCConnectionString(connection_string), - getContext()->getSettingsRef().odbc_bridge_connection_pool_size); + nanodbc::ConnectionHolderPtr connection_handler; + if (getContext()->getSettingsRef().odbc_bridge_use_connection_pooling) + connection_handler = ODBCPooledConnectionFactory::instance().get( + validateODBCConnectionString(connection_string), getContext()->getSettingsRef().odbc_bridge_connection_pool_size); + else + connection_handler = std::make_shared(validateODBCConnectionString(connection_string)); if (mode == "write") { diff --git a/programs/odbc-bridge/ODBCBlockInputStream.h b/programs/odbc-bridge/ODBCBlockInputStream.h index 84d270e4713..79d5816ad01 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.h +++ b/programs/odbc-bridge/ODBCBlockInputStream.h @@ -4,7 +4,7 @@ #include #include #include -#include "ODBCConnectionFactory.h" +#include "ODBCPooledConnectionFactory.h" namespace DB diff --git 
a/programs/odbc-bridge/ODBCBlockOutputStream.h b/programs/odbc-bridge/ODBCBlockOutputStream.h index 2c57b8ee84f..f5e7b4e3a2d 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.h +++ b/programs/odbc-bridge/ODBCBlockOutputStream.h @@ -5,7 +5,7 @@ #include #include #include -#include "ODBCConnectionFactory.h" +#include "ODBCPooledConnectionFactory.h" namespace DB diff --git a/programs/odbc-bridge/ODBCConnectionFactory.h b/programs/odbc-bridge/ODBCPooledConnectionFactory.h similarity index 90% rename from programs/odbc-bridge/ODBCConnectionFactory.h rename to programs/odbc-bridge/ODBCPooledConnectionFactory.h index 23601ceba9e..5c198c3be97 100644 --- a/programs/odbc-bridge/ODBCConnectionFactory.h +++ b/programs/odbc-bridge/ODBCPooledConnectionFactory.h @@ -37,11 +37,20 @@ public: { } + explicit ConnectionHolder(const String & connection_string_) + : pool(nullptr) + , connection() + , connection_string(connection_string_) + { + updateConnection(); + } + ConnectionHolder(const ConnectionHolder & other) = delete; ~ConnectionHolder() { - pool->returnObject(std::move(connection)); + if (pool != nullptr) + pool->returnObject(std::move(connection)); } nanodbc::connection & get() const @@ -115,12 +124,12 @@ T execute(nanodbc::ConnectionHolderPtr connection_holder, std::function #include #include "validateODBCConnectionString.h" -#include "ODBCConnectionFactory.h" +#include "ODBCPooledConnectionFactory.h" #include #include @@ -50,7 +50,7 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer { std::string connection_string = params.get("connection_string"); - auto connection = ODBCConnectionFactory::instance().get( + auto connection = ODBCPooledConnectionFactory::instance().get( validateODBCConnectionString(connection_string), getContext()->getSettingsRef().odbc_bridge_connection_pool_size); diff --git a/programs/odbc-bridge/getIdentifierQuote.h b/programs/odbc-bridge/getIdentifierQuote.h index a7620da2291..53ee1afd720 100644 --- a/programs/odbc-bridge/getIdentifierQuote.h +++ b/programs/odbc-bridge/getIdentifierQuote.h @@ -8,7 +8,7 @@ #include #include #include -#include "ODBCConnectionFactory.h" +#include "ODBCPooledConnectionFactory.h" namespace DB diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index b4b9f8c2bd4..bc5a959c88b 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -84,13 +84,13 @@ #include #include #include +#include #include #include #include #include #include #include -#include #include #include "config_core.h" @@ -103,7 +103,6 @@ #endif #if USE_SSL -# include # include # include #endif diff --git a/src/BridgeHelper/LibraryBridgeHelper.cpp b/src/BridgeHelper/LibraryBridgeHelper.cpp index 6d830e0b691..052ef3329b6 100644 --- a/src/BridgeHelper/LibraryBridgeHelper.cpp +++ b/src/BridgeHelper/LibraryBridgeHelper.cpp @@ -200,25 +200,25 @@ bool LibraryBridgeHelper::supportsSelectiveLoad() } -Pipe LibraryBridgeHelper::loadAll() +QueryPipeline LibraryBridgeHelper::loadAll() { startBridgeSync(); auto uri = createRequestURI(LOAD_ALL_METHOD); - return loadBase(uri); + return QueryPipeline(loadBase(uri)); } -Pipe LibraryBridgeHelper::loadIds(const std::vector & ids) +QueryPipeline LibraryBridgeHelper::loadIds(const std::vector & ids) { startBridgeSync(); auto uri = createRequestURI(LOAD_IDS_METHOD); uri.addQueryParameter("ids_num", toString(ids.size())); /// Not used parameter, but helpful auto ids_string = getDictIdsString(ids); - return loadBase(uri, [ids_string](std::ostream & os) { os << ids_string; }); + return 
QueryPipeline(loadBase(uri, [ids_string](std::ostream & os) { os << ids_string; })); } -Pipe LibraryBridgeHelper::loadKeys(const Block & requested_block) +QueryPipeline LibraryBridgeHelper::loadKeys(const Block & requested_block) { startBridgeSync(); auto uri = createRequestURI(LOAD_KEYS_METHOD); @@ -230,7 +230,7 @@ Pipe LibraryBridgeHelper::loadKeys(const Block & requested_block) auto output_format = getContext()->getOutputFormat(LibraryBridgeHelper::DEFAULT_FORMAT, out_buffer, requested_block.cloneEmpty()); formatBlock(output_format, requested_block); }; - return loadBase(uri, out_stream_callback); + return QueryPipeline(loadBase(uri, out_stream_callback)); } @@ -248,7 +248,7 @@ bool LibraryBridgeHelper::executeRequest(const Poco::URI & uri, ReadWriteBufferF } -Pipe LibraryBridgeHelper::loadBase(const Poco::URI & uri, ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback) +QueryPipeline LibraryBridgeHelper::loadBase(const Poco::URI & uri, ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback) { auto read_buf_ptr = std::make_unique( uri, @@ -263,7 +263,7 @@ Pipe LibraryBridgeHelper::loadBase(const Poco::URI & uri, ReadWriteBufferFromHTT auto source = FormatFactory::instance().getInput(LibraryBridgeHelper::DEFAULT_FORMAT, *read_buf_ptr, sample_block, getContext(), DEFAULT_BLOCK_SIZE); source->addBuffer(std::move(read_buf_ptr)); - return Pipe(std::move(source)); + return QueryPipeline(std::move(source)); } diff --git a/src/BridgeHelper/LibraryBridgeHelper.h b/src/BridgeHelper/LibraryBridgeHelper.h index 8cb0ca102b0..598e473f07b 100644 --- a/src/BridgeHelper/LibraryBridgeHelper.h +++ b/src/BridgeHelper/LibraryBridgeHelper.h @@ -6,6 +6,7 @@ #include #include #include +#include namespace DB @@ -38,13 +39,13 @@ public: bool supportsSelectiveLoad(); - Pipe loadAll(); + QueryPipeline loadAll(); - Pipe loadIds(const std::vector & ids); + QueryPipeline loadIds(const std::vector & ids); - Pipe loadKeys(const Block & requested_block); + QueryPipeline loadKeys(const Block & requested_block); - Pipe loadBase(const Poco::URI & uri, ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback = {}); + QueryPipeline loadBase(const Poco::URI & uri, ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback = {}); bool executeRequest(const Poco::URI & uri, ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback = {}) const; diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index d678441d442..b586979b546 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -57,6 +57,10 @@ #include #include #include +#include +#include +#include +#include #include #include #include @@ -1166,16 +1170,26 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des try { auto metadata = storage->getInMemoryMetadataPtr(); + QueryPlan plan; + storage->read( + plan, + sample.getNames(), + storage->getStorageSnapshot(metadata, global_context), + query_info, + global_context, + {}, + global_context->getSettingsRef().max_block_size, + getNumberOfPhysicalCPUCores()); + + auto builder = plan.buildQueryPipeline( + QueryPlanOptimizationSettings::fromContext(global_context), + BuildQueryPipelineSettings::fromContext(global_context)); + + QueryPlanResourceHolder resources; + auto pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources); + sendDataFromPipe( - storage->read( - sample.getNames(), - storage->getStorageSnapshot(metadata, global_context), - query_info, - global_context, - {}, - global_context->getSettingsRef().max_block_size, - 
getNumberOfPhysicalCPUCores() - ), + std::move(pipe), parsed_query, have_data_in_stdin );
diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 50413b45c6b..7b88c5467bb 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -768,8 +768,7 @@ void Connection::sendExternalTablesData(ExternalTablesData & data) if (!elem->pipe) elem->pipe = elem->creating_pipe_callback(); - QueryPipelineBuilder pipeline; - pipeline.init(std::move(*elem->pipe)); + QueryPipelineBuilder pipeline = std::move(*elem->pipe); elem->pipe.reset(); pipeline.resize(1); auto sink = std::make_shared(pipeline.getHeader(), *this, *elem, std::move(on_cancel));
diff --git a/src/Client/IServerConnection.h b/src/Client/IServerConnection.h index dfa4873e426..542aecb9849 100644 --- a/src/Client/IServerConnection.h +++ b/src/Client/IServerConnection.h @@ -44,9 +44,9 @@ struct Packet struct ExternalTableData { /// Pipe of data from table; - std::unique_ptr pipe; + std::unique_ptr pipe; std::string table_name; - std::function()> creating_pipe_callback; + std::function()> creating_pipe_callback; /// Flag if we need to stop reading. std::atomic_bool is_cancelled = false; };
diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 973dde10756..60ac604e6c9 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -629,6 +629,7 @@ M(658, MEILISEARCH_MISSING_SOME_COLUMNS) \ M(659, UNKNOWN_STATUS_OF_TRANSACTION) \ M(660, HDFS_ERROR) \ + M(661, CANNOT_SEND_SIGNAL) \ \ M(999, KEEPER_EXCEPTION) \ M(1000, POCO_EXCEPTION) \
diff --git a/src/Common/ProgressIndication.h b/src/Common/ProgressIndication.h index aad4a8c18e5..9db9af96b49 100644 --- a/src/Common/ProgressIndication.h +++ b/src/Common/ProgressIndication.h @@ -44,7 +44,7 @@ public: /// 1. onProgress in clickhouse-client; /// 2. ProgressCallback via setProgressCallback method in: /// - context (used in clickhouse-local, can also be added in arbitrary place) - /// - SourceWithProgress (also in streams) + /// - ISource (also in streams) /// - readBufferFromFileDescriptor (for file processing progress) bool updateProgress(const Progress & value);
diff --git a/src/Common/ThreadFuzzer.cpp b/src/Common/ThreadFuzzer.cpp index 2db049717ac..962cfee074d 100644 --- a/src/Common/ThreadFuzzer.cpp +++ b/src/Common/ThreadFuzzer.cpp @@ -294,8 +294,8 @@ void ThreadFuzzer::setup() const #if THREAD_FUZZER_WRAP_PTHREAD # define MAKE_WRAPPER(RET, NAME, ...) \ - extern "C" RET __##NAME(__VA_ARGS__); /* NOLINT */ \ - extern "C" RET NAME(__VA_ARGS__) /* NOLINT */ \ + extern "C" RET __##NAME(__VA_ARGS__); \ + extern "C" RET NAME(__VA_ARGS__) \ { \ injection( \ NAME##_before_yield_probability.load(std::memory_order_relaxed), \
diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp index cf4d1eaf9f2..9f586bd41ea 100644 --- a/src/Coordination/tests/gtest_coordination.cpp +++ b/src/Coordination/tests/gtest_coordination.cpp @@ -139,8 +139,7 @@ struct SimpliestRaftServer std::cout << " done" << std::endl; break; } - std::cout << "."; - fflush(stdout); + std::cout << "."
<< std::flush; std::this_thread::sleep_for(std::chrono::milliseconds(100)); } } diff --git a/src/Core/ExternalTable.cpp b/src/Core/ExternalTable.cpp index 3b515fab5c9..4528fe19e03 100644 --- a/src/Core/ExternalTable.cpp +++ b/src/Core/ExternalTable.cpp @@ -12,8 +12,9 @@ #include #include #include -#include +#include #include +#include #include #include @@ -36,7 +37,8 @@ ExternalTableDataPtr BaseExternalTable::getData(ContextPtr context) auto input = context->getInputFormat(format, *read_buffer, sample_block, DEFAULT_BLOCK_SIZE); auto data = std::make_unique(); - data->pipe = std::make_unique(std::move(input)); + data->pipe = std::make_unique(); + data->pipe->init(Pipe(std::move(input))); data->table_name = name; return data; @@ -157,20 +159,14 @@ void ExternalTablesHandler::handlePart(const Poco::Net::MessageHeader & header, auto storage = temporary_table.getTable(); getContext()->addExternalTable(data->table_name, std::move(temporary_table)); auto sink = storage->write(ASTPtr(), storage->getInMemoryMetadataPtr(), getContext()); - auto exception_handling = std::make_shared(sink->getOutputPort().getHeader()); /// Write data - data->pipe->resize(1); + auto pipeline = QueryPipelineBuilder::getPipeline(std::move(*data->pipe)); + pipeline.complete(std::move(sink)); + pipeline.setNumThreads(1); - connect(*data->pipe->getOutputPort(0), sink->getInputPort()); - connect(sink->getOutputPort(), exception_handling->getPort()); - - auto processors = Pipe::detachProcessors(std::move(*data->pipe)); - processors.push_back(std::move(sink)); - processors.push_back(std::move(exception_handling)); - - auto executor = std::make_shared(processors, getContext()->getProcessListElement()); - executor->execute(/*num_threads = */ 1); + CompletedPipelineExecutor executor(pipeline); + executor.execute(); /// We are ready to receive the next file, for this we clear all the information received clear(); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 1b3e5de8adb..729294e9c61 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -427,6 +427,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(UInt64, postgresql_connection_pool_wait_timeout, 5000, "Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.", 0) \ M(UInt64, glob_expansion_max_elements, 1000, "Maximum number of allowed addresses (For external storages, table functions, etc).", 0) \ M(UInt64, odbc_bridge_connection_pool_size, 16, "Connection pool size for each connection settings string in ODBC bridge.", 0) \ + M(Bool, odbc_bridge_use_connection_pooling, true, "Use connection pooling in ODBC bridge. 
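The ExternalTable.cpp hunk above is a good miniature of the whole refactoring: instead of resizing the pipe, connecting ports by hand and collecting processors for a PipelineExecutor, the code completes a QueryPipeline with the table's sink and runs it. The same flow in isolation (a sketch; 'sink' is the object returned by storage->write() above):

    // Turn the builder holding the received data into a pipeline.
    auto pipeline = QueryPipelineBuilder::getPipeline(std::move(*data->pipe));

    // A pipeline terminated by a sink is "completed": nothing is left to pull.
    pipeline.complete(std::move(sink));
    pipeline.setNumThreads(1);

    // Completed pipelines are driven by CompletedPipelineExecutor.
    CompletedPipelineExecutor executor(pipeline);
    executor.execute();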
If set to false, a new connection is created every time", 0) \ \ M(Seconds, distributed_replica_error_half_life, DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_DECREASE_ERROR_PERIOD, "Time period reduces replica error counter by 2 times.", 0) \ M(UInt64, distributed_replica_error_cap, DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT, "Max number of errors per replica, prevents piling up an incredible amount of errors if replica was offline for some time and allows it to be reconsidered in a shorter amount of time.", 0) \ diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 1b1e4611dc2..a9bc1c5c27f 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -73,6 +73,7 @@ namespace DB namespace ErrorCodes { extern const int CANNOT_SET_SIGNAL_HANDLER; + extern const int CANNOT_SEND_SIGNAL; } } @@ -86,7 +87,9 @@ static void call_default_signal_handler(int sig) { if (SIG_ERR == signal(sig, SIG_DFL)) DB::throwFromErrno("Cannot set signal handler.", DB::ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); - raise(sig); + + if (0 != raise(sig)) + DB::throwFromErrno("Cannot send signal.", DB::ErrorCodes::CANNOT_SEND_SIGNAL); } static const size_t signal_pipe_buf_size = diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index 62204652ec4..8f4daddc9e8 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -143,30 +143,17 @@ void convertObjectsToTuples(Block & block, const NamesAndTypesList & extended_st continue; const auto & column_object = assert_cast(*column.column); - const auto & subcolumns = column_object.getSubcolumns(); - if (!column_object.isFinalized()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot convert to tuple column '{}' from type {}. Column should be finalized first", column.name, column.type->getName()); - PathsInData tuple_paths; - DataTypes tuple_types; - Columns tuple_columns; - - for (const auto & entry : subcolumns) - { - tuple_paths.emplace_back(entry->path); - tuple_types.emplace_back(entry->data.getLeastCommonType()); - tuple_columns.emplace_back(entry->data.getFinalizedColumnPtr()); - } + std::tie(column.column, column.type) = unflattenObjectToTuple(column_object); auto it = storage_columns_map.find(column.name); if (it == storage_columns_map.end()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Column '{}' not found in storage", column.name); - std::tie(column.column, column.type) = unflattenTuple(tuple_paths, tuple_types, tuple_columns); - /// Check that constructed Tuple type and type in storage are compatible. 
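The BaseDaemon hunk above stops ignoring the result of raise(): both libc calls on the re-raise path are now checked, using the CANNOT_SEND_SIGNAL code registered as 661 in ErrorCodes.cpp earlier in this patch. The three-step pattern for introducing such an error code, reconstructed from these hunks:

    // 1. Register the code once in src/Common/ErrorCodes.cpp:
    //        M(661, CANNOT_SEND_SIGNAL) \
    // 2. Declare it in the translation unit that throws it:
    namespace DB { namespace ErrorCodes { extern const int CANNOT_SEND_SIGNAL; } }
    // 3. Throw with errno context when the call fails:
    if (0 != raise(sig))
        DB::throwFromErrno("Cannot send signal.", DB::ErrorCodes::CANNOT_SEND_SIGNAL);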
getLeastCommonTypeForObject({column.type, it->second}, true); } @@ -585,6 +572,28 @@ DataTypePtr unflattenTuple(const PathsInData & paths, const DataTypes & tuple_ty return unflattenTuple(paths, tuple_types, tuple_columns).second; } +std::pair unflattenObjectToTuple(const ColumnObject & column) +{ + const auto & subcolumns = column.getSubcolumns(); + + PathsInData paths; + DataTypes types; + Columns columns; + + paths.reserve(subcolumns.size()); + types.reserve(subcolumns.size()); + columns.reserve(subcolumns.size()); + + for (const auto & entry : subcolumns) + { + paths.emplace_back(entry->path); + types.emplace_back(entry->data.getLeastCommonType()); + columns.emplace_back(entry->data.getFinalizedColumnPtr()); + } + + return unflattenTuple(paths, types, columns); +} + std::pair unflattenTuple( const PathsInData & paths, const DataTypes & tuple_types, diff --git a/src/DataTypes/ObjectUtils.h b/src/DataTypes/ObjectUtils.h index 8dc46ceecf5..7ec7f6c9f38 100644 --- a/src/DataTypes/ObjectUtils.h +++ b/src/DataTypes/ObjectUtils.h @@ -73,11 +73,14 @@ DataTypePtr unflattenTuple( const PathsInData & paths, const DataTypes & tuple_types); +std::pair unflattenObjectToTuple(const ColumnObject & column); + std::pair unflattenTuple( const PathsInData & paths, const DataTypes & tuple_types, const Columns & tuple_columns); + /// For all columns which exist in @expected_columns and /// don't exist in @available_columns adds to WITH clause /// an alias with column name to literal of default value of column type. diff --git a/src/DataTypes/Serializations/ISerialization.cpp b/src/DataTypes/Serializations/ISerialization.cpp index 512653ecb13..7df4a956c1a 100644 --- a/src/DataTypes/Serializations/ISerialization.cpp +++ b/src/DataTypes/Serializations/ISerialization.cpp @@ -172,10 +172,6 @@ String getNameForSubstreamPath( else stream_name += "." + it->tuple_element_name; } - else if (it->type == Substream::ObjectElement) - { - stream_name += escapeForFileName(".") + escapeForFileName(it->object_key_name); - } } return stream_name; diff --git a/src/DataTypes/Serializations/ISerialization.h b/src/DataTypes/Serializations/ISerialization.h index 6c6b64f2416..b5d2082631e 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -126,7 +126,7 @@ public: SparseOffsets, ObjectStructure, - ObjectElement, + ObjectData, Regular, }; @@ -136,9 +136,6 @@ public: /// Index of tuple element, starting at 1 or name. String tuple_element_name; - /// Name of subcolumn of object column. - String object_key_name; - /// Do we need to escape a dot in filenames for tuple elements. 
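unflattenObjectToTuple, defined above, lifts out the loop that convertObjectsToTuples used to carry inline: it walks the finalized subcolumns, collects parallel (path, type, column) arrays, and defers to unflattenTuple. Usage sketch for a finalized Object column (assumes ClickHouse internals; the pair order follows the std::tie call in the hunk above):

    const auto & column_object = assert_cast<const ColumnObject &>(*column.column);
    if (!column_object.isFinalized())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Column should be finalized first");

    // Returns the equivalent Tuple column and its type in one call.
    ColumnPtr tuple_column;
    DataTypePtr tuple_type;
    std::tie(tuple_column, tuple_type) = unflattenObjectToTuple(column_object);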
bool escape_tuple_delimiter = true; diff --git a/src/DataTypes/Serializations/PathInData.cpp b/src/DataTypes/Serializations/PathInData.cpp index 574c34f1c27..b7c8bbc4da1 100644 --- a/src/DataTypes/Serializations/PathInData.cpp +++ b/src/DataTypes/Serializations/PathInData.cpp @@ -6,9 +6,6 @@ #include #include -#include -#include - #include #include @@ -73,43 +70,6 @@ UInt128 PathInData::getPartsHash(const Parts & parts_) return res; } -void PathInData::writeBinary(WriteBuffer & out) const -{ - writeVarUInt(parts.size(), out); - for (const auto & part : parts) - { - writeStringBinary(part.key, out); - writeIntBinary(part.is_nested, out); - writeIntBinary(part.anonymous_array_level, out); - } -} - -void PathInData::readBinary(ReadBuffer & in) -{ - size_t num_parts; - readVarUInt(num_parts, in); - - Arena arena; - Parts temp_parts; - temp_parts.reserve(num_parts); - - for (size_t i = 0; i < num_parts; ++i) - { - bool is_nested; - UInt8 anonymous_array_level; - - auto ref = readStringBinaryInto(arena, in); - readIntBinary(is_nested, in); - readIntBinary(anonymous_array_level, in); - - temp_parts.emplace_back(static_cast(ref), is_nested, anonymous_array_level); - } - - /// Recreate path and parts. - buildPath(temp_parts); - buildParts(temp_parts); -} - void PathInData::buildPath(const Parts & other_parts) { if (other_parts.empty()) diff --git a/src/DataTypes/Serializations/PathInData.h b/src/DataTypes/Serializations/PathInData.h index 323bc37d99b..278a81a9b0b 100644 --- a/src/DataTypes/Serializations/PathInData.h +++ b/src/DataTypes/Serializations/PathInData.h @@ -7,9 +7,6 @@ namespace DB { -class ReadBuffer; -class WriteBuffer; - /// Class that represents path in document, e.g. JSON. class PathInData { @@ -57,9 +54,6 @@ public: bool isNested(size_t i) const { return parts[i].is_nested; } bool hasNested() const { return has_nested; } - void writeBinary(WriteBuffer & out) const; - void readBinary(ReadBuffer & in); - bool operator==(const PathInData & other) const { return parts == other.parts; } struct Hash { size_t operator()(const PathInData & value) const; }; diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 2d6555dcb43..8b5f37d342b 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -9,12 +10,17 @@ #include #include #include +#include +#include #include #include #include #include +#include +#include +#include namespace DB { @@ -193,24 +199,51 @@ void SerializationObject::deserializeTextCSV(IColumn & column, ReadBuffe } template -template -void SerializationObject::checkSerializationIsSupported(const TSettings & settings, const TStatePtr & state) const +template +void SerializationObject::checkSerializationIsSupported(const TSettings & settings) const { if (settings.position_independent_encoding) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DataTypeObject doesn't support serialization with position independent encoding"); - - if (state) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DataTypeObject doesn't support serialization with non-trivial state"); } +template +struct SerializationObject::SerializeStateObject : public ISerialization::SerializeBinaryBulkState +{ + bool is_first = true; + DataTypePtr nested_type; + SerializationPtr nested_serialization; + SerializeBinaryBulkStatePtr nested_state; +}; + +template +struct SerializationObject::DeserializeStateObject : 
public ISerialization::DeserializeBinaryBulkState +{ + BinarySerializationKind kind; + DataTypePtr nested_type; + SerializationPtr nested_serialization; + DeserializeBinaryBulkStatePtr nested_state; +}; + template void SerializationObject::serializeBinaryBulkStatePrefix( SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { - checkSerializationIsSupported(settings, state); + checkSerializationIsSupported(settings); + if (state) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DataTypeObject doesn't support serialization with non-trivial state"); + + settings.path.push_back(Substream::ObjectStructure); + auto * stream = settings.getter(settings.path); + settings.path.pop_back(); + + if (!stream) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for kind of binary serialization"); + + writeIntBinary(static_cast(BinarySerializationKind::TUPLE), *stream); + state = std::make_shared(); } template @@ -218,7 +251,12 @@ void SerializationObject::serializeBinaryBulkStateSuffix( SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { - checkSerializationIsSupported(settings, state); + checkSerializationIsSupported(settings); + auto * state_object = checkAndGetState(state); + + settings.path.push_back(Substream::ObjectData); + state_object->nested_serialization->serializeBinaryBulkStateSuffix(settings, state_object->nested_state); + settings.path.pop_back(); } template @@ -226,7 +264,56 @@ void SerializationObject::deserializeBinaryBulkStatePrefix( DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state) const { - checkSerializationIsSupported(settings, state); + checkSerializationIsSupported(settings); + if (state) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DataTypeObject doesn't support serialization with non-trivial state"); + + settings.path.push_back(Substream::ObjectStructure); + auto * stream = settings.getter(settings.path); + settings.path.pop_back(); + + if (!stream) + throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, + "Cannot read kind of binary serialization of DataTypeObject, because its stream is missing"); + + UInt8 kind_raw; + readIntBinary(kind_raw, *stream); + auto kind = magic_enum::enum_cast(kind_raw); + if (!kind) + throw Exception(ErrorCodes::INCORRECT_DATA, + "Unknown binary serialization kind of Object: " + std::to_string(kind_raw)); + + auto state_object = std::make_shared(); + state_object->kind = *kind; + + if (state_object->kind == BinarySerializationKind::TUPLE) + { + String data_type_name; + readStringBinary(data_type_name, *stream); + state_object->nested_type = DataTypeFactory::instance().get(data_type_name); + state_object->nested_serialization = state_object->nested_type->getDefaultSerialization(); + + if (!isTuple(state_object->nested_type)) + throw Exception(ErrorCodes::INCORRECT_DATA, + "Data of type Object should be written as Tuple, got: {}", data_type_name); + } + else if (state_object->kind == BinarySerializationKind::STRING) + { + state_object->nested_type = std::make_shared(); + state_object->nested_serialization = std::make_shared(); + } + else + { + throw Exception(ErrorCodes::INCORRECT_DATA, + "Unknown binary serialization kind of Object: " + std::to_string(kind_raw)); + } + + settings.path.push_back(Substream::ObjectData); + state_object->nested_serialization->deserializeBinaryBulkStatePrefix(settings, state_object->nested_state); + settings.path.pop_back(); + + state = std::move(state_object); } template @@ -237,36 +324,45 @@ void 
SerializationObject::serializeBinaryBulkWithMultipleStreams( SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { - checkSerializationIsSupported(settings, state); + checkSerializationIsSupported(settings); const auto & column_object = assert_cast(column); + auto * state_object = checkAndGetState(state); if (!column_object.isFinalized()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot write non-finalized ColumnObject"); - settings.path.push_back(Substream::ObjectStructure); - if (auto * stream = settings.getter(settings.path)) - writeVarUInt(column_object.getSubcolumns().size(), *stream); + auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); - const auto & subcolumns = column_object.getSubcolumns(); - for (const auto & entry : subcolumns) + if (state_object->is_first) { - settings.path.back() = Substream::ObjectStructure; - settings.path.back().object_key_name = entry->path.getPath(); + /// Actually it's a part of serializeBinaryBulkStatePrefix, + /// but it cannot be done there, because we have to know the + /// structure of column. - const auto & type = entry->data.getLeastCommonType(); + settings.path.push_back(Substream::ObjectStructure); if (auto * stream = settings.getter(settings.path)) - { - entry->path.writeBinary(*stream); - writeStringBinary(type->getName(), *stream); - } + writeStringBinary(tuple_type->getName(), *stream); - settings.path.back() = Substream::ObjectElement; - if (auto * stream = settings.getter(settings.path)) - { - auto serialization = type->getDefaultSerialization(); - serialization->serializeBinaryBulkWithMultipleStreams( - entry->data.getFinalizedColumn(), offset, limit, settings, state); - } + state_object->nested_type = tuple_type; + state_object->nested_serialization = tuple_type->getDefaultSerialization(); + state_object->is_first = false; + + settings.path.back() = Substream::ObjectData; + state_object->nested_serialization->serializeBinaryBulkStatePrefix(settings, state_object->nested_state); + settings.path.pop_back(); + } + else if (!state_object->nested_type->equals(*tuple_type)) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Types of internal column of Object mismatched. 
Expected: {}, Got: {}", + state_object->nested_type->getName(), tuple_type->getName()); + } + + settings.path.push_back(Substream::ObjectData); + if (auto * stream = settings.getter(settings.path)) + { + state_object->nested_serialization->serializeBinaryBulkWithMultipleStreams( + *tuple_column, offset, limit, settings, state_object->nested_state); } settings.path.pop_back(); @@ -280,54 +376,20 @@ void SerializationObject::deserializeBinaryBulkWithMultipleStreams( DeserializeBinaryBulkStatePtr & state, SubstreamsCache * cache) const { - checkSerializationIsSupported(settings, state); + checkSerializationIsSupported(settings); if (!column->empty()) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DataTypeObject cannot be deserialized to non-empty column"); auto mutable_column = column->assumeMutable(); - auto & column_object = typeid_cast(*mutable_column); + auto & column_object = assert_cast(*mutable_column); + auto * state_object = checkAndGetState(state); - size_t num_subcolumns = 0; - settings.path.push_back(Substream::ObjectStructure); - if (auto * stream = settings.getter(settings.path)) - readVarUInt(num_subcolumns, *stream); - - settings.path.back() = Substream::ObjectElement; - for (size_t i = 0; i < num_subcolumns; ++i) - { - PathInData key; - String type_name; - - settings.path.back() = Substream::ObjectStructure; - if (auto * stream = settings.getter(settings.path)) - { - key.readBinary(*stream); - readStringBinary(type_name, *stream); - } - else - { - throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, - "Cannot read structure of DataTypeObject, because its stream is missing"); - } - - settings.path.back() = Substream::ObjectElement; - settings.path.back().object_key_name = key.getPath(); - - if (auto * stream = settings.getter(settings.path)) - { - auto type = DataTypeFactory::instance().get(type_name); - auto serialization = type->getDefaultSerialization(); - ColumnPtr subcolumn_data = type->createColumn(); - serialization->deserializeBinaryBulkWithMultipleStreams(subcolumn_data, limit, settings, state, cache); - column_object.addSubcolumn(key, subcolumn_data->assumeMutable()); - } - else - { - throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, - "Cannot read subcolumn '{}' of DataTypeObject, because its stream is missing", key.getPath()); - } - } + settings.path.push_back(Substream::ObjectData); + if (state_object->kind == BinarySerializationKind::STRING) + deserializeBinaryBulkFromString(column_object, limit, settings, *state_object, cache); + else + deserializeBinaryBulkFromTuple(column_object, limit, settings, *state_object, cache); settings.path.pop_back(); column_object.checkConsistency(); @@ -335,6 +397,49 @@ void SerializationObject::deserializeBinaryBulkWithMultipleStreams( column = std::move(mutable_column); } +template +void SerializationObject::deserializeBinaryBulkFromString( + ColumnObject & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const +{ + ColumnPtr column_string = state.nested_type->createColumn(); + state.nested_serialization->deserializeBinaryBulkWithMultipleStreams( + column_string, limit, settings, state.nested_state, cache); + + ConvertImplGenericFromString::executeImpl(*column_string, column_object, *this, column_string->size()); +} + +template +void SerializationObject::deserializeBinaryBulkFromTuple( + ColumnObject & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const +{ + 
ColumnPtr column_tuple = state.nested_type->createColumn(); + state.nested_serialization->deserializeBinaryBulkWithMultipleStreams( + column_tuple, limit, settings, state.nested_state, cache); + + auto [tuple_paths, tuple_types] = flattenTuple(state.nested_type); + auto flattened_tuple = flattenTuple(column_tuple); + const auto & tuple_columns = assert_cast<const ColumnTuple &>(*flattened_tuple).getColumns(); + + assert(tuple_paths.size() == tuple_types.size()); + size_t num_subcolumns = tuple_paths.size(); + + if (tuple_columns.size() != num_subcolumns) + throw Exception(ErrorCodes::INCORRECT_DATA, + "Inconsistent type ({}) and column ({}) while reading column of type Object", + state.nested_type->getName(), column_tuple->getName()); + + for (size_t i = 0; i < num_subcolumns; ++i) + column_object.addSubcolumn(tuple_paths[i], tuple_columns[i]->assumeMutable()); +} + template void SerializationObject::serializeBinary(const Field &, WriteBuffer &) const { diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h index 549c8735aee..ff72c84faaa 100644 --- a/src/DataTypes/Serializations/SerializationObject.h +++ b/src/DataTypes/Serializations/SerializationObject.h @@ -1,19 +1,35 @@ #pragma once +#include #include #include namespace DB { -/// Serialization for data type Object. -/// Supported only text serialization/deserialization. -/// and binary bulk serialization/deserialization without position independent -/// encoding, i.e. serialization/deserialization into Native format. +/** Serialization for data type Object. + * Supports only text serialization/deserialization + * and binary bulk serialization/deserialization without position independent + * encoding, i.e. serialization/deserialization into Native format. + */ template class SerializationObject : public ISerialization { public: + /** In Native format ColumnObject can be serialized + * in two formats: as Tuple or as String. + * The format is the following: + * + * 1 byte -- 0 if Tuple, 1 if String. + * [type_name] -- Only for tuple serialization. + * ... data of internal column ... + * + * ClickHouse client serializes objects as tuples. + * String serialization exists for clients that cannot + * do the parsing themselves and instead send raw data as + * a string. It will be parsed on the server side. + */
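The wire format documented in this comment is exercised end to end by the new unit test below (gtest_object_serialization.cpp): the test writes kind byte 1 (String), bulk-serializes a plain String column of raw JSON, and expects deserialization to produce typed subcolumns on the server side. The producing side of that stream, sketched with the same names the test uses (out, settings, state):

    // Raw JSON rows live in an ordinary String column.
    auto column_string = ColumnString::create();
    column_string->insert(R"({"k1" : 1})");

    // The stream opens with the kind byte: 1 = String.
    writeIntBinary(static_cast<UInt8>(1), out);

    // Everything after the kind byte is ordinary bulk-serialized String data.
    auto serialization = std::make_shared<SerializationString>();
    serialization->serializeBinaryBulkStatePrefix(settings, state);
    serialization->serializeBinaryBulkWithMultipleStreams(*column_string, 0, column_string->size(), settings, state);
    serialization->serializeBinaryBulkStateSuffix(settings, state);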
+ void serializeBinaryBulkStatePrefix( SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; @@ -58,8 +74,31 @@ public: void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; private: - template - void checkSerializationIsSupported(const TSettings & settings, const TStatePtr & state) const; + enum class BinarySerializationKind : UInt8 + { + TUPLE = 0, + STRING = 1, + }; + + struct SerializeStateObject; + struct DeserializeStateObject; + + void deserializeBinaryBulkFromString( + ColumnObject & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const; + + void deserializeBinaryBulkFromTuple( + ColumnObject & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const; + + template <typename TSettings> + void checkSerializationIsSupported(const TSettings & settings) const; template void deserializeTextImpl(IColumn & column, Reader && reader) const; diff --git a/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp b/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp new file mode 100644 index 00000000000..f1fbbe115e2 --- /dev/null +++ b/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp @@ -0,0 +1,80 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if USE_SIMDJSON + +using namespace DB; + +TEST(SerializationObject, FromString) +{ + WriteBufferFromOwnString out; + + auto column_string = ColumnString::create(); + column_string->insert(R"({"k1" : 1, "k2" : [{"k3" : "aa", "k4" : 2}, {"k3": "bb", "k4": 3}]})"); + column_string->insert(R"({"k1" : 2, "k2" : [{"k3" : "cc", "k5" : 4}, {"k4": 5}, {"k4": 6}]})"); + + { + auto serialization = std::make_shared<SerializationString>(); + + ISerialization::SerializeBinaryBulkSettings settings; + ISerialization::SerializeBinaryBulkStatePtr state; + settings.position_independent_encoding = false; + settings.getter = [&out](const auto &) { return &out; }; + + writeIntBinary(static_cast<UInt8>(1), out); + serialization->serializeBinaryBulkStatePrefix(settings, state); + serialization->serializeBinaryBulkWithMultipleStreams(*column_string, 0, column_string->size(), settings, state); + serialization->serializeBinaryBulkStateSuffix(settings, state); + } + + auto type_object = std::make_shared<DataTypeObject>("json", false); + ColumnPtr result_column = type_object->createColumn(); + + ReadBufferFromOwnString in(out.str()); + + { + auto serialization = type_object->getDefaultSerialization(); + + ISerialization::DeserializeBinaryBulkSettings settings; + ISerialization::DeserializeBinaryBulkStatePtr state; + settings.position_independent_encoding = false; + settings.getter = [&in](const auto &) { return &in; }; + + serialization->deserializeBinaryBulkStatePrefix(settings, state); + serialization->deserializeBinaryBulkWithMultipleStreams(result_column, column_string->size(), settings, state, nullptr); + } + + auto & column_object = assert_cast<ColumnObject &>(*result_column->assumeMutable()); + column_object.finalize(); + + ASSERT_TRUE(column_object.size() == 2); + ASSERT_TRUE(column_object.getSubcolumns().size() == 4); + + auto check_subcolumn = [&](const auto & name, const auto & type_name, const std::vector<Field> & expected) + { + const auto & subcolumn = column_object.getSubcolumn(PathInData{name}); + ASSERT_EQ(subcolumn.getLeastCommonType()->getName(), type_name); + + const auto & data = 
subcolumn.getFinalizedColumn(); + for (size_t i = 0; i < expected.size(); ++i) + ASSERT_EQ( + applyVisitor(FieldVisitorToString(), data[i]), + applyVisitor(FieldVisitorToString(), expected[i])); + }; + + check_subcolumn("k1", "Int8", {1, 2}); + check_subcolumn("k2.k3", "Array(String)", {Array{"aa", "bb"}, Array{"cc", "", ""}}); + check_subcolumn("k2.k4", "Array(Int8)", {Array{2, 3}, Array{0, 5, 6}}); + check_subcolumn("k2.k5", "Array(Int8)", {Array{0, 0}, Array{4, 0, 0}}); +} + +#endif diff --git a/src/Dictionaries/CassandraDictionarySource.cpp b/src/Dictionaries/CassandraDictionarySource.cpp index 9d574917f74..e51caf7112a 100644 --- a/src/Dictionaries/CassandraDictionarySource.cpp +++ b/src/Dictionaries/CassandraDictionarySource.cpp @@ -2,6 +2,8 @@ #include "DictionarySourceFactory.h" #include "DictionaryStructure.h" #include +#include +#include namespace DB { @@ -136,12 +138,12 @@ void CassandraDictionarySource::maybeAllowFiltering(String & query) const query += " ALLOW FILTERING;"; } -Pipe CassandraDictionarySource::loadAll() +QueryPipeline CassandraDictionarySource::loadAll() { String query = query_builder.composeLoadAllQuery(); maybeAllowFiltering(query); LOG_INFO(log, "Loading all using query: {}", query); - return Pipe(std::make_shared(getSession(), query, sample_block, max_block_size)); + return QueryPipeline(std::make_shared(getSession(), query, sample_block, max_block_size)); } std::string CassandraDictionarySource::toString() const @@ -149,15 +151,15 @@ std::string CassandraDictionarySource::toString() const return "Cassandra: " + configuration.db + '.' + configuration.table; } -Pipe CassandraDictionarySource::loadIds(const std::vector & ids) +QueryPipeline CassandraDictionarySource::loadIds(const std::vector & ids) { String query = query_builder.composeLoadIdsQuery(ids); maybeAllowFiltering(query); LOG_INFO(log, "Loading ids using query: {}", query); - return Pipe(std::make_shared(getSession(), query, sample_block, max_block_size)); + return QueryPipeline(std::make_shared(getSession(), query, sample_block, max_block_size)); } -Pipe CassandraDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) +QueryPipeline CassandraDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { if (requested_rows.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "No rows requested"); @@ -181,10 +183,10 @@ Pipe CassandraDictionarySource::loadKeys(const Columns & key_columns, const std: pipes.push_back(Pipe(std::make_shared(getSession(), query, sample_block, max_block_size))); } - return Pipe::unitePipes(std::move(pipes)); + return QueryPipeline(Pipe::unitePipes(std::move(pipes))); } -Pipe CassandraDictionarySource::loadUpdatedAll() +QueryPipeline CassandraDictionarySource::loadUpdatedAll() { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadUpdatedAll is unsupported for CassandraDictionarySource"); } diff --git a/src/Dictionaries/CassandraDictionarySource.h b/src/Dictionaries/CassandraDictionarySource.h index 76ad2316366..c2038a966ea 100644 --- a/src/Dictionaries/CassandraDictionarySource.h +++ b/src/Dictionaries/CassandraDictionarySource.h @@ -51,7 +51,7 @@ public: const String & config_prefix, Block & sample_block); - Pipe loadAll() override; + QueryPipeline loadAll() override; bool supportsSelectiveLoad() const override { return true; } @@ -64,11 +64,11 @@ public: return std::make_shared(dict_struct, configuration, sample_block); } - Pipe loadIds(const std::vector & ids) override; + QueryPipeline loadIds(const 
std::vector & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; - Pipe loadUpdatedAll() override; + QueryPipeline loadUpdatedAll() override; String toString() const override; diff --git a/src/Dictionaries/CassandraSource.cpp b/src/Dictionaries/CassandraSource.cpp index 5b84936a137..f5277e77eba 100644 --- a/src/Dictionaries/CassandraSource.cpp +++ b/src/Dictionaries/CassandraSource.cpp @@ -25,7 +25,7 @@ CassandraSource::CassandraSource( const String & query_str, const Block & sample_block, size_t max_block_size_) - : SourceWithProgress(sample_block) + : ISource(sample_block) , session(session_) , statement(query_str.c_str(), /*parameters count*/ 0) , max_block_size(max_block_size_) diff --git a/src/Dictionaries/CassandraSource.h b/src/Dictionaries/CassandraSource.h index ba76e72c245..9325883a33d 100644 --- a/src/Dictionaries/CassandraSource.h +++ b/src/Dictionaries/CassandraSource.h @@ -4,14 +4,14 @@ #if USE_CASSANDRA #include -#include +#include #include namespace DB { -class CassandraSource final : public SourceWithProgress +class CassandraSource final : public ISource { public: CassandraSource( diff --git a/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp index 355920bbf50..1a3f3f0edc4 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -110,29 +110,29 @@ std::string ClickHouseDictionarySource::getUpdateFieldAndDate() } } -Pipe ClickHouseDictionarySource::loadAllWithSizeHint(std::atomic * result_size_hint) +QueryPipeline ClickHouseDictionarySource::loadAllWithSizeHint(std::atomic * result_size_hint) { return createStreamForQuery(load_all_query, result_size_hint); } -Pipe ClickHouseDictionarySource::loadAll() +QueryPipeline ClickHouseDictionarySource::loadAll() { return createStreamForQuery(load_all_query); } -Pipe ClickHouseDictionarySource::loadUpdatedAll() +QueryPipeline ClickHouseDictionarySource::loadUpdatedAll() { String load_update_query = getUpdateFieldAndDate(); return createStreamForQuery(load_update_query); } -Pipe ClickHouseDictionarySource::loadIds(const std::vector & ids) +QueryPipeline ClickHouseDictionarySource::loadIds(const std::vector & ids) { return createStreamForQuery(query_builder.composeLoadIdsQuery(ids)); } -Pipe ClickHouseDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) +QueryPipeline ClickHouseDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { String query = query_builder.composeLoadKeysQuery(key_columns, requested_rows, ExternalQueryBuilder::IN_WITH_TUPLES); return createStreamForQuery(query); @@ -162,9 +162,9 @@ std::string ClickHouseDictionarySource::toString() const return "ClickHouse: " + configuration.db + '.' + configuration.table + (where.empty() ? 
"" : ", where: " + where); } -Pipe ClickHouseDictionarySource::createStreamForQuery(const String & query, std::atomic * result_size_hint) +QueryPipeline ClickHouseDictionarySource::createStreamForQuery(const String & query, std::atomic * result_size_hint) { - QueryPipelineBuilder builder; + QueryPipeline pipeline; /// Sample block should not contain first row default values auto empty_sample_block = sample_block.cloneEmpty(); @@ -175,32 +175,25 @@ Pipe ClickHouseDictionarySource::createStreamForQuery(const String & query, std: if (configuration.is_local) { - builder.init(executeQuery(query, context_copy, true).pipeline); - auto converting = ActionsDAG::makeConvertingActions( - builder.getHeader().getColumnsWithTypeAndName(), - empty_sample_block.getColumnsWithTypeAndName(), - ActionsDAG::MatchColumnsMode::Position); + pipeline = executeQuery(query, context_copy, true).pipeline; - builder.addSimpleTransform([&](const Block & header) - { - return std::make_shared(header, std::make_shared(converting)); - }); + pipeline.convertStructureTo(empty_sample_block.getColumnsWithTypeAndName()); } else { - builder.init(Pipe(std::make_shared( - std::make_shared(pool, query, empty_sample_block, context_copy), false, false))); + pipeline = QueryPipeline(std::make_shared( + std::make_shared(pool, query, empty_sample_block, context_copy), false, false)); } if (result_size_hint) { - builder.setProgressCallback([result_size_hint](const Progress & progress) + pipeline.setProgressCallback([result_size_hint](const Progress & progress) { *result_size_hint += progress.total_rows_to_read; }); } - return QueryPipelineBuilder::getPipe(std::move(builder)); + return pipeline; } std::string ClickHouseDictionarySource::doInvalidateQuery(const std::string & request) const diff --git a/src/Dictionaries/ClickHouseDictionarySource.h b/src/Dictionaries/ClickHouseDictionarySource.h index cdcc0ee824f..007e3e8b29d 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.h +++ b/src/Dictionaries/ClickHouseDictionarySource.h @@ -45,15 +45,15 @@ public: ClickHouseDictionarySource(const ClickHouseDictionarySource & other); ClickHouseDictionarySource & operator=(const ClickHouseDictionarySource &) = delete; - Pipe loadAllWithSizeHint(std::atomic * result_size_hint) override; + QueryPipeline loadAllWithSizeHint(std::atomic * result_size_hint) override; - Pipe loadAll() override; + QueryPipeline loadAll() override; - Pipe loadUpdatedAll() override; + QueryPipeline loadUpdatedAll() override; - Pipe loadIds(const std::vector & ids) override; + QueryPipeline loadIds(const std::vector & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; bool isModified() const override; bool supportsSelectiveLoad() const override { return true; } @@ -71,7 +71,7 @@ public: private: std::string getUpdateFieldAndDate(); - Pipe createStreamForQuery(const String & query, std::atomic * result_size_hint = nullptr); + QueryPipeline createStreamForQuery(const String & query, std::atomic * result_size_hint = nullptr); std::string doInvalidateQuery(const std::string & request) const; diff --git a/src/Dictionaries/DictionaryHelpers.h b/src/Dictionaries/DictionaryHelpers.h index 80b15eb2569..9899b9f4c88 100644 --- a/src/Dictionaries/DictionaryHelpers.h +++ b/src/Dictionaries/DictionaryHelpers.h @@ -518,7 +518,7 @@ template void mergeBlockWithPipe( size_t key_columns_size, Block & block_to_update, - Pipe pipe) + 
QueryPipeline pipeline) { using KeyType = std::conditional_t; @@ -568,8 +568,6 @@ void mergeBlockWithPipe( auto result_fetched_columns = block_to_update.cloneEmptyColumns(); - QueryPipeline pipeline(std::move(pipe)); - PullingPipelineExecutor executor(pipeline); Block block; diff --git a/src/Dictionaries/DictionarySource.cpp b/src/Dictionaries/DictionarySource.cpp index 526d9fc85cd..4763c3e3ff9 100644 --- a/src/Dictionaries/DictionarySource.cpp +++ b/src/Dictionaries/DictionarySource.cpp @@ -1,5 +1,6 @@ #include "DictionarySource.h" #include +#include namespace DB @@ -11,12 +12,12 @@ namespace ErrorCodes extern const int NO_SUCH_COLUMN_IN_TABLE; } -class DictionarySource : public SourceWithProgress +class DictionarySource : public ISource { public: explicit DictionarySource(std::shared_ptr coordinator_) - : SourceWithProgress(coordinator_->getHeader()), coordinator(std::move(coordinator_)) + : ISource(coordinator_->getHeader()), coordinator(std::move(coordinator_)) { } diff --git a/src/Dictionaries/DictionarySource.h b/src/Dictionaries/DictionarySource.h index b54a8da6596..3fa7ce8bda6 100644 --- a/src/Dictionaries/DictionarySource.h +++ b/src/Dictionaries/DictionarySource.h @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include diff --git a/src/Dictionaries/DirectDictionary.cpp b/src/Dictionaries/DirectDictionary.cpp index f15e44a6e21..6ecc216e370 100644 --- a/src/Dictionaries/DirectDictionary.cpp +++ b/src/Dictionaries/DirectDictionary.cpp @@ -8,7 +8,9 @@ #include #include +#include #include +#include namespace DB { @@ -258,6 +260,35 @@ ColumnUInt8::Ptr DirectDictionary::isInHierarchy( return nullptr; } +class SourceFromQueryPipeline : public ISource +{ +public: + explicit SourceFromQueryPipeline(QueryPipeline pipeline_) + : ISource(pipeline_.getHeader()) + , pipeline(std::move(pipeline_)) + , executor(pipeline) + {} + + std::string getName() const override { return "SourceFromQueryPipeline"; } + + Chunk generate() override + { + Chunk chunk; + while (executor.pull(chunk)) + { + if (chunk) + return chunk; + } + + return {}; + } + + +private: + QueryPipeline pipeline; + PullingPipelineExecutor executor; +}; + template Pipe DirectDictionary::getSourcePipe( const Columns & key_columns [[maybe_unused]], @@ -275,7 +306,7 @@ Pipe DirectDictionary::getSourcePipe( for (auto key : requested_keys) ids.emplace_back(key); - pipe = source_ptr->loadIds(ids); + pipe = Pipe(std::make_shared(source_ptr->loadIds(ids))); } else { @@ -284,7 +315,7 @@ Pipe DirectDictionary::getSourcePipe( for (size_t i = 0; i < requested_keys_size; ++i) requested_rows.emplace_back(i); - pipe = source_ptr->loadKeys(key_columns, requested_rows); + pipe = Pipe(std::make_shared(source_ptr->loadKeys(key_columns, requested_rows))); } return pipe; @@ -293,7 +324,7 @@ Pipe DirectDictionary::getSourcePipe( template Pipe DirectDictionary::read(const Names & /* column_names */, size_t /* max_block_size */, size_t /* num_streams */) const { - return source_ptr->loadAll(); + return Pipe(std::make_shared(source_ptr->loadAll())); } namespace diff --git a/src/Dictionaries/ExecutableDictionarySource.cpp b/src/Dictionaries/ExecutableDictionarySource.cpp index e19d0328fab..5d805cc4a03 100644 --- a/src/Dictionaries/ExecutableDictionarySource.cpp +++ b/src/Dictionaries/ExecutableDictionarySource.cpp @@ -103,7 +103,7 @@ ExecutableDictionarySource::ExecutableDictionarySource(const ExecutableDictionar { } -Pipe ExecutableDictionarySource::loadAll() +QueryPipeline ExecutableDictionarySource::loadAll() { if 
(configuration.implicit_key) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "ExecutableDictionarySource with implicit_key does not support loadAll method"); @@ -114,10 +114,10 @@ Pipe ExecutableDictionarySource::loadAll() auto command = configuration.command; updateCommandIfNeeded(command, coordinator_configuration.execute_direct, context); - return coordinator->createPipe(command, configuration.command_arguments, sample_block, context); + return QueryPipeline(coordinator->createPipe(command, configuration.command_arguments, sample_block, context)); } -Pipe ExecutableDictionarySource::loadUpdatedAll() +QueryPipeline ExecutableDictionarySource::loadUpdatedAll() { if (configuration.implicit_key) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "ExecutableDictionarySource with implicit_key does not support loadUpdatedAll method"); @@ -148,10 +148,10 @@ Pipe ExecutableDictionarySource::loadUpdatedAll() update_time = new_update_time; LOG_TRACE(log, "loadUpdatedAll {}", command); - return coordinator->createPipe(command, command_arguments, sample_block, context); + return QueryPipeline(coordinator->createPipe(command, command_arguments, sample_block, context)); } -Pipe ExecutableDictionarySource::loadIds(const std::vector & ids) +QueryPipeline ExecutableDictionarySource::loadIds(const std::vector & ids) { LOG_TRACE(log, "loadIds {} size = {}", toString(), ids.size()); @@ -159,7 +159,7 @@ Pipe ExecutableDictionarySource::loadIds(const std::vector & ids) return getStreamForBlock(block); } -Pipe ExecutableDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) +QueryPipeline ExecutableDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { LOG_TRACE(log, "loadKeys {} size = {}", toString(), requested_rows.size()); @@ -167,7 +167,7 @@ Pipe ExecutableDictionarySource::loadKeys(const Columns & key_columns, const std return getStreamForBlock(block); } -Pipe ExecutableDictionarySource::getStreamForBlock(const Block & block) +QueryPipeline ExecutableDictionarySource::getStreamForBlock(const Block & block) { const auto & coordinator_configuration = coordinator->getConfiguration(); String command = configuration.command; @@ -184,7 +184,7 @@ Pipe ExecutableDictionarySource::getStreamForBlock(const Block & block) if (configuration.implicit_key) pipe.addTransform(std::make_shared(block, pipe.getHeader())); - return pipe; + return QueryPipeline(std::move(pipe)); } bool ExecutableDictionarySource::isModified() const diff --git a/src/Dictionaries/ExecutableDictionarySource.h b/src/Dictionaries/ExecutableDictionarySource.h index 7a6254780a8..0456d3cafef 100644 --- a/src/Dictionaries/ExecutableDictionarySource.h +++ b/src/Dictionaries/ExecutableDictionarySource.h @@ -39,17 +39,17 @@ public: ExecutableDictionarySource(const ExecutableDictionarySource & other); ExecutableDictionarySource & operator=(const ExecutableDictionarySource &) = delete; - Pipe loadAll() override; + QueryPipeline loadAll() override; /** The logic of this method is flawed, absolutely incorrect and ignorant. * It may lead to skipping some values due to clock sync or timezone changes. * The intended usage of "update_field" is totally different. 
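The executable-source conversions above are mostly mechanical: each method still assembles a Pipe internally and wraps it in a QueryPipeline only at the return statement. For implicit_key sources a transform is spliced in first; the template argument is missing from the hunk as rendered here, so the helper name below (TransformWithAdditionalColumns, the existing helper from DictionarySourceHelpers) is an assumption:

    Pipe pipe = coordinator->createPipe(command, configuration.command_arguments, sample_block, context);

    // implicit_key sources return only values; attach the requested keys back.
    if (configuration.implicit_key)
        pipe.addTransform(std::make_shared<TransformWithAdditionalColumns>(block, pipe.getHeader()));

    return QueryPipeline(std::move(pipe));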
*/ - Pipe loadUpdatedAll() override; + QueryPipeline loadUpdatedAll() override; - Pipe loadIds(const std::vector & ids) override; + QueryPipeline loadIds(const std::vector & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; bool isModified() const override; @@ -61,7 +61,7 @@ public: std::string toString() const override; - Pipe getStreamForBlock(const Block & block); + QueryPipeline getStreamForBlock(const Block & block); private: Poco::Logger * log; diff --git a/src/Dictionaries/ExecutablePoolDictionarySource.cpp b/src/Dictionaries/ExecutablePoolDictionarySource.cpp index 7f3d50519c2..df502efc882 100644 --- a/src/Dictionaries/ExecutablePoolDictionarySource.cpp +++ b/src/Dictionaries/ExecutablePoolDictionarySource.cpp @@ -68,17 +68,17 @@ ExecutablePoolDictionarySource::ExecutablePoolDictionarySource(const ExecutableP { } -Pipe ExecutablePoolDictionarySource::loadAll() +QueryPipeline ExecutablePoolDictionarySource::loadAll() { throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "ExecutablePoolDictionarySource does not support loadAll method"); } -Pipe ExecutablePoolDictionarySource::loadUpdatedAll() +QueryPipeline ExecutablePoolDictionarySource::loadUpdatedAll() { throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "ExecutablePoolDictionarySource does not support loadUpdatedAll method"); } -Pipe ExecutablePoolDictionarySource::loadIds(const std::vector & ids) +QueryPipeline ExecutablePoolDictionarySource::loadIds(const std::vector & ids) { LOG_TRACE(log, "loadIds {} size = {}", toString(), ids.size()); @@ -86,7 +86,7 @@ Pipe ExecutablePoolDictionarySource::loadIds(const std::vector & ids) return getStreamForBlock(block); } -Pipe ExecutablePoolDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) +QueryPipeline ExecutablePoolDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { LOG_TRACE(log, "loadKeys {} size = {}", toString(), requested_rows.size()); @@ -94,7 +94,7 @@ Pipe ExecutablePoolDictionarySource::loadKeys(const Columns & key_columns, const return getStreamForBlock(block); } -Pipe ExecutablePoolDictionarySource::getStreamForBlock(const Block & block) +QueryPipeline ExecutablePoolDictionarySource::getStreamForBlock(const Block & block) { String command = configuration.command; const auto & coordinator_configuration = coordinator->getConfiguration(); @@ -147,7 +147,7 @@ Pipe ExecutablePoolDictionarySource::getStreamForBlock(const Block & block) if (configuration.implicit_key) pipe.addTransform(std::make_shared(block, pipe.getHeader())); - return pipe; + return QueryPipeline(std::move(pipe)); } bool ExecutablePoolDictionarySource::isModified() const diff --git a/src/Dictionaries/ExecutablePoolDictionarySource.h b/src/Dictionaries/ExecutablePoolDictionarySource.h index f732e274b1d..1fc10d18b76 100644 --- a/src/Dictionaries/ExecutablePoolDictionarySource.h +++ b/src/Dictionaries/ExecutablePoolDictionarySource.h @@ -42,17 +42,17 @@ public: ExecutablePoolDictionarySource(const ExecutablePoolDictionarySource & other); ExecutablePoolDictionarySource & operator=(const ExecutablePoolDictionarySource &) = delete; - Pipe loadAll() override; + QueryPipeline loadAll() override; /** The logic of this method is flawed, absolutely incorrect and ignorant. * It may lead to skipping some values due to clock sync or timezone changes. 
* The intended usage of "update_field" is totally different. */ - Pipe loadUpdatedAll() override; + QueryPipeline loadUpdatedAll() override; - Pipe loadIds(const std::vector & ids) override; + QueryPipeline loadIds(const std::vector & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; bool isModified() const override; @@ -64,7 +64,7 @@ public: std::string toString() const override; - Pipe getStreamForBlock(const Block & block); + QueryPipeline getStreamForBlock(const Block & block); private: const DictionaryStructure dict_struct; diff --git a/src/Dictionaries/FileDictionarySource.cpp b/src/Dictionaries/FileDictionarySource.cpp index a6b423cfe74..86287971428 100644 --- a/src/Dictionaries/FileDictionarySource.cpp +++ b/src/Dictionaries/FileDictionarySource.cpp @@ -46,7 +46,7 @@ FileDictionarySource::FileDictionarySource(const FileDictionarySource & other) } -Pipe FileDictionarySource::loadAll() +QueryPipeline FileDictionarySource::loadAll() { LOG_TRACE(&Poco::Logger::get("FileDictionary"), "loadAll {}", toString()); auto in_ptr = std::make_unique(filepath); @@ -54,7 +54,7 @@ Pipe FileDictionarySource::loadAll() source->addBuffer(std::move(in_ptr)); last_modification = getLastModification(); - return Pipe(std::move(source)); + return QueryPipeline(std::move(source)); } diff --git a/src/Dictionaries/FileDictionarySource.h b/src/Dictionaries/FileDictionarySource.h index 8fe2d87d8b9..86ce1baa02a 100644 --- a/src/Dictionaries/FileDictionarySource.h +++ b/src/Dictionaries/FileDictionarySource.h @@ -21,19 +21,19 @@ public: FileDictionarySource(const FileDictionarySource & other); - Pipe loadAll() override; + QueryPipeline loadAll() override; - Pipe loadUpdatedAll() override + QueryPipeline loadUpdatedAll() override { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadUpdatedAll is unsupported for FileDictionarySource"); } - Pipe loadIds(const std::vector & /*ids*/) override + QueryPipeline loadIds(const std::vector & /*ids*/) override { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadIds is unsupported for FileDictionarySource"); } - Pipe loadKeys(const Columns & /*key_columns*/, const std::vector & /*requested_rows*/) override + QueryPipeline loadKeys(const Columns & /*key_columns*/, const std::vector & /*requested_rows*/) override { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadKeys is unsupported for FileDictionarySource"); } diff --git a/src/Dictionaries/FlatDictionary.cpp b/src/Dictionaries/FlatDictionary.cpp index bd664224d41..93b83e1018a 100644 --- a/src/Dictionaries/FlatDictionary.cpp +++ b/src/Dictionaries/FlatDictionary.cpp @@ -379,11 +379,11 @@ void FlatDictionary::updateData() } else { - Pipe pipe(source_ptr->loadUpdatedAll()); + auto pipeline(source_ptr->loadUpdatedAll()); mergeBlockWithPipe( dict_struct.getKeysSize(), *update_field_loaded_block, - std::move(pipe)); + std::move(pipeline)); } if (update_field_loaded_block) diff --git a/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp index 17592a8d9da..d45213fa7f8 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -62,7 +62,7 @@ HTTPDictionarySource::HTTPDictionarySource(const HTTPDictionarySource & other) credentials.setPassword(other.credentials.getPassword()); } -Pipe HTTPDictionarySource::createWrappedBuffer(std::unique_ptr http_buffer_ptr) +QueryPipeline 
HTTPDictionarySource::createWrappedBuffer(std::unique_ptr http_buffer_ptr) { Poco::URI uri(configuration.url); String http_request_compression_method_str = http_buffer_ptr->getCompressionMethod(); @@ -70,7 +70,7 @@ Pipe HTTPDictionarySource::createWrappedBuffer(std::unique_ptrgetInputFormat(configuration.format, *in_ptr_wrapped, sample_block, max_block_size); source->addBuffer(std::move(in_ptr_wrapped)); - return Pipe(std::move(source)); + return QueryPipeline(std::move(source)); } void HTTPDictionarySource::getUpdateFieldAndDate(Poco::URI & uri) @@ -90,7 +90,7 @@ void HTTPDictionarySource::getUpdateFieldAndDate(Poco::URI & uri) } } -Pipe HTTPDictionarySource::loadAll() +QueryPipeline HTTPDictionarySource::loadAll() { LOG_TRACE(log, "loadAll {}", toString()); Poco::URI uri(configuration.url); @@ -110,7 +110,7 @@ Pipe HTTPDictionarySource::loadAll() return createWrappedBuffer(std::move(in_ptr)); } -Pipe HTTPDictionarySource::loadUpdatedAll() +QueryPipeline HTTPDictionarySource::loadUpdatedAll() { Poco::URI uri(configuration.url); getUpdateFieldAndDate(uri); @@ -131,7 +131,7 @@ Pipe HTTPDictionarySource::loadUpdatedAll() return createWrappedBuffer(std::move(in_ptr)); } -Pipe HTTPDictionarySource::loadIds(const std::vector & ids) +QueryPipeline HTTPDictionarySource::loadIds(const std::vector & ids) { LOG_TRACE(log, "loadIds {} size = {}", toString(), ids.size()); @@ -161,7 +161,7 @@ Pipe HTTPDictionarySource::loadIds(const std::vector & ids) return createWrappedBuffer(std::move(in_ptr)); } -Pipe HTTPDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) +QueryPipeline HTTPDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { LOG_TRACE(log, "loadKeys {} size = {}", toString(), requested_rows.size()); diff --git a/src/Dictionaries/HTTPDictionarySource.h b/src/Dictionaries/HTTPDictionarySource.h index ce357814982..71351cd9987 100644 --- a/src/Dictionaries/HTTPDictionarySource.h +++ b/src/Dictionaries/HTTPDictionarySource.h @@ -43,13 +43,13 @@ public: HTTPDictionarySource(const HTTPDictionarySource & other); HTTPDictionarySource & operator=(const HTTPDictionarySource &) = delete; - Pipe loadAll() override; + QueryPipeline loadAll() override; - Pipe loadUpdatedAll() override; + QueryPipeline loadUpdatedAll() override; - Pipe loadIds(const std::vector & ids) override; + QueryPipeline loadIds(const std::vector & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; bool isModified() const override; @@ -65,7 +65,7 @@ private: void getUpdateFieldAndDate(Poco::URI & uri); // wrap buffer using encoding from made request - Pipe createWrappedBuffer(std::unique_ptr http_buffer); + QueryPipeline createWrappedBuffer(std::unique_ptr http_buffer); Poco::Logger * log; diff --git a/src/Dictionaries/HashedDictionary.cpp b/src/Dictionaries/HashedDictionary.cpp index 81d3d42617b..1efc3d660df 100644 --- a/src/Dictionaries/HashedDictionary.cpp +++ b/src/Dictionaries/HashedDictionary.cpp @@ -7,8 +7,6 @@ #include #include -#include - #include #include #include diff --git a/src/Dictionaries/IDictionary.h b/src/Dictionaries/IDictionary.h index 32c81beee6f..480befdcfb2 100644 --- a/src/Dictionaries/IDictionary.h +++ b/src/Dictionaries/IDictionary.h @@ -63,7 +63,7 @@ public: std::string getFullName() const { std::lock_guard lock{name_mutex}; - return dictionary_id.getInternalDictionaryName(); + 
return dictionary_id.getNameForLogs(); } StorageID getDictionaryID() const @@ -79,7 +79,7 @@ public: dictionary_id = new_name; } - std::string getLoadableName() const override final + std::string getLoadableName() const final { std::lock_guard lock{name_mutex}; return dictionary_id.getInternalDictionaryName(); diff --git a/src/Dictionaries/IDictionarySource.h b/src/Dictionaries/IDictionarySource.h index 128595b815f..0fd528aafd4 100644 --- a/src/Dictionaries/IDictionarySource.h +++ b/src/Dictionaries/IDictionarySource.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include @@ -21,10 +21,10 @@ class IDictionarySource public: /// Returns a pipe with all the data available from this source. - virtual Pipe loadAll() = 0; + virtual QueryPipeline loadAll() = 0; /// Returns a pipe with updated data available from this source. - virtual Pipe loadUpdatedAll() = 0; + virtual QueryPipeline loadUpdatedAll() = 0; /** * result_size_hint - approx number of rows in the stream. @@ -59,7 +59,7 @@ public: * * ... */ - virtual Pipe loadAllWithSizeHint(std::atomic * /* result_size_hint */) + virtual QueryPipeline loadAllWithSizeHint(std::atomic * /* result_size_hint */) { return loadAll(); } @@ -72,13 +72,13 @@ public: /** Returns an input stream with the data for a collection of identifiers. * It must be guaranteed, that 'ids' array will live at least until all data will be read from returned stream. */ - virtual Pipe loadIds(const std::vector & ids) = 0; + virtual QueryPipeline loadIds(const std::vector & ids) = 0; /** Returns an input stream with the data for a collection of composite keys. * `requested_rows` contains indices of all rows containing unique keys. * It must be guaranteed, that 'requested_rows' array will live at least until all data will be read from returned stream. 
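With IDictionarySource now expressed in terms of QueryPipeline, loadAllWithSizeHint keeps its default of falling back to loadAll(), and sources that can estimate the row count report it through a progress callback instead (as ClickHouseDictionarySource does earlier in this patch). A minimal implementation under the new interface might look like this sketch (MySource is hypothetical; SourceFromSingleChunk is an existing processor):

    QueryPipeline MySource::loadAll()
    {
        // A single prepared chunk is enough to form a whole pipeline.
        auto source = std::make_shared<SourceFromSingleChunk>(sample_block, Chunk(columns, num_rows));
        return QueryPipeline(std::move(source));
    }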
*/ - virtual Pipe loadKeys(const Columns & key_columns, const std::vector & requested_rows) = 0; + virtual QueryPipeline loadKeys(const Columns & key_columns, const std::vector & requested_rows) = 0; /// indicates whether the source has been modified since last load* operation virtual bool isModified() const = 0; diff --git a/src/Dictionaries/LibraryDictionarySource.cpp b/src/Dictionaries/LibraryDictionarySource.cpp index 20f4c4b0a01..936bbd72299 100644 --- a/src/Dictionaries/LibraryDictionarySource.cpp +++ b/src/Dictionaries/LibraryDictionarySource.cpp @@ -105,21 +105,21 @@ bool LibraryDictionarySource::supportsSelectiveLoad() const } -Pipe LibraryDictionarySource::loadAll() +QueryPipeline LibraryDictionarySource::loadAll() { LOG_TRACE(log, "loadAll {}", toString()); return bridge_helper->loadAll(); } -Pipe LibraryDictionarySource::loadIds(const std::vector & ids) +QueryPipeline LibraryDictionarySource::loadIds(const std::vector & ids) { LOG_TRACE(log, "loadIds {} size = {}", toString(), ids.size()); return bridge_helper->loadIds(ids); } -Pipe LibraryDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) +QueryPipeline LibraryDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { LOG_TRACE(log, "loadKeys {} size = {}", toString(), requested_rows.size()); auto block = blockForKeys(dict_struct, key_columns, requested_rows); diff --git a/src/Dictionaries/LibraryDictionarySource.h b/src/Dictionaries/LibraryDictionarySource.h index 32a4c942959..09f95b17dab 100644 --- a/src/Dictionaries/LibraryDictionarySource.h +++ b/src/Dictionaries/LibraryDictionarySource.h @@ -46,16 +46,16 @@ public: ~LibraryDictionarySource() override; - Pipe loadAll() override; + QueryPipeline loadAll() override; - Pipe loadUpdatedAll() override + QueryPipeline loadUpdatedAll() override { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadUpdatedAll is unsupported for LibraryDictionarySource"); } - Pipe loadIds(const std::vector & ids) override; + QueryPipeline loadIds(const std::vector & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; bool isModified() const override; diff --git a/src/Dictionaries/MongoDBDictionarySource.cpp b/src/Dictionaries/MongoDBDictionarySource.cpp index 57f2c8f60c3..6a1bfceaec7 100644 --- a/src/Dictionaries/MongoDBDictionarySource.cpp +++ b/src/Dictionaries/MongoDBDictionarySource.cpp @@ -163,12 +163,12 @@ MongoDBDictionarySource::MongoDBDictionarySource(const MongoDBDictionarySource & MongoDBDictionarySource::~MongoDBDictionarySource() = default; -Pipe MongoDBDictionarySource::loadAll() +QueryPipeline MongoDBDictionarySource::loadAll() { - return Pipe(std::make_shared(connection, createCursor(db, collection, sample_block), sample_block, max_block_size)); + return QueryPipeline(std::make_shared(connection, createCursor(db, collection, sample_block), sample_block, max_block_size)); } -Pipe MongoDBDictionarySource::loadIds(const std::vector & ids) +QueryPipeline MongoDBDictionarySource::loadIds(const std::vector & ids) { if (!dict_struct.id) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'id' is required for selective loading"); @@ -185,11 +185,11 @@ Pipe MongoDBDictionarySource::loadIds(const std::vector & ids) cursor->query().selector().addNewDocument(dict_struct.id->name).add("$in", ids_array); - return Pipe(std::make_shared(connection, std::move(cursor), 
sample_block, max_block_size)); + return QueryPipeline(std::make_shared<MongoDBSource>(connection, std::move(cursor), sample_block, max_block_size)); } -Pipe MongoDBDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) +QueryPipeline MongoDBDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) { if (!dict_struct.key) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'key' is required for selective loading"); @@ -251,7 +251,7 @@ Pipe MongoDBDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) /// If more than one key we should use $or cursor->query().selector().add("$or", keys_array); - return Pipe(std::make_shared<MongoDBSource>(connection, std::move(cursor), sample_block, max_block_size)); + return QueryPipeline(std::make_shared<MongoDBSource>(connection, std::move(cursor), sample_block, max_block_size)); } std::string MongoDBDictionarySource::toString() const diff --git a/src/Dictionaries/MongoDBDictionarySource.h b/src/Dictionaries/MongoDBDictionarySource.h index 85531f89902..0d4c5f6aa0b 100644 --- a/src/Dictionaries/MongoDBDictionarySource.h +++ b/src/Dictionaries/MongoDBDictionarySource.h @@ -46,18 +46,18 @@ public: ~MongoDBDictionarySource() override; - Pipe loadAll() override; + QueryPipeline loadAll() override; - Pipe loadUpdatedAll() override + QueryPipeline loadUpdatedAll() override { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadUpdatedAll is unsupported for MongoDBDictionarySource"); } bool supportsSelectiveLoad() const override { return true; } - Pipe loadIds(const std::vector<UInt64> & ids) override; + QueryPipeline loadIds(const std::vector<UInt64> & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) override; /// @todo: for MongoDB, modification date can somehow be determined from the `_id` object field bool isModified() const override { return true; } diff --git a/src/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp index 22ca5a5b08c..a5807f58f8a 100644 --- a/src/Dictionaries/MySQLDictionarySource.cpp +++ b/src/Dictionaries/MySQLDictionarySource.cpp @@ -180,13 +180,13 @@ std::string MySQLDictionarySource::getUpdateFieldAndDate() } } -Pipe MySQLDictionarySource::loadFromQuery(const String & query) +QueryPipeline MySQLDictionarySource::loadFromQuery(const String & query) { - return Pipe(std::make_shared<MySQLWithFailoverSource>( + return QueryPipeline(std::make_shared<MySQLWithFailoverSource>( pool, query, sample_block, settings)); } -Pipe MySQLDictionarySource::loadAll() +QueryPipeline MySQLDictionarySource::loadAll() { auto connection = pool->get(); last_modification = getLastModification(connection, false); @@ -195,7 +195,7 @@ Pipe MySQLDictionarySource::loadAll() return loadFromQuery(load_all_query); } -Pipe MySQLDictionarySource::loadUpdatedAll() +QueryPipeline MySQLDictionarySource::loadUpdatedAll() { auto connection = pool->get(); last_modification = getLastModification(connection, false); @@ -205,14 +205,14 @@ Pipe MySQLDictionarySource::loadUpdatedAll() return loadFromQuery(load_update_query); } -Pipe MySQLDictionarySource::loadIds(const std::vector<UInt64> & ids) +QueryPipeline MySQLDictionarySource::loadIds(const std::vector<UInt64> & ids) { /// We do not log in here and do not update the modification time, as the request can be large, and often called.
const auto query = query_builder.composeLoadIdsQuery(ids); return loadFromQuery(query); } -Pipe MySQLDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) +QueryPipeline MySQLDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) { /// We do not log in here and do not update the modification time, as the request can be large, and often called. const auto query = query_builder.composeLoadKeysQuery(key_columns, requested_rows, ExternalQueryBuilder::AND_OR_CHAIN); diff --git a/src/Dictionaries/MySQLDictionarySource.h b/src/Dictionaries/MySQLDictionarySource.h index 90506ad1726..840345e3dc2 100644 --- a/src/Dictionaries/MySQLDictionarySource.h +++ b/src/Dictionaries/MySQLDictionarySource.h @@ -52,13 +52,13 @@ public: MySQLDictionarySource(const MySQLDictionarySource & other); MySQLDictionarySource & operator=(const MySQLDictionarySource &) = delete; - Pipe loadAll() override; + QueryPipeline loadAll() override; - Pipe loadUpdatedAll() override; + QueryPipeline loadUpdatedAll() override; - Pipe loadIds(const std::vector<UInt64> & ids) override; + QueryPipeline loadIds(const std::vector<UInt64> & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) override; bool isModified() const override; @@ -71,7 +71,7 @@ public: std::string toString() const override; private: - Pipe loadFromQuery(const String & query); + QueryPipeline loadFromQuery(const String & query); std::string getUpdateFieldAndDate(); diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index 511d6a7288e..eb1a4caf2fc 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -78,37 +78,37 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource(const PostgreSQLDictionar } -Pipe PostgreSQLDictionarySource::loadAll() +QueryPipeline PostgreSQLDictionarySource::loadAll() { LOG_TRACE(log, fmt::runtime(load_all_query)); return loadBase(load_all_query); } -Pipe PostgreSQLDictionarySource::loadUpdatedAll() +QueryPipeline PostgreSQLDictionarySource::loadUpdatedAll() { auto load_update_query = getUpdateFieldAndDate(); LOG_TRACE(log, fmt::runtime(load_update_query)); return loadBase(load_update_query); } -Pipe PostgreSQLDictionarySource::loadIds(const std::vector<UInt64> & ids) +QueryPipeline PostgreSQLDictionarySource::loadIds(const std::vector<UInt64> & ids) { const auto query = query_builder.composeLoadIdsQuery(ids); return loadBase(query); } -Pipe PostgreSQLDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) +QueryPipeline PostgreSQLDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) { const auto query = query_builder.composeLoadKeysQuery(key_columns, requested_rows, ExternalQueryBuilder::AND_OR_CHAIN); return loadBase(query); } -Pipe PostgreSQLDictionarySource::loadBase(const String & query) +QueryPipeline PostgreSQLDictionarySource::loadBase(const String & query) { - return Pipe(std::make_shared<PostgreSQLSource<>>(pool->get(), query, sample_block, max_block_size)); + return QueryPipeline(std::make_shared<PostgreSQLSource<>>(pool->get(), query, sample_block, max_block_size)); } diff --git a/src/Dictionaries/PostgreSQLDictionarySource.h b/src/Dictionaries/PostgreSQLDictionarySource.h index 7345366f3ac..b6a604bc7d3 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.h +++ 
b/src/Dictionaries/PostgreSQLDictionarySource.h @@ -41,10 +41,10 @@ public: PostgreSQLDictionarySource(const PostgreSQLDictionarySource & other); PostgreSQLDictionarySource & operator=(const PostgreSQLDictionarySource &) = delete; - Pipe loadAll() override; - Pipe loadUpdatedAll() override; - Pipe loadIds(const std::vector<UInt64> & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) override; + QueryPipeline loadAll() override; + QueryPipeline loadUpdatedAll() override; + QueryPipeline loadIds(const std::vector<UInt64> & ids) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) override; bool isModified() const override; bool supportsSelectiveLoad() const override; @@ -56,7 +56,7 @@ public: private: String getUpdateFieldAndDate(); String doInvalidateQuery(const std::string & request) const; - Pipe loadBase(const String & query); + QueryPipeline loadBase(const String & query); const DictionaryStructure dict_struct; const Configuration configuration; diff --git a/src/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp index 85a11e9a33d..ab5c758848a 100644 --- a/src/Dictionaries/RedisDictionarySource.cpp +++ b/src/Dictionaries/RedisDictionarySource.cpp @@ -9,6 +9,7 @@ #include #include #include +#include <QueryPipeline/QueryPipeline.h> #include @@ -126,7 +127,7 @@ namespace DB __builtin_unreachable(); } - Pipe RedisDictionarySource::loadAll() + QueryPipeline RedisDictionarySource::loadAll() { auto connection = getConnection(); @@ -136,7 +137,7 @@ namespace DB /// Get only keys for specified storage type. auto all_keys = connection->client->execute<RedisArray>(command_for_keys); if (all_keys.isNull()) - return Pipe(std::make_shared<RedisSource>( + return QueryPipeline(std::make_shared<RedisSource>( std::move(connection), RedisArray{}, configuration.storage_type, sample_block, REDIS_MAX_BLOCK_SIZE)); @@ -177,12 +178,12 @@ namespace DB keys = hkeys; } - return Pipe(std::make_shared<RedisSource>( + return QueryPipeline(std::make_shared<RedisSource>( std::move(connection), std::move(keys), configuration.storage_type, sample_block, REDIS_MAX_BLOCK_SIZE)); } - Pipe RedisDictionarySource::loadIds(const std::vector<UInt64> & ids) + QueryPipeline RedisDictionarySource::loadIds(const std::vector<UInt64> & ids) { auto connection = getConnection(); @@ -197,12 +198,12 @@ namespace DB for (UInt64 id : ids) keys << DB::toString(id); - return Pipe(std::make_shared<RedisSource>( + return QueryPipeline(std::make_shared<RedisSource>( std::move(connection), std::move(keys), configuration.storage_type, sample_block, REDIS_MAX_BLOCK_SIZE)); } - Pipe RedisDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) + QueryPipeline RedisDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) { auto connection = getConnection(); @@ -227,7 +228,7 @@ namespace DB keys.add(key); } - return Pipe(std::make_shared<RedisSource>( + return QueryPipeline(std::make_shared<RedisSource>( std::move(connection), std::move(keys), configuration.storage_type, sample_block, REDIS_MAX_BLOCK_SIZE)); } diff --git a/src/Dictionaries/RedisDictionarySource.h b/src/Dictionaries/RedisDictionarySource.h index af12981f348..bf745a7bb41 100644 --- a/src/Dictionaries/RedisDictionarySource.h +++ b/src/Dictionaries/RedisDictionarySource.h @@ -82,18 +82,18 @@ namespace DB ~RedisDictionarySource() override; - Pipe loadAll() override; + QueryPipeline loadAll() override; - Pipe loadUpdatedAll() override + QueryPipeline loadUpdatedAll() override { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadUpdatedAll is unsupported for 
RedisDictionarySource"); } bool supportsSelectiveLoad() const override { return true; } - Pipe loadIds(const std::vector & ids) override; + QueryPipeline loadIds(const std::vector & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; bool isModified() const override { return true; } diff --git a/src/Dictionaries/RedisSource.cpp b/src/Dictionaries/RedisSource.cpp index 6089b836d98..4208d5fa63b 100644 --- a/src/Dictionaries/RedisSource.cpp +++ b/src/Dictionaries/RedisSource.cpp @@ -35,7 +35,7 @@ namespace DB const RedisStorageType & storage_type_, const DB::Block & sample_block, size_t max_block_size_) - : SourceWithProgress(sample_block) + : ISource(sample_block) , connection(std::move(connection_)) , keys(keys_) , storage_type(storage_type_) diff --git a/src/Dictionaries/RedisSource.h b/src/Dictionaries/RedisSource.h index 24507998f58..0f8cc317003 100644 --- a/src/Dictionaries/RedisSource.h +++ b/src/Dictionaries/RedisSource.h @@ -3,7 +3,7 @@ #include #include -#include +#include #include #include #include "RedisDictionarySource.h" @@ -19,7 +19,7 @@ namespace Poco namespace DB { - class RedisSource final : public SourceWithProgress + class RedisSource final : public ISource { public: using RedisArray = Poco::Redis::Array; diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp index df005349ce1..5e9c2f7ac7a 100644 --- a/src/Dictionaries/XDBCDictionarySource.cpp +++ b/src/Dictionaries/XDBCDictionarySource.cpp @@ -1,7 +1,6 @@ #include "XDBCDictionarySource.h" #include -#include #include #include #include @@ -119,14 +118,14 @@ std::string XDBCDictionarySource::getUpdateFieldAndDate() } -Pipe XDBCDictionarySource::loadAll() +QueryPipeline XDBCDictionarySource::loadAll() { LOG_TRACE(log, fmt::runtime(load_all_query)); return loadFromQuery(bridge_url, sample_block, load_all_query); } -Pipe XDBCDictionarySource::loadUpdatedAll() +QueryPipeline XDBCDictionarySource::loadUpdatedAll() { std::string load_query_update = getUpdateFieldAndDate(); @@ -135,14 +134,14 @@ Pipe XDBCDictionarySource::loadUpdatedAll() } -Pipe XDBCDictionarySource::loadIds(const std::vector & ids) +QueryPipeline XDBCDictionarySource::loadIds(const std::vector & ids) { const auto query = query_builder.composeLoadIdsQuery(ids); return loadFromQuery(bridge_url, sample_block, query); } -Pipe XDBCDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) +QueryPipeline XDBCDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { const auto query = query_builder.composeLoadKeysQuery(key_columns, requested_rows, ExternalQueryBuilder::AND_OR_CHAIN); return loadFromQuery(bridge_url, sample_block, query); @@ -204,7 +203,7 @@ std::string XDBCDictionarySource::doInvalidateQuery(const std::string & request) } -Pipe XDBCDictionarySource::loadFromQuery(const Poco::URI & url, const Block & required_sample_block, const std::string & query) const +QueryPipeline XDBCDictionarySource::loadFromQuery(const Poco::URI & url, const Block & required_sample_block, const std::string & query) const { bridge_helper->startBridgeSync(); @@ -220,7 +219,7 @@ Pipe XDBCDictionarySource::loadFromQuery(const Poco::URI & url, const Block & re auto format = getContext()->getInputFormat(IXDBCBridgeHelper::DEFAULT_FORMAT, *read_buf, required_sample_block, max_block_size); 
format->addBuffer(std::move(read_buf)); - return Pipe(std::move(format)); + return QueryPipeline(std::move(format)); } void registerDictionarySourceXDBC(DictionarySourceFactory & factory) diff --git a/src/Dictionaries/XDBCDictionarySource.h b/src/Dictionaries/XDBCDictionarySource.h index 1892d5aa079..8ca2e172aa6 100644 --- a/src/Dictionaries/XDBCDictionarySource.h +++ b/src/Dictionaries/XDBCDictionarySource.h @@ -50,13 +50,13 @@ public: XDBCDictionarySource(const XDBCDictionarySource & other); XDBCDictionarySource & operator=(const XDBCDictionarySource &) = delete; - Pipe loadAll() override; + QueryPipeline loadAll() override; - Pipe loadUpdatedAll() override; + QueryPipeline loadUpdatedAll() override; - Pipe loadIds(const std::vector<UInt64> & ids) override; + QueryPipeline loadIds(const std::vector<UInt64> & ids) override; - Pipe loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) override; + QueryPipeline loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) override; bool isModified() const override; @@ -74,7 +74,7 @@ private: // execute invalidate_query. expects single cell in result std::string doInvalidateQuery(const std::string & request) const; - Pipe loadFromQuery(const Poco::URI & url, const Block & required_sample_block, const std::string & query) const; + QueryPipeline loadFromQuery(const Poco::URI & url, const Block & required_sample_block, const std::string & query) const; Poco::Logger * log; diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index bffc15cdc57..038bb9bf3df 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -1,5 +1,6 @@ #pragma once +#include "Common/Exception.h" #include #include @@ -705,7 +706,7 @@ template <> struct FormatImpl<DataTypeDate32> { template <typename ReturnType = void> - static ReturnType execute(const DataTypeDate::FieldType x, WriteBuffer & wb, const DataTypeDate32 *, const DateLUTImpl *) + static ReturnType execute(const DataTypeDate32::FieldType x, WriteBuffer & wb, const DataTypeDate32 *, const DateLUTImpl *) { writeDateText(ExtendedDayNum(x), wb); return ReturnType(true); @@ -1046,13 +1047,11 @@ inline bool tryParseImpl(DataTypeUUID::FieldType & x, ReadBuffer & /** Throw exception with verbose message when string value is not parsed completely. */ -[[noreturn]] inline void throwExceptionForIncompletelyParsedValue(ReadBuffer & read_buffer, const DataTypePtr result_type) +[[noreturn]] inline void throwExceptionForIncompletelyParsedValue(ReadBuffer & read_buffer, const IDataType & result_type) { - const IDataType & to_type = *result_type; - WriteBufferFromOwnString message_buf; message_buf << "Cannot parse string " << quote << String(read_buffer.buffer().begin(), read_buffer.buffer().size()) - << " as " << to_type.getName() + << " as " << result_type.getName() << ": syntax error"; if (read_buffer.offset()) @@ -1062,8 +1061,8 @@ inline bool tryParseImpl(DataTypeUUID::FieldType & x, ReadBuffer & message_buf << " at begin of string"; // Currently there are no functions toIPv{4,6}Or{Null,Zero} - if (isNativeNumber(to_type) && !(to_type.getName() == "IPv4" || to_type.getName() == "IPv6")) - message_buf << ". Note: there are to" << to_type.getName() << "OrZero and to" << to_type.getName() << "OrNull functions, which returns zero/NULL instead of throwing exception."; + if (isNativeNumber(result_type) && !(result_type.getName() == "IPv4" || result_type.getName() == "IPv6")) + message_buf << ". 
Note: there are to" << result_type.getName() << "OrZero and to" << result_type.getName() << "OrNull functions, which returns zero/NULL instead of throwing exception."; throw Exception(message_buf.str(), ErrorCodes::CANNOT_PARSE_TEXT); } @@ -1253,7 +1252,7 @@ struct ConvertThroughParsing } if (!isAllRead(read_buffer)) - throwExceptionForIncompletelyParsedValue(read_buffer, res_type); + throwExceptionForIncompletelyParsedValue(read_buffer, *res_type); } else { @@ -1354,18 +1353,32 @@ struct ConvertImplGenericFromString static_assert(std::is_same_v<StringColumnType, ColumnString> || std::is_same_v<StringColumnType, ColumnFixedString>, "Can be used only to parse from ColumnString or ColumnFixedString"); - const IColumn & col_from = *arguments[0].column; + const IColumn & column_from = *arguments[0].column; const IDataType & data_type_to = *result_type; - if (const StringColumnType * col_from_string = checkAndGetColumn<StringColumnType>(&col_from)) - { - auto res = data_type_to.createColumn(); + auto res = data_type_to.createColumn(); + auto serialization = data_type_to.getDefaultSerialization(); + const auto * null_map = column_nullable ? &column_nullable->getNullMapData() : nullptr; - IColumn & column_to = *res; + executeImpl(column_from, *res, *serialization, input_rows_count, null_map, result_type.get()); + return res; + } + + static void executeImpl( + const IColumn & column_from, + IColumn & column_to, + const ISerialization & serialization_from, + size_t input_rows_count, + const PaddedPODArray<UInt8> * null_map = nullptr, + const IDataType * result_type = nullptr) + { + static_assert(std::is_same_v<StringColumnType, ColumnString> || std::is_same_v<StringColumnType, ColumnFixedString>, + "Can be used only to parse from ColumnString or ColumnFixedString"); + + if (const StringColumnType * col_from_string = checkAndGetColumn<StringColumnType>(&column_from)) + { column_to.reserve(input_rows_count); FormatSettings format_settings; - auto serialization = data_type_to.getDefaultSerialization(); - const auto * null_map = column_nullable ? &column_nullable->getNullMapData() : nullptr; for (size_t i = 0; i < input_rows_count; ++i) { if (null_map && (*null_map)[i]) @@ -1376,20 +1389,24 @@ struct ConvertImplGenericFromString const auto & val = col_from_string->getDataAt(i); ReadBufferFromMemory read_buffer(val.data, val.size); - - serialization->deserializeWholeText(column_to, read_buffer, format_settings); + serialization_from.deserializeWholeText(column_to, read_buffer, format_settings); if (!read_buffer.eof()) - throwExceptionForIncompletelyParsedValue(read_buffer, result_type); + { + if (result_type) + throwExceptionForIncompletelyParsedValue(read_buffer, *result_type); + else + throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, + "Cannot parse string to column {}. 
Expected eof", column_to.getName()); + } } - - return res; } else - throw Exception("Illegal column " + arguments[0].column->getName() - + " of first argument of conversion function from string", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of first argument of conversion function from string", + column_from.getName()); } + }; diff --git a/src/Functions/Regexps.h b/src/Functions/Regexps.h index 30afccbbac2..952e27b29bc 100644 --- a/src/Functions/Regexps.h +++ b/src/Functions/Regexps.h @@ -65,11 +65,6 @@ class LocalCacheTable public: using RegexpPtr = std::shared_ptr; - LocalCacheTable() - : known_regexps(max_regexp_cache_size, {"", nullptr}) - { - } - template void getOrSet(const String & pattern, RegexpPtr & regexp) { @@ -93,16 +88,17 @@ public: } private: + constexpr static size_t max_regexp_cache_size = 100; // collision probability + std::hash hasher; struct StringAndRegexp { std::string pattern; RegexpPtr regexp; }; - using CacheTable = std::vector; + using CacheTable = std::array; CacheTable known_regexps; - constexpr static size_t max_regexp_cache_size = 100; // collision probability }; } diff --git a/src/Functions/byteSize.cpp b/src/Functions/byteSize.cpp index 5a2dd3b2ec1..03ac85b7cfc 100644 --- a/src/Functions/byteSize.cpp +++ b/src/Functions/byteSize.cpp @@ -24,7 +24,9 @@ public: String getName() const override { return name; } bool useDefaultImplementationForNulls() const override { return false; } + bool useDefaultImplementationForNothing() const override { return false; } bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } + bool useDefaultImplementationForSparseColumns() const override { return false; } bool isVariadic() const override { return true; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } size_t getNumberOfArguments() const override { return 0; } diff --git a/src/Functions/dumpColumnStructure.cpp b/src/Functions/dumpColumnStructure.cpp index f1e53cc6759..8435aa4b1aa 100644 --- a/src/Functions/dumpColumnStructure.cpp +++ b/src/Functions/dumpColumnStructure.cpp @@ -25,6 +25,9 @@ public: } bool useDefaultImplementationForNulls() const override { return false; } + bool useDefaultImplementationForNothing() const override { return false; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } + bool useDefaultImplementationForSparseColumns() const override { return false; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } diff --git a/src/Functions/gcd.cpp b/src/Functions/gcd.cpp index f42f8f96cdb..c8e70dc87a2 100644 --- a/src/Functions/gcd.cpp +++ b/src/Functions/gcd.cpp @@ -21,7 +21,7 @@ struct GCDImpl : public GCDLCMImpl, NameGCD> static ResultType applyImpl(A a, B b) { using Int = typename NumberTraits::ToInteger::Type; - return boost::integer::gcd(Int(a), Int(b)); + return boost::integer::gcd(Int(a), Int(b)); // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult) } }; diff --git a/src/Functions/lcm.cpp b/src/Functions/lcm.cpp index 672d47d06b9..51a1bf59d1b 100644 --- a/src/Functions/lcm.cpp +++ b/src/Functions/lcm.cpp @@ -49,7 +49,7 @@ struct LCMImpl : public GCDLCMImpl, NameLCM> * (example: throw an exception or overflow in implementation specific way). 
*/ - Unsigned val1 = abs(a) / boost::integer::gcd(Int(a), Int(b)); + Unsigned val1 = abs(a) / boost::integer::gcd(Int(a), Int(b)); // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult) Unsigned val2 = abs(b); /// Overflow in implementation specific way. diff --git a/src/Functions/likePatternToRegexp.h b/src/Functions/likePatternToRegexp.h index 319a3729e16..298137d7bc1 100644 --- a/src/Functions/likePatternToRegexp.h +++ b/src/Functions/likePatternToRegexp.h @@ -1,17 +1,24 @@ #pragma once +#include <Common/Exception.h> #include namespace DB { +namespace ErrorCodes +{ + extern const int CANNOT_PARSE_ESCAPE_SEQUENCE; +} + /// Transforms the [I]LIKE expression into regexp re2. For example, abc%def -> ^abc.*def$ inline String likePatternToRegexp(std::string_view pattern) { String res; res.reserve(pattern.size() * 2); + const char * pos = pattern.data(); - const char * end = pos + pattern.size(); + const char * const end = pattern.begin() + pattern.size(); if (pos < end && *pos == '%') ++pos; @@ -36,13 +43,15 @@ inline String likePatternToRegexp(std::string_view pattern) res += "."; break; case '\\': + if (pos + 1 == end) + throw Exception(ErrorCodes::CANNOT_PARSE_ESCAPE_SEQUENCE, "Invalid escape sequence at the end of LIKE pattern"); /// Known escape sequences. - if (pos + 1 != end && (pos[1] == '%' || pos[1] == '_')) + if (pos[1] == '%' || pos[1] == '_') { res += pos[1]; ++pos; } - else if (pos + 1 != end && pos[1] == '\\') + else if (pos[1] == '\\') { res += "\\\\"; ++pos; diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index 2570a0d7c17..1d3ec6095d5 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -152,7 +152,7 @@ void WriteBufferFromS3::allocateBuffer() WriteBufferFromS3::~WriteBufferFromS3() { #ifndef NDEBUG - if (!is_finalized) + if (!finalized) { LOG_ERROR(log, "WriteBufferFromS3 is not finalized in destructor. It's a bug"); std::terminate(); @@ -200,8 +200,6 @@ void WriteBufferFromS3::finalizeImpl() if (!multipart_upload_id.empty()) completeMultipartUpload(); - - is_finalized = true; } void WriteBufferFromS3::createMultipartUpload() diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index 7dbaad72940..4cdc39b80a0 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -106,7 +106,6 @@ private: std::vector<String> part_tags; bool is_prefinalized = false; - bool is_finalized = false; /// Following fields are for background uploads in thread pool (if specified). 
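A few illustrative mappings for the likePatternToRegexp change above (the first comes from the function's own comment; the rest are assumed from the visible cases, not taken from the patch):

    // "abc%def"  ->  "^abc.*def$"   (documented in the function's comment)
    // "a_c"      ->  "^a.c$"        ('_' matches exactly one character)
    // "100\\%"   ->  "^100%$"       (escaped '%' stays a literal percent sign)
    // "abc\\"    ->  a trailing backslash now throws CANNOT_PARSE_ESCAPE_SEQUENCE
    //                instead of falling through the old bounds checks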
/// We use std::function to avoid dependency of Interpreters diff --git a/src/Interpreters/AsynchronousInsertQueue.cpp b/src/Interpreters/AsynchronousInsertQueue.cpp index 288be5c7ba3..fd8f252d139 100644 --- a/src/Interpreters/AsynchronousInsertQueue.cpp +++ b/src/Interpreters/AsynchronousInsertQueue.cpp @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -23,6 +22,8 @@ #include #include #include +#include +#include namespace CurrentMetrics diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index 89123cda531..498e296a3af 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -181,7 +181,8 @@ SelectStreamFactory::ShardPlans SelectStreamFactory::createForShardWithParallelR const ASTPtr & table_function_ptr, const ThrottlerPtr & throttler, ContextPtr context, - UInt32 shard_count) + UInt32 shard_count, + const std::shared_ptr<const StorageLimitsList> & storage_limits) { SelectStreamFactory::ShardPlans result; @@ -256,9 +257,11 @@ SelectStreamFactory::ShardPlans SelectStreamFactory::createForShardWithParallelR std::move(scalars), std::move(external_tables), &Poco::Logger::get("ReadFromParallelRemoteReplicasStep"), - shard_count); + shard_count, + storage_limits); remote_plan->addStep(std::move(read_from_remote)); + remote_plan->addInterpreterContext(context); result.remote_plan = std::move(remote_plan); } diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.h b/src/Interpreters/ClusterProxy/SelectStreamFactory.h index f64e57e1316..440017a8e80 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.h +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.h @@ -83,7 +83,8 @@ public: const ASTPtr & table_function_ptr, const ThrottlerPtr & throttler, ContextPtr context, - UInt32 shard_count + UInt32 shard_count, + const std::shared_ptr<const StorageLimitsList> & storage_limits ); private: diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index e7a0e24fc7b..d974721627e 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -187,10 +187,12 @@ void executeQuery( std::move(scalars), std::move(external_tables), log, - shards); + shards, + query_info.storage_limits); read_from_remote->setStepDescription("Read from remote replica"); plan->addStep(std::move(read_from_remote)); + plan->addInterpreterContext(new_context); plans.emplace_back(std::move(plan)); } @@ -267,7 +269,7 @@ void executeQueryWithParallelReplicas( query_ast_for_shard = query_ast; auto shard_plans = stream_factory.createForShardWithParallelReplicas(shard_info, - query_ast_for_shard, main_table, table_func_ptr, throttler, context, shards); + query_ast_for_shard, main_table, table_func_ptr, throttler, context, shards, query_info.storage_limits); if (!shard_plans.local_plan && !shard_plans.remote_plan) throw Exception(ErrorCodes::LOGICAL_ERROR, "No plans were generated for reading from shard. 
This is a bug"); diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index 2d9d18c2076..581a8cd87ee 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -187,6 +187,7 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr auto io = interpreter->execute(); PullingAsyncPipelineExecutor executor(io.pipeline); + io.pipeline.setProgressCallback(data.getContext()->getProgressCallback()); while (block.rows() == 0 && executor.pull(block)); if (block.rows() == 0) diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp index fb02ba3dc5a..4ac1d33468f 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp @@ -24,4 +24,67 @@ QueryPipelineBuilder IInterpreterUnionOrSelectQuery::buildQueryPipeline() QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context))); } +static StreamLocalLimits getLimitsForStorage(const Settings & settings, const SelectQueryOptions & options) +{ + StreamLocalLimits limits; + limits.mode = LimitsMode::LIMITS_TOTAL; + limits.size_limits = SizeLimits(settings.max_rows_to_read, settings.max_bytes_to_read, settings.read_overflow_mode); + limits.speed_limits.max_execution_time = settings.max_execution_time; + limits.timeout_overflow_mode = settings.timeout_overflow_mode; + + /** Quota and minimal speed restrictions are checked on the initiating server of the request, and not on remote servers, + * because the initiating server has a summary of the execution of the request on all servers. + * + * But limits on data size to read and maximum execution time are reasonable to check both on initiator and + * additionally on each remote server, because these limits are checked per block of data processed, + * and remote servers may process way more blocks of data than are received by initiator. + * + * The limits to throttle maximum execution speed is also checked on all servers. + */ + if (options.to_stage == QueryProcessingStage::Complete) + { + limits.speed_limits.min_execution_rps = settings.min_execution_speed; + limits.speed_limits.min_execution_bps = settings.min_execution_speed_bytes; + } + + limits.speed_limits.max_execution_rps = settings.max_execution_speed; + limits.speed_limits.max_execution_bps = settings.max_execution_speed_bytes; + limits.speed_limits.timeout_before_checking_execution_speed = settings.timeout_before_checking_execution_speed; + + return limits; +} + +StorageLimits IInterpreterUnionOrSelectQuery::getStorageLimits(const Context & context, const SelectQueryOptions & options) +{ + const auto & settings = context.getSettingsRef(); + + StreamLocalLimits limits; + SizeLimits leaf_limits; + + /// Set the limits and quota for reading data, the speed and time of the query. 
+ if (!options.ignore_limits) + { + limits = getLimitsForStorage(settings, options); + leaf_limits = SizeLimits(settings.max_rows_to_read_leaf, settings.max_bytes_to_read_leaf, settings.read_overflow_mode_leaf); + } + + return {limits, leaf_limits}; +} + +void IInterpreterUnionOrSelectQuery::setQuota(QueryPipeline & pipeline) const +{ + std::shared_ptr<const EnabledQuota> quota; + + if (!options.ignore_quota && (options.to_stage == QueryProcessingStage::Complete)) + quota = context->getQuota(); + + pipeline.setQuota(quota); +} + +void IInterpreterUnionOrSelectQuery::addStorageLimits(const StorageLimitsList & limits) +{ + for (const auto & val : limits) + storage_limits.push_back(val); +} + } diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.h b/src/Interpreters/IInterpreterUnionOrSelectQuery.h index 66861fd2ae0..cda147485b3 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.h +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.h @@ -8,6 +8,7 @@ namespace DB { + class IInterpreterUnionOrSelectQuery : public IInterpreter { public: @@ -54,14 +55,24 @@ public: /// You can find more details about this under ExecuteScalarSubqueriesMatcher::visit bool usesViewSource() const { return uses_view_source; } + /// Add limits from external query. + void addStorageLimits(const StorageLimitsList & limits); + protected: ASTPtr query_ptr; ContextMutablePtr context; Block result_header; SelectQueryOptions options; + StorageLimitsList storage_limits; + size_t max_streams = 1; bool settings_limit_offset_needed = false; bool settings_limit_offset_done = false; bool uses_view_source = false; + + /// Set quotas to query pipeline. + void setQuota(QueryPipeline & pipeline) const; + + static StorageLimits getStorageLimits(const Context & context, const SelectQueryOptions & options); }; } diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index 89b498cdd27..4d2628d8b26 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include @@ -346,7 +347,8 @@ QueryPipeline InterpreterExplainQuery::executeImpl() if (settings.graph) { /// Pipe holds QueryPlan, should not go out-of-scope - auto pipe = QueryPipelineBuilder::getPipe(std::move(*pipeline)); + QueryPlanResourceHolder resources; + auto pipe = QueryPipelineBuilder::getPipe(std::move(*pipeline), resources); const auto & processors = pipe.getProcessors(); if (settings.compact) diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index ee902f5984a..241ab1b0f75 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include #include #include @@ -72,7 +74,7 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query) { InterpreterSelectWithUnionQuery interpreter_select{ query.select, getContext(), SelectQueryOptions(QueryProcessingStage::Complete, 1)}; - QueryPipelineBuilder tmp_pipeline = interpreter_select.buildQueryPipeline(); + auto tmp_pipeline = interpreter_select.buildQueryPipeline(); ColumnsDescription structure_hint{tmp_pipeline.getHeader().getNamesAndTypesList()}; table_function_ptr->setStructureHint(structure_hint); } @@ -301,6 +303,8 @@ BlockIO InterpreterInsertQuery::execute() auto & query = query_ptr->as<ASTInsertQuery &>(); QueryPipelineBuilder pipeline; + std::optional<QueryPipeline> distributed_pipeline; + QueryPlanResourceHolder resources; 
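The shape of the rewritten INSERT path, sketched under the assumption that distributedWrite() returns std::optional<QueryPipeline> (buildLocalInsertPipeline is a made-up stand-in for the chain-building code that follows in the patch):

    std::optional<QueryPipeline> distributed_pipeline = table->distributedWrite(query, getContext());
    if (distributed_pipeline)
        res.pipeline = std::move(*distributed_pipeline);   /// the storage produced the whole pipeline
    else
        buildLocalInsertPipeline();                        /// hypothetical: build out_chains locally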
StoragePtr table = getTable(query); checkStorageSupportsTransactionsIfNeeded(table, getContext()); @@ -322,20 +326,12 @@ BlockIO InterpreterInsertQuery::execute() if (!query.table_function) getContext()->checkAccess(AccessType::INSERT, query.table_id, query_sample_block.getNames()); - bool is_distributed_insert_select = false; - if (query.select && table->isRemote() && settings.parallel_distributed_insert_select) - { // Distributed INSERT SELECT - if (auto maybe_pipeline = table->distributedWrite(query, getContext())) - { - pipeline = std::move(*maybe_pipeline); - is_distributed_insert_select = true; - } - } + distributed_pipeline = table->distributedWrite(query, getContext()); std::vector<Chain> out_chains; - if (!is_distributed_insert_select || query.watch) + if (!distributed_pipeline || query.watch) { size_t out_streams_size = 1; @@ -435,9 +431,9 @@ BlockIO InterpreterInsertQuery::execute() BlockIO res; /// What type of query: INSERT or INSERT SELECT or INSERT WATCH? - if (is_distributed_insert_select) + if (distributed_pipeline) { - res.pipeline = QueryPipelineBuilder::getPipeline(std::move(pipeline)); + res.pipeline = std::move(*distributed_pipeline); } else if (query.select || query.watch) { @@ -465,6 +461,10 @@ BlockIO InterpreterInsertQuery::execute() { return a.getNumThreads() < b.getNumThreads(); })->getNumThreads(); + + for (auto & chain : out_chains) + resources = chain.detachResources(); + pipeline.addChains(std::move(out_chains)); pipeline.setMaxThreads(num_insert_threads); @@ -499,6 +499,8 @@ BlockIO InterpreterInsertQuery::execute() } } + res.pipeline.addResources(std::move(resources)); + res.pipeline.addStorageHolder(table); if (inner_table) res.pipeline.addStorageHolder(inner_table); diff --git a/src/Interpreters/InterpreterKillQueryQuery.cpp b/src/Interpreters/InterpreterKillQueryQuery.cpp index 990079442ef..f279244d769 100644 --- a/src/Interpreters/InterpreterKillQueryQuery.cpp +++ b/src/Interpreters/InterpreterKillQueryQuery.cpp @@ -18,8 +18,9 @@ #include #include #include -#include <Processors/Sources/SourceWithProgress.h> +#include <Processors/ISource.h> #include +#include #include #include #include @@ -125,12 +126,12 @@ static QueryDescriptors extractQueriesExceptMeAndCheckAccess(const Block & proce } -class SyncKillQuerySource : public SourceWithProgress +class SyncKillQuerySource : public ISource { public: SyncKillQuerySource(ProcessList & process_list_, QueryDescriptors && processes_to_stop_, Block && processes_block_, const Block & res_sample_block_) - : SourceWithProgress(res_sample_block_) + : ISource(res_sample_block_) , process_list(process_list_) , processes_to_stop(std::move(processes_to_stop_)) , processes_block(std::move(processes_block_)) diff --git a/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp b/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp index d6172766fb6..4d0c82d3345 100644 --- a/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp +++ b/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp @@ -9,6 +9,7 @@ #include #include #include +#include namespace DB @@ -105,6 +106,11 @@ InterpreterSelectIntersectExceptQuery::buildCurrentChildInterpreter(const ASTPtr void InterpreterSelectIntersectExceptQuery::buildQueryPlan(QueryPlan & query_plan) { + auto local_limits = getStorageLimits(*context, options); + storage_limits.emplace_back(local_limits); + for (auto & interpreter : nested_interpreters) + interpreter->addStorageLimits(storage_limits); + size_t num_plans = nested_interpreters.size(); std::vector<std::unique_ptr<QueryPlan>> plans(num_plans); DataStreams data_streams(num_plans); @@ -131,6 +137,8 @@ void 
InterpreterSelectIntersectExceptQuery::buildQueryPlan(QueryPlan & query_plan) auto max_threads = context->getSettingsRef().max_threads; auto step = std::make_unique<IntersectOrExceptStep>(std::move(data_streams), final_operator, max_threads); query_plan.unitePlans(std::move(step), std::move(plans)); + + query_plan.addInterpreterContext(context); } BlockIO InterpreterSelectIntersectExceptQuery::execute() @@ -140,14 +148,13 @@ BlockIO InterpreterSelectIntersectExceptQuery::execute() QueryPlan query_plan; buildQueryPlan(query_plan); - auto pipeline = query_plan.buildQueryPipeline( + auto builder = query_plan.buildQueryPipeline( QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); - pipeline->addInterpreterContext(context); + res.pipeline = QueryPipelineBuilder::getPipeline(std::move(*builder)); - res.pipeline = QueryPipelineBuilder::getPipeline(std::move(*query_plan.buildQueryPipeline( - QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)))); + setQuota(res.pipeline); return res; } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 9aeb0116097..94ac7c26183 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -56,7 +56,6 @@ #include #include #include -#include #include #include #include @@ -65,6 +64,7 @@ #include #include #include +#include #include #include @@ -669,6 +669,13 @@ void InterpreterSelectQuery::buildQueryPlan(QueryPlan & query_plan) auto converting = std::make_unique<ExpressionStep>(query_plan.getCurrentDataStream(), convert_actions_dag); query_plan.addStep(std::move(converting)); } + + /// Extend lifetime of context, table lock, storage. + query_plan.addInterpreterContext(context); + if (table_lock) + query_plan.addTableLock(std::move(table_lock)); + if (storage) + query_plan.addStorageHolder(storage); } BlockIO InterpreterSelectQuery::execute() @@ -678,8 +685,13 @@ BlockIO InterpreterSelectQuery::execute() buildQueryPlan(query_plan); - res.pipeline = QueryPipelineBuilder::getPipeline(std::move(*query_plan.buildQueryPipeline( - QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)))); + auto builder = query_plan.buildQueryPipeline( + QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); + + res.pipeline = QueryPipelineBuilder::getPipeline(std::move(*builder)); + + setQuota(res.pipeline); + return res; } @@ -1182,8 +1194,9 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<Pipe> prepared_pipe) - auto prepared_source_step = std::make_unique<ReadFromPreparedSource>(std::move(*prepared_pipe), context); + auto prepared_source_step = std::make_unique<ReadFromPreparedSource>(std::move(*prepared_pipe)); query_plan.addStep(std::move(prepared_source_step)); + query_plan.addInterpreterContext(context); } if (from_stage == QueryProcessingStage::WithMergeableState && @@ -1567,36 +1580,6 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<Pipe> prepared_pipe) Block block_with_count{ {std::move(column), std::make_shared<DataTypeAggregateFunction>(func, argument_types, desc.parameters), desc.column_name}}; auto source = std::make_shared<SourceFromSingleChunk>(block_with_count); - auto prepared_count = std::make_unique<ReadFromPreparedSource>(Pipe(std::move(source)), context); + auto prepared_count = std::make_unique<ReadFromPreparedSource>(Pipe(std::move(source))); prepared_count->setStepDescription("Optimized trivial count"); query_plan.addStep(std::move(prepared_count)); from_stage = QueryProcessingStage::WithMergeableState; @@ -2029,6 +2012,9 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc if (!max_block_size) throw Exception("Setting 'max_block_size' cannot be zero", ErrorCodes::PARAMETER_OUT_OF_BOUND); + auto local_limits = getStorageLimits(*context, options); + storage_limits.emplace_back(local_limits); + /// Initialize the initial data streams to which the query transforms are superimposed. Table or subquery or prepared input? if (query_plan.isInitialized()) { @@ -2045,6 +2031,8 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc subquery, getSubqueryContext(context), options.copy().subquery().noModify(), required_columns); + interpreter_subquery->addStorageLimits(storage_limits); + if (query_analyzer->hasAggregation()) interpreter_subquery->ignoreWithTotals(); @@ -2123,19 +2111,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc query_info.input_order_info = query_info.order_optimizer->getInputOrder(metadata_snapshot, context, limit); } - StreamLocalLimits limits; - SizeLimits leaf_limits; - std::shared_ptr<const EnabledQuota> quota; - - /// Set the limits and quota for reading data, the speed and time of the query. - if (!options.ignore_limits) - { - limits = getLimitsForStorage(settings, options); - leaf_limits = SizeLimits(settings.max_rows_to_read_leaf, settings.max_bytes_to_read_leaf, settings.read_overflow_mode_leaf); - } - - if (!options.ignore_quota && (options.to_stage == QueryProcessingStage::Complete)) - quota = context->getQuota(); + query_info.storage_limits = std::make_shared<const StorageLimitsList>(storage_limits); query_info.settings_limit_offset_done = options.settings_limit_offset_done; storage->read(query_plan, required_columns, storage_snapshot, query_info, context, processing_stage, max_block_size, max_streams); @@ -2158,12 +2134,6 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc auto header = storage_snapshot->getSampleBlockForColumns(required_columns); addEmptySourceToQueryPlan(query_plan, header, query_info, context); } - - /// Extend lifetime of context, table lock, storage. Set limits and quota. 
- auto adding_limits_and_quota = std::make_unique<SettingQuotaAndLimitsStep>( - query_plan.getCurrentDataStream(), storage, std::move(table_lock), limits, leaf_limits, std::move(quota), context); - adding_limits_and_quota->setStepDescription("Set limits and quota after reading from storage"); - query_plan.addStep(std::move(adding_limits_and_quota)); } else throw Exception("Logical error in InterpreterSelectQuery: nowhere to read", ErrorCodes::LOGICAL_ERROR); diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index b7807a486b5..40afaaaeed0 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -12,6 +12,7 @@ #include #include #include +#include #include diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index 1845ddec9b4..f0150fe663f 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -272,6 +273,11 @@ void InterpreterSelectWithUnionQuery::buildQueryPlan(QueryPlan & query_plan) size_t num_plans = nested_interpreters.size(); const Settings & settings = context->getSettingsRef(); + auto local_limits = getStorageLimits(*context, options); + storage_limits.emplace_back(local_limits); + for (auto & interpreter : nested_interpreters) + interpreter->addStorageLimits(storage_limits); + /// Skip union for single interpreter. if (num_plans == 1) { @@ -335,6 +341,7 @@ void InterpreterSelectWithUnionQuery::buildQueryPlan(QueryPlan & query_plan) } } + query_plan.addInterpreterContext(context); } BlockIO InterpreterSelectWithUnionQuery::execute() @@ -344,17 +351,15 @@ BlockIO InterpreterSelectWithUnionQuery::execute() QueryPlan query_plan; buildQueryPlan(query_plan); - auto pipeline_builder = query_plan.buildQueryPipeline( + auto builder = query_plan.buildQueryPipeline( QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); - pipeline_builder->addInterpreterContext(context); - - res.pipeline = QueryPipelineBuilder::getPipeline(std::move(*pipeline_builder)); + res.pipeline = QueryPipelineBuilder::getPipeline(std::move(*builder)); + setQuota(res.pipeline); return res; } - void InterpreterSelectWithUnionQuery::ignoreWithTotals() { for (auto & interpreter : nested_interpreters) diff --git a/src/Interpreters/InterpreterWatchQuery.cpp b/src/Interpreters/InterpreterWatchQuery.cpp index 8a079ee471d..7811c14a4ea 100644 --- a/src/Interpreters/InterpreterWatchQuery.cpp +++ b/src/Interpreters/InterpreterWatchQuery.cpp @@ -16,6 +16,7 @@ limitations under the License. */ #include #include #include +#include #include @@ -34,6 +35,20 @@ BlockIO InterpreterWatchQuery::execute() { BlockIO res; res.pipeline = QueryPipelineBuilder::getPipeline(buildQueryPipeline()); + + /// Constraints on the result, the quota on the result, and also callback for progress. 
+ { + const Settings & settings = getContext()->getSettingsRef(); + + StreamLocalLimits limits; + limits.mode = LimitsMode::LIMITS_CURRENT; //-V1048 + limits.size_limits.max_rows = settings.max_result_rows; + limits.size_limits.max_bytes = settings.max_result_bytes; + limits.size_limits.overflow_mode = settings.result_overflow_mode; + + res.pipeline.setLimitsAndQuota(limits, getContext()->getQuota()); + } + return res; } @@ -86,18 +101,6 @@ QueryPipelineBuilder InterpreterWatchQuery::buildQueryPipeline() /// Watch storage auto pipe = storage->watch(required_columns, query_info, getContext(), from_stage, max_block_size, max_streams); - /// Constraints on the result, the quota on the result, and also callback for progress. - { - StreamLocalLimits limits; - limits.mode = LimitsMode::LIMITS_CURRENT; //-V1048 - limits.size_limits.max_rows = settings.max_result_rows; - limits.size_limits.max_bytes = settings.max_result_bytes; - limits.size_limits.overflow_mode = settings.result_overflow_mode; - - pipe.setLimits(limits); - pipe.setQuota(getContext()->getQuota()); - } - QueryPipelineBuilder pipeline; pipeline.init(std::move(pipe)); return pipeline; diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index a55de34efbc..8c1d929e409 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -740,7 +740,7 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run) auto source = std::make_shared<NullSource>(first_stage_header); plan.addStep(std::make_unique<ReadFromPreparedSource>(Pipe(std::move(source)))); auto pipeline = addStreamsForLaterStages(stages_copy, plan); - updated_header = std::make_unique<Block>(pipeline->getHeader()); + updated_header = std::make_unique<Block>(pipeline.getHeader()); } /// Special step to recalculate affected indices, projections and TTL expressions. @@ -890,7 +890,7 @@ ASTPtr MutationsInterpreter::prepareInterpreterSelectQuery(std::vector<Stage> & return select; } -QueryPipelineBuilderPtr MutationsInterpreter::addStreamsForLaterStages(const std::vector<Stage> & prepared_stages, QueryPlan & plan) const +QueryPipelineBuilder MutationsInterpreter::addStreamsForLaterStages(const std::vector<Stage> & prepared_stages, QueryPlan & plan) const { for (size_t i_stage = 1; i_stage < prepared_stages.size(); ++i_stage) { @@ -924,11 +924,11 @@ QueryPipelineBuilderPtr MutationsInterpreter::addStreamsForLaterStages(const std QueryPlanOptimizationSettings do_not_optimize_plan; do_not_optimize_plan.optimize_plan = false; - auto pipeline = plan.buildQueryPipeline( + auto pipeline = std::move(*plan.buildQueryPipeline( do_not_optimize_plan, - BuildQueryPipelineSettings::fromContext(context)); + BuildQueryPipelineSettings::fromContext(context))); - pipeline->addSimpleTransform([&](const Block & header) + pipeline.addSimpleTransform([&](const Block & header) { return std::make_shared<MaterializingTransform>(header); }); @@ -966,7 +966,7 @@ void MutationsInterpreter::validate() auto pipeline = addStreamsForLaterStages(stages, plan); } -QueryPipeline MutationsInterpreter::execute() +QueryPipelineBuilder MutationsInterpreter::execute() { if (!can_execute) throw Exception("Cannot execute mutations interpreter because can_execute flag set to false", ErrorCodes::LOGICAL_ERROR); @@ -981,20 +981,18 @@ QueryPipeline MutationsInterpreter::execute() /// Sometimes we update just part of columns (for example UPDATE mutation) /// in this case we don't read sorting key, so just we don't check anything. 
- if (auto sort_desc = getStorageSortDescriptionIfPossible(builder->getHeader())) + if (auto sort_desc = getStorageSortDescriptionIfPossible(builder.getHeader())) { - builder->addSimpleTransform([&](const Block & header) + builder.addSimpleTransform([&](const Block & header) { return std::make_shared<CheckSortedTransform>(header, *sort_desc); }); } - auto pipeline = QueryPipelineBuilder::getPipeline(std::move(*builder)); - if (!updated_header) - updated_header = std::make_unique<Block>(pipeline.getHeader()); + updated_header = std::make_unique<Block>(builder.getHeader()); - return pipeline; + return builder; } Block MutationsInterpreter::getUpdatedHeader() const diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index 7b0ccb3bae5..6ce132f300c 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -50,7 +50,7 @@ public: size_t evaluateCommandsSize(); /// The resulting stream will return blocks containing only changed columns and columns, that we need to recalculate indices. - QueryPipeline execute(); + QueryPipelineBuilder execute(); /// Only changed columns. Block getUpdatedHeader() const; @@ -84,7 +84,7 @@ private: struct Stage; ASTPtr prepareInterpreterSelectQuery(std::vector<Stage> &prepared_stages, bool dry_run); - QueryPipelineBuilderPtr addStreamsForLaterStages(const std::vector<Stage> & prepared_stages, QueryPlan & plan) const; + QueryPipelineBuilder addStreamsForLaterStages(const std::vector<Stage> & prepared_stages, QueryPlan & plan) const; std::optional<SortDescription> getStorageSortDescriptionIfPossible(const Block & header) const; diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index fac70e862d4..52b7de8bcab 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -1213,7 +1213,7 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( all_source_columns_set.insert(name); } - normalize(query, result.aliases, all_source_columns_set, select_options.ignore_alias, settings, /* allow_self_aliases = */ true); + normalize(query, result.aliases, all_source_columns_set, select_options.ignore_alias, settings, /* allow_self_aliases = */ true, getContext()); /// Remove unneeded columns according to 'required_result_columns'. /// Leave all selected columns in case of DISTINCT; columns that contain arrayJoin function inside. @@ -1309,7 +1309,7 @@ TreeRewriterResultPtr TreeRewriter::analyze( TreeRewriterResult result(source_columns, storage, storage_snapshot, false); - normalize(query, result.aliases, result.source_columns_set, false, settings, allow_self_aliases); + normalize(query, result.aliases, result.source_columns_set, false, settings, allow_self_aliases, getContext()); /// Executing scalar subqueries. Column defaults could be a scalar subquery. 
executeScalarSubqueries(query, getContext(), 0, result.scalars, result.local_scalars, !execute_scalar_subqueries); @@ -1338,7 +1338,7 @@ TreeRewriterResultPtr TreeRewriter::analyze( } void TreeRewriter::normalize( - ASTPtr & query, Aliases & aliases, const NameSet & source_columns_set, bool ignore_alias, const Settings & settings, bool allow_self_aliases) + ASTPtr & query, Aliases & aliases, const NameSet & source_columns_set, bool ignore_alias, const Settings & settings, bool allow_self_aliases, ContextPtr context_) { UserDefinedSQLFunctionVisitor::Data data_user_defined_functions_visitor; UserDefinedSQLFunctionVisitor(data_user_defined_functions_visitor).visit(query); @@ -1400,7 +1400,10 @@ void TreeRewriter::normalize( MarkTableIdentifiersVisitor(identifiers_data).visit(query); /// Rewrite function names to their canonical ones. - if (settings.normalize_function_names) + /// Notice: function name normalization is disabled when it's a secondary query, because queries are either + /// already normalized on initiator node, or not normalized and should remain unnormalized for + /// compatibility. + if (context_->getClientInfo().query_kind != ClientInfo::QueryKind::SECONDARY_QUERY && settings.normalize_function_names) FunctionNameNormalizer().visit(query.get()); /// Common subexpression elimination. Rewrite rules. diff --git a/src/Interpreters/TreeRewriter.h b/src/Interpreters/TreeRewriter.h index 7fbe4e45fb3..2c246455ade 100644 --- a/src/Interpreters/TreeRewriter.h +++ b/src/Interpreters/TreeRewriter.h @@ -129,7 +129,7 @@ public: std::shared_ptr<TableJoin> table_join = {}) const; private: - static void normalize(ASTPtr & query, Aliases & aliases, const NameSet & source_columns_set, bool ignore_alias, const Settings & settings, bool allow_self_aliases); + static void normalize(ASTPtr & query, Aliases & aliases, const NameSet & source_columns_set, bool ignore_alias, const Settings & settings, bool allow_self_aliases, ContextPtr context_); }; } diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index e9110d00128..039d79ed445 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -64,7 +64,10 @@ std::pair<Field, std::shared_ptr<const IDataType>> evaluateConstantExpression(co ReplaceQueryParameterVisitor param_visitor(context->getQueryParameters()); param_visitor.visit(ast); - if (context->getSettingsRef().normalize_function_names) + /// Notice: function name normalization is disabled when it's a secondary query, because queries are either + /// already normalized on initiator node, or not normalized and should remain unnormalized for + /// compatibility. 
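What FunctionNameNormalizer does, on an assumed example (the guarded rewrite is real; the sample query text is illustrative):

    /// Before:  SELECT sUm(x), CaSt(y, 'String')
    /// After:   SELECT sum(x), CAST(y, 'String')
    /// On a SECONDARY_QUERY the visitor is now skipped entirely, so the text
    /// received from the initiator is left exactly as sent.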
+ if (context->getClientInfo().query_kind != ClientInfo::QueryKind::SECONDARY_QUERY && context->getSettingsRef().normalize_function_names) FunctionNameNormalizer().visit(ast.get()); String name = ast->getColumnName(); diff --git a/src/Interpreters/executeDDLQueryOnCluster.cpp b/src/Interpreters/executeDDLQueryOnCluster.cpp index b68a8554342..f1661549c61 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.cpp +++ b/src/Interpreters/executeDDLQueryOnCluster.cpp @@ -194,7 +194,7 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, ContextPtr context, } -class DDLQueryStatusSource final : public SourceWithProgress +class DDLQueryStatusSource final : public ISource { public: DDLQueryStatusSource( @@ -275,7 +275,7 @@ static Block getSampleBlock(ContextPtr context_, bool hosts_to_wait) DDLQueryStatusSource::DDLQueryStatusSource( const String & zk_node_path, const DDLLogEntry & entry, ContextPtr context_, const std::optional<Strings> & hosts_to_wait) - : SourceWithProgress(getSampleBlock(context_, hosts_to_wait.has_value()), true) + : ISource(getSampleBlock(context_, hosts_to_wait.has_value())) , node_path(zk_node_path) , context(context_) , watch(CLOCK_MONOTONIC_COARSE) @@ -452,7 +452,7 @@ IProcessor::Status DDLQueryStatusSource::prepare() return Status::Finished; } else - return SourceWithProgress::prepare(); + return ISource::prepare(); } Strings DDLQueryStatusSource::getChildrenAllowNoNode(const std::shared_ptr<zkutil::ZooKeeper> & zookeeper, const String & node_path) diff --git a/src/Interpreters/executeDDLQueryOnCluster.h b/src/Interpreters/executeDDLQueryOnCluster.h index 35beab803e5..3004fe2ff2e 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.h +++ b/src/Interpreters/executeDDLQueryOnCluster.h @@ -2,7 +2,7 @@ #include #include -#include <Processors/Sources/SourceWithProgress.h> +#include <Processors/ISource.h> #include #include diff --git a/src/Processors/Executors/CompletedPipelineExecutor.cpp b/src/Processors/Executors/CompletedPipelineExecutor.cpp index 8ec1916f4ce..90b1f9af586 100644 --- a/src/Processors/Executors/CompletedPipelineExecutor.cpp +++ b/src/Processors/Executors/CompletedPipelineExecutor.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -65,12 +66,11 @@ void CompletedPipelineExecutor::setCancelCallback(std::function<bool()> is_cance void CompletedPipelineExecutor::execute() { - PipelineExecutor executor(pipeline.processors, pipeline.process_list_element); - if (interactive_timeout_ms) { data = std::make_unique<Data>(); data->executor = std::make_shared<PipelineExecutor>(pipeline.processors, pipeline.process_list_element); + data->executor->setReadProgressCallback(pipeline.getReadProgressCallback()); auto func = [&, thread_group = CurrentThread::getGroup()]() { @@ -92,7 +92,11 @@ void CompletedPipelineExecutor::execute() std::rethrow_exception(data->exception); } else + { + PipelineExecutor executor(pipeline.processors, pipeline.process_list_element); + executor.setReadProgressCallback(pipeline.getReadProgressCallback()); executor.execute(pipeline.getNumThreads()); + } } CompletedPipelineExecutor::~CompletedPipelineExecutor() diff --git a/src/Processors/Executors/ExecutionThreadContext.cpp b/src/Processors/Executors/ExecutionThreadContext.cpp index 383ac9d6832..a8056fedff2 100644 --- a/src/Processors/Executors/ExecutionThreadContext.cpp +++ b/src/Processors/Executors/ExecutionThreadContext.cpp @@ -1,4 +1,5 @@ #include +#include #include namespace DB @@ -38,16 +39,31 @@ static bool checkCanAddAdditionalInfoToException(const DB::Exception & exception && exception.code() != ErrorCodes::QUERY_WAS_CANCELLED; } -static void executeJob(IProcessor * 
processor) +static void executeJob(ExecutingGraph::Node * node, ReadProgressCallback * read_progress_callback) { try { - processor->work(); + node->processor->work(); + + /// Update read progress only for source nodes. + bool is_source = node->back_edges.empty(); + + if (is_source && read_progress_callback) + { + if (auto read_progress = node->processor->getReadProgress()) + { + if (read_progress->counters.total_rows_approx) + read_progress_callback->addTotalRowsApprox(read_progress->counters.total_rows_approx); + + if (!read_progress_callback->onProgress(read_progress->counters.read_rows, read_progress->counters.read_bytes, read_progress->limits)) + node->processor->cancel(); + } + } } catch (Exception & exception) { if (checkCanAddAdditionalInfoToException(exception)) - exception.addMessage("While executing " + processor->getName()); + exception.addMessage("While executing " + node->processor->getName()); throw; } } @@ -65,8 +81,7 @@ bool ExecutionThreadContext::executeTask() try { - executeJob(node->processor); - + executeJob(node, read_progress_callback); ++node->num_executed_jobs; } catch (...) diff --git a/src/Processors/Executors/ExecutionThreadContext.h b/src/Processors/Executors/ExecutionThreadContext.h index 5bab36f00ea..f0341333117 100644 --- a/src/Processors/Executors/ExecutionThreadContext.h +++ b/src/Processors/Executors/ExecutionThreadContext.h @@ -6,6 +6,8 @@ namespace DB { +class ReadProgressCallback; + /// Context for each executing thread of PipelineExecutor. class ExecutionThreadContext { @@ -25,6 +27,9 @@ private: /// Exception from executing thread itself. std::exception_ptr exception; + /// Callback for read progress. + ReadProgressCallback * read_progress_callback = nullptr; + public: #ifndef NDEBUG /// Time for different processing stages. 
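The executeJob() change above moves read-progress accounting into the executor: after a processor's work(), source nodes (those with no back edges) are drained of their accumulated counters, which are forwarded to a shared callback, and the source is cancelled if the callback returns false. A minimal compilable sketch of that control flow, using simplified stand-in types rather than the real ClickHouse classes:

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <utility>
#include <vector>

/// Simplified stand-ins; the names mirror the patch but are hypothetical.
struct ReadProgressCounters { uint64_t read_rows = 0; uint64_t read_bytes = 0; uint64_t total_rows_approx = 0; };

struct Processor
{
    ReadProgressCounters pending;   /// filled by work(), drained by getReadProgress()
    bool cancelled = false;

    void work() { pending.read_rows += 8; pending.read_bytes += 64; }   /// pretend we read a chunk

    /// Returns and zeroes the counters, so repeated calls are idempotent.
    std::optional<ReadProgressCounters> getReadProgress()
    {
        if (pending.read_rows == 0 && pending.read_bytes == 0 && pending.total_rows_approx == 0)
            return std::nullopt;
        ReadProgressCounters res;
        std::swap(pending, res);
        return res;
    }

    void cancel() { cancelled = true; }
};

struct Node
{
    Processor * processor;
    std::vector<Node *> back_edges;   /// empty => the node is a source
};

struct ReadProgressCallback
{
    uint64_t total_rows_approx = 0;
    void addTotalRowsApprox(uint64_t v) { total_rows_approx += v; }

    /// Returning false asks the executor to cancel the source (e.g. a limit was exceeded).
    bool onProgress(uint64_t rows, uint64_t bytes)
    {
        std::cout << "progress: " << rows << " rows, " << bytes << " bytes\n";
        return rows < 100;
    }
};

/// Mirrors the shape of executeJob() after this patch: only sources report progress.
void executeJob(Node * node, ReadProgressCallback * callback)
{
    node->processor->work();

    bool is_source = node->back_edges.empty();
    if (is_source && callback)
    {
        if (auto progress = node->processor->getReadProgress())
        {
            if (progress->total_rows_approx)
                callback->addTotalRowsApprox(progress->total_rows_approx);
            if (!callback->onProgress(progress->read_rows, progress->read_bytes))
                node->processor->cancel();
        }
    }
}

int main()
{
    Processor source;
    Node node{&source, {}};
    ReadProgressCallback callback;
    executeJob(&node, &callback);
}
```

The limits argument of the real onProgress() is omitted here; the swap-to-zero in getReadProgress() is what keeps repeated reporting idempotent.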
@@ -53,11 +58,12 @@ public: std::unique_lock lockStatus() const { return std::unique_lock(node->status_mutex); } - void setException(std::exception_ptr exception_) { exception = std::move(exception_); } + void setException(std::exception_ptr exception_) { exception = exception_; } void rethrowExceptionIfHas(); - explicit ExecutionThreadContext(size_t thread_number_, bool profile_processors_) - : thread_number(thread_number_) + explicit ExecutionThreadContext(size_t thread_number_, bool profile_processors_, ReadProgressCallback * callback) + : read_progress_callback(callback) + , thread_number(thread_number_) , profile_processors(profile_processors_) {} }; diff --git a/src/Processors/Executors/ExecutorTasks.cpp b/src/Processors/Executors/ExecutorTasks.cpp index e62950ffca1..f2287e467dc 100644 --- a/src/Processors/Executors/ExecutorTasks.cpp +++ b/src/Processors/Executors/ExecutorTasks.cpp @@ -128,7 +128,7 @@ void ExecutorTasks::pushTasks(Queue & queue, Queue & async_queue, ExecutionThrea } } -void ExecutorTasks::init(size_t num_threads_, bool profile_processors) +void ExecutorTasks::init(size_t num_threads_, bool profile_processors, ReadProgressCallback * callback) { num_threads = num_threads_; threads_queue.init(num_threads); @@ -139,7 +139,7 @@ void ExecutorTasks::init(size_t num_threads_, bool profile_processors) executor_contexts.reserve(num_threads); for (size_t i = 0; i < num_threads; ++i) - executor_contexts.emplace_back(std::make_unique(i, profile_processors)); + executor_contexts.emplace_back(std::make_unique(i, profile_processors, callback)); } } diff --git a/src/Processors/Executors/ExecutorTasks.h b/src/Processors/Executors/ExecutorTasks.h index 7fd865c8aa9..caff1a35d98 100644 --- a/src/Processors/Executors/ExecutorTasks.h +++ b/src/Processors/Executors/ExecutorTasks.h @@ -54,7 +54,7 @@ public: void tryGetTask(ExecutionThreadContext & context); void pushTasks(Queue & queue, Queue & async_queue, ExecutionThreadContext & context); - void init(size_t num_threads_, bool profile_processors); + void init(size_t num_threads_, bool profile_processors, ReadProgressCallback * callback); void fill(Queue & queue); void processAsyncTasks(); diff --git a/src/Processors/Executors/IReadProgressCallback.h b/src/Processors/Executors/IReadProgressCallback.h new file mode 100644 index 00000000000..75a75eeb61d --- /dev/null +++ b/src/Processors/Executors/IReadProgressCallback.h @@ -0,0 +1,18 @@ +#pragma once +#include + +namespace DB +{ + +/// An interface for read progress callback. 
+class IReadProgressCallback +{ +public: + virtual ~IReadProgressCallback() = default; + virtual bool onProgress(uint64_t read_rows, uint64_t read_bytes) = 0; +}; + +using ReadProgressCallbackPtr = std::unique_ptr<IReadProgressCallback>; + + +} diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index 039c2148232..9146a7ec262 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -155,6 +156,11 @@ bool PipelineExecutor::checkTimeLimit() return continuing; } +void PipelineExecutor::setReadProgressCallback(ReadProgressCallbackPtr callback) +{ + read_progress_callback = std::move(callback); +} + void PipelineExecutor::finalizeExecution() { checkTimeLimit(); @@ -263,7 +269,7 @@ void PipelineExecutor::initializeExecution(size_t num_threads) Queue queue; graph->initializeExecution(queue); - tasks.init(num_threads, profile_processors); + tasks.init(num_threads, profile_processors, read_progress_callback.get()); tasks.fill(queue); } diff --git a/src/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h index 0a9f8bdbeee..80ba21a8adf 100644 --- a/src/Processors/Executors/PipelineExecutor.h +++ b/src/Processors/Executors/PipelineExecutor.h @@ -16,6 +16,9 @@ class QueryStatus; class ExecutingGraph; using ExecutingGraphPtr = std::unique_ptr<ExecutingGraph>; +class ReadProgressCallback; +using ReadProgressCallbackPtr = std::unique_ptr<ReadProgressCallback>; + /// Executes query pipeline. class PipelineExecutor { @@ -48,6 +51,10 @@ public: /// Same as checkTimeLimit but it never throws. It returns false on cancellation or time limit reached [[nodiscard]] bool checkTimeLimitSoft(); + /// Set callback for read progress. + /// It will be called every time a processor reports read progress. + void setReadProgressCallback(ReadProgressCallbackPtr callback); + private: ExecutingGraphPtr graph; @@ -66,6 +73,8 @@ private: /// Now it's used to check if query was killed. QueryStatus * const process_list_element = nullptr; + ReadProgressCallbackPtr read_progress_callback; + using Queue = std::queue<ExecutingGraph::Node *>; void initializeExecution(size_t num_threads); /// Initialize executor contexts and task_queue.
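The ownership scheme introduced here is worth spelling out: PipelineExecutor owns the callback through a unique_ptr, and ExecutorTasks::init() hands each ExecutionThreadContext a borrowed raw pointer, so every worker thread funnels progress into the same object. A condensed sketch under those assumptions (stand-in types, not the real headers):

```cpp
#include <cstddef>
#include <memory>
#include <vector>

class ReadProgressCallback { /* accumulates progress from all threads */ };
using ReadProgressCallbackPtr = std::unique_ptr<ReadProgressCallback>;

class ExecutionThreadContext
{
    /// Borrowed pointer: the executor outlives every thread context.
    ReadProgressCallback * read_progress_callback = nullptr;

public:
    explicit ExecutionThreadContext(ReadProgressCallback * callback)
        : read_progress_callback(callback) {}
};

class PipelineExecutor
{
    ReadProgressCallbackPtr read_progress_callback;   /// owned here
    std::vector<std::unique_ptr<ExecutionThreadContext>> executor_contexts;

public:
    void setReadProgressCallback(ReadProgressCallbackPtr callback)
    {
        read_progress_callback = std::move(callback);
    }

    /// Must run after the callback is set and before execution starts,
    /// mirroring initializeExecution() -> ExecutorTasks::init() in the patch.
    void initializeExecution(size_t num_threads)
    {
        for (size_t i = 0; i < num_threads; ++i)
            executor_contexts.emplace_back(
                std::make_unique<ExecutionThreadContext>(read_progress_callback.get()));
    }
};

int main()
{
    PipelineExecutor executor;
    executor.setReadProgressCallback(std::make_unique<ReadProgressCallback>());
    executor.initializeExecution(4);
}
```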
diff --git a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp index 0f091e73743..f67e211c80e 100644 --- a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp +++ b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include namespace DB @@ -98,6 +99,7 @@ bool PullingAsyncPipelineExecutor::pull(Chunk & chunk, uint64_t milliseconds) { data = std::make_unique(); data->executor = std::make_shared(pipeline.processors, pipeline.process_list_element); + data->executor->setReadProgressCallback(pipeline.getReadProgressCallback()); data->lazy_format = lazy_format.get(); auto func = [&, thread_group = CurrentThread::getGroup()]() diff --git a/src/Processors/Executors/PullingPipelineExecutor.cpp b/src/Processors/Executors/PullingPipelineExecutor.cpp index ae522c1073d..33498c69c07 100644 --- a/src/Processors/Executors/PullingPipelineExecutor.cpp +++ b/src/Processors/Executors/PullingPipelineExecutor.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include @@ -42,7 +43,10 @@ const Block & PullingPipelineExecutor::getHeader() const bool PullingPipelineExecutor::pull(Chunk & chunk) { if (!executor) + { executor = std::make_shared(pipeline.processors, pipeline.process_list_element); + executor->setReadProgressCallback(pipeline.getReadProgressCallback()); + } if (!executor->checkTimeLimitSoft()) return false; diff --git a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp index 07cdb554aba..b6f08db61af 100644 --- a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -158,6 +159,7 @@ void PushingAsyncPipelineExecutor::start() data = std::make_unique(); data->executor = std::make_shared(pipeline.processors, pipeline.process_list_element); + data->executor->setReadProgressCallback(pipeline.getReadProgressCallback()); data->source = pushing_source.get(); auto func = [&, thread_group = CurrentThread::getGroup()]() diff --git a/src/Processors/Executors/PushingPipelineExecutor.cpp b/src/Processors/Executors/PushingPipelineExecutor.cpp index 1f8926154a9..bf43cd327fe 100644 --- a/src/Processors/Executors/PushingPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingPipelineExecutor.cpp @@ -2,6 +2,7 @@ #include #include #include +#include namespace DB @@ -85,6 +86,7 @@ void PushingPipelineExecutor::start() started = true; executor = std::make_shared(pipeline.processors, pipeline.process_list_element); + executor->setReadProgressCallback(pipeline.getReadProgressCallback()); if (!executor->executeStep(&input_wait_flag)) throw Exception(ErrorCodes::LOGICAL_ERROR, diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index e0b25737933..9d885002cfe 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -18,6 +18,9 @@ namespace ErrorCodes class IQueryPlanStep; +struct StorageLimits; +using StorageLimitsList = std::list; + class IProcessor; using ProcessorPtr = std::shared_ptr; using Processors = std::vector; @@ -304,6 +307,29 @@ public: uint64_t getInputWaitElapsedUs() const { return input_wait_elapsed_us; } uint64_t getOutputWaitElapsedUs() const { return output_wait_elapsed_us; } + struct ReadProgressCounters + { + uint64_t read_rows = 0; + uint64_t read_bytes = 0; + uint64_t total_rows_approx = 0; + }; + + 
struct ReadProgress + { + ReadProgressCounters counters; + const StorageLimitsList & limits; + }; + + /// Set limits for current storage. + /// Different limits may be applied to different storages, so we need to keep them per processor. + /// This method needs to be overridden only for sources. + virtual void setStorageLimits(const std::shared_ptr<const StorageLimitsList> & /*storage_limits*/) {} + + /// This method is called for every processor without input ports. + /// Processor can return a new progress for the last read operation. + /// You should zero internal counters in the call, in order to make it idempotent. + virtual std::optional<ReadProgress> getReadProgress() { return std::nullopt; } + protected: virtual void onCancel() {} diff --git a/src/Processors/ISource.cpp b/src/Processors/ISource.cpp index 7ae988f7cdb..87592bb9ef9 100644 --- a/src/Processors/ISource.cpp +++ b/src/Processors/ISource.cpp @@ -1,4 +1,5 @@ #include +#include namespace DB @@ -9,14 +10,18 @@ namespace ErrorCodes { extern const int NOT_IMPLEMENTED; } -ISource::ISource(Block header) - : IProcessor({}, {std::move(header)}), output(outputs.front()) +ISource::~ISource() = default; + +ISource::ISource(Block header, bool enable_auto_progress) + : IProcessor({}, {std::move(header)}) + , auto_progress(enable_auto_progress) + , output(outputs.front()) { } ISource::Status ISource::prepare() { - if (finished || isCancelled()) + if (finished) { output.finish(); return Status::Finished; @@ -35,6 +40,12 @@ ISource::Status ISource::prepare() output.pushData(std::move(current_chunk)); has_input = false; + if (isCancelled()) + { + output.finish(); + return Status::Finished; + } + if (got_exception) { finished = true; @@ -46,15 +57,49 @@ ISource::Status ISource::prepare() return Status::PortFull; } +void ISource::setStorageLimits(const std::shared_ptr<const StorageLimitsList> & storage_limits_) +{ + storage_limits = storage_limits_; +} + +void ISource::progress(size_t read_rows, size_t read_bytes) +{ + read_progress_was_set = true; + read_progress.read_rows += read_rows; + read_progress.read_bytes += read_bytes; +} + +std::optional<ISource::ReadProgress> ISource::getReadProgress() +{ + if (finished && read_progress.read_rows == 0 && read_progress.read_bytes == 0 && read_progress.total_rows_approx == 0) + return {}; + + ReadProgressCounters res_progress; + std::swap(read_progress, res_progress); + + if (storage_limits) + return ReadProgress{res_progress, *storage_limits}; + + static StorageLimitsList empty_limits; + return ReadProgress{res_progress, empty_limits}; +} + void ISource::work() { try { + read_progress_was_set = false; + if (auto chunk = tryGenerate()) { current_chunk.chunk = std::move(*chunk); if (current_chunk.chunk) + { has_input = true; + if (auto_progress && !read_progress_was_set) + progress(current_chunk.chunk.getNumRows(), current_chunk.chunk.bytes()); + } } else finished = true; diff --git a/src/Processors/ISource.h b/src/Processors/ISource.h index db91c0c5bce..292f79ba348 100644 --- a/src/Processors/ISource.h +++ b/src/Processors/ISource.h @@ -8,6 +8,11 @@ namespace DB class ISource : public IProcessor { +private: + ReadProgressCounters read_progress; + bool read_progress_was_set = false; + bool auto_progress; + protected: OutputPort & output; bool has_input = false; @@ -15,17 +20,29 @@ protected: bool got_exception = false; Port::Data current_chunk; + std::shared_ptr<const StorageLimitsList> storage_limits; + virtual Chunk generate(); virtual std::optional<Chunk> tryGenerate(); + virtual void progress(size_t read_rows, size_t read_bytes); + public: - explicit ISource(Block header); + explicit ISource(Block header, bool enable_auto_progress = true); + ~ISource() override; Status prepare() override; void work() override; OutputPort & getPort() { return output; } const OutputPort & getPort() const { return output; } + + void setStorageLimits(const std::shared_ptr<const StorageLimitsList> & storage_limits_) override; + + /// Default implementation for all the sources. + std::optional<ReadProgress> getReadProgress() final; + + void addTotalRowsApprox(size_t value) { read_progress.total_rows_approx += value; } }; using SourcePtr = std::shared_ptr<ISource>; diff --git a/src/Processors/Merges/Algorithms/tests/gtest_graphite.cpp b/src/Processors/Merges/Algorithms/tests/gtest_graphite.cpp index 9160ad6e0fa..ca895c5ed2a 100644 --- a/src/Processors/Merges/Algorithms/tests/gtest_graphite.cpp +++ b/src/Processors/Merges/Algorithms/tests/gtest_graphite.cpp @@ -35,6 +35,9 @@ static ConfigProcessor::LoadedConfig loadConfiguration(const std::string & confi static ConfigProcessor::LoadedConfig loadConfigurationFromString(std::string & s) { + /// NOTE: This code is trash, because it's written in C. + /// We let it remain, because it's just an orphaned old test. + char tmp_file[19]; strcpy(tmp_file, "/tmp/rollup-XXXXXX"); int fd = mkstemp(tmp_file); @@ -42,7 +45,8 @@ { throw std::runtime_error(strerror(errno)); } - try { + try + { if (write(fd, s.c_str(), s.size()) < s.size()) { throw std::runtime_error("unable write to temp file"); } @@ -56,16 +60,16 @@ static ConfigProcessor::LoadedConfig loadConfigurationFromString(std::string & s if (std::rename(tmp_file, config_path.c_str())) { int err = errno; - remove(tmp_file); + (void)remove(tmp_file); throw std::runtime_error(strerror(err)); } ConfigProcessor::LoadedConfig config = loadConfiguration(config_path); - remove(tmp_file); + (void)remove(tmp_file); return config; } catch (...)
{ - remove(tmp_file); + (void)remove(tmp_file); throw; } } diff --git a/src/Processors/QueryPlan/QueryPlan.cpp b/src/Processors/QueryPlan/QueryPlan.cpp index fd2c5e37a9f..08e36c5f32b 100644 --- a/src/Processors/QueryPlan/QueryPlan.cpp +++ b/src/Processors/QueryPlan/QueryPlan.cpp @@ -97,8 +97,7 @@ void QueryPlan::unitePlans(QueryPlanStepPtr step, std::vectormax_threads); - interpreter_context.insert(interpreter_context.end(), - plan->interpreter_context.begin(), plan->interpreter_context.end()); + resources = std::move(plan->resources); } } @@ -196,34 +195,13 @@ QueryPipelineBuilderPtr QueryPlan::buildQueryPipeline( stack.push(Frame{.node = frame.node->children[next_child]}); } - for (auto & context : interpreter_context) - last_pipeline->addInterpreterContext(std::move(context)); - - last_pipeline->setProgressCallback(build_pipeline_settings.progress_callback); + /// last_pipeline->setProgressCallback(build_pipeline_settings.progress_callback); last_pipeline->setProcessListElement(build_pipeline_settings.process_list_element); + last_pipeline->addResources(std::move(resources)); return last_pipeline; } -Pipe QueryPlan::convertToPipe( - const QueryPlanOptimizationSettings & optimization_settings, - const BuildQueryPipelineSettings & build_pipeline_settings) -{ - if (!isInitialized()) - return {}; - - if (isCompleted()) - throw Exception("Cannot convert completed QueryPlan to Pipe", ErrorCodes::LOGICAL_ERROR); - - return QueryPipelineBuilder::getPipe(std::move(*buildQueryPipeline(optimization_settings, build_pipeline_settings))); -} - -void QueryPlan::addInterpreterContext(ContextPtr context) -{ - interpreter_context.emplace_back(std::move(context)); -} - - static void explainStep(const IQueryPlanStep & step, JSONBuilder::JSONMap & map, const QueryPlan::ExplainPlanOptions & options) { map.add("Node Type", step.getName()); diff --git a/src/Processors/QueryPlan/QueryPlan.h b/src/Processors/QueryPlan/QueryPlan.h index 5e064713abd..ce12ce7beda 100644 --- a/src/Processors/QueryPlan/QueryPlan.h +++ b/src/Processors/QueryPlan/QueryPlan.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -60,11 +61,6 @@ public: const QueryPlanOptimizationSettings & optimization_settings, const BuildQueryPipelineSettings & build_pipeline_settings); - /// If initialized, build pipeline and convert to pipe. Otherwise, return empty pipe. - Pipe convertToPipe( - const QueryPlanOptimizationSettings & optimization_settings, - const BuildQueryPipelineSettings & build_pipeline_settings); - struct ExplainPlanOptions { /// Add output header to step. @@ -88,13 +84,18 @@ public: void explainPipeline(WriteBuffer & buffer, const ExplainPipelineOptions & options); void explainEstimate(MutableColumns & columns); + /// Do not allow to change the table while the pipeline is alive. + void addTableLock(TableLockHolder lock) { resources.table_locks.emplace_back(std::move(lock)); } + void addInterpreterContext(std::shared_ptr<const Context> context) { resources.interpreter_context.emplace_back(std::move(context)); } + void addStorageHolder(StoragePtr storage) { resources.storage_holders.emplace_back(std::move(storage)); } + + void addResources(QueryPlanResourceHolder resources_) { resources = std::move(resources_); } + /// Set upper limit for the recommend number of threads. Will be applied to the newly-created pipelines. /// TODO: make it in a better way.
void setMaxThreads(size_t max_threads_) { max_threads = max_threads_; } size_t getMaxThreads() const { return max_threads; } - void addInterpreterContext(ContextPtr context); - /// Tree node. Step and it's children. struct Node { @@ -105,6 +106,7 @@ public: using Nodes = std::list; private: + QueryPlanResourceHolder resources; Nodes nodes; Node * root = nullptr; @@ -113,7 +115,6 @@ private: /// Those fields are passed to QueryPipeline. size_t max_threads = 0; - std::vector interpreter_context; }; std::string debugExplainStep(const IQueryPlanStep & step); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index feb4a7d50f3..8adaf2f1027 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1080,6 +1080,9 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons column_names_to_read); } + for (const auto & processor : pipe.getProcessors()) + processor->setStorageLimits(query_info.storage_limits); + if (pipe.empty()) { pipeline.init(Pipe(std::make_shared(getOutputStream().header))); @@ -1148,11 +1151,11 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons for (const auto & processor : pipe.getProcessors()) processors.emplace_back(processor); - // Attach QueryIdHolder if needed - if (query_id_holder) - pipe.addQueryIdHolder(std::move(query_id_holder)); pipeline.init(std::move(pipe)); + // Attach QueryIdHolder if needed + if (query_id_holder) + pipeline.setQueryIdHolder(std::move(query_id_holder)); } static const char * indexTypeToString(ReadFromMergeTree::IndexType type) diff --git a/src/Processors/QueryPlan/ReadFromPreparedSource.cpp b/src/Processors/QueryPlan/ReadFromPreparedSource.cpp index fc8136177cf..7446203ec35 100644 --- a/src/Processors/QueryPlan/ReadFromPreparedSource.cpp +++ b/src/Processors/QueryPlan/ReadFromPreparedSource.cpp @@ -4,10 +4,9 @@ namespace DB { -ReadFromPreparedSource::ReadFromPreparedSource(Pipe pipe_, std::shared_ptr context_) +ReadFromPreparedSource::ReadFromPreparedSource(Pipe pipe_) : ISourceStep(DataStream{.header = pipe_.getHeader()}) , pipe(std::move(pipe_)) - , context(std::move(context_)) { } @@ -17,9 +16,6 @@ void ReadFromPreparedSource::initializePipeline(QueryPipelineBuilder & pipeline, processors.emplace_back(processor); pipeline.init(std::move(pipe)); - - if (context) - pipeline.addInterpreterContext(std::move(context)); } } diff --git a/src/Processors/QueryPlan/ReadFromPreparedSource.h b/src/Processors/QueryPlan/ReadFromPreparedSource.h index bb6e814ad9f..05e3ebd5102 100644 --- a/src/Processors/QueryPlan/ReadFromPreparedSource.h +++ b/src/Processors/QueryPlan/ReadFromPreparedSource.h @@ -9,13 +9,13 @@ namespace DB class ReadFromPreparedSource : public ISourceStep { public: - explicit ReadFromPreparedSource(Pipe pipe_, ContextPtr context_ = nullptr); + explicit ReadFromPreparedSource(Pipe pipe_); String getName() const override { return "ReadFromPreparedSource"; } void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; -private: +protected: Pipe pipe; ContextPtr context; }; @@ -23,13 +23,19 @@ private: class ReadFromStorageStep : public ReadFromPreparedSource { public: - ReadFromStorageStep(Pipe pipe_, String storage_name) - : ReadFromPreparedSource(std::move(pipe_)) + ReadFromStorageStep(Pipe pipe_, String storage_name, std::shared_ptr storage_limits_) + : ReadFromPreparedSource(std::move(pipe_)), 
storage_limits(std::move(storage_limits_)) { setStepDescription(storage_name); + + for (const auto & processor : pipe.getProcessors()) + processor->setStorageLimits(storage_limits); } String getName() const override { return "ReadFromStorage"; } + +private: + std::shared_ptr storage_limits; }; } diff --git a/src/Processors/QueryPlan/ReadFromRemote.cpp b/src/Processors/QueryPlan/ReadFromRemote.cpp index 867daaff30c..6e2d776e1e4 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.cpp +++ b/src/Processors/QueryPlan/ReadFromRemote.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -74,7 +75,8 @@ ReadFromRemote::ReadFromRemote( Scalars scalars_, Tables external_tables_, Poco::Logger * log_, - UInt32 shard_count_) + UInt32 shard_count_, + std::shared_ptr storage_limits_) : ISourceStep(DataStream{.header = std::move(header_)}) , shards(std::move(shards_)) , stage(stage_) @@ -84,6 +86,7 @@ ReadFromRemote::ReadFromRemote( , throttler(std::move(throttler_)) , scalars(std::move(scalars_)) , external_tables(std::move(external_tables_)) + , storage_limits(std::move(storage_limits_)) , log(log_) , shard_count(shard_count_) { @@ -108,7 +111,7 @@ void ReadFromRemote::addLazyPipe(Pipes & pipes, const ClusterProxy::SelectStream scalars = scalars, external_tables = external_tables, stage = stage, local_delay = shard.local_delay, add_agg_info, add_totals, add_extremes, async_read]() mutable - -> Pipe + -> QueryPipelineBuilder { auto current_settings = context->getSettingsRef(); auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover( @@ -142,9 +145,9 @@ void ReadFromRemote::addLazyPipe(Pipes & pipes, const ClusterProxy::SelectStream { auto plan = createLocalPlan(query, header, context, stage, shard.shard_info.shard_num, shard_count, 0, 0, /*coordinator=*/nullptr); - return QueryPipelineBuilder::getPipe(std::move(*plan->buildQueryPipeline( + return std::move(*plan->buildQueryPipeline( QueryPlanOptimizationSettings::fromContext(context), - BuildQueryPipelineSettings::fromContext(context)))); + BuildQueryPipelineSettings::fromContext(context))); } else { @@ -160,12 +163,14 @@ void ReadFromRemote::addLazyPipe(Pipes & pipes, const ClusterProxy::SelectStream auto remote_query_executor = std::make_shared( shard.shard_info.pool, std::move(connections), query_string, header, context, throttler, scalars, external_tables, stage); - return createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes, async_read); + auto pipe = createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes, async_read); + QueryPipelineBuilder builder; + builder.init(std::move(pipe)); + return builder; } }; pipes.emplace_back(createDelayedPipe(shard.header, lazily_create_stream, add_totals, add_extremes)); - pipes.back().addInterpreterContext(context); addConvertingActions(pipes.back(), output_stream->header); } @@ -198,7 +203,6 @@ void ReadFromRemote::addPipe(Pipes & pipes, const ClusterProxy::SelectStreamFact remote_query_executor->setMainTable(main_table); pipes.emplace_back(createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes, async_read)); - pipes.back().addInterpreterContext(context); addConvertingActions(pipes.back(), output_stream->header); } @@ -215,6 +219,10 @@ void ReadFromRemote::initializePipeline(QueryPipelineBuilder & pipeline, const B } auto pipe = Pipe::unitePipes(std::move(pipes)); + + for (const auto & processor : pipe.getProcessors()) + processor->setStorageLimits(storage_limits); + pipeline.init(std::move(pipe)); } 
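A pattern repeats across ReadFromMergeTree, ReadFromStorageStep, and ReadFromRemote above: the step broadcasts its storage limits to every processor of the freshly built pipe. That is safe because the base IProcessor::setStorageLimits() is a no-op and only sources store the list, roughly as in this simplified sketch (assumed types, not the real headers):

```cpp
#include <list>
#include <memory>
#include <vector>

struct StorageLimits { /* local + leaf limits in the real code */ };
using StorageLimitsList = std::list<StorageLimits>;

struct IProcessor
{
    virtual ~IProcessor() = default;
    /// No-op by default: transforms and sinks simply ignore storage limits.
    virtual void setStorageLimits(const std::shared_ptr<const StorageLimitsList> &) {}
};

struct ISource : IProcessor
{
    std::shared_ptr<const StorageLimitsList> storage_limits;
    void setStorageLimits(const std::shared_ptr<const StorageLimitsList> & limits) override
    {
        storage_limits = limits;   /// only sources keep the limits, for getReadProgress()
    }
};

/// Mirrors initializePipeline(): broadcast to every processor; sources pick it up.
void applyLimits(std::vector<std::shared_ptr<IProcessor>> & processors,
                 const std::shared_ptr<const StorageLimitsList> & limits)
{
    for (const auto & processor : processors)
        processor->setStorageLimits(limits);
}

int main()
{
    std::vector<std::shared_ptr<IProcessor>> processors{
        std::make_shared<ISource>(), std::make_shared<IProcessor>()};
    auto limits = std::make_shared<const StorageLimitsList>();
    applyLimits(processors, limits);
}
```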
@@ -231,7 +239,8 @@ ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep( Scalars scalars_, Tables external_tables_, Poco::Logger * log_, - UInt32 shard_count_) + UInt32 shard_count_, + std::shared_ptr storage_limits_) : ISourceStep(DataStream{.header = std::move(header_)}) , coordinator(std::move(coordinator_)) , shard(std::move(shard_)) @@ -242,6 +251,7 @@ ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep( , throttler(throttler_) , scalars(scalars_) , external_tables{external_tables_} + , storage_limits(std::move(storage_limits_)) , log(log_) , shard_count(shard_count_) { @@ -283,6 +293,10 @@ void ReadFromParallelRemoteReplicasStep::initializePipeline(QueryPipelineBuilder } auto pipe = Pipe::unitePipes(std::move(pipes)); + + for (const auto & processor : pipe.getProcessors()) + processor->setStorageLimits(storage_limits); + pipeline.init(std::move(pipe)); } @@ -317,7 +331,6 @@ void ReadFromParallelRemoteReplicasStep::addPipeForSingeReplica(Pipes & pipes, s remote_query_executor->setMainTable(main_table); pipes.emplace_back(createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes, async_read)); - pipes.back().addInterpreterContext(context); addConvertingActions(pipes.back(), output_stream->header); } diff --git a/src/Processors/QueryPlan/ReadFromRemote.h b/src/Processors/QueryPlan/ReadFromRemote.h index e8f4ee5fd37..0a21f240f5a 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.h +++ b/src/Processors/QueryPlan/ReadFromRemote.h @@ -32,7 +32,8 @@ public: Scalars scalars_, Tables external_tables_, Poco::Logger * log_, - UInt32 shard_count_); + UInt32 shard_count_, + std::shared_ptr storage_limits_); String getName() const override { return "ReadFromRemote"; } @@ -57,6 +58,8 @@ private: Scalars scalars; Tables external_tables; + std::shared_ptr storage_limits; + Poco::Logger * log; UInt32 shard_count; @@ -80,7 +83,8 @@ public: Scalars scalars_, Tables external_tables_, Poco::Logger * log_, - UInt32 shard_count_); + UInt32 shard_count_, + std::shared_ptr storage_limits_); String getName() const override { return "ReadFromRemoteParallelReplicas"; } @@ -103,6 +107,8 @@ private: Scalars scalars; Tables external_tables; + std::shared_ptr storage_limits; + Poco::Logger * log; UInt32 shard_count{0}; diff --git a/src/Processors/QueryPlan/SettingQuotaAndLimitsStep.cpp b/src/Processors/QueryPlan/SettingQuotaAndLimitsStep.cpp deleted file mode 100644 index 4ca3d0ebf54..00000000000 --- a/src/Processors/QueryPlan/SettingQuotaAndLimitsStep.cpp +++ /dev/null @@ -1,71 +0,0 @@ -#include -#include -#include - -namespace DB -{ - -static ITransformingStep::Traits getTraits() -{ - return ITransformingStep::Traits - { - { - .preserves_distinct_columns = true, - .returns_single_stream = false, - .preserves_number_of_streams = true, - .preserves_sorting = true, - }, - { - .preserves_number_of_rows = true, - } - }; -} - -SettingQuotaAndLimitsStep::SettingQuotaAndLimitsStep( - const DataStream & input_stream_, - StoragePtr storage_, - TableLockHolder table_lock_, - StreamLocalLimits & limits_, - SizeLimits & leaf_limits_, - std::shared_ptr quota_, - ContextPtr context_) - : ITransformingStep(input_stream_, input_stream_.header, getTraits()) - , context(std::move(context_)) - , storage(std::move(storage_)) - , table_lock(std::move(table_lock_)) - , limits(limits_) - , leaf_limits(leaf_limits_) - , quota(std::move(quota_)) -{ -} - -void SettingQuotaAndLimitsStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) -{ 
- /// Table lock is stored inside pipeline here. - pipeline.setLimits(limits); - - /** - * Leaf size limits should be applied only for local processing of distributed queries. - * Such limits allow to control the read stage on leaf nodes and exclude the merging stage. - * Consider the case when distributed query needs to read from multiple shards. Then leaf - * limits will be applied on the shards only (including the root node) but will be ignored - * on the results merging stage. - */ - if (!storage->isRemote()) - pipeline.setLeafLimits(leaf_limits); - - if (quota) - pipeline.setQuota(quota); - - /// Order of resources below is important. - if (context) - pipeline.addInterpreterContext(std::move(context)); - - if (storage) - pipeline.addStorageHolder(std::move(storage)); - - if (table_lock) - pipeline.addTableLock(std::move(table_lock)); -} - -} diff --git a/src/Processors/QueryPlan/SettingQuotaAndLimitsStep.h b/src/Processors/QueryPlan/SettingQuotaAndLimitsStep.h deleted file mode 100644 index a8d1eef4b08..00000000000 --- a/src/Processors/QueryPlan/SettingQuotaAndLimitsStep.h +++ /dev/null @@ -1,46 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -namespace DB -{ - -class IStorage; -using StoragePtr = std::shared_ptr; - -struct StorageInMemoryMetadata; -using StorageMetadataPtr = std::shared_ptr; - -class EnabledQuota; - -/// Add limits, quota, table_lock and other stuff to pipeline. -/// Doesn't change DataStream. -class SettingQuotaAndLimitsStep : public ITransformingStep -{ -public: - SettingQuotaAndLimitsStep( - const DataStream & input_stream_, - StoragePtr storage_, - TableLockHolder table_lock_, - StreamLocalLimits & limits_, - SizeLimits & leaf_limits_, - std::shared_ptr quota_, - ContextPtr context_); - - String getName() const override { return "SettingQuotaAndLimits"; } - - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; - -private: - ContextPtr context; - StoragePtr storage; - TableLockHolder table_lock; - StreamLocalLimits limits; - SizeLimits leaf_limits; - std::shared_ptr quota; -}; - -} diff --git a/src/Processors/Sources/BlocksListSource.h b/src/Processors/Sources/BlocksListSource.h index 97e568a7dc0..5cdf08251ed 100644 --- a/src/Processors/Sources/BlocksListSource.h +++ b/src/Processors/Sources/BlocksListSource.h @@ -1,6 +1,6 @@ #pragma once -#include +#include namespace DB @@ -9,17 +9,17 @@ namespace DB /** A stream of blocks from which you can read the next block from an explicitly provided list. * Also see SourceFromSingleChunk. */ -class BlocksListSource : public SourceWithProgress +class BlocksListSource : public ISource { public: /// Acquires the ownership of the block list. explicit BlocksListSource(BlocksList && list_) - : SourceWithProgress(list_.empty() ? Block() : list_.front().cloneEmpty()) + : ISource(list_.empty() ? Block() : list_.front().cloneEmpty()) , list(std::move(list_)), it(list.begin()), end(list.end()) {} /// Uses a list of blocks lying somewhere else. BlocksListSource(BlocksList::iterator & begin_, BlocksList::iterator & end_) - : SourceWithProgress(begin_ == end_ ? Block() : begin_->cloneEmpty()) + : ISource(begin_ == end_ ? 
Block() : begin_->cloneEmpty()) , it(begin_), end(end_) {} String getName() const override { return "BlocksListSource"; } diff --git a/src/Processors/Sources/BlocksSource.h b/src/Processors/Sources/BlocksSource.h index 0030e7e7c35..ec0dc9609f1 100644 --- a/src/Processors/Sources/BlocksSource.h +++ b/src/Processors/Sources/BlocksSource.h @@ -11,7 +11,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include @@ -20,12 +20,12 @@ namespace DB /** A stream of blocks from a shared vector of blocks */ -class BlocksSource : public SourceWithProgress +class BlocksSource : public ISource { public: /// Acquires shared ownership of the blocks vector BlocksSource(BlocksPtr blocks_ptr_, Block header) - : SourceWithProgress(std::move(header)) + : ISource(std::move(header)) , blocks(blocks_ptr_), it(blocks_ptr_->begin()), end(blocks_ptr_->end()) {} String getName() const override { return "Blocks"; } diff --git a/src/Processors/Sources/DelayedSource.cpp b/src/Processors/Sources/DelayedSource.cpp index 6cfdeeeeec5..ee7fd757949 100644 --- a/src/Processors/Sources/DelayedSource.cpp +++ b/src/Processors/Sources/DelayedSource.cpp @@ -2,6 +2,7 @@ #include #include #include +#include namespace DB { @@ -111,7 +112,9 @@ void synchronizePorts(OutputPort *& pipe_port, OutputPort * source_port, const B void DelayedSource::work() { - auto pipe = creator(); + auto builder = creator(); + auto pipe = QueryPipelineBuilder::getPipe(std::move(builder), resources); + const auto & header = main->getHeader(); if (pipe.empty()) diff --git a/src/Processors/Sources/DelayedSource.h b/src/Processors/Sources/DelayedSource.h index 3d17c13ad4c..f069bde455f 100644 --- a/src/Processors/Sources/DelayedSource.h +++ b/src/Processors/Sources/DelayedSource.h @@ -17,7 +17,7 @@ namespace DB class DelayedSource : public IProcessor { public: - using Creator = std::function; + using Creator = std::function; DelayedSource(const Block & header, Creator processors_creator, bool add_totals_port, bool add_extremes_port); String getName() const override { return "Delayed"; } @@ -31,6 +31,7 @@ public: OutputPort * getExtremesPort() { return extremes; } private: + QueryPlanResourceHolder resources; Creator creator; Processors processors; diff --git a/src/Processors/Sources/MySQLSource.cpp b/src/Processors/Sources/MySQLSource.cpp index f536f692a5d..5abffbe1140 100644 --- a/src/Processors/Sources/MySQLSource.cpp +++ b/src/Processors/Sources/MySQLSource.cpp @@ -55,7 +55,7 @@ MySQLSource::MySQLSource( const std::string & query_str, const Block & sample_block, const StreamSettings & settings_) - : SourceWithProgress(sample_block.cloneEmpty()) + : ISource(sample_block.cloneEmpty()) , log(&Poco::Logger::get("MySQLSource")) , connection{std::make_unique(entry, query_str)} , settings{std::make_unique(settings_)} @@ -66,7 +66,7 @@ MySQLSource::MySQLSource( /// For descendant MySQLWithFailoverSource MySQLSource::MySQLSource(const Block &sample_block_, const StreamSettings & settings_) - : SourceWithProgress(sample_block_.cloneEmpty()) + : ISource(sample_block_.cloneEmpty()) , log(&Poco::Logger::get("MySQLSource")) , settings(std::make_unique(settings_)) { diff --git a/src/Processors/Sources/MySQLSource.h b/src/Processors/Sources/MySQLSource.h index 5938cb4b57f..c4d447886c0 100644 --- a/src/Processors/Sources/MySQLSource.h +++ b/src/Processors/Sources/MySQLSource.h @@ -2,7 +2,7 @@ #include #include -#include +#include 
#include #include #include @@ -25,7 +25,7 @@ struct StreamSettings }; /// Allows processing results of a MySQL query as a sequence of Blocks, simplifies chaining -class MySQLSource : public SourceWithProgress +class MySQLSource : public ISource { public: MySQLSource( diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 99ba459cf2c..9b01e048391 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -8,7 +9,7 @@ namespace DB { RemoteSource::RemoteSource(RemoteQueryExecutorPtr executor, bool add_aggregation_info_, bool async_read_) - : SourceWithProgress(executor->getHeader(), false) + : ISource(executor->getHeader(), false) , add_aggregation_info(add_aggregation_info_), query_executor(std::move(executor)) , async_read(async_read_) { @@ -21,6 +22,16 @@ RemoteSource::RemoteSource(RemoteQueryExecutorPtr executor, bool add_aggregation RemoteSource::~RemoteSource() = default; +void RemoteSource::setStorageLimits(const std::shared_ptr & storage_limits_) +{ + /// Remove leaf limits for remote source. + StorageLimitsList list; + for (const auto & value : *storage_limits_) + list.emplace_back(StorageLimits{value.local_limits, {}}); + + storage_limits = std::make_shared(std::move(list)); +} + ISource::Status RemoteSource::prepare() { /// Check if query was cancelled before returning Async status. Otherwise it may lead to infinite loop. @@ -33,7 +44,7 @@ ISource::Status RemoteSource::prepare() if (is_async_state) return Status::Async; - Status status = SourceWithProgress::prepare(); + Status status = ISource::prepare(); /// To avoid resetting the connection (because of "unfinished" query) in the /// RemoteQueryExecutor it should be finished explicitly. if (status == Status::Finished) @@ -53,7 +64,12 @@ std::optional RemoteSource::tryGenerate() if (!was_query_sent) { /// Progress method will be called on Progress packet. - query_executor->setProgressCallback([this](const Progress & value) { progress(value); }); + query_executor->setProgressCallback([this](const Progress & value) + { + if (value.total_rows_to_read) + addTotalRowsApprox(value.total_rows_to_read); + progress(value.read_rows, value.read_bytes); + }); /// Get rows_before_limit result for remote query from ProfileInfo packet. query_executor->setProfileInfoCallback([this](const ProfileInfo & info) diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index 23c3ddec401..f415b91aae0 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include #include @@ -14,7 +14,7 @@ using RemoteQueryExecutorPtr = std::shared_ptr; class RemoteQueryExecutorReadContext; /// Source from RemoteQueryExecutor. Executes remote query and returns query result chunks. -class RemoteSource : public SourceWithProgress +class RemoteSource : public ISource { public: /// Flag add_aggregation_info tells if AggregatedChunkInfo should be added to result chunk. 
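RemoteSource::setStorageLimits() above rebuilds the limits list with the leaf part cleared, since leaf limits should be enforced only on the shard that actually reads the data, not again on the initiator. A hedged sketch of that transformation, with the field names assumed from the patch:

```cpp
#include <list>
#include <memory>

struct SizeLimits { /* max rows/bytes + overflow mode in the real code */ };
struct StreamLocalLimits { /* per-stream limits */ };

struct StorageLimits
{
    StreamLocalLimits local_limits;
    SizeLimits leaf_limits;
};

using StorageLimitsList = std::list<StorageLimits>;

/// Mirrors RemoteSource::setStorageLimits(): keep local limits, drop leaf limits,
/// because the remote shard already enforced them while reading its own data.
std::shared_ptr<const StorageLimitsList>
stripLeafLimits(const std::shared_ptr<const StorageLimitsList> & storage_limits)
{
    StorageLimitsList list;
    for (const auto & value : *storage_limits)
        list.emplace_back(StorageLimits{value.local_limits, {}});
    return std::make_shared<const StorageLimitsList>(std::move(list));
}

int main()
{
    auto limits = std::make_shared<const StorageLimitsList>(StorageLimitsList{StorageLimits{}});
    auto stripped = stripLeafLimits(limits);
    (void)stripped;
}
```

The same hunk also shows the progress callback translating a remote Progress packet into the new counter API: total_rows_to_read feeds addTotalRowsApprox(), and read_rows/read_bytes feed progress().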
@@ -33,6 +33,8 @@ public: int schedule() override { return fd; } + void setStorageLimits(const std::shared_ptr & storage_limits_) override; + protected: std::optional tryGenerate() override; void onCancel() override; diff --git a/src/Processors/Sources/SQLiteSource.cpp b/src/Processors/Sources/SQLiteSource.cpp index 04a5fa7c070..814480b63e3 100644 --- a/src/Processors/Sources/SQLiteSource.cpp +++ b/src/Processors/Sources/SQLiteSource.cpp @@ -27,7 +27,7 @@ SQLiteSource::SQLiteSource( const String & query_str_, const Block & sample_block, const UInt64 max_block_size_) - : SourceWithProgress(sample_block.cloneEmpty()) + : ISource(sample_block.cloneEmpty()) , query_str(query_str_) , max_block_size(max_block_size_) , sqlite_db(std::move(sqlite_db_)) diff --git a/src/Processors/Sources/SQLiteSource.h b/src/Processors/Sources/SQLiteSource.h index 14fa1af4e2d..a55c8204a16 100644 --- a/src/Processors/Sources/SQLiteSource.h +++ b/src/Processors/Sources/SQLiteSource.h @@ -4,7 +4,7 @@ #if USE_SQLITE #include -#include +#include #include @@ -12,7 +12,7 @@ namespace DB { -class SQLiteSource : public SourceWithProgress +class SQLiteSource : public ISource { using SQLitePtr = std::shared_ptr; diff --git a/src/Processors/Sources/ShellCommandSource.cpp b/src/Processors/Sources/ShellCommandSource.cpp index f2b8e3526bc..8598b0197fc 100644 --- a/src/Processors/Sources/ShellCommandSource.cpp +++ b/src/Processors/Sources/ShellCommandSource.cpp @@ -244,7 +244,7 @@ namespace * * If process_pool is passed in constructor then after source is destroyed process is returned to pool. */ - class ShellCommandSource final : public SourceWithProgress + class ShellCommandSource final : public ISource { public: @@ -260,7 +260,7 @@ namespace const ShellCommandSourceConfiguration & configuration_ = {}, std::unique_ptr && command_holder_ = nullptr, std::shared_ptr process_pool_ = nullptr) - : SourceWithProgress(sample_block_) + : ISource(sample_block_) , context(context_) , format(format_) , sample_block(sample_block_) @@ -373,7 +373,7 @@ namespace Status prepare() override { - auto status = SourceWithProgress::prepare(); + auto status = ISource::prepare(); if (status == Status::Finished) { @@ -578,9 +578,8 @@ Pipe ShellCommandSourceCoordinator::createPipe( source_configuration, std::move(process_holder), process_pool); - auto pipe = Pipe(std::move(source)); - return pipe; + return Pipe(std::move(source)); } } diff --git a/src/Processors/Sources/ShellCommandSource.h b/src/Processors/Sources/ShellCommandSource.h index a955c22715b..a0b4aff4c1b 100644 --- a/src/Processors/Sources/ShellCommandSource.h +++ b/src/Processors/Sources/ShellCommandSource.h @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/Processors/Sources/SourceFromSingleChunk.cpp b/src/Processors/Sources/SourceFromSingleChunk.cpp index 776ed98599f..3e2b128acd2 100644 --- a/src/Processors/Sources/SourceFromSingleChunk.cpp +++ b/src/Processors/Sources/SourceFromSingleChunk.cpp @@ -5,8 +5,8 @@ namespace DB { -SourceFromSingleChunk::SourceFromSingleChunk(Block header, Chunk chunk_) : SourceWithProgress(std::move(header)), chunk(std::move(chunk_)) {} -SourceFromSingleChunk::SourceFromSingleChunk(Block data) : SourceWithProgress(data.cloneEmpty()), chunk(data.getColumns(), data.rows()) +SourceFromSingleChunk::SourceFromSingleChunk(Block header, Chunk chunk_) : ISource(std::move(header)), chunk(std::move(chunk_)) {} +SourceFromSingleChunk::SourceFromSingleChunk(Block data) : ISource(data.cloneEmpty()), chunk(data.getColumns(), 
data.rows()) { const auto & sample = getPort().getHeader(); bool has_aggregate_functions = false; diff --git a/src/Processors/Sources/SourceFromSingleChunk.h b/src/Processors/Sources/SourceFromSingleChunk.h index e06387b556f..fa85b94c231 100644 --- a/src/Processors/Sources/SourceFromSingleChunk.h +++ b/src/Processors/Sources/SourceFromSingleChunk.h @@ -1,11 +1,11 @@ #pragma once -#include +#include namespace DB { -class SourceFromSingleChunk : public SourceWithProgress +class SourceFromSingleChunk : public ISource { public: explicit SourceFromSingleChunk(Block header, Chunk chunk_); diff --git a/src/Processors/Sources/SourceWithProgress.cpp b/src/Processors/Sources/SourceWithProgress.cpp deleted file mode 100644 index b568166a3ab..00000000000 --- a/src/Processors/Sources/SourceWithProgress.cpp +++ /dev/null @@ -1,171 +0,0 @@ -#include - -#include -#include - -namespace ProfileEvents -{ - extern const Event SelectedRows; - extern const Event SelectedBytes; -} - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int TOO_MANY_ROWS; - extern const int TOO_MANY_BYTES; -} - -SourceWithProgress::SourceWithProgress(Block header, bool enable_auto_progress) - : ISourceWithProgress(header), auto_progress(enable_auto_progress) -{ -} - -void SourceWithProgress::setProcessListElement(QueryStatus * elem) -{ - process_list_elem = elem; - if (!elem) - return; - - /// Update total_rows_approx as soon as possible. - /// - /// It is important to do this, since you will not get correct - /// total_rows_approx until the query will start reading all parts (in case - /// of query needs to read from multiple parts), and this is especially a - /// problem in case of max_threads=1. - /// - /// NOTE: This can be done only if progress callback already set, since - /// otherwise total_rows_approx will lost. - if (total_rows_approx != 0 && progress_callback) - { - Progress total_rows_progress = {0, 0, total_rows_approx}; - - progress_callback(total_rows_progress); - process_list_elem->updateProgressIn(total_rows_progress); - - total_rows_approx = 0; - } -} - -bool SourceWithProgress::checkTimeLimit() const -{ - return limits.speed_limits.checkTimeLimit(total_stopwatch, limits.timeout_overflow_mode); -} - -void SourceWithProgress::work() -{ - if (!checkTimeLimit()) - { - cancel(); - } - else - { - was_progress_called = false; - - ISourceWithProgress::work(); - - if (auto_progress && !was_progress_called && has_input) - progress({ current_chunk.chunk.getNumRows(), current_chunk.chunk.bytes() }); - } -} - -/// TODO: Most of this must be done in PipelineExecutor outside. -void SourceWithProgress::progress(const Progress & value) -{ - was_progress_called = true; - - if (total_rows_approx != 0) - { - Progress total_rows_progress = {0, 0, total_rows_approx}; - - if (progress_callback) - progress_callback(total_rows_progress); - - if (process_list_elem) - process_list_elem->updateProgressIn(total_rows_progress); - - total_rows_approx = 0; - } - - if (progress_callback) - progress_callback(value); - - if (process_list_elem) - { - if (!process_list_elem->updateProgressIn(value)) - cancel(); - - /// The total amount of data processed or intended for processing in all sources, possibly on remote servers. - - ProgressValues progress = process_list_elem->getProgressIn(); - - /// If the mode is "throw" and estimate of total rows is known, then throw early if an estimate is too high. - /// If the mode is "break", then allow to read before limit even if estimate is very high. 
- - size_t rows_to_check_limit = progress.read_rows; - if (limits.size_limits.overflow_mode == OverflowMode::THROW && progress.total_rows_to_read > progress.read_rows) - rows_to_check_limit = progress.total_rows_to_read; - - /// Check the restrictions on the - /// * amount of data to read - /// * speed of the query - /// * quota on the amount of data to read - /// NOTE: Maybe it makes sense to have them checked directly in ProcessList? - - if (limits.mode == LimitsMode::LIMITS_TOTAL) - { - if (!limits.size_limits.check(rows_to_check_limit, progress.read_bytes, "rows or bytes to read", - ErrorCodes::TOO_MANY_ROWS, ErrorCodes::TOO_MANY_BYTES)) - { - cancel(); - } - } - - if (!leaf_limits.check(rows_to_check_limit, progress.read_bytes, "rows or bytes to read on leaf node", - ErrorCodes::TOO_MANY_ROWS, ErrorCodes::TOO_MANY_BYTES)) - { - cancel(); - } - - size_t total_rows = progress.total_rows_to_read; - - constexpr UInt64 profile_events_update_period_microseconds = 10 * 1000; // 10 milliseconds - UInt64 total_elapsed_microseconds = total_stopwatch.elapsedMicroseconds(); - - if (last_profile_events_update_time + profile_events_update_period_microseconds < total_elapsed_microseconds) - { - /// TODO: Should be done in PipelineExecutor. - CurrentThread::updatePerformanceCounters(); - last_profile_events_update_time = total_elapsed_microseconds; - } - - /// TODO: Should be done in PipelineExecutor. - limits.speed_limits.throttle(progress.read_rows, progress.read_bytes, total_rows, total_elapsed_microseconds); - - if (quota && limits.mode == LimitsMode::LIMITS_TOTAL) - quota->used({QuotaType::READ_ROWS, value.read_rows}, {QuotaType::READ_BYTES, value.read_bytes}); - } - - auto query_kind = IAST::QueryKind::None; - if (process_list_elem) - query_kind = process_list_elem->getQueryKind(); - - if (query_kind == IAST::QueryKind::None || query_kind == IAST::QueryKind::System) - { - /// Don't increase profile event counters for merges and mutations, cause they use - /// a separate counter MergedRows/MergedBytes. - - /// This is a bad way to check that a query is merge or mutation. Will fix it later. - /// Note: you can't just check for QueryKind::Select, cause there are - /// queries like CREATE AS SELECT or INSERT SELECT. - } - else - { - ProfileEvents::increment(ProfileEvents::SelectedRows, value.read_rows); - ProfileEvents::increment(ProfileEvents::SelectedBytes, value.read_bytes); - } -} - -} diff --git a/src/Processors/Sources/SourceWithProgress.h b/src/Processors/Sources/SourceWithProgress.h deleted file mode 100644 index 57002006957..00000000000 --- a/src/Processors/Sources/SourceWithProgress.h +++ /dev/null @@ -1,94 +0,0 @@ -#pragma once -#include -#include -#include -#include - - -namespace DB -{ - -class QueryStatus; -class EnabledQuota; - -/// Adds progress to ISource. -/// This class takes care of limits, quotas, callback on progress and updating performance counters for current thread. -class ISourceWithProgress : public ISource -{ -public: - using ISource::ISource; - - /// Set limitations that checked on each chunk. - virtual void setLimits(const StreamLocalLimits & limits_) = 0; - - /// Set limitations that checked on each chunk for distributed queries on leaf nodes. - virtual void setLeafLimits(const SizeLimits & leaf_limits_) = 0; - - /// Set the quota. If you set a quota on the amount of raw data, - /// then you should also set mode = LIMITS_TOTAL to LocalLimits with setLimits. - virtual void setQuota(const std::shared_ptr & quota_) = 0; - - /// Set the pointer to the process list item. 
- /// General information about the resources spent on the request will be written into it. - /// Based on this information, the quota and some restrictions will be checked. - /// This information will also be available in the SHOW PROCESSLIST request. - virtual void setProcessListElement(QueryStatus * elem) = 0; - - /// Set the execution progress bar callback. - /// It is called after each chunk. - /// The function takes the number of rows in the last chunk, the number of bytes in the last chunk. - /// Note that the callback can be called from different threads. - virtual void setProgressCallback(const ProgressCallback & callback) = 0; - - /// Set the approximate total number of rows to read. - virtual void addTotalRowsApprox(size_t value) = 0; - virtual void setTotalRowsApprox(size_t value) = 0; -}; - -/// Implementation for ISourceWithProgress -class SourceWithProgress : public ISourceWithProgress -{ -public: - using ISourceWithProgress::ISourceWithProgress; - /// If enable_auto_progress flag is set, progress() will be automatically called on each generated chunk. - SourceWithProgress(Block header, bool enable_auto_progress); - - void setLimits(const StreamLocalLimits & limits_) final { limits = limits_; } - void setLeafLimits(const SizeLimits & leaf_limits_) final {leaf_limits = leaf_limits_; } - void setQuota(const std::shared_ptr & quota_) final { quota = quota_; } - void setProcessListElement(QueryStatus * elem) final; - void setProgressCallback(const ProgressCallback & callback) final { progress_callback = callback; } - void addTotalRowsApprox(size_t value) final { total_rows_approx += value; } - void setTotalRowsApprox(size_t value) final { total_rows_approx = value; } - -protected: - /// Call this method to provide information about progress. - void progress(const Progress & value); - - void work() override; - - bool checkTimeLimit() const; - -private: - StreamLocalLimits limits; - SizeLimits leaf_limits; - std::shared_ptr quota; - ProgressCallback progress_callback; - QueryStatus * process_list_elem = nullptr; - - /// The approximate total number of rows to read. For progress bar. - size_t total_rows_approx = 0; - - Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; /// Time with waiting time. - /// According to total_stopwatch in microseconds. - UInt64 last_profile_events_update_time = 0; - - /// This flag checks if progress() was manually called at generate() call. - /// If not, it will be called for chunk after generate() was finished. - bool was_progress_called = false; - - /// If enabled, progress() will be automatically called on each generated chunk. 
- bool auto_progress = true; -}; - -} diff --git a/src/Processors/Sources/TemporaryFileLazySource.cpp b/src/Processors/Sources/TemporaryFileLazySource.cpp index 0382229a7c0..477c7567ec7 100644 --- a/src/Processors/Sources/TemporaryFileLazySource.cpp +++ b/src/Processors/Sources/TemporaryFileLazySource.cpp @@ -7,7 +7,7 @@ namespace DB TemporaryFileLazySource::~TemporaryFileLazySource() = default; TemporaryFileLazySource::TemporaryFileLazySource(const std::string & path_, const Block & header_) - : ISource(header_) + : ISource(header_, true) , path(path_) , done(false) {} diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index 4133f959faf..b5b254c3e3c 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -110,7 +110,7 @@ public: ManyAggregatedDataVariantsPtr data_, SharedDataPtr shared_data_, Arena * arena_) - : ISource(params_->getHeader()) + : ISource(params_->getHeader(), false) , params(std::move(params_)) , data(std::move(data_)) , shared_data(std::move(shared_data_)) diff --git a/src/Processors/Transforms/LimitsCheckingTransform.cpp b/src/Processors/Transforms/LimitsCheckingTransform.cpp index e5f74003ac3..02d2fef808c 100644 --- a/src/Processors/Transforms/LimitsCheckingTransform.cpp +++ b/src/Processors/Transforms/LimitsCheckingTransform.cpp @@ -58,7 +58,7 @@ void LimitsCheckingTransform::checkQuota(Chunk & chunk) switch (limits.mode) { case LimitsMode::LIMITS_TOTAL: - /// Checked in SourceWithProgress::progress method. + /// Checked in ISource::progress method. break; case LimitsMode::LIMITS_CURRENT: diff --git a/src/Processors/Transforms/MongoDBSource.cpp b/src/Processors/Transforms/MongoDBSource.cpp index c2df39a5673..19d21f3409e 100644 --- a/src/Processors/Transforms/MongoDBSource.cpp +++ b/src/Processors/Transforms/MongoDBSource.cpp @@ -156,7 +156,7 @@ MongoDBSource::MongoDBSource( std::unique_ptr cursor_, const Block & sample_block, UInt64 max_block_size_) - : SourceWithProgress(sample_block.cloneEmpty()) + : ISource(sample_block.cloneEmpty()) , connection(connection_) , cursor{std::move(cursor_)} , max_block_size{max_block_size_} diff --git a/src/Processors/Transforms/MongoDBSource.h b/src/Processors/Transforms/MongoDBSource.h index 248cfb9960b..322aa4f50de 100644 --- a/src/Processors/Transforms/MongoDBSource.h +++ b/src/Processors/Transforms/MongoDBSource.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include @@ -22,7 +22,7 @@ void authenticate(Poco::MongoDB::Connection & connection, const std::string & da std::unique_ptr createCursor(const std::string & database, const std::string & collection, const Block & sample_block_to_select); /// Converts MongoDB Cursor to a stream of Blocks -class MongoDBSource final : public SourceWithProgress +class MongoDBSource final : public ISource { public: MongoDBSource( diff --git a/src/Processors/Transforms/PostgreSQLSource.cpp b/src/Processors/Transforms/PostgreSQLSource.cpp index a49206d0854..6926ac26bbc 100644 --- a/src/Processors/Transforms/PostgreSQLSource.cpp +++ b/src/Processors/Transforms/PostgreSQLSource.cpp @@ -29,7 +29,7 @@ PostgreSQLSource::PostgreSQLSource( const std::string & query_str_, const Block & sample_block, UInt64 max_block_size_) - : SourceWithProgress(sample_block.cloneEmpty()) + : ISource(sample_block.cloneEmpty()) , query_str(query_str_) , max_block_size(max_block_size_) , connection_holder(std::move(connection_holder_)) @@ -45,7 +45,7 @@ 
PostgreSQLSource::PostgreSQLSource( const Block & sample_block, UInt64 max_block_size_, bool auto_commit_) - : SourceWithProgress(sample_block.cloneEmpty()) + : ISource(sample_block.cloneEmpty()) , query_str(query_str_) , tx(std::move(tx_)) , max_block_size(max_block_size_) @@ -98,7 +98,7 @@ IProcessor::Status PostgreSQLSource::prepare() started = true; } - auto status = SourceWithProgress::prepare(); + auto status = ISource::prepare(); if (status == Status::Finished) onFinish(); diff --git a/src/Processors/Transforms/PostgreSQLSource.h b/src/Processors/Transforms/PostgreSQLSource.h index bd6203042bb..292cfc78d34 100644 --- a/src/Processors/Transforms/PostgreSQLSource.h +++ b/src/Processors/Transforms/PostgreSQLSource.h @@ -4,7 +4,7 @@ #if USE_LIBPQXX #include -#include +#include #include #include #include @@ -16,7 +16,7 @@ namespace DB { template -class PostgreSQLSource : public SourceWithProgress +class PostgreSQLSource : public ISource { public: diff --git a/src/Processors/Transforms/buildPushingToViewsChain.cpp b/src/Processors/Transforms/buildPushingToViewsChain.cpp index a3cc620e418..b68ec547280 100644 --- a/src/Processors/Transforms/buildPushingToViewsChain.cpp +++ b/src/Processors/Transforms/buildPushingToViewsChain.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/src/QueryPipeline/Chain.h b/src/QueryPipeline/Chain.h index 60dbad10131..09fb5e54cd4 100644 --- a/src/QueryPipeline/Chain.h +++ b/src/QueryPipeline/Chain.h @@ -42,14 +42,15 @@ public: void addTableLock(TableLockHolder lock) { holder.table_locks.emplace_back(std::move(lock)); } void addStorageHolder(StoragePtr storage) { holder.storage_holders.emplace_back(std::move(storage)); } - void attachResources(PipelineResourcesHolder holder_) { holder = std::move(holder_); } void addInterpreterContext(ContextPtr context) { holder.interpreter_context.emplace_back(std::move(context)); } - PipelineResourcesHolder detachResources() { return std::move(holder); } + + void attachResources(QueryPlanResourceHolder holder_) { holder = std::move(holder_); } + QueryPlanResourceHolder detachResources() { return std::move(holder); } void reset(); private: - PipelineResourcesHolder holder; + QueryPlanResourceHolder holder; /// -> source -> transform -> ... -> transform -> sink -> /// ^ -> -> -> -> ^ diff --git a/src/QueryPipeline/Pipe.cpp b/src/QueryPipeline/Pipe.cpp index 19009d9692a..ae342abeea5 100644 --- a/src/QueryPipeline/Pipe.cpp +++ b/src/QueryPipeline/Pipe.cpp @@ -8,8 +8,9 @@ #include #include #include -#include +#include #include +#include #include namespace DB @@ -101,16 +102,6 @@ static OutputPort * uniteTotals(const OutputPortRawPtrs & ports, const Block & h return totals_port; } -void Pipe::addQueryPlan(std::unique_ptr plan) -{ - holder.query_plans.emplace_back(std::move(plan)); -} - -PipelineResourcesHolder Pipe::detachResources() -{ - return std::move(holder); -} - Pipe::Pipe(ProcessorPtr source, OutputPort * output, OutputPort * totals, OutputPort * extremes) { if (!source->getInputs().empty()) @@ -302,19 +293,13 @@ Pipe Pipe::unitePipes(Pipes pipes, Processors * collected_processors, bool allow { Pipe res; - for (auto & pipe : pipes) - res.holder = std::move(pipe.holder); /// see move assignment for Pipe::Holder. 
- pipes = removeEmptyPipes(std::move(pipes)); if (pipes.empty()) return res; if (pipes.size() == 1) - { - pipes[0].holder = std::move(res.holder); return std::move(pipes[0]); - } OutputPortRawPtrs totals; OutputPortRawPtrs extremes; @@ -707,7 +692,6 @@ void Pipe::addChains(std::vector chains) connect(*output_ports[i], chains[i].getInputPort()); output_ports[i] = &chains[i].getOutputPort(); - holder = chains[i].detachResources(); auto added_processors = Chain::getProcessors(std::move(chains[i])); for (auto & transform : added_processors) { @@ -873,31 +857,4 @@ void Pipe::transform(const Transformer & transformer) max_parallel_streams = std::max(max_parallel_streams, output_ports.size()); } -void Pipe::setLimits(const StreamLocalLimits & limits) -{ - for (auto & processor : processors) - { - if (auto * source_with_progress = dynamic_cast(processor.get())) - source_with_progress->setLimits(limits); - } -} - -void Pipe::setLeafLimits(const SizeLimits & leaf_limits) -{ - for (auto & processor : processors) - { - if (auto * source_with_progress = dynamic_cast(processor.get())) - source_with_progress->setLeafLimits(leaf_limits); - } -} - -void Pipe::setQuota(const std::shared_ptr & quota) -{ - for (auto & processor : processors) - { - if (auto * source_with_progress = dynamic_cast(processor.get())) - source_with_progress->setQuota(quota); - } -} - } diff --git a/src/QueryPipeline/Pipe.h b/src/QueryPipeline/Pipe.h index 6f85b7a6a88..52059f4ad19 100644 --- a/src/QueryPipeline/Pipe.h +++ b/src/QueryPipeline/Pipe.h @@ -14,6 +14,8 @@ struct StreamLocalLimits; class Pipe; using Pipes = std::vector; +class ReadProgressCallback; + using OutputPortRawPtrs = std::vector; /// Pipe is a set of processors which represents the part of pipeline. @@ -99,26 +101,7 @@ public: /// Get processors from Pipe without destroying pipe (used for EXPLAIN to keep QueryPlan). const Processors & getProcessors() const { return processors; } - /// Specify quotas and limits for every ISourceWithProgress. - void setLimits(const StreamLocalLimits & limits); - void setLeafLimits(const SizeLimits & leaf_limits); - void setQuota(const std::shared_ptr & quota); - - /// Do not allow to change the table while the processors of pipe are alive. - void addTableLock(TableLockHolder lock) { holder.table_locks.emplace_back(std::move(lock)); } - /// This methods are from QueryPipeline. Needed to make conversion from pipeline to pipe possible. - void addInterpreterContext(std::shared_ptr context) { holder.interpreter_context.emplace_back(std::move(context)); } - void addStorageHolder(StoragePtr storage) { holder.storage_holders.emplace_back(std::move(storage)); } - void addQueryIdHolder(std::shared_ptr query_id_holder) { holder.query_id_holder = std::move(query_id_holder); } - /// For queries with nested interpreters (i.e. StorageDistributed) - void addQueryPlan(std::unique_ptr plan); - - PipelineResourcesHolder detachResources(); - private: - /// Destruction order: processors, header, locks, temporary storages, local contexts - PipelineResourcesHolder holder; - /// Header is common for all output below. 
Block header; Processors processors; diff --git a/src/QueryPipeline/PipelineResourcesHolder.cpp b/src/QueryPipeline/PipelineResourcesHolder.cpp index 2f6b6a9de32..6ebb40086d3 100644 --- a/src/QueryPipeline/PipelineResourcesHolder.cpp +++ b/src/QueryPipeline/PipelineResourcesHolder.cpp @@ -1,25 +1,23 @@ #include #include +#include namespace DB { -PipelineResourcesHolder::PipelineResourcesHolder() = default; -PipelineResourcesHolder::PipelineResourcesHolder(PipelineResourcesHolder &&) noexcept = default; -PipelineResourcesHolder::~PipelineResourcesHolder() = default; - -PipelineResourcesHolder & PipelineResourcesHolder::operator=(PipelineResourcesHolder && rhs) noexcept +QueryPlanResourceHolder & QueryPlanResourceHolder::operator=(QueryPlanResourceHolder && rhs) noexcept { table_locks.insert(table_locks.end(), rhs.table_locks.begin(), rhs.table_locks.end()); storage_holders.insert(storage_holders.end(), rhs.storage_holders.begin(), rhs.storage_holders.end()); interpreter_context.insert(interpreter_context.end(), rhs.interpreter_context.begin(), rhs.interpreter_context.end()); - for (auto & plan : rhs.query_plans) - query_plans.emplace_back(std::move(plan)); - - query_id_holder = std::move(rhs.query_id_holder); + query_id_holders.insert(query_id_holders.end(), rhs.query_id_holders.begin(), rhs.query_id_holders.end()); return *this; } +QueryPlanResourceHolder::QueryPlanResourceHolder() = default; +QueryPlanResourceHolder::QueryPlanResourceHolder(QueryPlanResourceHolder &&) noexcept = default; +QueryPlanResourceHolder::~QueryPlanResourceHolder() = default; + } diff --git a/src/QueryPipeline/PipelineResourcesHolder.h b/src/QueryPipeline/PipelineResourcesHolder.h index ea1651c77d5..46b1024f384 100644 --- a/src/QueryPipeline/PipelineResourcesHolder.h +++ b/src/QueryPipeline/PipelineResourcesHolder.h @@ -1,6 +1,5 @@ #pragma once #include -#include namespace DB { @@ -13,13 +12,15 @@ using StoragePtr = std::shared_ptr; class QueryPlan; class Context; -struct PipelineResourcesHolder +struct QueryIdHolder; + +struct QueryPlanResourceHolder { - PipelineResourcesHolder(); - PipelineResourcesHolder(PipelineResourcesHolder &&) noexcept; - ~PipelineResourcesHolder(); + QueryPlanResourceHolder(); + QueryPlanResourceHolder(QueryPlanResourceHolder &&) noexcept; + ~QueryPlanResourceHolder(); /// Custom move assignment does not destroy data from lhs. It appends data from rhs to lhs. - PipelineResourcesHolder& operator=(PipelineResourcesHolder &&) noexcept; + QueryPlanResourceHolder& operator=(QueryPlanResourceHolder &&) noexcept; /// Some processors may implicitly use Context or temporary Storage created by Interpreter. /// But lifetime of Streams is not nested in lifetime of Interpreters, so we have to store it here, @@ -27,8 +28,7 @@ struct PipelineResourcesHolder std::vector> interpreter_context; std::vector storage_holders; std::vector table_locks; - std::vector> query_plans; - std::shared_ptr query_id_holder; + std::vector> query_id_holders; }; } diff --git a/src/QueryPipeline/ProfileInfo.h b/src/QueryPipeline/ProfileInfo.h index 0a5800cd409..7a0a0c304e2 100644 --- a/src/QueryPipeline/ProfileInfo.h +++ b/src/QueryPipeline/ProfileInfo.h @@ -12,7 +12,7 @@ class Block; class ReadBuffer; class WriteBuffer; -/// Information for profiling. See SourceWithProgress.h +/// Information for profiling. 
See ISource.h struct ProfileInfo { bool started = false; diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index 192aa9dd7b5..31b18c7f7f0 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -3,6 +3,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -10,11 +13,14 @@ #include #include #include -#include +#include #include #include #include #include +#include +#include + namespace DB { @@ -203,7 +209,7 @@ static void initRowsBeforeLimit(IOutputFormat * output_format) QueryPipeline::QueryPipeline( - PipelineResourcesHolder resources_, + QueryPlanResourceHolder resources_, Processors processors_) : resources(std::move(resources_)) , processors(std::move(processors_)) @@ -212,7 +218,7 @@ QueryPipeline::QueryPipeline( } QueryPipeline::QueryPipeline( - PipelineResourcesHolder resources_, + QueryPlanResourceHolder resources_, Processors processors_, InputPort * input_) : resources(std::move(resources_)) @@ -248,7 +254,7 @@ QueryPipeline::QueryPipeline( QueryPipeline::QueryPipeline(std::shared_ptr source) : QueryPipeline(Pipe(std::move(source))) {} QueryPipeline::QueryPipeline( - PipelineResourcesHolder resources_, + QueryPlanResourceHolder resources_, Processors processors_, OutputPort * output_, OutputPort * totals_, @@ -264,8 +270,6 @@ QueryPipeline::QueryPipeline( QueryPipeline::QueryPipeline(Pipe pipe) { - resources = std::move(pipe.holder); - if (pipe.numOutputPorts() > 0) { pipe.resize(1); @@ -390,7 +394,6 @@ void QueryPipeline::complete(Pipe pipe) throw Exception(ErrorCodes::LOGICAL_ERROR, "Pipeline must be pushing to be completed with pipe"); pipe.resize(1); - resources = pipe.detachResources(); pipe.dropExtremes(); pipe.dropTotals(); connect(*pipe.getOutputPort(0), *input); @@ -469,26 +472,14 @@ Block QueryPipeline::getHeader() const void QueryPipeline::setProgressCallback(const ProgressCallback & callback) { - for (auto & processor : processors) - { - if (auto * source = dynamic_cast(processor.get())) - source->setProgressCallback(callback); - } + progress_callback = callback; } void QueryPipeline::setProcessListElement(QueryStatus * elem) { process_list_element = elem; - if (pulling() || completed()) - { - for (auto & processor : processors) - { - if (auto * source = dynamic_cast(processor.get())) - source->setProcessListElement(elem); - } - } - else if (pushing()) + if (pushing()) { if (auto * counting = dynamic_cast(&input->getProcessor())) { @@ -497,8 +488,12 @@ void QueryPipeline::setProcessListElement(QueryStatus * elem) } } +void QueryPipeline::setQuota(std::shared_ptr quota_) +{ + quota = std::move(quota_); +} -void QueryPipeline::setLimitsAndQuota(const StreamLocalLimits & limits, std::shared_ptr quota) +void QueryPipeline::setLimitsAndQuota(const StreamLocalLimits & limits, std::shared_ptr quota_) { if (!pulling()) throw Exception( @@ -506,7 +501,7 @@ void QueryPipeline::setLimitsAndQuota(const StreamLocalLimits & limits, std::sha "It is possible to set limits and quota only to pulling QueryPipeline"); auto transform = std::make_shared(output->getHeader(), limits); - transform->setQuota(quota); + transform->setQuota(quota_); connect(*output, transform->getInputPort()); output = &transform->getOutputPort(); processors.emplace_back(std::move(transform)); @@ -528,10 +523,60 @@ void QueryPipeline::addStorageHolder(StoragePtr storage) resources.storage_holders.emplace_back(std::move(storage)); } +void QueryPipeline::addCompletedPipeline(QueryPipeline other) +{ + if 
(!other.completed()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot add a pipeline which is not completed"); + + resources = std::move(other.resources); + processors.insert(processors.end(), other.processors.begin(), other.processors.end()); +} + void QueryPipeline::reset() { QueryPipeline to_remove = std::move(*this); *this = QueryPipeline(); } +static void addExpression(OutputPort *& port, ExpressionActionsPtr actions, Processors & processors) +{ + if (port) + { + auto transform = std::make_shared<ExpressionTransform>(port->getHeader(), actions); + connect(*port, transform->getInputPort()); + port = &transform->getOutputPort(); + processors.emplace_back(std::move(transform)); + } +} + +void QueryPipeline::convertStructureTo(const ColumnsWithTypeAndName & columns) +{ + if (!pulling()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Pipeline must be pulling to convert header"); + + auto converting = ActionsDAG::makeConvertingActions( + output->getHeader().getColumnsWithTypeAndName(), + columns, + ActionsDAG::MatchColumnsMode::Position); + + auto actions = std::make_shared<ExpressionActions>(std::move(converting)); + addExpression(output, actions, processors); + addExpression(totals, actions, processors); + addExpression(extremes, actions, processors); +} + +std::unique_ptr<ReadProgressCallback> QueryPipeline::getReadProgressCallback() const +{ + auto callback = std::make_unique<ReadProgressCallback>(); + + callback->setProgressCallback(progress_callback); + callback->setQuota(quota); + callback->setProcessListElement(process_list_element); + + if (!update_profile_events) + callback->disableProfileEventUpdate(); + + return callback; +} + } diff --git a/src/QueryPipeline/QueryPipeline.h b/src/QueryPipeline/QueryPipeline.h index 06fd1ee3ec1..1b88ede3349 100644 --- a/src/QueryPipeline/QueryPipeline.h +++ b/src/QueryPipeline/QueryPipeline.h @@ -1,5 +1,7 @@ #pragma once #include +#include +#include #include namespace DB @@ -27,6 +29,10 @@ class IOutputFormat; class SinkToStorage; class ISource; class ISink; +class ReadProgressCallback; + +struct ColumnWithTypeAndName; +using ColumnsWithTypeAndName = std::vector<ColumnWithTypeAndName>; class QueryPipeline { @@ -51,18 +57,18 @@ public: /// completed QueryPipeline( - PipelineResourcesHolder resources_, + QueryPlanResourceHolder resources_, Processors processors_); /// pushing QueryPipeline( - PipelineResourcesHolder resources_, + QueryPlanResourceHolder resources_, Processors processors_, InputPort * input_); /// pulling QueryPipeline( - PipelineResourcesHolder resources_, + QueryPlanResourceHolder resources_, Processors processors_, OutputPort * output_, OutputPort * totals_ = nullptr, @@ -93,17 +99,41 @@ public: void setProcessListElement(QueryStatus * elem); void setProgressCallback(const ProgressCallback & callback); - void setLimitsAndQuota(const StreamLocalLimits & limits, std::shared_ptr<const EnabledQuota> quota); + void setLimitsAndQuota(const StreamLocalLimits & limits, std::shared_ptr<const EnabledQuota> quota_); bool tryGetResultRowsAndBytes(UInt64 & result_rows, UInt64 & result_bytes) const; + void setQuota(std::shared_ptr<const EnabledQuota> quota_); + void addStorageHolder(StoragePtr storage); + /// Existing resources are not released here; see the move assignment of QueryPlanResourceHolder. + void addResources(QueryPlanResourceHolder holder) { resources = std::move(holder); } + + /// Skip updating profile events. + /// Merges in mutations may need special logic for this; it is handled inside the ProgressCallback. + void disableProfileEventUpdate() { update_profile_events = false; } + + /// Create progress callback from limits and quotas.
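+ /// The returned callback carries this pipeline's progress callback, quota and
+ /// process list element; presumably it is invoked by the executor for each
+ /// chunk read from a source (the executor wiring is outside this diff).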
+ std::unique_ptr getReadProgressCallback() const; + + /// Add processors and resources from other pipeline. Other pipeline should be completed. + void addCompletedPipeline(QueryPipeline other); + const Processors & getProcessors() const { return processors; } + /// For pulling pipeline, convert structure to expected. + /// Trash, need to remove later. + void convertStructureTo(const ColumnsWithTypeAndName & columns); + void reset(); private: - PipelineResourcesHolder resources; + QueryPlanResourceHolder resources; + + ProgressCallback progress_callback; + std::shared_ptr quota; + bool update_profile_events = true; + Processors processors; InputPort * input = nullptr; diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 012a825a9d5..7eccb1c70f5 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -30,11 +30,6 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -void QueryPipelineBuilder::addQueryPlan(std::unique_ptr plan) -{ - pipe.addQueryPlan(std::move(plan)); -} - void QueryPipelineBuilder::checkInitialized() { if (!initialized()) @@ -88,7 +83,7 @@ void QueryPipelineBuilder::init(Pipe pipe_) pipe = std::move(pipe_); } -void QueryPipelineBuilder::init(QueryPipeline pipeline) +void QueryPipelineBuilder::init(QueryPipeline & pipeline) { if (initialized()) throw Exception("Pipeline has already been initialized", ErrorCodes::LOGICAL_ERROR); @@ -96,8 +91,6 @@ void QueryPipelineBuilder::init(QueryPipeline pipeline) if (pipeline.pushing()) throw Exception("Can't initialize pushing pipeline", ErrorCodes::LOGICAL_ERROR); - pipe.holder = std::move(pipeline.resources); - pipe.processors = std::move(pipeline.processors); if (pipeline.output) { pipe.output_ports = {pipeline.output}; @@ -272,11 +265,13 @@ QueryPipelineBuilder QueryPipelineBuilder::unitePipelines( bool will_limit_max_threads = true; size_t max_threads = 0; Pipes pipes; + QueryPlanResourceHolder resources; for (auto & pipeline_ptr : pipelines) { auto & pipeline = *pipeline_ptr; pipeline.checkInitialized(); + resources = std::move(pipeline.resources); pipeline.pipe.collected_processors = collected_processors; pipes.emplace_back(std::move(pipeline.pipe)); @@ -292,6 +287,7 @@ QueryPipelineBuilder QueryPipelineBuilder::unitePipelines( QueryPipelineBuilder pipeline; pipeline.init(Pipe::unitePipes(std::move(pipes), collected_processors, false)); + pipeline.addResources(std::move(resources)); if (will_limit_max_threads) { @@ -429,7 +425,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelines( step->appendExtraProcessors(processors); left->pipe.processors.insert(left->pipe.processors.end(), right->pipe.processors.begin(), right->pipe.processors.end()); - left->pipe.holder = std::move(right->pipe.holder); + left->resources = std::move(right->resources); left->pipe.header = left->pipe.output_ports.front()->getHeader(); left->pipe.max_parallel_streams = std::max(left->pipe.max_parallel_streams, right->pipe.max_parallel_streams); return left; @@ -469,31 +465,16 @@ void QueryPipelineBuilder::addPipelineBefore(QueryPipelineBuilder pipeline) Pipes pipes; pipes.emplace_back(std::move(pipe)); - pipes.emplace_back(QueryPipelineBuilder::getPipe(std::move(pipeline))); + pipes.emplace_back(QueryPipelineBuilder::getPipe(std::move(pipeline), resources)); pipe = Pipe::unitePipes(std::move(pipes), collected_processors, true); auto processor = std::make_shared(getHeader(), pipe.numOutputPorts(), delayed_streams, true); 
addTransform(std::move(processor)); } -void QueryPipelineBuilder::setProgressCallback(const ProgressCallback & callback) -{ - for (auto & processor : pipe.processors) - { - if (auto * source = dynamic_cast<ISourceWithProgress *>(processor.get())) - source->setProgressCallback(callback); - } -} - void QueryPipelineBuilder::setProcessListElement(QueryStatus * elem) { process_list_element = elem; - - for (auto & processor : pipe.processors) - { - if (auto * source = dynamic_cast<ISourceWithProgress *>(processor.get())) - source->setProcessListElement(elem); - } } PipelineExecutorPtr QueryPipelineBuilder::execute() @@ -504,9 +485,16 @@ PipelineExecutorPtr QueryPipelineBuilder::execute() return std::make_shared<PipelineExecutor>(pipe.processors, process_list_element); } +Pipe QueryPipelineBuilder::getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources) +{ + resources = std::move(pipeline.resources); + return std::move(pipeline.pipe); +} + QueryPipeline QueryPipelineBuilder::getPipeline(QueryPipelineBuilder builder) { QueryPipeline res(std::move(builder.pipe)); + res.addResources(std::move(builder.resources)); res.setNumThreads(builder.getNumThreads()); res.setProcessListElement(builder.process_list_element); return res; diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index ad25985ab48..77ca3f05031 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -42,7 +42,8 @@ public: /// All pipes must have same header. void init(Pipe pipe); - void init(QueryPipeline pipeline); + /// This is a constructor-like method which initializes the builder from an existing pipeline. + void init(QueryPipeline & pipeline); /// Clear and release all resources. void reset(); @@ -58,6 +59,8 @@ public: void addTransform(ProcessorPtr transform); void addTransform(ProcessorPtr transform, InputPort * totals, InputPort * extremes); + /// Note: these two methods do not take care of resources inside the chain. + /// You should attach them yourself. void addChains(std::vector<Chain> chains); void addChain(Chain chain); @@ -120,15 +123,6 @@ public: const Block & getHeader() const { return pipe.getHeader(); } - void addTableLock(TableLockHolder lock) { pipe.addTableLock(std::move(lock)); } - void addInterpreterContext(ContextPtr context) { pipe.addInterpreterContext(std::move(context)); } - void addStorageHolder(StoragePtr storage) { pipe.addStorageHolder(std::move(storage)); } - void addQueryPlan(std::unique_ptr<QueryPlan> plan); - void setLimits(const StreamLocalLimits & limits) { pipe.setLimits(limits); } - void setLeafLimits(const SizeLimits & limits) { pipe.setLeafLimits(limits); } - void setQuota(const std::shared_ptr<const EnabledQuota> & quota) { pipe.setQuota(quota); } - - void setProgressCallback(const ProgressCallback & callback); void setProcessListElement(QueryStatus * elem); /// Recommend number of threads for pipeline execution. @@ -152,12 +146,17 @@ public: max_threads = max_threads_; } + void addResources(QueryPlanResourceHolder resources_) { resources = std::move(resources_); } + void setQueryIdHolder(std::shared_ptr<QueryIdHolder> query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } + /// Convert query pipeline to pipe.
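+ /// Note: the builder's resources are moved into the passed holder, so the
+ /// caller must keep that holder alive at least as long as the returned Pipe.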
- static Pipe getPipe(QueryPipelineBuilder pipeline) { return std::move(pipeline.pipe); } + static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); static QueryPipeline getPipeline(QueryPipelineBuilder builder); private: + /// Destruction order: processors, header, locks, temporary storages, local contexts + QueryPlanResourceHolder resources; Pipe pipe; /// Limit on the number of threads. Zero means no limit. diff --git a/src/QueryPipeline/ReadProgressCallback.cpp b/src/QueryPipeline/ReadProgressCallback.cpp new file mode 100644 index 00000000000..bbdabb8e8d8 --- /dev/null +++ b/src/QueryPipeline/ReadProgressCallback.cpp @@ -0,0 +1,144 @@ +#include +#include +#include + +namespace ProfileEvents +{ + extern const Event SelectedRows; + extern const Event SelectedBytes; +} + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int TOO_MANY_ROWS; + extern const int TOO_MANY_BYTES; +} + +void ReadProgressCallback::setProcessListElement(QueryStatus * elem) +{ + process_list_elem = elem; + if (!elem) + return; + + /// Update total_rows_approx as soon as possible. + /// + /// It is important to do this, since you will not get a correct + /// total_rows_approx until the query starts reading all parts (in case + /// the query needs to read from multiple parts), and this is especially a + /// problem in case of max_threads=1. + /// + /// NOTE: This can be done only if the progress callback is already set, since + /// otherwise total_rows_approx will be lost. + size_t rows_approx = 0; + if (progress_callback && (rows_approx = total_rows_approx.exchange(0)) != 0) + { + Progress total_rows_progress = {0, 0, rows_approx}; + + progress_callback(total_rows_progress); + process_list_elem->updateProgressIn(total_rows_progress); + } +} + +bool ReadProgressCallback::onProgress(uint64_t read_rows, uint64_t read_bytes, const StorageLimitsList & storage_limits) +{ + for (const auto & limits : storage_limits) + { + if (!limits.local_limits.speed_limits.checkTimeLimit(total_stopwatch, limits.local_limits.timeout_overflow_mode)) + return false; + } + + size_t rows_approx = 0; + if ((rows_approx = total_rows_approx.exchange(0)) != 0) + { + Progress total_rows_progress = {0, 0, rows_approx}; + + if (progress_callback) + progress_callback(total_rows_progress); + + if (process_list_elem) + process_list_elem->updateProgressIn(total_rows_progress); + } + + Progress value {read_rows, read_bytes}; + + if (progress_callback) + progress_callback(value); + + if (process_list_elem) + { + if (!process_list_elem->updateProgressIn(value)) + return false; + + /// The total amount of data processed or intended for processing in all sources, possibly on remote servers. + + ProgressValues progress = process_list_elem->getProgressIn(); + + for (const auto & limits : storage_limits) + { + /// If the mode is "throw" and the estimate of total rows is known, then throw early if the estimate is too high. + /// If the mode is "break", then allow reading up to the limit even if the estimate is very high. + + size_t rows_to_check_limit = progress.read_rows; + if (limits.local_limits.size_limits.overflow_mode == OverflowMode::THROW && progress.total_rows_to_read > progress.read_rows) + rows_to_check_limit = progress.total_rows_to_read; + + /// Check the restrictions on the + /// * amount of data to read + /// * speed of the query + /// * quota on the amount of data to read + /// NOTE: Maybe it makes sense to have them checked directly in ProcessList?
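+ /// Here local_limits constrain reads on the current server, while leaf_limits
+ /// additionally constrain what the leaf nodes of a distributed query may read.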
+ + if (limits.local_limits.mode == LimitsMode::LIMITS_TOTAL) + { + if (!limits.local_limits.size_limits.check( + rows_to_check_limit, progress.read_bytes, "rows or bytes to read", + ErrorCodes::TOO_MANY_ROWS, ErrorCodes::TOO_MANY_BYTES)) + { + return false; + } + } + + if (!limits.leaf_limits.check( + rows_to_check_limit, progress.read_bytes, "rows or bytes to read on leaf node", + ErrorCodes::TOO_MANY_ROWS, ErrorCodes::TOO_MANY_BYTES)) + { + return false; + } + } + + size_t total_rows = progress.total_rows_to_read; + + constexpr UInt64 profile_events_update_period_microseconds = 10 * 1000; // 10 milliseconds + UInt64 total_elapsed_microseconds = total_stopwatch.elapsedMicroseconds(); + + std::lock_guard lock(last_profile_events_update_time_mutex); + { + if (last_profile_events_update_time + profile_events_update_period_microseconds < total_elapsed_microseconds) + { + /// TODO: Should be done in PipelineExecutor. + CurrentThread::updatePerformanceCounters(); + last_profile_events_update_time = total_elapsed_microseconds; + } + } + + /// TODO: Should be done in PipelineExecutor. + for (const auto & limits : storage_limits) + limits.local_limits.speed_limits.throttle(progress.read_rows, progress.read_bytes, total_rows, total_elapsed_microseconds); + + if (quota) + quota->used({QuotaType::READ_ROWS, value.read_rows}, {QuotaType::READ_BYTES, value.read_bytes}); + } + + if (update_profile_events) + { + ProfileEvents::increment(ProfileEvents::SelectedRows, value.read_rows); + ProfileEvents::increment(ProfileEvents::SelectedBytes, value.read_bytes); + } + + return true; +} + +} diff --git a/src/QueryPipeline/ReadProgressCallback.h b/src/QueryPipeline/ReadProgressCallback.h new file mode 100644 index 00000000000..f64123ef39d --- /dev/null +++ b/src/QueryPipeline/ReadProgressCallback.h @@ -0,0 +1,46 @@ +#pragma once +#include +#include +#include +#include + +namespace DB +{ + +class QueryStatus; +class EnabledQuota; + +struct StorageLimits; +using StorageLimitsList = std::list; + +class ReadProgressCallback +{ +public: + void setQuota(const std::shared_ptr & quota_) { quota = quota_; } + void setProcessListElement(QueryStatus * elem); + void setProgressCallback(const ProgressCallback & callback) { progress_callback = callback; } + void addTotalRowsApprox(size_t value) { total_rows_approx += value; } + + /// Skip updating profile events. + /// For merges in mutations it may need special logic, it's done inside ProgressCallback. + void disableProfileEventUpdate() { update_profile_events = false; } + + bool onProgress(uint64_t read_rows, uint64_t read_bytes, const StorageLimitsList & storage_limits); + +private: + std::shared_ptr quota; + ProgressCallback progress_callback; + QueryStatus * process_list_elem = nullptr; + + /// The approximate total number of rows to read. For progress bar. + std::atomic_size_t total_rows_approx = 0; + + Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; /// Time with waiting time. + /// According to total_stopwatch in microseconds. 
+ UInt64 last_profile_events_update_time = 0; + std::mutex last_profile_events_update_time_mutex; + + bool update_profile_events = true; +}; + +} diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index 28ea7682ed8..1d19f557c49 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -8,9 +8,13 @@ #include #include "Core/Protocol.h" #include "IO/ReadHelpers.h" +#include +#include #include #include #include +#include +#include #include #include #include @@ -569,18 +573,21 @@ void RemoteQueryExecutor::sendExternalTables() QueryProcessingStage::Enum read_from_table_stage = cur->getQueryProcessingStage( context, QueryProcessingStage::Complete, storage_snapshot, query_info); - Pipe pipe = cur->read( + QueryPlan plan; + cur->read( + plan, metadata_snapshot->getColumns().getNamesOfPhysical(), storage_snapshot, query_info, context, read_from_table_stage, DEFAULT_BLOCK_SIZE, 1); - if (pipe.empty()) - return std::make_unique( - std::make_shared(metadata_snapshot->getSampleBlock(), Chunk())); + auto builder = plan.buildQueryPipeline( + QueryPlanOptimizationSettings::fromContext(context), + BuildQueryPipelineSettings::fromContext(context)); - pipe.addTransform(std::make_shared(pipe.getHeader(), limits)); + builder->resize(1); + builder->addTransform(std::make_shared(builder->getHeader(), limits)); - return std::make_unique(std::move(pipe)); + return builder; }; data->pipe = data->creating_pipe_callback(); diff --git a/src/QueryPipeline/SizeLimits.cpp b/src/QueryPipeline/SizeLimits.cpp index 3514d0a5e28..3fe73f61402 100644 --- a/src/QueryPipeline/SizeLimits.cpp +++ b/src/QueryPipeline/SizeLimits.cpp @@ -35,9 +35,11 @@ bool SizeLimits::check(UInt64 rows, UInt64 bytes, const char * what, int too_man bool SizeLimits::softCheck(UInt64 rows, UInt64 bytes) const { - if (max_rows && rows > max_rows) + /// For result_overflow_mode = 'break', we check for >= to tell that no more data is needed. + /// Last chunk will be processed. 
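+ /// For example, with max_rows = 100, softCheck(100, 0) already returns false,
+ /// so the chunk that brings the row count to 100 is the last one processed.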
+ if (max_rows && rows >= max_rows) return false; - if (max_bytes && bytes > max_bytes) + if (max_bytes && bytes >= max_bytes) return false; return true; } diff --git a/src/QueryPipeline/StreamLocalLimits.h b/src/QueryPipeline/StreamLocalLimits.h index 7f49a5d0b07..5df026e6e3d 100644 --- a/src/QueryPipeline/StreamLocalLimits.h +++ b/src/QueryPipeline/StreamLocalLimits.h @@ -1,6 +1,7 @@ #pragma once #include #include +#include namespace DB { @@ -30,4 +31,12 @@ struct StreamLocalLimits OverflowMode timeout_overflow_mode = OverflowMode::THROW; }; +struct StorageLimits +{ + StreamLocalLimits local_limits; + SizeLimits leaf_limits; +}; + +using StorageLimitsList = std::list; + } diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index 414c596eb05..e1c73b7ebbb 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -1047,10 +1047,7 @@ namespace auto source = query_context->getInputFormat( input_format, *read_buffer, header, query_context->getSettings().max_insert_block_size); - QueryPipelineBuilder builder; - builder.init(Pipe(source)); - - pipeline = std::make_unique(QueryPipelineBuilder::getPipeline(std::move(builder))); + pipeline = std::make_unique(std::move(source)); pipeline_executor = std::make_unique(*pipeline); } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index f056842926d..6171971cb85 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1359,14 +1359,6 @@ void TCPHandler::receiveQuery() /// so we have to apply the changes first. query_context->setCurrentQueryId(state.query_id); - /// Disable function name normalization when it's a secondary query, because queries are either - /// already normalized on initiator node, or not normalized and should remain unnormalized for - /// compatibility. 
- if (query_kind == ClientInfo::QueryKind::SECONDARY_QUERY) - { - query_context->setSetting("normalize_function_names", false); - } - /// For testing hedged requests if (unlikely(sleep_after_receiving_query.totalMilliseconds())) { diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index 5cafe4d829d..5e9da48fc68 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include @@ -905,7 +905,7 @@ private: } }; -class DirectoryMonitorSource : public SourceWithProgress +class DirectoryMonitorSource : public ISource { public: @@ -940,7 +940,7 @@ public: } explicit DirectoryMonitorSource(Data data_) - : SourceWithProgress(data_.first_block.cloneEmpty()) + : ISource(data_.first_block.cloneEmpty()) , data(std::move(data_)) { } diff --git a/src/Storages/FileLog/FileLogSource.cpp b/src/Storages/FileLog/FileLogSource.cpp index ba4770a4bf7..c461f4b8160 100644 --- a/src/Storages/FileLog/FileLogSource.cpp +++ b/src/Storages/FileLog/FileLogSource.cpp @@ -19,7 +19,7 @@ FileLogSource::FileLogSource( size_t poll_time_out_, size_t stream_number_, size_t max_streams_number_) - : SourceWithProgress(storage_snapshot_->getSampleBlockForColumns(columns)) + : ISource(storage_snapshot_->getSampleBlockForColumns(columns)) , storage(storage_) , storage_snapshot(storage_snapshot_) , context(context_) diff --git a/src/Storages/FileLog/FileLogSource.h b/src/Storages/FileLog/FileLogSource.h index 831f4c907a5..51d69d23b57 100644 --- a/src/Storages/FileLog/FileLogSource.h +++ b/src/Storages/FileLog/FileLogSource.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include @@ -10,7 +10,7 @@ namespace Poco } namespace DB { -class FileLogSource : public SourceWithProgress +class FileLogSource : public ISource { public: FileLogSource( diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index fa87cfb776d..2e4c608dc9d 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -33,6 +33,7 @@ #include #include +#include #include #include @@ -297,7 +298,7 @@ HDFSSource::HDFSSource( UInt64 max_block_size_, std::shared_ptr file_iterator_, ColumnsDescription columns_description_) - : SourceWithProgress(getHeader(block_for_format_, requested_virtual_columns_)) + : ISource(getHeader(block_for_format_, requested_virtual_columns_)) , WithContext(context_) , storage(std::move(storage_)) , block_for_format(block_for_format_) @@ -418,7 +419,9 @@ public: void onException() override { - write_buf->finalize(); + if (!writer) + return; + onFinish(); } void onFinish() override @@ -432,6 +435,7 @@ public: } catch (...) { + /// Stop ParallelFormattingOutputFormat correctly. 
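+ /// Resetting the writer stops the background formatting threads
+ /// before the exception is rethrown.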
writer.reset(); throw; } diff --git a/src/Storages/HDFS/StorageHDFS.h b/src/Storages/HDFS/StorageHDFS.h index 1f9e57ab2b7..d987820b844 100644 --- a/src/Storages/HDFS/StorageHDFS.h +++ b/src/Storages/HDFS/StorageHDFS.h @@ -4,7 +4,7 @@ #if USE_HDFS -#include +#include #include #include #include @@ -82,7 +82,7 @@ private: class PullingPipelineExecutor; -class HDFSSource : public SourceWithProgress, WithContext +class HDFSSource : public ISource, WithContext { public: class DisclosedGlobIterator diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index 38c8c054a9b..4b51ad6d9b2 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -25,7 +25,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -55,7 +56,7 @@ static std::string getBaseName(const String & path) return path.substr(basename_start + 1); } -class StorageHiveSource : public SourceWithProgress, WithContext +class StorageHiveSource : public ISource, WithContext { public: using FileFormat = StorageHive::FileFormat; @@ -110,7 +111,7 @@ public: ContextPtr context_, UInt64 max_block_size_, const Names & text_input_field_names_ = {}) - : SourceWithProgress(getHeader(sample_block_, source_info_)) + : ISource(getHeader(sample_block_, source_info_)) , WithContext(context_) , source_info(std::move(source_info_)) , hdfs_namenode_url(std::move(hdfs_namenode_url_)) @@ -216,16 +217,15 @@ public: auto input_format = FormatFactory::instance().getInputFormat( format, *read_buf, to_read_block, getContext(), max_block_size, updateFormatSettings(current_file)); - QueryPipelineBuilder builder; - builder.init(Pipe(input_format)); + Pipe pipe(input_format); if (columns_description.hasDefaults()) { - builder.addSimpleTransform([&](const Block & header) + pipe.addSimpleTransform([&](const Block & header) { return std::make_shared(header, columns_description, *input_format, getContext()); }); } - pipeline = std::make_unique(QueryPipelineBuilder::getPipeline(std::move(builder))); + pipeline = std::make_unique(std::move(pipe)); reader = std::make_unique(*pipeline); } diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 88d60e00b9c..43b67657a87 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -88,6 +88,17 @@ TableExclusiveLockHolder IStorage::lockExclusively(const String & query_id, cons return result; } +Pipe IStorage::watch( + const Names & /*column_names*/, + const SelectQueryInfo & /*query_info*/, + ContextPtr /*context*/, + QueryProcessingStage::Enum & /*processed_stage*/, + size_t /*max_block_size*/, + unsigned /*num_streams*/) +{ + throw Exception("Method watch is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED); +} + Pipe IStorage::read( const Names & /*column_names*/, const StorageSnapshotPtr & /*storage_snapshot*/, @@ -111,6 +122,18 @@ void IStorage::read( unsigned num_streams) { auto pipe = read(column_names, storage_snapshot, query_info, context, processed_stage, max_block_size, num_streams); + readFromPipe(query_plan, std::move(pipe), column_names, storage_snapshot, query_info, context, getName()); +} + +void IStorage::readFromPipe( + QueryPlan & query_plan, + Pipe pipe, + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + std::string storage_name) +{ if (pipe.empty()) { auto header = storage_snapshot->getSampleBlockForColumns(column_names); @@ -118,11 +141,18 @@ void IStorage::read( } else { - auto 
read_step = std::make_unique<ReadFromStorageStep>(std::move(pipe), getName()); + auto read_step = std::make_unique<ReadFromStorageStep>(std::move(pipe), storage_name, query_info.storage_limits); query_plan.addStep(std::move(read_step)); } } +std::optional<QueryPipeline> IStorage::distributedWrite( + const ASTInsertQuery & /*query*/, + ContextPtr /*context*/) +{ + return {}; +} + Pipe IStorage::alterPartition( const StorageMetadataPtr & /* metadata_snapshot */, const PartitionCommands & /* commands */, ContextPtr /* context */) { diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index ed17a3af972..519d4ad6517 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -55,8 +54,7 @@ using QueryPlanPtr = std::unique_ptr<QueryPlan>; class SinkToStorage; using SinkToStoragePtr = std::shared_ptr<SinkToStorage>; -class QueryPipelineBuilder; -using QueryPipelineBuilderPtr = std::unique_ptr<QueryPipelineBuilder>; +class QueryPipeline; class IStoragePolicy; using StoragePolicyPtr = std::shared_ptr<IStoragePolicy>; @@ -318,15 +316,13 @@ public: ContextPtr /*context*/, QueryProcessingStage::Enum & /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/) - { - throw Exception("Method watch is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED); - } + unsigned /*num_streams*/); /// Returns true if FINAL modifier must be added to SELECT query depending on required columns. /// It's needed for ReplacingMergeTree wrappers such as MaterializedMySQL and MaterializedPostgreSQL virtual bool needRewriteQueryWithFinal(const Names & /*column_names*/) const { return false; } +private: /** Read a set of columns from the table. * Accepts a list of columns to read, as well as a description of the query, * from which information can be extracted about how to retrieve data @@ -356,6 +352,7 @@ size_t /*max_block_size*/, unsigned /*num_streams*/); +public: /// Other version of read which adds reading step to query plan. /// Default implementation creates ReadFromStorageStep and uses usual read. virtual void read( @@ -391,12 +388,9 @@ * * Returns a query pipeline if distributed writing is possible, and an empty optional otherwise. */ - virtual QueryPipelineBuilderPtr distributedWrite( + virtual std::optional<QueryPipeline> distributedWrite( const ASTInsertQuery & /*query*/, - ContextPtr /*context*/) - { - return nullptr; - } + ContextPtr /*context*/); /** Delete the table data. Called before deleting the directory with the data. * The method can be called only after detaching table from Context (when no queries are performed with table). @@ -635,6 +629,16 @@ public: return getStorageSnapshot(metadata_snapshot, query_context); } + /// A helper to implement read() + static void readFromPipe( + QueryPlan & query_plan, + Pipe pipe, + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + std::string storage_name); + private: /// Lock required for alter queries (lockForAlter). /// Allows to execute only one simultaneous alter query.
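For illustration, a minimal sketch (not part of the diff) of how a storage can implement the new plan-based read() on top of the readFromPipe() helper above; StorageExample and buildPipe are hypothetical names:

void StorageExample::read(
    QueryPlan & query_plan,
    const Names & column_names,
    const StorageSnapshotPtr & storage_snapshot,
    SelectQueryInfo & query_info,
    ContextPtr context,
    QueryProcessingStage::Enum /*processed_stage*/,
    size_t max_block_size,
    unsigned /*num_streams*/)
{
    /// Build a Pipe from ISource-based processors, as before.
    Pipe pipe = buildPipe(column_names, storage_snapshot, max_block_size);

    /// The helper wraps the pipe into a ReadFromStorageStep and forwards
    /// query_info.storage_limits, so limits and quotas are enforced through
    /// the pipeline-level ReadProgressCallback instead of per source.
    readFromPipe(query_plan, std::move(pipe), column_names, storage_snapshot, query_info, context, getName());
}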
diff --git a/src/Storages/Kafka/KafkaSource.cpp b/src/Storages/Kafka/KafkaSource.cpp index c00e2d0020c..38b404fe79e 100644 --- a/src/Storages/Kafka/KafkaSource.cpp +++ b/src/Storages/Kafka/KafkaSource.cpp @@ -35,7 +35,7 @@ KafkaSource::KafkaSource( Poco::Logger * log_, size_t max_block_size_, bool commit_in_suffix_) - : SourceWithProgress(storage_snapshot_->getSampleBlockForColumns(columns)) + : ISource(storage_snapshot_->getSampleBlockForColumns(columns)) , storage(storage_) , storage_snapshot(storage_snapshot_) , context(context_) @@ -60,6 +60,19 @@ KafkaSource::~KafkaSource() storage.pushReadBuffer(buffer); } +bool KafkaSource::checkTimeLimit() const +{ + if (max_execution_time != 0) + { + auto elapsed_ns = total_stopwatch.elapsed(); + + if (elapsed_ns > static_cast(max_execution_time.totalMicroseconds()) * 1000) + return false; + } + + return true; +} + Chunk KafkaSource::generateImpl() { if (!buffer) diff --git a/src/Storages/Kafka/KafkaSource.h b/src/Storages/Kafka/KafkaSource.h index 59b6d370b71..94e4224d8df 100644 --- a/src/Storages/Kafka/KafkaSource.h +++ b/src/Storages/Kafka/KafkaSource.h @@ -1,9 +1,10 @@ #pragma once -#include +#include #include #include +#include namespace Poco @@ -13,7 +14,7 @@ namespace Poco namespace DB { -class KafkaSource : public SourceWithProgress +class KafkaSource : public ISource { public: KafkaSource( @@ -33,6 +34,8 @@ public: void commit(); bool isStalled() const { return !buffer || buffer->isStalled(); } + void setTimeLimit(Poco::Timespan max_execution_time_) { max_execution_time = max_execution_time_; } + private: StorageKafka & storage; StorageSnapshotPtr storage_snapshot; @@ -50,6 +53,11 @@ private: const Block virtual_header; const HandleKafkaErrorMode handle_error_mode; + Poco::Timespan max_execution_time = 0; + Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; + + bool checkTimeLimit() const; + Chunk generateImpl(); }; diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 2409f8dcb6e..d03db010a1f 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include #include @@ -679,12 +681,11 @@ bool StorageKafka::streamToViews() // Limit read batch to maximum block size to allow DDL StreamLocalLimits limits; - limits.speed_limits.max_execution_time = kafka_settings->kafka_flush_interval_ms.changed - ? kafka_settings->kafka_flush_interval_ms - : getContext()->getSettingsRef().stream_flush_interval_ms; + Poco::Timespan max_execution_time = kafka_settings->kafka_flush_interval_ms.changed + ? kafka_settings->kafka_flush_interval_ms + : getContext()->getSettingsRef().stream_flush_interval_ms; - limits.timeout_overflow_mode = OverflowMode::BREAK; - source->setLimits(limits); + source->setTimeLimit(max_execution_time); } auto pipe = Pipe::unitePipes(std::move(pipes)); diff --git a/src/Storages/LiveView/LiveViewEventsSource.h b/src/Storages/LiveView/LiveViewEventsSource.h index 1f9f8bfb785..de10a98e1a2 100644 --- a/src/Storages/LiveView/LiveViewEventsSource.h +++ b/src/Storages/LiveView/LiveViewEventsSource.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include #include -#include +#include #include @@ -27,7 +27,7 @@ namespace DB * Keeps stream alive by outputting blocks with no rows * based on period specified by the heartbeat interval. 
*/ -class LiveViewEventsSource : public SourceWithProgress +class LiveViewEventsSource : public ISource { using NonBlockingResult = std::pair; @@ -41,7 +41,7 @@ public: std::shared_ptr active_ptr_, const bool has_limit_, const UInt64 limit_, const UInt64 heartbeat_interval_sec_) - : SourceWithProgress({ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "version")}), + : ISource({ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "version")}), storage(std::move(storage_)), blocks_ptr(std::move(blocks_ptr_)), blocks_metadata_ptr(std::move(blocks_metadata_ptr_)), active_ptr(active_ptr_), has_limit(has_limit_), diff --git a/src/Storages/LiveView/LiveViewSource.h b/src/Storages/LiveView/LiveViewSource.h index 8d63890f603..f8b428fc04d 100644 --- a/src/Storages/LiveView/LiveViewSource.h +++ b/src/Storages/LiveView/LiveViewSource.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -11,7 +11,7 @@ namespace DB * Keeps stream alive by outputting blocks with no rows * based on period specified by the heartbeat interval. */ -class LiveViewSource : public SourceWithProgress +class LiveViewSource : public ISource { using NonBlockingResult = std::pair; @@ -23,7 +23,7 @@ public: std::shared_ptr active_ptr_, const bool has_limit_, const UInt64 limit_, const UInt64 heartbeat_interval_sec_) - : SourceWithProgress(storage_->getHeader()) + : ISource(storage_->getHeader()) , storage(std::move(storage_)), blocks_ptr(std::move(blocks_ptr_)), blocks_metadata_ptr(std::move(blocks_metadata_ptr_)), active_ptr(active_ptr_), diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index adda446c5a2..75b2f981389 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -22,6 +22,7 @@ limitations under the License. 
*/ #include #include #include +#include #include #include #include diff --git a/src/Storages/MeiliSearch/SourceMeiliSearch.cpp b/src/Storages/MeiliSearch/SourceMeiliSearch.cpp index bb53ff425ab..8e37e469e96 100644 --- a/src/Storages/MeiliSearch/SourceMeiliSearch.cpp +++ b/src/Storages/MeiliSearch/SourceMeiliSearch.cpp @@ -67,7 +67,7 @@ MeiliSearchSource::MeiliSearchSource( UInt64 max_block_size_, QueryRoute route_, std::unordered_map query_params_) - : SourceWithProgress(sample_block.cloneEmpty()) + : ISource(sample_block.cloneEmpty()) , connection(config) , max_block_size{max_block_size_} , route{route_} diff --git a/src/Storages/MeiliSearch/SourceMeiliSearch.h b/src/Storages/MeiliSearch/SourceMeiliSearch.h index 695982068b6..6ab24e47635 100644 --- a/src/Storages/MeiliSearch/SourceMeiliSearch.h +++ b/src/Storages/MeiliSearch/SourceMeiliSearch.h @@ -5,13 +5,13 @@ #include #include #include -#include +#include #include #include namespace DB { -class MeiliSearchSource final : public SourceWithProgress +class MeiliSearchSource final : public ISource { public: enum QueryRoute diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 8732a3ed3e5..706209e3521 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -422,10 +422,6 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const auto column_part_source = std::make_shared( *global_ctx->data, global_ctx->storage_snapshot, global_ctx->future_part->parts[part_num], column_names, ctx->read_with_direct_io, true); - /// Dereference unique_ptr - column_part_source->setProgressCallback( - MergeProgressCallback(global_ctx->merge_list_element_ptr, global_ctx->watch_prev_elapsed, *global_ctx->column_progress)); - pipes.emplace_back(std::move(column_part_source)); } @@ -436,6 +432,16 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const pipe.addTransform(std::move(transform)); ctx->column_parts_pipeline = QueryPipeline(std::move(pipe)); + + /// Dereference unique_ptr + ctx->column_parts_pipeline.setProgressCallback(MergeProgressCallback( + global_ctx->merge_list_element_ptr, + global_ctx->watch_prev_elapsed, + *global_ctx->column_progress)); + + /// Is calculated inside MergeProgressCallback. + ctx->column_parts_pipeline.disableProfileEventUpdate(); + ctx->executor = std::make_unique(ctx->column_parts_pipeline); ctx->column_to = std::make_unique( @@ -757,10 +763,6 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() auto input = std::make_unique( *global_ctx->data, global_ctx->storage_snapshot, part, global_ctx->merging_column_names, ctx->read_with_direct_io, true); - /// Dereference unique_ptr and pass horizontal_stage_progress by reference - input->setProgressCallback( - MergeProgressCallback(global_ctx->merge_list_element_ptr, global_ctx->watch_prev_elapsed, *global_ctx->horizontal_stage_progress)); - Pipe pipe(std::move(input)); if (global_ctx->metadata_snapshot->hasSortingKey()) @@ -860,6 +862,11 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() } global_ctx->merged_pipeline = QueryPipeline(std::move(res_pipe)); + /// Dereference unique_ptr and pass horizontal_stage_progress by reference + global_ctx->merged_pipeline.setProgressCallback(MergeProgressCallback(global_ctx->merge_list_element_ptr, global_ctx->watch_prev_elapsed, *global_ctx->horizontal_stage_progress)); + /// Is calculated inside MergeProgressCallback. 
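+ /// Progress is now attached to the pipeline as a whole rather than to each
+ /// source processor, since sources no longer own progress callbacks.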
+ global_ctx->merged_pipeline.disableProfileEventUpdate(); + global_ctx->merging_executor = std::make_unique(global_ctx->merged_pipeline); } diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index efab102bfe6..0a7675b2268 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index b9158bde6f1..ca5e7393666 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -39,7 +39,7 @@ MergeTreeBaseSelectProcessor::MergeTreeBaseSelectProcessor( bool use_uncompressed_cache_, const Names & virt_column_names_, std::optional extension_) - : SourceWithProgress(transformHeader(std::move(header), prewhere_info_, storage_.getPartitionValueType(), virt_column_names_)) + : ISource(transformHeader(std::move(header), prewhere_info_, storage_.getPartitionValueType(), virt_column_names_)) , storage(storage_) , storage_snapshot(storage_snapshot_) , prewhere_info(prewhere_info_) @@ -294,7 +294,7 @@ Chunk MergeTreeBaseSelectProcessor::readFromPartImpl() UInt64 num_filtered_rows = read_result.numReadRows() - read_result.num_rows; - progress({ read_result.numReadRows(), read_result.numBytesRead() }); + progress(read_result.numReadRows(), read_result.numBytesRead()); if (task->size_predictor) { diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h index ae34fb66eba..ac01221ff26 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h @@ -5,7 +5,7 @@ #include #include -#include +#include namespace DB @@ -29,7 +29,7 @@ struct ParallelReadingExtension }; /// Base class for MergeTreeThreadSelectProcessor and MergeTreeSelectProcessor -class MergeTreeBaseSelectProcessor : public SourceWithProgress +class MergeTreeBaseSelectProcessor : public ISource { public: MergeTreeBaseSelectProcessor( diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 1866d65ccfd..f23a6e7834e 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -170,6 +171,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( Pipes pipes; Pipe projection_pipe; Pipe ordinary_pipe; + QueryPlanResourceHolder resources; auto projection_plan = std::make_unique(); if (query_info.projection->desc->is_minmax_count_projection) @@ -216,8 +218,9 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( projection_plan->addStep(std::move(expression_before_aggregation)); } - projection_pipe = projection_plan->convertToPipe( + auto builder = projection_plan->buildQueryPipeline( QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); + projection_pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources); } if (query_info.projection->merge_tree_normal_select_result_ptr) @@ -246,8 +249,9 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( ordinary_query_plan.addStep(std::move(where_step)); } - ordinary_pipe = ordinary_query_plan.convertToPipe( + auto builder = ordinary_query_plan.buildQueryPipeline( 
QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); + ordinary_pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources); } if (query_info.projection->desc->type == ProjectionDescription::Type::Aggregate) @@ -372,7 +376,8 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( pipe.resize(1); auto step = std::make_unique( std::move(pipe), - fmt::format("MergeTree(with {} projection {})", query_info.projection->desc->type, query_info.projection->desc->name)); + fmt::format("MergeTree(with {} projection {})", query_info.projection->desc->type, query_info.projection->desc->name), + query_info.storage_limits); plan->addStep(std::move(step)); return plan; } diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index e0647aa1ed2..e01e624a9b7 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -12,6 +12,7 @@ namespace DB { class KeyCondition; +struct QueryIdHolder; using PartitionIdToMaxBlock = std::unordered_map; diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 62fa67235cb..22ef2db2e15 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -17,7 +17,7 @@ MergeTreeSequentialSource::MergeTreeSequentialSource( bool read_with_direct_io_, bool take_column_types_from_storage, bool quiet) - : SourceWithProgress(storage_snapshot_->getSampleBlockForColumns(columns_to_read_)) + : ISource(storage_snapshot_->getSampleBlockForColumns(columns_to_read_)) , storage(storage_) , storage_snapshot(storage_snapshot_) , data_part(std::move(data_part_)) diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.h b/src/Storages/MergeTree/MergeTreeSequentialSource.h index 962b2035b16..a3e4f5fa856 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.h +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.h @@ -1,5 +1,5 @@ #pragma once -#include +#include #include #include #include @@ -9,7 +9,7 @@ namespace DB { /// Lightweight (in terms of logic) stream for reading single part from MergeTree -class MergeTreeSequentialSource : public SourceWithProgress +class MergeTreeSequentialSource : public ISource { public: MergeTreeSequentialSource( diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index fb8f4ba0518..5df0f6eab68 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -312,7 +312,7 @@ static std::vector getProjectionsForNewDataPart( /// Return set of indices which should be recalculated during mutation also /// wraps input stream into additional expression stream static std::set getIndicesToRecalculate( - QueryPipeline & pipeline, + QueryPipelineBuilder & builder, const NameSet & updated_columns, const StorageMetadataPtr & metadata_snapshot, ContextPtr context, @@ -364,9 +364,9 @@ static std::set getIndicesToRecalculate( } } - if (!indices_to_recalc.empty() && pipeline.initialized()) + if (!indices_to_recalc.empty() && builder.initialized()) { - auto indices_recalc_syntax = TreeRewriter(context).analyze(indices_recalc_expr_list, pipeline.getHeader().getNamesAndTypesList()); + auto indices_recalc_syntax = TreeRewriter(context).analyze(indices_recalc_expr_list, builder.getHeader().getNamesAndTypesList()); auto indices_recalc_expr = ExpressionAnalyzer( indices_recalc_expr_list, 
indices_recalc_syntax, context).getActions(false); @@ -376,11 +376,8 @@ static std::set getIndicesToRecalculate( /// MutationsInterpreter which knows about skip indices and stream 'in' already has /// all required columns. /// TODO move this logic to single place. - QueryPipelineBuilder builder; - builder.init(std::move(pipeline)); builder.addTransform(std::make_shared(builder.getHeader(), indices_recalc_expr)); builder.addTransform(std::make_shared(builder.getHeader())); - pipeline = QueryPipelineBuilder::getPipeline(std::move(builder)); } return indices_to_recalc; } @@ -636,8 +633,10 @@ struct MutationContext std::unique_ptr num_mutations; + QueryPipelineBuilder mutating_pipeline_builder; QueryPipeline mutating_pipeline; // in std::unique_ptr mutating_executor; + ProgressCallback progress_callback; Block updated_header; std::unique_ptr interpreter; @@ -1078,11 +1077,10 @@ private: auto skip_part_indices = MutationHelpers::getIndicesForNewDataPart(ctx->metadata_snapshot->getSecondaryIndices(), ctx->for_file_renames); ctx->projections_to_build = MutationHelpers::getProjectionsForNewDataPart(ctx->metadata_snapshot->getProjections(), ctx->for_file_renames); - if (!ctx->mutating_pipeline.initialized()) + if (!ctx->mutating_pipeline_builder.initialized()) throw Exception("Cannot mutate part columns with uninitialized mutations stream. It's a bug", ErrorCodes::LOGICAL_ERROR); - QueryPipelineBuilder builder; - builder.init(std::move(ctx->mutating_pipeline)); + QueryPipelineBuilder builder(std::move(ctx->mutating_pipeline_builder)); if (ctx->metadata_snapshot->hasPrimaryKey() || ctx->metadata_snapshot->hasSecondaryIndices()) { @@ -1109,6 +1107,9 @@ private: ctx->txn); ctx->mutating_pipeline = QueryPipelineBuilder::getPipeline(std::move(builder)); + ctx->mutating_pipeline.setProgressCallback(ctx->progress_callback); + /// Is calculated inside MergeProgressCallback. + ctx->mutating_pipeline.disableProfileEventUpdate(); ctx->mutating_executor = std::make_unique(ctx->mutating_pipeline); part_merger_writer_task = std::make_unique(ctx); @@ -1260,10 +1261,9 @@ private: ctx->compression_codec = ctx->source_part->default_codec; - if (ctx->mutating_pipeline.initialized()) + if (ctx->mutating_pipeline_builder.initialized()) { - QueryPipelineBuilder builder; - builder.init(std::move(ctx->mutating_pipeline)); + QueryPipelineBuilder builder(std::move(ctx->mutating_pipeline_builder)); if (ctx->execute_ttl_type == ExecuteTTLType::NORMAL) builder.addTransform(std::make_shared(builder.getHeader(), *ctx->data, ctx->metadata_snapshot, ctx->new_data_part, ctx->time_of_mutation, true)); @@ -1283,6 +1283,9 @@ private: ); ctx->mutating_pipeline = QueryPipelineBuilder::getPipeline(std::move(builder)); + ctx->mutating_pipeline.setProgressCallback(ctx->progress_callback); + /// Is calculated inside MergeProgressCallback. 
+ ctx->mutating_pipeline.disableProfileEventUpdate(); ctx->mutating_executor = std::make_unique(ctx->mutating_pipeline); ctx->projections_to_build = std::vector{ctx->projections_to_recalc.begin(), ctx->projections_to_recalc.end()}; @@ -1450,9 +1453,9 @@ bool MutateTask::prepare() ctx->materialized_indices = ctx->interpreter->grabMaterializedIndices(); ctx->materialized_projections = ctx->interpreter->grabMaterializedProjections(); ctx->mutation_kind = ctx->interpreter->getMutationKind(); - ctx->mutating_pipeline = ctx->interpreter->execute(); + ctx->mutating_pipeline_builder = ctx->interpreter->execute(); ctx->updated_header = ctx->interpreter->getUpdatedHeader(); - ctx->mutating_pipeline.setProgressCallback(MergeProgressCallback((*ctx->mutate_entry)->ptr(), ctx->watch_prev_elapsed, *ctx->stage_progress)); + ctx->progress_callback = MergeProgressCallback((*ctx->mutate_entry)->ptr(), ctx->watch_prev_elapsed, *ctx->stage_progress); } ctx->single_disk_volume = std::make_shared("volume_" + ctx->future_part->name, ctx->space_reservation->getDisk(), 0); @@ -1487,7 +1490,7 @@ bool MutateTask::prepare() ctx->need_sync = needSyncPart(ctx->source_part->rows_count, ctx->source_part->getBytesOnDisk(), *data_settings); ctx->execute_ttl_type = ExecuteTTLType::NONE; - if (ctx->mutating_pipeline.initialized()) + if (ctx->mutating_pipeline_builder.initialized()) ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies()); /// All columns from part are changed and may be some more that were missing before in part @@ -1504,7 +1507,7 @@ bool MutateTask::prepare() ctx->updated_columns.emplace(name_type.name); ctx->indices_to_recalc = MutationHelpers::getIndicesToRecalculate( - ctx->mutating_pipeline, ctx->updated_columns, ctx->metadata_snapshot, ctx->context, ctx->materialized_indices, ctx->source_part); + ctx->mutating_pipeline_builder, ctx->updated_columns, ctx->metadata_snapshot, ctx->context, ctx->materialized_indices, ctx->source_part); ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate( ctx->updated_columns, ctx->metadata_snapshot, ctx->materialized_projections, ctx->source_part); diff --git a/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h index a9bcb353b84..b9b5874b3e6 100644 --- a/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h +++ b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h @@ -51,7 +51,8 @@ public: return std::make_shared(*this, metadata_snapshot, object_columns); } - Pipe read( + void read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, @@ -60,7 +61,7 @@ public: size_t max_block_size, unsigned num_streams) override { - QueryPlan query_plan = std::move(*MergeTreeDataSelectExecutor(storage) + query_plan = std::move(*MergeTreeDataSelectExecutor(storage) .readFromParts( parts, column_names, @@ -71,9 +72,6 @@ public: num_streams, nullptr, analysis_result_ptr)); - - return query_plan.convertToPipe( - QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); } bool supportsPrewhere() const override { return true; } diff --git a/src/Storages/PartitionedSink.cpp b/src/Storages/PartitionedSink.cpp index fca69916f7f..027e4f1f306 100644 --- a/src/Storages/PartitionedSink.cpp +++ b/src/Storages/PartitionedSink.cpp @@ -13,7 +13,7 @@ #include #include -#include +#include #include diff --git 
a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp index 58f775daf20..61a40c374eb 100644 --- a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index fa90295bcd6..c9c9877cc93 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -6,6 +6,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index a440584d353..190ffabe2c1 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -19,6 +19,8 @@ #include #include +#include +#include #include #include #include @@ -269,7 +271,8 @@ bool StorageMaterializedPostgreSQL::needRewriteQueryWithFinal(const Names & colu } -Pipe StorageMaterializedPostgreSQL::read( +void StorageMaterializedPostgreSQL::read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & /*storage_snapshot*/, SelectQueryInfo & query_info, @@ -280,14 +283,12 @@ Pipe StorageMaterializedPostgreSQL::read( { auto nested_table = getNested(); - auto pipe = readFinalFromNestedStorage(nested_table, column_names, + readFinalFromNestedStorage(query_plan, nested_table, column_names, query_info, context_, processed_stage, max_block_size, num_streams); auto lock = lockForShare(context_->getCurrentQueryId(), context_->getSettingsRef().lock_acquire_timeout); - pipe.addTableLock(lock); - pipe.addStorageHolder(shared_from_this()); - - return pipe; + query_plan.addTableLock(lock); + query_plan.addStorageHolder(shared_from_this()); } diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h index 6423c6d773f..bb3836a8853 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h @@ -90,7 +90,8 @@ public: bool needRewriteQueryWithFinal(const Names & column_names) const override; - Pipe read( + void read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, diff --git a/src/Storages/ProjectionsDescription.cpp b/src/Storages/ProjectionsDescription.cpp index ad35c5d420b..0f24aecf72a 100644 --- a/src/Storages/ProjectionsDescription.cpp +++ b/src/Storages/ProjectionsDescription.cpp @@ -14,9 +14,11 @@ #include #include #include +#include #include #include #include +#include #include diff --git a/src/Storages/RabbitMQ/RabbitMQSource.cpp b/src/Storages/RabbitMQ/RabbitMQSource.cpp index 4946a3537f9..71d80f0a632 100644 --- a/src/Storages/RabbitMQ/RabbitMQSource.cpp +++ b/src/Storages/RabbitMQ/RabbitMQSource.cpp @@ -52,7 +52,7 @@ RabbitMQSource::RabbitMQSource( const Names & columns, size_t max_block_size_, bool ack_in_suffix_) - : SourceWithProgress(getSampleBlock(headers.first, headers.second)) + : ISource(getSampleBlock(headers.first, headers.second)) , storage(storage_) , storage_snapshot(storage_snapshot_) , context(context_) @@ -106,6 +106,19 @@ Chunk RabbitMQSource::generate() return chunk; } +bool 
RabbitMQSource::checkTimeLimit() const +{ + if (max_execution_time != 0) + { + auto elapsed_ns = total_stopwatch.elapsed(); + + if (elapsed_ns > static_cast(max_execution_time.totalMicroseconds()) * 1000) + return false; + } + + return true; +} + Chunk RabbitMQSource::generateImpl() { if (!buffer) diff --git a/src/Storages/RabbitMQ/RabbitMQSource.h b/src/Storages/RabbitMQ/RabbitMQSource.h index ff46408db42..bd2882d1938 100644 --- a/src/Storages/RabbitMQ/RabbitMQSource.h +++ b/src/Storages/RabbitMQ/RabbitMQSource.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include @@ -8,7 +8,7 @@ namespace DB { -class RabbitMQSource : public SourceWithProgress +class RabbitMQSource : public ISource { public: @@ -32,6 +32,9 @@ public: void updateChannel(); bool sendAck(); + + void setTimeLimit(Poco::Timespan max_execution_time_) { max_execution_time = max_execution_time_; } + private: StorageRabbitMQ & storage; StorageSnapshotPtr storage_snapshot; @@ -46,6 +49,11 @@ private: ConsumerBufferPtr buffer; + Poco::Timespan max_execution_time = 0; + Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; + + bool checkTimeLimit() const; + RabbitMQSource( StorageRabbitMQ & storage_, const StorageSnapshotPtr & storage_snapshot_, diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 6d0a3f4ab6c..58f08c48c68 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -3,12 +3,16 @@ #include #include #include +#include #include #include #include #include #include #include +#include +#include +#include #include #include #include @@ -648,10 +652,11 @@ void StorageRabbitMQ::unbindExchange() } -Pipe StorageRabbitMQ::read( +void StorageRabbitMQ::read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & /* query_info */, + SelectQueryInfo & query_info, ContextPtr local_context, QueryProcessingStage::Enum /* processed_stage */, size_t /* max_block_size */, @@ -661,7 +666,11 @@ Pipe StorageRabbitMQ::read( throw Exception("RabbitMQ setup not finished. Connection might be lost", ErrorCodes::CANNOT_CONNECT_RABBITMQ); if (num_created_consumers == 0) - return {}; + { + auto header = storage_snapshot->getSampleBlockForColumns(column_names); + InterpreterSelectQuery::addEmptySourceToQueryPlan(query_plan, header, query_info, local_context); + return; + } if (!local_context->getSettingsRef().stream_like_engine_allow_direct_select) throw Exception(ErrorCodes::QUERY_NOT_ALLOWED, "Direct select is not allowed. 
To enable use setting `stream_like_engine_allow_direct_select`"); @@ -708,9 +717,19 @@ Pipe StorageRabbitMQ::read( startLoop(); LOG_DEBUG(log, "Starting reading {} streams", pipes.size()); - auto united_pipe = Pipe::unitePipes(std::move(pipes)); - united_pipe.addInterpreterContext(modified_context); - return united_pipe; + auto pipe = Pipe::unitePipes(std::move(pipes)); + + if (pipe.empty()) + { + auto header = storage_snapshot->getSampleBlockForColumns(column_names); + InterpreterSelectQuery::addEmptySourceToQueryPlan(query_plan, header, query_info, local_context); + } + else + { + auto read_step = std::make_unique(std::move(pipe), getName(), query_info.storage_limits); + query_plan.addStep(std::move(read_step)); + query_plan.addInterpreterContext(modified_context); + } } @@ -1048,13 +1067,11 @@ bool StorageRabbitMQ::streamToViews() // Limit read batch to maximum block size to allow DDL StreamLocalLimits limits; - limits.speed_limits.max_execution_time = rabbitmq_settings->rabbitmq_flush_interval_ms.changed - ? rabbitmq_settings->rabbitmq_flush_interval_ms - : getContext()->getSettingsRef().stream_flush_interval_ms; + Poco::Timespan max_execution_time = rabbitmq_settings->rabbitmq_flush_interval_ms.changed + ? rabbitmq_settings->rabbitmq_flush_interval_ms + : getContext()->getSettingsRef().stream_flush_interval_ms; - limits.timeout_overflow_mode = OverflowMode::BREAK; - - source->setLimits(limits); + source->setTimeLimit(max_execution_time); } block_io.pipeline.complete(Pipe::unitePipes(std::move(pipes))); diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.h b/src/Storages/RabbitMQ/StorageRabbitMQ.h index 9190c374f41..455b2fe8f09 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.h +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.h @@ -42,7 +42,8 @@ public: void checkTableCanBeDropped() const override { drop_table = true; } /// Always return virtual columns in addition to required columns - Pipe read( + void read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.cpp b/src/Storages/ReadFinalForExternalReplicaStorage.cpp index 9b9cc44655e..3ec7a074fd4 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.cpp +++ b/src/Storages/ReadFinalForExternalReplicaStorage.cpp @@ -10,7 +10,10 @@ #include #include #include +#include +#include #include +#include namespace DB @@ -24,7 +27,8 @@ bool needRewriteQueryWithFinalForStorage(const Names & column_names, const Stora return std::find(column_names.begin(), column_names.end(), version_column.name) == column_names.end(); } -Pipe readFinalFromNestedStorage( +void readFinalFromNestedStorage( + QueryPlan & query_plan, StoragePtr nested_storage, const Names & column_names, SelectQueryInfo & query_info, @@ -55,23 +59,32 @@ Pipe readFinalFromNestedStorage( } auto nested_snapshot = nested_storage->getStorageSnapshot(nested_metadata, context); - Pipe pipe = nested_storage->read(require_columns_name, nested_snapshot, query_info, context, processed_stage, max_block_size, num_streams); - pipe.addTableLock(lock); - pipe.addStorageHolder(nested_storage); + nested_storage->read(query_plan, require_columns_name, nested_snapshot, query_info, context, processed_stage, max_block_size, num_streams); - if (!expressions->children.empty() && !pipe.empty()) + if (!query_plan.isInitialized()) { - Block pipe_header = pipe.getHeader(); - auto syntax = TreeRewriter(context).analyze(expressions, pipe_header.getNamesAndTypesList()); - 
ExpressionActionsPtr expression_actions = ExpressionAnalyzer(expressions, syntax, context).getActions(true /* add_aliases */, false /* project_result */); - - pipe.addSimpleTransform([&](const Block & header) - { - return std::make_shared(header, expression_actions, filter_column_name, false); - }); + InterpreterSelectQuery::addEmptySourceToQueryPlan(query_plan, nested_header, query_info, context); + return; } - return pipe; + query_plan.addTableLock(lock); + query_plan.addStorageHolder(nested_storage); + + if (!expressions->children.empty()) + { + const auto & header = query_plan.getCurrentDataStream().header; + auto syntax = TreeRewriter(context).analyze(expressions, header.getNamesAndTypesList()); + auto actions = ExpressionAnalyzer(expressions, syntax, context).getActionsDAG(true /* add_aliases */, false /* project_result */); + + auto step = std::make_unique( + query_plan.getCurrentDataStream(), + actions, + filter_column_name, + false); + + step->setStepDescription("Filter columns"); + query_plan.addStep(std::move(step)); + } } } diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.h b/src/Storages/ReadFinalForExternalReplicaStorage.h index f21b396513f..b922faa7361 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.h +++ b/src/Storages/ReadFinalForExternalReplicaStorage.h @@ -13,7 +13,8 @@ namespace DB bool needRewriteQueryWithFinalForStorage(const Names & column_names, const StoragePtr & storage); -Pipe readFinalFromNestedStorage( +void readFinalFromNestedStorage( + QueryPlan & query_plan, StoragePtr nested_storage, const Names & column_names, SelectQueryInfo & query_info, diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp index 64746000363..81cb58e4f5e 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include @@ -177,7 +177,7 @@ static std::pair getFilterKeys( } -class EmbeddedRocksDBSource : public SourceWithProgress +class EmbeddedRocksDBSource : public ISource { public: EmbeddedRocksDBSource( @@ -187,7 +187,7 @@ public: FieldVector::const_iterator begin_, FieldVector::const_iterator end_, const size_t max_block_size_) - : SourceWithProgress(header) + : ISource(header) , storage(storage_) , primary_key_pos(header.getPositionByName(storage.getPrimaryKey())) , keys(keys_) @@ -203,7 +203,7 @@ public: const Block & header, std::unique_ptr iterator_, const size_t max_block_size_) - : SourceWithProgress(header) + : ISource(header) , storage(storage_) , primary_key_pos(header.getPositionByName(storage.getPrimaryKey())) , iterator(std::move(iterator_)) diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 80194c5573f..bdb4c392c48 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -141,6 +142,8 @@ struct SelectQueryInfoBase ASTPtr view_query; /// Optimized VIEW query ASTPtr original_query; /// Unmodified query for projection analysis + std::shared_ptr storage_limits; + /// Cluster for the query. ClusterPtr cluster; /// Optimized cluster for the query. 
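The `storage_limits` member added to SelectQueryInfo above is what lets the Pipe-returning `read()` overloads disappear: limits now travel with a plan step instead of being applied to each Pipe. A minimal sketch of the new-style override that the storages below converge on; `StorageFoo` and `makePipes()` are placeholders, the template argument of `make_unique` (stripped in the hunks) is presumably `ReadFromStorageStep`, and everything else follows the StorageRabbitMQ hunk:

    void StorageFoo::read(
        QueryPlan & query_plan,
        const Names & column_names,
        const StorageSnapshotPtr & storage_snapshot,
        SelectQueryInfo & query_info,
        ContextPtr local_context,
        QueryProcessingStage::Enum /*processed_stage*/,
        size_t /*max_block_size*/,
        unsigned /*num_streams*/)
    {
        Pipe pipe = makePipes();  /// storage-specific source construction, elided

        if (pipe.empty())
        {
            /// The plan must not stay uninitialized, so materialize an empty source.
            auto header = storage_snapshot->getSampleBlockForColumns(column_names);
            InterpreterSelectQuery::addEmptySourceToQueryPlan(query_plan, header, query_info, local_context);
            return;
        }

        /// Limits and quotas are no longer attached to the Pipe; the step carries
        /// query_info.storage_limits and the pipeline applies them during build.
        auto read_step = std::make_unique<ReadFromStorageStep>(std::move(pipe), getName(), query_info.storage_limits);
        query_plan.addStep(std::move(read_step));
    }

Callers that still need a Pipe go through the plan instead, as StorageMerge::createSources does below with `plan.buildQueryPipeline(...)` followed by `QueryPipelineBuilder::getPipe(std::move(*builder), resources)`.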
diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index f54775a8706..4c962f36e4f 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -26,9 +26,8 @@ #include #include #include -#include +#include #include -#include #include #include #include @@ -143,11 +142,11 @@ StorageBuffer::StorageBuffer( /// Reads from one buffer (from one block) under its mutex. -class BufferSource : public SourceWithProgress +class BufferSource : public ISource { public: BufferSource(const Names & column_names_, StorageBuffer::Buffer & buffer_, const StorageSnapshotPtr & storage_snapshot) - : SourceWithProgress(storage_snapshot->getSampleBlockForColumns(column_names_)) + : ISource(storage_snapshot->getSampleBlockForColumns(column_names_)) , column_names_and_types(storage_snapshot->getColumnsByNames( GetColumnsOptions(GetColumnsOptions::All).withSubcolumns(), column_names_)) , buffer(buffer_) {} @@ -209,23 +208,6 @@ QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage( return QueryProcessingStage::FetchColumns; } - -Pipe StorageBuffer::read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr local_context, - QueryProcessingStage::Enum processed_stage, - const size_t max_block_size, - const unsigned num_streams) -{ - QueryPlan plan; - read(plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe( - QueryPlanOptimizationSettings::fromContext(local_context), - BuildQueryPipelineSettings::fromContext(local_context)); -} - void StorageBuffer::read( QueryPlan & query_plan, const Names & column_names, @@ -334,21 +316,8 @@ void StorageBuffer::read( if (query_plan.isInitialized()) { - StreamLocalLimits limits; - SizeLimits leaf_limits; - - /// Add table lock for destination table. 
- auto adding_limits_and_quota = std::make_unique( - query_plan.getCurrentDataStream(), - destination, - std::move(destination_lock), - limits, - leaf_limits, - nullptr, - nullptr); - - adding_limits_and_quota->setStepDescription("Lock destination table for Buffer"); - query_plan.addStep(std::move(adding_limits_and_quota)); + query_plan.addStorageHolder(destination); + query_plan.addTableLock(std::move(destination_lock)); } } @@ -376,6 +345,7 @@ void StorageBuffer::read( auto interpreter = InterpreterSelectQuery( query_info.query, local_context, std::move(pipe_from_buffers), SelectQueryOptions(processed_stage).ignoreProjections()); + interpreter.addStorageLimits(*query_info.storage_limits); interpreter.buildQueryPlan(buffers_plan); } else @@ -406,6 +376,9 @@ void StorageBuffer::read( }); } + for (const auto & processor : pipe_from_buffers.getProcessors()) + processor->setStorageLimits(query_info.storage_limits); + auto read_from_buffers = std::make_unique(std::move(pipe_from_buffers)); read_from_buffers->setStepDescription("Read from buffers of Buffer table"); buffers_plan.addStep(std::move(read_from_buffers)); diff --git a/src/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h index 087588dd4dc..200b3fc1838 100644 --- a/src/Storages/StorageBuffer.h +++ b/src/Storages/StorageBuffer.h @@ -74,15 +74,6 @@ public: QueryProcessingStage::Enum getQueryProcessingStage(ContextPtr, QueryProcessingStage::Enum, const StorageSnapshotPtr &, SelectQueryInfo &) const override; - Pipe read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - unsigned num_streams) override; - void read( QueryPlan & query_plan, const Names & column_names, diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index a5f20c56c9d..1c785df9be4 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -647,22 +647,6 @@ StorageSnapshotPtr StorageDistributed::getStorageSnapshotForQuery( return std::make_shared(*this, metadata_snapshot, object_columns, std::move(snapshot_data)); } -Pipe StorageDistributed::read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr local_context, - QueryProcessingStage::Enum processed_stage, - const size_t max_block_size, - const unsigned num_streams) -{ - QueryPlan plan; - read(plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe( - QueryPlanOptimizationSettings::fromContext(local_context), - BuildQueryPipelineSettings::fromContext(local_context)); -} - void StorageDistributed::read( QueryPlan & query_plan, const Names &, @@ -777,8 +761,10 @@ SinkToStoragePtr StorageDistributed::write(const ASTPtr &, const StorageMetadata } -QueryPipelineBuilderPtr StorageDistributed::distributedWrite(const ASTInsertQuery & query, ContextPtr local_context) +std::optional StorageDistributed::distributedWrite(const ASTInsertQuery & query, ContextPtr local_context) { + QueryPipeline pipeline; + const Settings & settings = local_context->getSettingsRef(); if (settings.max_distributed_depth && local_context->getClientInfo().distributed_depth >= settings.max_distributed_depth) throw Exception("Maximum distributed depth exceeded", ErrorCodes::TOO_LARGE_DISTRIBUTED_DEPTH); @@ -843,7 +829,7 @@ QueryPipelineBuilderPtr 
StorageDistributed::distributedWrite(const ASTInsertQuer getClusterName(), dst_addresses.size()); } - return nullptr; + return {}; } if (settings.parallel_distributed_insert_select == PARALLEL_DISTRIBUTED_INSERT_SELECT_ALL) @@ -856,8 +842,6 @@ QueryPipelineBuilderPtr StorageDistributed::distributedWrite(const ASTInsertQuer const auto & cluster = getCluster(); const auto & shards_info = cluster->getShardsInfo(); - std::vector> pipelines; - String new_query_str; { WriteBufferFromOwnString buf; @@ -876,8 +860,7 @@ QueryPipelineBuilderPtr StorageDistributed::distributedWrite(const ASTInsertQuer if (shard_info.isLocal()) { InterpreterInsertQuery interpreter(new_query, query_context); - pipelines.emplace_back(std::make_unique()); - pipelines.back()->init(interpreter.execute().pipeline); + pipeline.addCompletedPipeline(interpreter.execute().pipeline); } else { @@ -890,16 +873,14 @@ QueryPipelineBuilderPtr StorageDistributed::distributedWrite(const ASTInsertQuer /// INSERT SELECT query returns empty block auto remote_query_executor = std::make_shared(shard_info.pool, std::move(connections), new_query_str, Block{}, query_context); - pipelines.emplace_back(std::make_unique()); - pipelines.back()->init(Pipe(std::make_shared(remote_query_executor, false, settings.async_socket_for_remote))); - pipelines.back()->setSinks([](const Block & header, QueryPipelineBuilder::StreamType) -> ProcessorPtr - { - return std::make_shared(header); - }); + QueryPipeline remote_pipeline(std::make_shared(remote_query_executor, false, settings.async_socket_for_remote)); + remote_pipeline.complete(std::make_shared(remote_query_executor->getHeader())); + + pipeline.addCompletedPipeline(std::move(remote_pipeline)); } } - return std::make_unique(QueryPipelineBuilder::unitePipelines(std::move(pipelines))); + return pipeline; } diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index 58fa197c940..7cb25ae46ab 100644 --- a/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -104,15 +104,6 @@ public: QueryProcessingStage::Enum getQueryProcessingStage(ContextPtr, QueryProcessingStage::Enum, const StorageSnapshotPtr &, SelectQueryInfo &) const override; - Pipe read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - unsigned num_streams) override; - void read( QueryPlan & query_plan, const Names & column_names, @@ -128,7 +119,7 @@ public: SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; - QueryPipelineBuilderPtr distributedWrite(const ASTInsertQuery & query, ContextPtr context) override; + std::optional distributedWrite(const ASTInsertQuery & query, ContextPtr context) override; /// Removes temporary data in local filesystem. 
void truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) override; diff --git a/src/Storages/StorageExecutable.cpp b/src/Storages/StorageExecutable.cpp index 9638e5186f9..e0cbdbe98af 100644 --- a/src/Storages/StorageExecutable.cpp +++ b/src/Storages/StorageExecutable.cpp @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -101,10 +102,11 @@ StorageExecutable::StorageExecutable( coordinator = std::make_unique(std::move(configuration)); } -Pipe StorageExecutable::read( - const Names & /*column_names*/, +void StorageExecutable::read( + QueryPlan & query_plan, + const Names & column_names, const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & /*query_info*/, + SelectQueryInfo & query_info, ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, @@ -134,12 +136,14 @@ Pipe StorageExecutable::read( user_scripts_path); Pipes inputs; + QueryPlanResourceHolder resources; inputs.reserve(input_queries.size()); for (auto & input_query : input_queries) { InterpreterSelectWithUnionQuery interpreter(input_query, context, {}); - inputs.emplace_back(QueryPipelineBuilder::getPipe(interpreter.buildQueryPipeline())); + auto builder = interpreter.buildQueryPipeline(); + inputs.emplace_back(QueryPipelineBuilder::getPipe(std::move(builder), resources)); } /// For executable pool we read data from input streams and convert it to single blocks streams. @@ -157,7 +161,9 @@ Pipe StorageExecutable::read( configuration.read_number_of_rows_from_process_output = true; } - return coordinator->createPipe(script_path, settings.script_arguments, std::move(inputs), std::move(sample_block), context, configuration); + auto pipe = coordinator->createPipe(script_path, settings.script_arguments, std::move(inputs), std::move(sample_block), context, configuration); + IStorage::readFromPipe(query_plan, std::move(pipe), column_names, storage_snapshot, query_info, context, getName()); + query_plan.addResources(std::move(resources)); } void registerStorageExecutable(StorageFactory & factory) diff --git a/src/Storages/StorageExecutable.h b/src/Storages/StorageExecutable.h index 51daeb83d4c..2638474082a 100644 --- a/src/Storages/StorageExecutable.h +++ b/src/Storages/StorageExecutable.h @@ -33,7 +33,8 @@ public: return "Executable"; } - Pipe read( + void read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & /*storage_snapshot*/, SelectQueryInfo & query_info, diff --git a/src/Storages/StorageExternalDistributed.cpp b/src/Storages/StorageExternalDistributed.cpp index 5b72e50512b..181cf0ca183 100644 --- a/src/Storages/StorageExternalDistributed.cpp +++ b/src/Storages/StorageExternalDistributed.cpp @@ -5,10 +5,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -16,6 +18,7 @@ #include #include #include +#include namespace DB @@ -170,7 +173,8 @@ StorageExternalDistributed::StorageExternalDistributed( } -Pipe StorageExternalDistributed::read( +void StorageExternalDistributed::read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, @@ -179,10 +183,12 @@ Pipe StorageExternalDistributed::read( size_t max_block_size, unsigned num_streams) { - Pipes pipes; + std::vector> plans; for (const auto & shard : shards) { - pipes.emplace_back(shard->read( + plans.emplace_back(std::make_unique()); + shard->read( + *plans.back(), column_names, storage_snapshot, query_info, @@ 
-190,10 +196,28 @@ Pipe StorageExternalDistributed::read( processed_stage, max_block_size, num_streams - )); + ); } - return Pipe::unitePipes(std::move(pipes)); + if (plans.empty()) + { + auto header = storage_snapshot->getSampleBlockForColumns(column_names); + InterpreterSelectQuery::addEmptySourceToQueryPlan(query_plan, header, query_info, context); + } + + if (plans.size() == 1) + { + query_plan = std::move(*plans.front()); + return; + } + + DataStreams input_streams; + input_streams.reserve(plans.size()); + for (auto & plan : plans) + input_streams.emplace_back(plan->getCurrentDataStream()); + + auto union_step = std::make_unique(std::move(input_streams)); + query_plan.unitePlans(std::move(union_step), std::move(plans)); } diff --git a/src/Storages/StorageExternalDistributed.h b/src/Storages/StorageExternalDistributed.h index aca5a743be7..1fb67e4e96f 100644 --- a/src/Storages/StorageExternalDistributed.h +++ b/src/Storages/StorageExternalDistributed.h @@ -47,7 +47,8 @@ public: std::string getName() const override { return "ExternalDistributed"; } - Pipe read( + void read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 194c1f4fd2d..d466096c8ba 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -36,12 +36,13 @@ #include #include #include -#include +#include #include #include #include #include #include +#include #include @@ -424,7 +425,7 @@ static std::chrono::seconds getLockTimeout(ContextPtr context) using StorageFilePtr = std::shared_ptr; -class StorageFileSource : public SourceWithProgress +class StorageFileSource : public ISource { public: struct FilesInfo @@ -481,7 +482,7 @@ public: FilesInfoPtr files_info_, ColumnsDescription columns_description_, std::unique_ptr read_buf_) - : SourceWithProgress(getBlockForSource(storage_, storage_snapshot_, columns_description_, files_info_)) + : ISource(getBlockForSource(storage_, storage_snapshot_, columns_description_, files_info_)) , storage(std::move(storage_)) , storage_snapshot(storage_snapshot_) , files_info(std::move(files_info_)) @@ -812,7 +813,9 @@ public: void onException() override { - write_buf->finalize(); + if (!writer) + return; + onFinish(); } void onFinish() override diff --git a/src/Storages/StorageGenerateRandom.cpp b/src/Storages/StorageGenerateRandom.cpp index ae452bbf146..fa0baea40cd 100644 --- a/src/Storages/StorageGenerateRandom.cpp +++ b/src/Storages/StorageGenerateRandom.cpp @@ -376,11 +376,11 @@ ColumnPtr fillColumnWithRandomData( } -class GenerateSource : public SourceWithProgress +class GenerateSource : public ISource { public: GenerateSource(UInt64 block_size_, UInt64 max_array_length_, UInt64 max_string_length_, UInt64 random_seed_, Block block_header_, ContextPtr context_) - : SourceWithProgress(Nested::flatten(prepareBlockToFill(block_header_))) + : ISource(Nested::flatten(prepareBlockToFill(block_header_))) , block_size(block_size_), max_array_length(max_array_length_), max_string_length(max_string_length_) , block_to_fill(std::move(block_header_)), rng(random_seed_), context(context_) {} diff --git a/src/Storages/StorageInput.cpp b/src/Storages/StorageInput.cpp index a21a14cc240..4729d0a5bf8 100644 --- a/src/Storages/StorageInput.cpp +++ b/src/Storages/StorageInput.cpp @@ -4,7 +4,7 @@ #include #include -#include +#include #include @@ -25,10 +25,10 @@ StorageInput::StorageInput(const StorageID & table_id, const 
ColumnsDescription } -class StorageInputSource : public SourceWithProgress, WithContext +class StorageInputSource : public ISource, WithContext { public: - StorageInputSource(ContextPtr context_, Block sample_block) : SourceWithProgress(std::move(sample_block)), WithContext(context_) {} + StorageInputSource(ContextPtr context_, Block sample_block) : ISource(std::move(sample_block)), WithContext(context_) {} Chunk generate() override { diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index 020887dff35..5e161fc2e6a 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -17,8 +17,9 @@ #include #include -#include +#include #include +#include #include #include /// toLower @@ -123,7 +124,7 @@ void StorageJoin::mutate(const MutationCommands & commands, ContextPtr context) { auto storage_ptr = DatabaseCatalog::instance().getTable(getStorageID(), context); auto interpreter = std::make_unique(storage_ptr, metadata_snapshot, commands, context, true); - auto pipeline = interpreter->execute(); + auto pipeline = QueryPipelineBuilder::getPipeline(interpreter->execute()); PullingPipelineExecutor executor(pipeline); Block block; @@ -377,11 +378,11 @@ size_t rawSize(const StringRef & t) return t.size; } -class JoinSource : public SourceWithProgress +class JoinSource : public ISource { public: JoinSource(HashJoinPtr join_, TableLockHolder lock_holder_, UInt64 max_block_size_, Block sample_block_) - : SourceWithProgress(sample_block_) + : ISource(sample_block_) , join(join_) , lock_holder(lock_holder_) , max_block_size(max_block_size_) diff --git a/src/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp index 87a93163281..7b18b5fc7c6 100644 --- a/src/Storages/StorageLog.cpp +++ b/src/Storages/StorageLog.cpp @@ -21,7 +21,7 @@ #include #include "StorageLogSettings.h" #include -#include +#include #include #include @@ -55,7 +55,7 @@ namespace ErrorCodes /// NOTE: The lock `StorageLog::rwlock` is NOT kept locked while reading, /// because we read ranges of data that do not change. 
-class LogSource final : public SourceWithProgress +class LogSource final : public ISource { public: static Block getHeader(const NamesAndTypesList & columns) @@ -77,7 +77,7 @@ public: const std::vector & file_sizes_, bool limited_by_file_sizes_, ReadSettings read_settings_) - : SourceWithProgress(getHeader(columns_)) + : ISource(getHeader(columns_)) , block_size(block_size_) , columns(columns_) , storage(storage_) diff --git a/src/Storages/StorageMaterializedMySQL.cpp b/src/Storages/StorageMaterializedMySQL.cpp index dbc0dd9ae92..a7e54960563 100644 --- a/src/Storages/StorageMaterializedMySQL.cpp +++ b/src/Storages/StorageMaterializedMySQL.cpp @@ -32,7 +32,8 @@ bool StorageMaterializedMySQL::needRewriteQueryWithFinal(const Names & column_na return needRewriteQueryWithFinalForStorage(column_names, nested_storage); } -Pipe StorageMaterializedMySQL::read( +void StorageMaterializedMySQL::read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & /*storage_snapshot*/, SelectQueryInfo & query_info, @@ -44,7 +45,7 @@ Pipe StorageMaterializedMySQL::read( if (const auto * db = typeid_cast(database)) db->rethrowExceptionIfNeeded(); - return readFinalFromNestedStorage(nested_storage, column_names, + readFinalFromNestedStorage(query_plan, nested_storage, column_names, query_info, context, processed_stage, max_block_size, num_streams); } diff --git a/src/Storages/StorageMaterializedMySQL.h b/src/Storages/StorageMaterializedMySQL.h index 5ce1e8d0a93..18375f3915e 100644 --- a/src/Storages/StorageMaterializedMySQL.h +++ b/src/Storages/StorageMaterializedMySQL.h @@ -23,8 +23,8 @@ public: bool needRewriteQueryWithFinal(const Names & column_names) const override; - Pipe read( - const Names & column_names, const StorageSnapshotPtr & metadata_snapshot, SelectQueryInfo & query_info, + void read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & metadata_snapshot, SelectQueryInfo & query_info, ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, unsigned num_streams) override; SinkToStoragePtr write(const ASTPtr &, const StorageMetadataPtr &, ContextPtr) override { throwNotAllowed(); } diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 99cc8a284b8..b32f77d825b 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -18,8 +18,8 @@ #include #include +#include #include -#include #include #include #include @@ -143,22 +143,6 @@ QueryProcessingStage::Enum StorageMaterializedView::getQueryProcessingStage( return getTargetTable()->getQueryProcessingStage(local_context, to_stage, getTargetTable()->getStorageSnapshot(target_metadata, local_context), query_info); } -Pipe StorageMaterializedView::read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr local_context, - QueryProcessingStage::Enum processed_stage, - const size_t max_block_size, - const unsigned num_streams) -{ - QueryPlan plan; - read(plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe( - QueryPlanOptimizationSettings::fromContext(local_context), - BuildQueryPipelineSettings::fromContext(local_context)); -} - void StorageMaterializedView::read( QueryPlan & query_plan, const Names & column_names, @@ -204,21 +188,8 @@ void StorageMaterializedView::read( query_plan.addStep(std::move(converting_step)); } - 
StreamLocalLimits limits; - SizeLimits leaf_limits; - - /// Add table lock for destination table. - auto adding_limits_and_quota = std::make_unique( - query_plan.getCurrentDataStream(), - storage, - std::move(lock), - limits, - leaf_limits, - nullptr, - nullptr); - - adding_limits_and_quota->setStepDescription("Lock destination table for MaterializedView"); - query_plan.addStep(std::move(adding_limits_and_quota)); + query_plan.addStorageHolder(storage); + query_plan.addTableLock(std::move(lock)); } } diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 001bf39f10f..e7c01297f67 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -83,15 +83,6 @@ public: ActionLock getActionLock(StorageActionBlockType type) override; - Pipe read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - unsigned num_streams) override; - void read( QueryPlan & query_plan, const Names & column_names, diff --git a/src/Storages/StorageMemory.cpp b/src/Storages/StorageMemory.cpp index 62b404ef5f5..1e032f78635 100644 --- a/src/Storages/StorageMemory.cpp +++ b/src/Storages/StorageMemory.cpp @@ -12,8 +12,9 @@ #include #include -#include +#include #include +#include #include #include #include @@ -40,7 +41,7 @@ namespace ErrorCodes } -class MemorySource : public SourceWithProgress +class MemorySource : public ISource { using InitializerFunc = std::function &)>; public: @@ -51,7 +52,7 @@ public: std::shared_ptr data_, std::shared_ptr> parallel_execution_index_, InitializerFunc initializer_func_ = {}) - : SourceWithProgress(storage_snapshot->getSampleBlockForColumns(column_names_)) + : ISource(storage_snapshot->getSampleBlockForColumns(column_names_)) , column_names_and_types(storage_snapshot->getColumnsByNames( GetColumnsOptions(GetColumnsOptions::All).withSubcolumns().withExtendedObjects(), column_names_)) , data(data_) @@ -315,7 +316,7 @@ void StorageMemory::mutate(const MutationCommands & commands, ContextPtr context new_context->setSetting("max_threads", 1); auto interpreter = std::make_unique(storage_ptr, metadata_snapshot, commands, new_context, true); - auto pipeline = interpreter->execute(); + auto pipeline = QueryPipelineBuilder::getPipeline(interpreter->execute()); PullingPipelineExecutor executor(pipeline); Blocks out; diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 95761ebacea..f6d7e8e7afd 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -22,6 +22,8 @@ #include #include #include +#include "Processors/QueryPlan/BuildQueryPipelineSettings.h" +#include "Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h" #include #include #include @@ -29,6 +31,8 @@ #include #include #include +#include +#include namespace DB @@ -234,7 +238,8 @@ SelectQueryInfo StorageMerge::getModifiedQueryInfo( } -Pipe StorageMerge::read( +void StorageMerge::read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, @@ -275,11 +280,16 @@ Pipe StorageMerge::read( StorageListWithLocks selected_tables = getSelectedTables(local_context, query_info.query, has_database_virtual_column, has_table_virtual_column); + query_plan.addInterpreterContext(modified_context); + + QueryPlanResourceHolder resources; + if (selected_tables.empty()) { auto modified_query_info 
= getModifiedQueryInfo(query_info, modified_context, getStorageID(), false); /// FIXME: do we support sampling in this case? - return createSources( + auto pipe = createSources( + resources, {}, modified_query_info, processed_stage, @@ -292,6 +302,9 @@ Pipe StorageMerge::read( 0, has_database_virtual_column, has_table_virtual_column); + + IStorage::readFromPipe(query_plan, std::move(pipe), column_names, storage_snapshot, query_info, local_context, getName()); + return; } size_t tables_count = selected_tables.size(); @@ -387,6 +400,7 @@ Pipe StorageMerge::read( } auto source_pipe = createSources( + resources, nested_storage_snaphsot, modified_query_info, processed_stage, @@ -400,7 +414,13 @@ Pipe StorageMerge::read( has_database_virtual_column, has_table_virtual_column); - pipes.emplace_back(std::move(source_pipe)); + if (!source_pipe.empty()) + { + query_plan.addStorageHolder(std::get<1>(table)); + query_plan.addTableLock(std::get<2>(table)); + + pipes.emplace_back(std::move(source_pipe)); + } } auto pipe = Pipe::unitePipes(std::move(pipes)); @@ -411,10 +431,12 @@ Pipe StorageMerge::read( // because narrowPipe doesn't preserve order. narrowPipe(pipe, num_streams); - return pipe; + IStorage::readFromPipe(query_plan, std::move(pipe), column_names, storage_snapshot, query_info, local_context, getName()); + query_plan.addResources(std::move(resources)); } Pipe StorageMerge::createSources( + QueryPlanResourceHolder & resources, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & modified_query_info, const QueryProcessingStage::Enum & processed_stage, @@ -429,19 +451,20 @@ Pipe StorageMerge::createSources( bool has_table_virtual_column, bool concat_streams) { - const auto & [database_name, storage, struct_lock, table_name] = storage_with_lock; + const auto & [database_name, storage, _, table_name] = storage_with_lock; auto & modified_select = modified_query_info.query->as(); Pipe pipe; if (!storage) { - pipe = QueryPipelineBuilder::getPipe(InterpreterSelectQuery( + auto builder = InterpreterSelectQuery( modified_query_info.query, modified_context, Pipe(std::make_shared(header)), - SelectQueryOptions(processed_stage).analyze()).buildQueryPipeline()); + SelectQueryOptions(processed_stage).analyze()).buildQueryPipeline(); + + pipe = QueryPipelineBuilder::getPipe(std::move(builder), resources); - pipe.addInterpreterContext(modified_context); return pipe; } @@ -460,7 +483,9 @@ Pipe StorageMerge::createSources( if (real_column_names.empty()) real_column_names.push_back(ExpressionActions::getSmallestColumn(storage_snapshot->metadata->getColumns().getAllPhysical())); - pipe = storage->read( + QueryPlan plan; + storage->read( + plan, real_column_names, storage_snapshot, modified_query_info, @@ -468,6 +493,15 @@ Pipe StorageMerge::createSources( processed_stage, max_block_size, UInt32(streams_num)); + + if (!plan.isInitialized()) + return {}; + + auto builder = plan.buildQueryPipeline( + QueryPlanOptimizationSettings::fromContext(modified_context), + BuildQueryPipelineSettings::fromContext(modified_context)); + + pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources); } else if (processed_stage > storage_stage) { @@ -482,7 +516,7 @@ Pipe StorageMerge::createSources( modified_query_info.query, modified_context, SelectQueryOptions(processed_stage).ignoreProjections()}; - pipe = QueryPipelineBuilder::getPipe(interpreter.buildQueryPipeline()); + pipe = QueryPipelineBuilder::getPipe(interpreter.buildQueryPipeline(), resources); /** Materialization is needed, since from distributed 
storage the constants come materialized. * If you do not do this, different types (Const and non-Const) columns will be produced in different threads, @@ -543,10 +577,6 @@ Pipe StorageMerge::createSources( /// Subordinary tables could have different but convertible types, like numeric types of different width. /// We must return streams with structure equals to structure of Merge table. convertingSourceStream(header, storage_snapshot->metadata, aliases, modified_context, modified_query_info.query, pipe, processed_stage); - - pipe.addTableLock(struct_lock); - pipe.addStorageHolder(storage); - pipe.addInterpreterContext(modified_context); } return pipe; diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index b4358177c9f..f6dae239e79 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -7,6 +7,8 @@ namespace DB { +struct QueryPlanResourceHolder; + /** A table that represents the union of an arbitrary number of other tables. * All tables must have the same structure. */ @@ -49,7 +51,8 @@ public: QueryProcessingStage::Enum getQueryProcessingStage(ContextPtr, QueryProcessingStage::Enum, const StorageSnapshotPtr &, SelectQueryInfo &) const override; - Pipe read( + void read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, @@ -113,6 +116,7 @@ protected: using Aliases = std::vector; Pipe createSources( + QueryPlanResourceHolder & resources, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, const QueryProcessingStage::Enum & processed_stage, diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 81b61909228..e92e5587928 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -233,22 +233,6 @@ void StorageMergeTree::read( query_plan = std::move(*plan); } -Pipe StorageMergeTree::read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr local_context, - QueryProcessingStage::Enum processed_stage, - const size_t max_block_size, - const unsigned num_streams) -{ - QueryPlan plan; - read(plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe( - QueryPlanOptimizationSettings::fromContext(local_context), - BuildQueryPipelineSettings::fromContext(local_context)); -} - std::optional StorageMergeTree::totalRows(const Settings &) const { return getTotalActiveSizeInRows(); diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index 57770f8d1c3..d0875d2c969 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -58,15 +58,6 @@ public: bool supportsTransactions() const override { return true; } - Pipe read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - unsigned num_streams) override; - void read( QueryPlan & query_plan, const Names & column_names, diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index 69b6339881b..5b57384c1dd 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -36,6 +36,8 @@ #include +#include + #include #include diff --git a/src/Storages/StorageProxy.h b/src/Storages/StorageProxy.h index b1eb190bd1d..0fabff59db4 100644 --- 
a/src/Storages/StorageProxy.h +++ b/src/Storages/StorageProxy.h @@ -55,7 +55,8 @@ public: return getNested()->watch(column_names, query_info, context, processed_stage, max_block_size, num_streams); } - Pipe read( + void read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, @@ -64,7 +65,7 @@ public: size_t max_block_size, unsigned num_streams) override { - return getNested()->read(column_names, storage_snapshot, query_info, context, processed_stage, max_block_size, num_streams); + return getNested()->read(query_plan, column_names, storage_snapshot, query_info, context, processed_stage, max_block_size, num_streams); } SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context) override diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 07eb8b18765..f10838c8148 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -4397,23 +4397,6 @@ void StorageReplicatedMergeTree::read( } } -Pipe StorageReplicatedMergeTree::read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr local_context, - QueryProcessingStage::Enum processed_stage, - const size_t max_block_size, - const unsigned num_streams) -{ - QueryPlan plan; - read(plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe( - QueryPlanOptimizationSettings::fromContext(local_context), - BuildQueryPipelineSettings::fromContext(local_context)); -} - - template void StorageReplicatedMergeTree::foreachActiveParts(Func && func, bool select_sequential_consistency) const { diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index ce240b4e59e..af77126ee59 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -117,15 +117,6 @@ public: bool supportsReplication() const override { return true; } bool supportsDeduplication() const override { return true; } - Pipe read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - unsigned num_streams) override; - void read( QueryPlan & query_plan, const Names & column_names, diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 393ea0e24ff..f524a405c9b 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -54,7 +54,7 @@ #include #include -#include +#include #include #include #include @@ -397,7 +397,7 @@ StorageS3Source::StorageS3Source( const String & version_id_, std::shared_ptr file_iterator_, const size_t download_thread_num_) - : SourceWithProgress(getHeader(sample_block_, requested_virtual_columns_)) + : ISource(getHeader(sample_block_, requested_virtual_columns_)) , WithContext(context_) , name(std::move(name_)) , bucket(bucket_) @@ -603,7 +603,9 @@ public: void onException() override { - write_buf->finalize(); + if (!writer) + return; + onFinish(); } void onFinish() override diff --git a/src/Storages/StorageS3.h b/src/Storages/StorageS3.h index b246de18bfb..c0e5b80c709 100644 --- a/src/Storages/StorageS3.h +++ b/src/Storages/StorageS3.h @@ -11,7 +11,7 @@ #include #include -#include +#include #include #include #include @@ -29,7 +29,7 
@@ namespace DB class PullingPipelineExecutor; class StorageS3SequentialSource; -class StorageS3Source : public SourceWithProgress, WithContext +class StorageS3Source : public ISource, WithContext { public: class DisclosedGlobIterator diff --git a/src/Storages/StorageS3Cluster.cpp b/src/Storages/StorageS3Cluster.cpp index 13be199bd37..297f806d086 100644 --- a/src/Storages/StorageS3Cluster.cpp +++ b/src/Storages/StorageS3Cluster.cpp @@ -23,7 +23,7 @@ #include #include #include -#include "Processors/Sources/SourceWithProgress.h" +#include "Processors/ISource.h" #include #include #include diff --git a/src/Storages/StorageSQLite.cpp b/src/Storages/StorageSQLite.cpp index 9b203967edd..1eb473af80d 100644 --- a/src/Storages/StorageSQLite.cpp +++ b/src/Storages/StorageSQLite.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include diff --git a/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp index 323d382cf2b..d66ac088a08 100644 --- a/src/Storages/StorageStripeLog.cpp +++ b/src/Storages/StorageStripeLog.cpp @@ -30,7 +30,7 @@ #include #include #include "StorageLogSettings.h" -#include +#include #include #include #include @@ -60,7 +60,7 @@ namespace ErrorCodes /// NOTE: The lock `StorageStripeLog::rwlock` is NOT kept locked while reading, /// because we read ranges of data that do not change. -class StripeLogSource final : public SourceWithProgress +class StripeLogSource final : public ISource { public: static Block getHeader( @@ -94,7 +94,7 @@ public: IndexForNativeFormat::Blocks::const_iterator index_begin_, IndexForNativeFormat::Blocks::const_iterator index_end_, size_t file_size_) - : SourceWithProgress(getHeader(storage_snapshot_, column_names, index_begin_, index_end_)) + : ISource(getHeader(storage_snapshot_, column_names, index_begin_, index_end_)) , storage(storage_) , storage_snapshot(storage_snapshot_) , read_settings(std::move(read_settings_)) diff --git a/src/Storages/StorageTableFunction.h b/src/Storages/StorageTableFunction.h index 8bc1b160e77..2a4bfdf304b 100644 --- a/src/Storages/StorageTableFunction.h +++ b/src/Storages/StorageTableFunction.h @@ -5,6 +5,8 @@ #include #include #include +#include +#include #include #include @@ -91,7 +93,8 @@ public: nested->drop(); } - Pipe read( + void read( + QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, @@ -105,27 +108,26 @@ public: cnames += c + " "; auto storage = getNested(); auto nested_snapshot = storage->getStorageSnapshot(storage->getInMemoryMetadataPtr(), context); - auto pipe = storage->read(column_names, nested_snapshot, query_info, context, + storage->read(query_plan, column_names, nested_snapshot, query_info, context, processed_stage, max_block_size, num_streams); - if (!pipe.empty() && add_conversion) + if (add_conversion) { + auto from_header = query_plan.getCurrentDataStream().header; auto to_header = getHeaderForProcessingStage(column_names, storage_snapshot, query_info, context, processed_stage); auto convert_actions_dag = ActionsDAG::makeConvertingActions( - pipe.getHeader().getColumnsWithTypeAndName(), + from_header.getColumnsWithTypeAndName(), to_header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto convert_actions = std::make_shared( - convert_actions_dag, - ExpressionActionsSettings::fromSettings(context->getSettingsRef(), CompileExpressions::yes)); - pipe.addSimpleTransform([&](const Block & header) - { - return std::make_shared(header, convert_actions); - }); + auto step = 
std::make_unique( + query_plan.getCurrentDataStream(), + convert_actions_dag); + + step->setStepDescription("Converting columns"); + query_plan.addStep(std::move(step)); } - return pipe; } SinkToStoragePtr write( diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 062241797e0..cd55c32fb9c 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -29,7 +29,7 @@ #include #include -#include +#include #include #include #include @@ -114,7 +114,7 @@ namespace } - class StorageURLSource : public SourceWithProgress + class StorageURLSource : public ISource { using URIParams = std::vector>; @@ -165,7 +165,7 @@ namespace const ReadWriteBufferFromHTTP::HTTPHeaderEntries & headers_ = {}, const URIParams & params = {}, bool glob_url = false) - : SourceWithProgress(sample_block), name(std::move(name_)), uri_info(uri_info_) + : ISource(sample_block), name(std::move(name_)), uri_info(uri_info_) { auto headers = getHeaders(headers_); @@ -445,14 +445,25 @@ void StorageURLSink::consume(Chunk chunk) void StorageURLSink::onException() { - write_buf->finalize(); + if (!writer) + return; + onFinish(); } void StorageURLSink::onFinish() { - writer->finalize(); - writer->flush(); - write_buf->finalize(); + try + { + writer->finalize(); + writer->flush(); + write_buf->finalize(); + } + catch (...) + { + /// Stop ParallelFormattingOutputFormat correctly. + writer.reset(); + throw; + } } class PartitionedStorageURLSink : public PartitionedSink diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 98d07686796..bbbad012547 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -20,7 +20,6 @@ #include #include #include -#include #include #include @@ -104,23 +103,6 @@ StorageView::StorageView( setInMemoryMetadata(storage_metadata); } - -Pipe StorageView::read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - const size_t max_block_size, - const unsigned num_streams) -{ - QueryPlan plan; - read(plan, column_names, storage_snapshot, query_info, context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe( - QueryPlanOptimizationSettings::fromContext(context), - BuildQueryPipelineSettings::fromContext(context)); -} - void StorageView::read( QueryPlan & query_plan, const Names & column_names, @@ -142,6 +124,7 @@ void StorageView::read( auto options = SelectQueryOptions(QueryProcessingStage::Complete, 0, false, query_info.settings_limit_offset_done); InterpreterSelectWithUnionQuery interpreter(current_inner_query, context, options, column_names); + interpreter.addStorageLimits(*query_info.storage_limits); interpreter.buildQueryPlan(query_plan); /// It's expected that the columns read from storage are not constant. 
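Two patterns recur across the hunks above. First, header conversion moves from a Pipe-level transform to a plan-level step, as in StorageTableFunction; a condensed sketch, assuming `to_header` comes from `getHeaderForProcessingStage()` and that the stripped template argument is `ExpressionStep`:

    auto convert_actions_dag = ActionsDAG::makeConvertingActions(
        query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(),
        to_header.getColumnsWithTypeAndName(),
        ActionsDAG::MatchColumnsMode::Name);

    /// ExpressionStep replaces the old addSimpleTransform + ExpressionTransform pair.
    auto step = std::make_unique<ExpressionStep>(query_plan.getCurrentDataStream(), convert_actions_dag);
    step->setStepDescription("Converting columns");
    query_plan.addStep(std::move(step));

Second, the sink shutdown path (StorageFile, StorageS3, StorageURL): onException() now returns early when no writer was ever created and otherwise routes through onFinish(), which resets the writer before rethrowing so that ParallelFormattingOutputFormat is stopped correctly instead of left running against a finalized buffer.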
diff --git a/src/Storages/StorageView.h b/src/Storages/StorageView.h index 70accfd061b..31c96addd08 100644 --- a/src/Storages/StorageView.h +++ b/src/Storages/StorageView.h @@ -24,15 +24,6 @@ public: bool supportsSampling() const override { return true; } bool supportsFinal() const override { return true; } - Pipe read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - unsigned num_streams) override; - void read( QueryPlan & query_plan, const Names & column_names, diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index ca821221611..81e3e6365a7 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -64,7 +64,7 @@ namespace } -class ColumnsSource : public SourceWithProgress +class ColumnsSource : public ISource { public: ColumnsSource( @@ -75,7 +75,7 @@ public: ColumnPtr tables_, Storages storages_, ContextPtr context) - : SourceWithProgress(header_) + : ISource(header_) , columns_mask(std::move(columns_mask_)), max_block_size(max_block_size_) , databases(std::move(databases_)), tables(std::move(tables_)), storages(std::move(storages_)) , total_tables(tables->size()), access(context->getAccess()) diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.cpp b/src/Storages/System/StorageSystemDataSkippingIndices.cpp index 828e77bd83a..e725f8a03c6 100644 --- a/src/Storages/System/StorageSystemDataSkippingIndices.cpp +++ b/src/Storages/System/StorageSystemDataSkippingIndices.cpp @@ -8,7 +8,8 @@ #include #include #include -#include +#include +#include namespace DB @@ -32,7 +33,7 @@ StorageSystemDataSkippingIndices::StorageSystemDataSkippingIndices(const Storage setInMemoryMetadata(storage_metadata); } -class DataSkippingIndicesSource : public SourceWithProgress +class DataSkippingIndicesSource : public ISource { public: DataSkippingIndicesSource( @@ -41,7 +42,7 @@ public: UInt64 max_block_size_, ColumnPtr databases_, ContextPtr context_) - : SourceWithProgress(header) + : ISource(header) , column_mask(std::move(columns_mask_)) , max_block_size(max_block_size_) , databases(std::move(databases_)) diff --git a/src/Storages/System/StorageSystemDetachedParts.cpp b/src/Storages/System/StorageSystemDetachedParts.cpp index 3549085f319..574ce4f44c2 100644 --- a/src/Storages/System/StorageSystemDetachedParts.cpp +++ b/src/Storages/System/StorageSystemDetachedParts.cpp @@ -6,6 +6,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Storages/System/StorageSystemDisks.cpp b/src/Storages/System/StorageSystemDisks.cpp index 5cc79c1ceee..c84a4f17510 100644 --- a/src/Storages/System/StorageSystemDisks.cpp +++ b/src/Storages/System/StorageSystemDisks.cpp @@ -1,5 +1,6 @@ #include #include +#include #include namespace DB diff --git a/src/Storages/System/StorageSystemNumbers.cpp b/src/Storages/System/StorageSystemNumbers.cpp index 2e48bb857ce..523ec25b89c 100644 --- a/src/Storages/System/StorageSystemNumbers.cpp +++ b/src/Storages/System/StorageSystemNumbers.cpp @@ -3,7 +3,7 @@ #include #include -#include +#include #include #include @@ -14,11 +14,11 @@ namespace DB namespace { -class NumbersSource : public SourceWithProgress +class NumbersSource : public ISource { public: NumbersSource(UInt64 block_size_, UInt64 offset_, UInt64 step_) - : SourceWithProgress(createHeader()), block_size(block_size_), next(offset_), step(step_) 
{} + : ISource(createHeader()), block_size(block_size_), next(offset_), step(step_) {} String getName() const override { return "Numbers"; } @@ -36,7 +36,7 @@ protected: next += step; - progress({column->size(), column->byteSize()}); + progress(column->size(), column->byteSize()); return { Columns {std::move(column)}, block_size }; } @@ -61,11 +61,11 @@ struct NumbersMultiThreadedState using NumbersMultiThreadedStatePtr = std::shared_ptr; -class NumbersMultiThreadedSource : public SourceWithProgress +class NumbersMultiThreadedSource : public ISource { public: NumbersMultiThreadedSource(NumbersMultiThreadedStatePtr state_, UInt64 block_size_, UInt64 max_counter_) - : SourceWithProgress(createHeader()) + : ISource(createHeader()) , state(std::move(state_)) , block_size(block_size_) , max_counter(max_counter_) {} @@ -94,7 +94,7 @@ protected: while (pos < end) *pos++ = curr++; - progress({column->size(), column->byteSize()}); + progress(column->size(), column->byteSize()); return { Columns {std::move(column)}, block_size }; } diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index 84b14b2c8be..0b976680d00 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include diff --git a/src/Storages/System/StorageSystemStoragePolicies.cpp b/src/Storages/System/StorageSystemStoragePolicies.cpp index 04c98e6be9c..832c430e2be 100644 --- a/src/Storages/System/StorageSystemStoragePolicies.cpp +++ b/src/Storages/System/StorageSystemStoragePolicies.cpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 53e4d8fefa3..5eb69f3442b 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include @@ -124,7 +124,7 @@ static bool needLockStructure(const DatabasePtr & database, const Block & header return false; } -class TablesBlockSource : public SourceWithProgress +class TablesBlockSource : public ISource { public: TablesBlockSource( @@ -134,7 +134,7 @@ public: ColumnPtr databases_, ColumnPtr tables_, ContextPtr context_) - : SourceWithProgress(std::move(header)) + : ISource(std::move(header)) , columns_mask(std::move(columns_mask_)) , max_block_size(max_block_size_) , databases(std::move(databases_)) diff --git a/src/Storages/System/StorageSystemZeros.cpp b/src/Storages/System/StorageSystemZeros.cpp index b6a623c3071..9e5836fa358 100644 --- a/src/Storages/System/StorageSystemZeros.cpp +++ b/src/Storages/System/StorageSystemZeros.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include @@ -23,11 +23,11 @@ using ZerosStatePtr = std::shared_ptr; /// Source which generates zeros. /// Uses state to share the number of generated rows between threads. /// If state is nullptr, then limit is ignored. 
-class ZerosSource : public SourceWithProgress +class ZerosSource : public ISource { public: ZerosSource(UInt64 block_size, UInt64 limit_, ZerosStatePtr state_) - : SourceWithProgress(createHeader()), limit(limit_), state(std::move(state_)) + : ISource(createHeader()), limit(limit_), state(std::move(state_)) { column = createColumn(block_size); } @@ -54,7 +54,7 @@ protected: } } - progress({column->size(), column->byteSize()}); + progress(column->size(), column->byteSize()); return { Columns {std::move(column_ptr)}, column_size }; } diff --git a/src/Storages/System/StorageSystemZooKeeper.cpp b/src/Storages/System/StorageSystemZooKeeper.cpp index 4ba6c00ad9d..1212d9da60a 100644 --- a/src/Storages/System/StorageSystemZooKeeper.cpp +++ b/src/Storages/System/StorageSystemZooKeeper.cpp @@ -16,8 +16,12 @@ #include #include #include +#include +#include +#include #include #include +#include namespace DB @@ -28,6 +32,157 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } +/** ZkNodeCache is a trie that caches all the ZooKeeper writes. The purpose of this struct is to avoid creating/setting nodes + * repeatedly. For example, if we create path /a/b/c/d/e and path /a/b/d/f in the same transaction, we don't want to create + * their common path "/a/b" twice. This data structure caches these changes and generates the eventual requests in a single pass. + */ +struct ZkNodeCache +{ + using ZkNodeCachePtr = std::shared_ptr<ZkNodeCache>; + + std::unordered_map<String, ZkNodeCachePtr> children; + String value; + String path; + bool exists; + bool changed; + + ZkNodeCache() : exists(true), changed(false) { } + ZkNodeCache(String path_, bool exists_) : path(path_), exists(exists_), changed(false) { } + + void insert(const std::vector<String> & nodes, zkutil::ZooKeeperPtr zookeeper, const String & value_to_set, size_t index) + { + /// If this node has an empty name, just skip it. + /// A path like "/a//b///c//d/" may produce empty node names. + while (index < nodes.size() && nodes[index].empty()) + ++index; + + if (index == nodes.size()) + { + value = value_to_set; + changed = true; + return; + } + const String & child_name = nodes[index]; + ++index; + if (!children.contains(child_name)) + { + String sub_path = path + "/" + child_name; + bool child_exist = false; + if (exists) + { + /// If this node doesn't exist, neither can its child, so skip the lookup. + child_exist = zookeeper->exists(sub_path); + } + children[child_name] = std::make_shared<ZkNodeCache>(sub_path, child_exist); + } + children[child_name]->insert(nodes, zookeeper, value_to_set, index); + } + + void generateRequests(Coordination::Requests & requests) + { + /** If the node doesn't exist, we should generate a create request. + * If the node exists, we should generate a set request. + * This DFS guarantees that ancestor nodes are processed before their descendants.
+ */ + if (!exists) + { + auto request = zkutil::makeCreateRequest(path, value, zkutil::CreateMode::Persistent); + requests.push_back(request); + } + else if (changed) + { + auto request = zkutil::makeSetRequest(path, value, -1); + requests.push_back(request); + } + for (const auto & [_, child] : children) + child->generateRequests(requests); + } +}; + +class ZooKeeperSink : public SinkToStorage +{ + zkutil::ZooKeeperPtr zookeeper; + + ZkNodeCache cache; + +public: + ZooKeeperSink(const Block & header, ContextPtr context) : SinkToStorage(header), zookeeper(context->getZooKeeper()) { } + String getName() const override { return "ZooKeeperSink"; } + + void consume(Chunk chunk) override + { + auto block = getHeader().cloneWithColumns(chunk.getColumns()); + size_t rows = block.rows(); + for (size_t i = 0; i < rows; i++) + { + String name = block.getByPosition(0).column->getDataAt(i).toString(); + String value = block.getByPosition(1).column->getDataAt(i).toString(); + String path = block.getByPosition(2).column->getDataAt(i).toString(); + + /// We don't expect `name` to contain a path separator. + if (name.find('/') != std::string::npos) + { + throw Exception("Column `name` should not contain '/'", ErrorCodes::BAD_ARGUMENTS); + } + + if (name.empty()) + { + throw Exception("Column `name` should not be empty", ErrorCodes::BAD_ARGUMENTS); + } + + if (path.empty()) + { + throw Exception("Column `path` should not be empty", ErrorCodes::BAD_ARGUMENTS); + } + + if (path.size() + name.size() > PATH_MAX) + { + throw Exception("Sum of `name` length and `path` length should not exceed PATH_MAX", ErrorCodes::BAD_ARGUMENTS); + } + + std::vector<String> path_vec; + boost::split(path_vec, path, boost::is_any_of("/")); + path_vec.push_back(name); + cache.insert(path_vec, zookeeper, value, 0); + } + } + + void onFinish() override + { + Coordination::Requests requests; + cache.generateRequests(requests); + zookeeper->multi(requests); + } +};
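The ZkNodeCache/ZooKeeperSink pair above accumulates all inserted rows in a path trie and only emits requests at the end, so overlapping paths produce a single create or set per node. A minimal standalone sketch of the same deduplication idea, with plain STL types and a hypothetical Request struct standing in for Coordination::Requests and a live ZooKeeper connection:

#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

struct Request
{
    std::string op;
    std::string path;
    std::string value;
};

struct Node
{
    std::map<std::string, Node> children;
    bool exists = false;   // whether the node is already on the server
    bool changed = false;  // whether this transaction assigned it a value
    std::string value;
};

// Insert "/a/b/c" = value into the trie; empty components ("/a//b") are skipped,
// mirroring ZkNodeCache::insert.
void insert(Node & root, const std::string & path, const std::string & value)
{
    Node * node = &root;
    std::istringstream stream(path);
    std::string part;
    while (std::getline(stream, part, '/'))
        if (!part.empty())
            node = &node->children[part];
    node->value = value;
    node->changed = true;
}

// Emit one request per node, parents strictly before children, like generateRequests.
void generate(const Node & node, const std::string & prefix, std::vector<Request> & out)
{
    for (const auto & [name, child] : node.children)
    {
        const std::string path = prefix + "/" + name;
        if (!child.exists)
            out.push_back({"create", path, child.value});
        else if (child.changed)
            out.push_back({"set", path, child.value});
        generate(child, path, out);
    }
}

int main()
{
    Node root;
    root.exists = true;
    insert(root, "/a/b/c/d/e", "x");
    insert(root, "/a/b/d/f", "y");

    std::vector<Request> requests;
    generate(root, "", requests);
    for (const auto & r : requests)
        std::cout << r.op << ' ' << r.path << '\n';
    return 0;
}

With the two paths from the comment above (/a/b/c/d/e and /a/b/d/f), this prints seven requests, one per distinct node; in particular the shared prefix nodes /a and /a/b are created only once.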
+ +StorageSystemZooKeeper::StorageSystemZooKeeper(const StorageID & table_id_) + : IStorageSystemOneBlock(table_id_) +{ + StorageInMemoryMetadata storage_metadata; + ColumnsDescription desc; + auto columns = getNamesAndTypes(); + for (const auto & col : columns) + { + ColumnDescription col_desc(col.name, col.type); + /// Only the columns `name`, `path` and `value` can be inserted. + if (col.name != "name" && col.name != "path" && col.name != "value") + col_desc.default_desc.kind = ColumnDefaultKind::Materialized; + desc.add(col_desc); + } + storage_metadata.setColumns(desc); + setInMemoryMetadata(storage_metadata); +} + +SinkToStoragePtr StorageSystemZooKeeper::write(const ASTPtr &, const StorageMetadataPtr &, ContextPtr context) +{ + if (!context->getConfigRef().getBool("allow_zookeeper_write", false)) + throw Exception("Writing to system.zookeeper is prohibited unless the config setting `allow_zookeeper_write` is set to true", ErrorCodes::BAD_ARGUMENTS); + Block write_header; + write_header.insert(ColumnWithTypeAndName(std::make_shared<DataTypeString>(), "name")); + write_header.insert(ColumnWithTypeAndName(std::make_shared<DataTypeString>(), "value")); + write_header.insert(ColumnWithTypeAndName(std::make_shared<DataTypeString>(), "path")); + return std::make_shared<ZooKeeperSink>(write_header, context); +} NamesAndTypesList StorageSystemZooKeeper::getNamesAndTypes() { diff --git a/src/Storages/System/StorageSystemZooKeeper.h b/src/Storages/System/StorageSystemZooKeeper.h index 32ca767ac24..20ad29af481 100644 --- a/src/Storages/System/StorageSystemZooKeeper.h +++ b/src/Storages/System/StorageSystemZooKeeper.h @@ -14,10 +14,14 @@ class Context; class StorageSystemZooKeeper final : public IStorageSystemOneBlock<StorageSystemZooKeeper> { public: + explicit StorageSystemZooKeeper(const StorageID & table_id_); + std::string getName() const override { return "SystemZooKeeper"; } static NamesAndTypesList getNamesAndTypes(); + SinkToStoragePtr write(const ASTPtr & /*query*/, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr /*context*/) override; + protected: using IStorageSystemOneBlock::IStorageSystemOneBlock; diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 0f4d5885d19..6fbc845c103 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -41,7 +41,6 @@ #include #include #include -#include #include #include #include @@ -60,6 +59,7 @@ #include #include +#include namespace DB { @@ -579,7 +579,8 @@ std::pair StorageWindowView::getNewBlocks(UInt32 watermark) }); Pipes pipes; - auto pipe = QueryPipelineBuilder::getPipe(std::move(builder)); + QueryPlanResourceHolder resources; + auto pipe = QueryPipelineBuilder::getPipe(std::move(builder), resources); pipes.emplace_back(std::move(pipe)); auto creator = [&](const StorageID & blocks_id_global) @@ -1045,20 +1046,20 @@ void StorageWindowView::threadFuncFireEvent() } } -Pipe StorageWindowView::read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr local_context, - QueryProcessingStage::Enum processed_stage, - const size_t max_block_size, - const unsigned num_streams) -{ - QueryPlan plan; - read(plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe( - QueryPlanOptimizationSettings::fromContext(local_context), BuildQueryPipelineSettings::fromContext(local_context)); -} +// Pipe StorageWindowView::read( +// const Names & column_names, +// const StorageSnapshotPtr & storage_snapshot, +// SelectQueryInfo & query_info, +// ContextPtr local_context, +// QueryProcessingStage::Enum processed_stage, +// const size_t max_block_size, +// const unsigned num_streams) +// { +// QueryPlan plan; +// read(plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams); +// return plan.convertToPipe( +//
QueryPlanOptimizationSettings::fromContext(local_context), BuildQueryPipelineSettings::fromContext(local_context)); +// } void StorageWindowView::read( QueryPlan & query_plan, @@ -1097,21 +1098,8 @@ void StorageWindowView::read( query_plan.addStep(std::move(converting_step)); } - StreamLocalLimits limits; - SizeLimits leaf_limits; - - /// Add table lock for target table. - auto adding_limits_and_quota = std::make_unique( - query_plan.getCurrentDataStream(), - storage, - std::move(lock), - limits, - leaf_limits, - nullptr, - nullptr); - - adding_limits_and_quota->setStepDescription("Lock target table for WindowView"); - query_plan.addStep(std::move(adding_limits_and_quota)); + query_plan.addStorageHolder(storage); + query_plan.addTableLock(std::move(lock)); } } @@ -1171,8 +1159,8 @@ StorageWindowView::StorageWindowView( throw Exception(ErrorCodes::INCORRECT_QUERY, "SELECT query is not specified for {}", getName()); /// If the target table is not set, use inner target table - inner_target_table = query.to_table_id.empty(); - if (inner_target_table && !query.storage) + has_inner_target_table = query.to_table_id.empty(); + if (has_inner_target_table && !query.storage) throw Exception( "You must specify where to save results of a WindowView query: either ENGINE or an existing table in a TO clause", ErrorCodes::INCORRECT_QUERY); @@ -1185,8 +1173,6 @@ StorageWindowView::StorageWindowView( /// Extract information about watermark, lateness. eventTimeParser(query); - target_table_id = query.to_table_id; - auto inner_query = initInnerQuery(query.select->list_of_selects->children.at(0)->as(), context_); if (query.inner_storage) @@ -1194,6 +1180,8 @@ StorageWindowView::StorageWindowView( inner_table_id = StorageID(getStorageID().database_name, generateInnerTableName(getStorageID())); inner_fetch_query = generateInnerFetchQuery(inner_table_id); + target_table_id = has_inner_target_table ? StorageID(table_id_.database_name, generateTargetTableName(table_id_)) : query.to_table_id; + if (is_proctime) next_fire_signal = getWindowUpperBound(std::time(nullptr)); @@ -1205,7 +1193,7 @@ StorageWindowView::StorageWindowView( InterpreterCreateQuery create_interpreter(inner_create_query, create_context); create_interpreter.setInternal(true); create_interpreter.execute(); - if (inner_target_table) + if (has_inner_target_table) { /// create inner target table auto create_context = Context::createCopy(context_); @@ -1222,15 +1210,9 @@ StorageWindowView::StorageWindowView( InterpreterCreateQuery create_interpreter(target_create_query, create_context); create_interpreter.setInternal(true); create_interpreter.execute(); - - target_table_id = StorageID(target_create_query->getDatabase(), target_create_query->getTable()); } - else - target_table_id = query.to_table_id; } - inner_fetch_query = generateInnerFetchQuery(inner_table_id); - clean_cache_task = getContext()->getSchedulePool().createTask(getStorageID().getFullTableName(), [this] { threadFuncCleanup(); }); fire_task = getContext()->getSchedulePool().createTask( getStorageID().getFullTableName(), [this] { is_proctime ? 
threadFuncFireProc() : threadFuncFireEvent(); }); @@ -1627,7 +1609,7 @@ void StorageWindowView::dropInnerTableIfAny(bool no_delay, ContextPtr local_cont InterpreterDropQuery::executeDropQuery( ASTDropQuery::Kind::Drop, getContext(), local_context, inner_table_id, no_delay); - if (inner_target_table) + if (has_inner_target_table) InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), local_context, target_table_id, no_delay); } catch (...) diff --git a/src/Storages/WindowView/StorageWindowView.h b/src/Storages/WindowView/StorageWindowView.h index b639a5924ad..f5717ad3fdf 100644 --- a/src/Storages/WindowView/StorageWindowView.h +++ b/src/Storages/WindowView/StorageWindowView.h @@ -142,14 +142,14 @@ public: void startup() override; void shutdown() override; - Pipe read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - unsigned num_streams) override; + // Pipe read( + // const Names & column_names, + // const StorageSnapshotPtr & storage_snapshot, + // SelectQueryInfo & query_info, + // ContextPtr context, + // QueryProcessingStage::Enum processed_stage, + // size_t max_block_size, + // unsigned num_streams) override; void read( QueryPlan & query_plan, @@ -201,7 +201,7 @@ private: std::atomic shutdown_called{false}; std::atomic modifying_query{false}; bool has_inner_table{true}; - bool inner_target_table{false}; + bool has_inner_target_table{false}; mutable Block input_header; mutable Block output_header; UInt64 fire_signal_timeout_s; diff --git a/src/Storages/WindowView/WindowViewSource.h b/src/Storages/WindowView/WindowViewSource.h index b1648427fae..74720bf8074 100644 --- a/src/Storages/WindowView/WindowViewSource.h +++ b/src/Storages/WindowView/WindowViewSource.h @@ -1,13 +1,13 @@ #pragma once #include -#include +#include namespace DB { -class WindowViewSource : public SourceWithProgress +class WindowViewSource : public ISource { public: WindowViewSource( @@ -17,7 +17,7 @@ public: const bool has_limit_, const UInt64 limit_, const UInt64 heartbeat_interval_sec_) - : SourceWithProgress( + : ISource( is_events_ ? 
Block( {ColumnWithTypeAndName(ColumnUInt32::create(), std::make_shared(window_view_timezone_), "watermark")}) : storage_->getOutputHeader()) diff --git a/src/Storages/tests/gtest_storage_log.cpp b/src/Storages/tests/gtest_storage_log.cpp index 30dd63a5948..f5fdb606018 100644 --- a/src/Storages/tests/gtest_storage_log.cpp +++ b/src/Storages/tests/gtest_storage_log.cpp @@ -20,6 +20,10 @@ #include #include #include +#include +#include +#include +#include #if !defined(__clang__) # pragma GCC diagnostic push @@ -126,7 +130,12 @@ std::string readData(DB::StoragePtr & table, const DB::ContextPtr context) QueryProcessingStage::Enum stage = table->getQueryProcessingStage( context, QueryProcessingStage::Complete, storage_snapshot, query_info); - QueryPipeline pipeline(table->read(column_names, storage_snapshot, query_info, context, stage, 8192, 1)); + QueryPlan plan; + table->read(plan, column_names, storage_snapshot, query_info, context, stage, 8192, 1); + + auto pipeline = QueryPipelineBuilder::getPipeline(std::move(*plan.buildQueryPipeline( + QueryPlanOptimizationSettings::fromContext(context), + BuildQueryPipelineSettings::fromContext(context)))); Block sample; { diff --git a/src/TableFunctions/TableFunctionFormat.cpp b/src/TableFunctions/TableFunctionFormat.cpp index 83e6b07ea25..d3ce9627598 100644 --- a/src/TableFunctions/TableFunctionFormat.cpp +++ b/src/TableFunctions/TableFunctionFormat.cpp @@ -64,9 +64,7 @@ Block TableFunctionFormat::parseData(ColumnsDescription columns, ContextPtr cont auto read_buf = std::make_unique(data); auto input_format = context->getInputFormat(format, *read_buf, block, context->getSettingsRef().max_block_size); - QueryPipelineBuilder builder; - builder.init(Pipe(input_format)); - auto pipeline = std::make_unique(QueryPipelineBuilder::getPipeline(std::move(builder))); + auto pipeline = std::make_unique(input_format); auto reader = std::make_unique(*pipeline); std::vector blocks; diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index df013af8141..3b61e2077cf 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -8,7 +8,7 @@ BuildConfig = Dict[str, ConfValue] CI_CONFIG = { "build_config": { "package_release": { - "compiler": "clang-13", + "compiler": "clang-14", "build_type": "", "sanitizer": "", "package_type": "deb", @@ -19,7 +19,7 @@ CI_CONFIG = { "with_coverage": False, }, "coverity": { - "compiler": "clang-13", + "compiler": "clang-14", "build_type": "", "sanitizer": "", "package_type": "coverity", @@ -29,19 +29,8 @@ CI_CONFIG = { "with_coverage": False, "official": False, }, - # FIXME update to gcc-12 and turn on - # "binary_gcc": { - # "compiler": "gcc-11", - # "build_type": "", - # "sanitizer": "", - # "package_type": "binary", - # "bundled": "bundled", - # "splitted": "unsplitted", - # "tidy": "disable", - # "with_coverage": False, - # }, "package_aarch64": { - "compiler": "clang-13-aarch64", + "compiler": "clang-14-aarch64", "build_type": "", "sanitizer": "", "package_type": "deb", @@ -52,7 +41,7 @@ CI_CONFIG = { "with_coverage": False, }, "package_asan": { - "compiler": "clang-13", + "compiler": "clang-14", "build_type": "", "sanitizer": "address", "package_type": "deb", @@ -62,7 +51,7 @@ CI_CONFIG = { "with_coverage": False, }, "package_ubsan": { - "compiler": "clang-13", + "compiler": "clang-14", "build_type": "", "sanitizer": "undefined", "package_type": "deb", @@ -72,7 +61,7 @@ CI_CONFIG = { "with_coverage": False, }, "package_tsan": { - "compiler": "clang-13", + "compiler": "clang-14", "build_type": "", "sanitizer": "thread", 
"package_type": "deb", @@ -82,7 +71,7 @@ CI_CONFIG = { "with_coverage": False, }, "package_msan": { - "compiler": "clang-13", + "compiler": "clang-14", "build_type": "", "sanitizer": "memory", "package_type": "deb", @@ -92,7 +81,7 @@ CI_CONFIG = { "with_coverage": False, }, "package_debug": { - "compiler": "clang-13", + "compiler": "clang-14", "build_type": "debug", "sanitizer": "", "package_type": "deb", @@ -102,7 +91,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_release": { - "compiler": "clang-13", + "compiler": "clang-14", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -113,7 +102,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_tidy": { - "compiler": "clang-13", + "compiler": "clang-14", "build_type": "debug", "sanitizer": "", "package_type": "binary", @@ -124,7 +113,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_splitted": { - "compiler": "clang-13", + "compiler": "clang-14", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -134,7 +123,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_darwin": { - "compiler": "clang-13-darwin", + "compiler": "clang-14-darwin", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -145,7 +134,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_aarch64": { - "compiler": "clang-13-aarch64", + "compiler": "clang-14-aarch64", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -156,7 +145,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_freebsd": { - "compiler": "clang-13-freebsd", + "compiler": "clang-14-freebsd", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -167,7 +156,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_darwin_aarch64": { - "compiler": "clang-13-darwin-aarch64", + "compiler": "clang-14-darwin-aarch64", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -178,7 +167,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_ppc64le": { - "compiler": "clang-13-ppc64le", + "compiler": "clang-14-ppc64le", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -209,7 +198,6 @@ CI_CONFIG = { "binary_freebsd", "binary_darwin_aarch64", "binary_ppc64le", - # "binary_gcc", ], }, "tests_config": { @@ -314,9 +302,6 @@ CI_CONFIG = { "Testflows check (actions)": { "required_build": "package_release", }, - # "Unit tests (release-gcc, actions)": { - # "required_build": "binary_gcc", - # }, "Unit tests (release-clang, actions)": { "required_build": "binary_release", }, @@ -359,8 +344,13 @@ CI_CONFIG = { "ClickHouse Keeper Jepsen (actions)": { "required_build": "binary_release", }, - "Performance Comparison (actions)": { + "Performance Comparison": { "required_build": "package_release", + "test_grep_exclude_filter": "", + }, + "Performance Comparison Aarch64": { + "required_build": "package_aarch64", + "test_grep_exclude_filter": "constant_column_search", }, }, } # type: dict diff --git a/tests/ci/performance_comparison_check.py b/tests/ci/performance_comparison_check.py index b491c739653..ea2e90a4c4f 100644 --- a/tests/ci/performance_comparison_check.py +++ b/tests/ci/performance_comparison_check.py @@ -112,6 +112,15 @@ if __name__ == "__main__": else: check_name_with_group = check_name + test_grep_exclude_filter = CI_CONFIG["tests_config"][check_name][ + "test_grep_exclude_filter" + ] + if test_grep_exclude_filter: + docker_env += f" -e CHPC_TEST_GREP_EXCLUDE={test_grep_exclude_filter}" + logging.info( + "Fill fliter our performance tests by grep -v %s", test_grep_exclude_filter + ) + rerun_helper = RerunHelper(gh, pr_info, 
diff --git a/tests/config/config.d/zookeeper_write.xml b/tests/config/config.d/zookeeper_write.xml new file mode 100644 index 00000000000..ce484261aba --- /dev/null +++ b/tests/config/config.d/zookeeper_write.xml @@ -0,0 +1,3 @@ +<clickhouse> + <allow_zookeeper_write>true</allow_zookeeper_write> +</clickhouse> diff --git a/tests/config/install.sh b/tests/config/install.sh index fb015b4f931..e4a4fdacdd0 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -16,6 +16,7 @@ mkdir -p $DEST_SERVER_PATH/users.d/ mkdir -p $DEST_CLIENT_PATH ln -sf $SRC_PATH/config.d/zookeeper.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/zookeeper_write.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/listen.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/text_log.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/custom_settings_prefixes.xml $DEST_SERVER_PATH/config.d/ diff --git a/tests/integration/ci-runner.py b/tests/integration/ci-runner.py index 9ced81d73f0..e4bd1be9027 100755 --- a/tests/integration/ci-runner.py +++ b/tests/integration/ci-runner.py @@ -117,6 +117,7 @@ def get_counters(fname): # Lines like: # [gw0] [ 7%] ERROR test_mysql_protocol/test.py::test_golang_client + # [gw3] [ 40%] PASSED test_replicated_users/test.py::test_rename_replicated[QUOTA] state = line_arr[-2] test_name = line_arr[-1] @@ -941,6 +942,16 @@ class ClickhouseIntegrationTestsRunner: if "(memory)" in self.params["context_name"]: result_state = "success" + for res in test_result: + # It's not easy to parse the output of pytest, + # especially when test names may contain spaces. + # Do not allow spaces in names, to avoid obscure failures. + if " " not in res[0]: + continue + logging.warning("Found invalid test name with space: %s", res[0]) + status_text = "Found test with invalid name, see main log" + result_state = "failure" + return result_state, status_text, test_result, [] diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 6716157082c..f8ad9213e5b 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -16,12 +16,6 @@ import traceback import urllib.parse import shlex import urllib3 -from cassandra.policies import RoundRobinPolicy -import cassandra.cluster -import psycopg2 -import pymongo -import meilisearch -import pymysql import requests try: @@ -34,6 +28,7 @@ try: from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT import pymongo import pymysql + import meilisearch from confluent_kafka.avro.cached_schema_registry_client import ( CachedSchemaRegistryClient, ) diff --git a/tests/integration/test_replicated_users/test.py b/tests/integration/test_replicated_users/test.py index add45d262e6..56383f0d2df 100644 --- a/tests/integration/test_replicated_users/test.py +++ b/tests/integration/test_replicated_users/test.py @@ -41,7 +41,7 @@ entities = [ def get_entity_id(entity): - return entity.keyword + return entity.keyword.replace(" ", "_") @pytest.mark.parametrize("entity", entities, ids=get_entity_id) diff --git a/tests/queries/0_stateless/00047_stored_aggregates_complex.reference b/tests/queries/0_stateless/00047_stored_aggregates_complex.reference index e3609c9c4f1..659571d79c5 100644 --- a/tests/queries/0_stateless/00047_stored_aggregates_complex.reference +++ b/tests/queries/0_stateless/00047_stored_aggregates_complex.reference @@ -3,37 +3,37 @@ 2014-06-01 0 2 245 24.5 7 20 21 [24.5,28.1] 
['20','21','22','23','24','25','26','27','28','29'] 2014-06-01 0 3 345 34.5 7 30 35 [34.5,38.1] ['30','31','32','33','34','35','36','37','38','39'] 2014-06-01 0 4 445 44.5 7 40 42 [44.5,48.1] ['40','41','42','43','44','45','46','47','48','49'] -2014-06-01 0 5 545 54.5 7 50 56 [54.5,58.099999999999994] ['50','51','52','53','54','55','56','57','58','59'] +2014-06-01 0 5 545 54.5 7 50 56 [54.5,58.1] ['50','51','52','53','54','55','56','57','58','59'] 2014-06-01 0 6 645 64.5 7 60 63 [64.5,68.1] ['60','61','62','63','64','65','66','67','68','69'] -2014-06-01 0 7 745 74.5 7 70 70 [74.5,78.10000000000001] ['70','71','72','73','74','75','76','77','78','79'] +2014-06-01 0 7 745 74.5 7 70 70 [74.5,78.1] ['70','71','72','73','74','75','76','77','78','79'] 2014-06-01 0 8 845 84.5 7 80 84 [84.5,88.1] ['80','81','82','83','84','85','86','87','88','89'] 2014-06-01 0 9 945 94.5 7 90 91 [94.5,98.1] ['90','91','92','93','94','95','96','97','98','99'] -2014-06-01 1 10 1045 104.5 7 100 105 [104.5,108.10000000000001] ['100','101','102','103','104','105','106','107','108','109'] -2014-06-01 1 11 1145 114.5 7 110 112 [114.5,118.10000000000001] ['110','111','112','113','114','115','116','117','118','119'] +2014-06-01 1 10 1045 104.5 7 100 105 [104.5,108.1] ['100','101','102','103','104','105','106','107','108','109'] +2014-06-01 1 11 1145 114.5 7 110 112 [114.5,118.1] ['110','111','112','113','114','115','116','117','118','119'] 2014-06-01 1 12 1245 124.5 7 120 126 [124.5,128.1] ['120','121','122','123','124','125','126','127','128','129'] 2014-06-01 1 13 1345 134.5 7 130 133 [134.5,138.1] ['130','131','132','133','134','135','136','137','138','139'] 2014-06-01 1 14 1445 144.5 7 140 140 [144.5,148.1] ['140','141','142','143','144','145','146','147','148','149'] 2014-06-01 1 15 1545 154.5 7 150 154 [154.5,158.1] ['150','151','152','153','154','155','156','157','158','159'] 2014-06-01 1 16 1645 164.5 7 160 161 [164.5,168.1] ['160','161','162','163','164','165','166','167','168','169'] -2014-06-01 1 17 1745 174.5 7 170 175 [174.5,178.10000000000002] ['170','171','172','173','174','175','176','177','178','179'] -2014-06-01 1 18 1845 184.5 7 180 182 [184.5,188.10000000000002] ['180','181','182','183','184','185','186','187','188','189'] +2014-06-01 1 17 1745 174.5 7 170 175 [174.5,178.1] ['170','171','172','173','174','175','176','177','178','179'] +2014-06-01 1 18 1845 184.5 7 180 182 [184.5,188.1] ['180','181','182','183','184','185','186','187','188','189'] 2014-06-01 1 19 1945 194.5 7 190 196 [194.5,198.1] ['190','191','192','193','194','195','196','197','198','199'] 2014-06-01 2 20 2045 204.5 7 200 203 [204.5,208.1] ['200','201','202','203','204','205','206','207','208','209'] 2014-06-01 2 21 2145 214.5 7 210 210 [214.5,218.1] ['210','211','212','213','214','215','216','217','218','219'] 2014-06-01 2 22 2245 224.5 7 220 224 [224.5,228.1] ['220','221','222','223','224','225','226','227','228','229'] 2014-06-01 2 23 2345 234.5 7 230 231 [234.5,238.1] ['230','231','232','233','234','235','236','237','238','239'] -2014-06-01 2 24 2445 244.5 7 240 245 [244.5,248.10000000000002] ['240','241','242','243','244','245','246','247','248','249'] +2014-06-01 2 24 2445 244.5 7 240 245 [244.5,248.1] ['240','241','242','243','244','245','246','247','248','249'] 2014-06-01 2 25 2545 254.5 7 250 252 [254.5,258.1] ['250','251','252','253','254','255','256','257','258','259'] 2014-06-01 2 26 2645 264.5 7 260 266 [264.5,268.1] ['260','261','262','263','264','265','266','267','268','269'] 2014-06-01 2 27 2745 274.5 7 270 273 
[274.5,278.1] ['270','271','272','273','274','275','276','277','278','279'] 2014-06-01 2 28 2845 284.5 7 280 280 [284.5,288.1] ['280','281','282','283','284','285','286','287','288','289'] 2014-06-01 2 29 2945 294.5 7 290 294 [294.5,298.1] ['290','291','292','293','294','295','296','297','298','299'] 2014-06-01 3 30 3045 304.5 7 300 301 [304.5,308.1] ['300','301','302','303','304','305','306','307','308','309'] -2014-06-01 3 31 3145 314.5 7 310 315 [314.5,318.09999999999997] ['310','311','312','313','314','315','316','317','318','319'] -2014-06-01 3 32 3245 324.5 7 320 322 [324.5,328.09999999999997] ['320','321','322','323','324','325','326','327','328','329'] -2014-06-01 3 33 3345 334.5 7 330 336 [334.5,338.09999999999997] ['330','331','332','333','334','335','336','337','338','339'] -2014-06-01 3 34 3445 344.5 7 340 343 [344.5,348.09999999999997] ['340','341','342','343','344','345','346','347','348','349'] -2014-06-01 3 35 3545 354.5 7 350 350 [354.5,358.09999999999997] ['350','351','352','353','354','355','356','357','358','359'] +2014-06-01 3 31 3145 314.5 7 310 315 [314.5,318.1] ['310','311','312','313','314','315','316','317','318','319'] +2014-06-01 3 32 3245 324.5 7 320 322 [324.5,328.1] ['320','321','322','323','324','325','326','327','328','329'] +2014-06-01 3 33 3345 334.5 7 330 336 [334.5,338.1] ['330','331','332','333','334','335','336','337','338','339'] +2014-06-01 3 34 3445 344.5 7 340 343 [344.5,348.1] ['340','341','342','343','344','345','346','347','348','349'] +2014-06-01 3 35 3545 354.5 7 350 350 [354.5,358.1] ['350','351','352','353','354','355','356','357','358','359'] 2014-06-01 3 36 3645 364.5 7 360 364 [364.5,368.1] ['360','361','362','363','364','365','366','367','368','369'] 2014-06-01 3 37 3745 374.5 7 370 371 [374.5,378.1] ['370','371','372','373','374','375','376','377','378','379'] 2014-06-01 3 38 3845 384.5 7 380 385 [384.5,388.1] ['380','381','382','383','384','385','386','387','388','389'] @@ -45,19 +45,19 @@ 2014-06-01 4 44 4445 444.5 7 440 441 [444.5,448.1] ['440','441','442','443','444','445','446','447','448','449'] 2014-06-01 4 45 4545 454.5 7 450 455 [454.5,458.1] ['450','451','452','453','454','455','456','457','458','459'] 2014-06-01 4 46 4645 464.5 7 460 462 [464.5,468.1] ['460','461','462','463','464','465','466','467','468','469'] -2014-06-01 4 47 4745 474.5 7 470 476 [474.5,478.09999999999997] ['470','471','472','473','474','475','476','477','478','479'] -2014-06-01 4 48 4845 484.5 7 480 483 [484.5,488.09999999999997] ['480','481','482','483','484','485','486','487','488','489'] -2014-06-01 4 49 4945 494.5 7 490 490 [494.5,498.09999999999997] ['490','491','492','493','494','495','496','497','498','499'] -2014-06-01 5 50 5045 504.5 7 500 504 [504.5,508.09999999999997] ['500','501','502','503','504','505','506','507','508','509'] +2014-06-01 4 47 4745 474.5 7 470 476 [474.5,478.1] ['470','471','472','473','474','475','476','477','478','479'] +2014-06-01 4 48 4845 484.5 7 480 483 [484.5,488.1] ['480','481','482','483','484','485','486','487','488','489'] +2014-06-01 4 49 4945 494.5 7 490 490 [494.5,498.1] ['490','491','492','493','494','495','496','497','498','499'] +2014-06-01 5 50 5045 504.5 7 500 504 [504.5,508.1] ['500','501','502','503','504','505','506','507','508','509'] 2014-06-01 5 51 5145 514.5 7 510 511 [514.5,518.1] ['510','511','512','513','514','515','516','517','518','519'] 2014-06-01 5 52 5245 524.5 7 520 525 [524.5,528.1] ['520','521','522','523','524','525','526','527','528','529'] 2014-06-01 5 53 5345 534.5 7 530 532 
[534.5,538.1] ['530','531','532','533','534','535','536','537','538','539'] 2014-06-01 5 54 5445 544.5 7 540 546 [544.5,548.1] ['540','541','542','543','544','545','546','547','548','549'] 2014-06-01 5 55 5545 554.5 7 550 553 [554.5,558.1] ['550','551','552','553','554','555','556','557','558','559'] 2014-06-01 5 56 5645 564.5 7 560 560 [564.5,568.1] ['560','561','562','563','564','565','566','567','568','569'] -2014-06-01 5 57 5745 574.5 7 570 574 [574.5,578.0999999999999] ['570','571','572','573','574','575','576','577','578','579'] -2014-06-01 5 58 5845 584.5 7 580 581 [584.5,588.0999999999999] ['580','581','582','583','584','585','586','587','588','589'] -2014-06-01 5 59 5945 594.5 7 590 595 [594.5,598.0999999999999] ['590','591','592','593','594','595','596','597','598','599'] +2014-06-01 5 57 5745 574.5 7 570 574 [574.5,578.1] ['570','571','572','573','574','575','576','577','578','579'] +2014-06-01 5 58 5845 584.5 7 580 581 [584.5,588.1] ['580','581','582','583','584','585','586','587','588','589'] +2014-06-01 5 59 5945 594.5 7 590 595 [594.5,598.1] ['590','591','592','593','594','595','596','597','598','599'] 2014-06-01 6 60 6045 604.5 7 600 602 [604.5,608.1] ['600','601','602','603','604','605','606','607','608','609'] 2014-06-01 6 61 6145 614.5 7 610 616 [614.5,618.1] ['610','611','612','613','614','615','616','617','618','619'] 2014-06-01 6 62 6245 624.5 7 620 623 [624.5,628.1] ['620','621','622','623','624','625','626','627','628','629'] @@ -82,15 +82,15 @@ 2014-06-01 8 81 8145 814.5 7 810 812 [814.5,818.1] ['810','811','812','813','814','815','816','817','818','819'] 2014-06-01 8 82 8245 824.5 7 820 826 [824.5,828.1] ['820','821','822','823','824','825','826','827','828','829'] 2014-06-01 8 83 8345 834.5 7 830 833 [834.5,838.1] ['830','831','832','833','834','835','836','837','838','839'] -2014-06-01 8 84 8445 844.5 7 840 840 [844.5,848.0999999999999] ['840','841','842','843','844','845','846','847','848','849'] -2014-06-01 8 85 8545 854.5 7 850 854 [854.5,858.0999999999999] ['850','851','852','853','854','855','856','857','858','859'] -2014-06-01 8 86 8645 864.5 7 860 861 [864.5,868.0999999999999] ['860','861','862','863','864','865','866','867','868','869'] -2014-06-01 8 87 8745 874.5 7 870 875 [874.5,878.0999999999999] ['870','871','872','873','874','875','876','877','878','879'] -2014-06-01 8 88 8845 884.5 7 880 882 [884.5,888.0999999999999] ['880','881','882','883','884','885','886','887','888','889'] -2014-06-01 8 89 8945 894.5 7 890 896 [894.5,898.0999999999999] ['890','891','892','893','894','895','896','897','898','899'] -2014-06-01 9 90 9045 904.5 7 900 903 [904.5,908.0999999999999] ['900','901','902','903','904','905','906','907','908','909'] -2014-06-01 9 91 9145 914.5 7 910 910 [914.5,918.0999999999999] ['910','911','912','913','914','915','916','917','918','919'] -2014-06-01 9 92 9245 924.5 7 920 924 [924.5,928.0999999999999] ['920','921','922','923','924','925','926','927','928','929'] +2014-06-01 8 84 8445 844.5 7 840 840 [844.5,848.1] ['840','841','842','843','844','845','846','847','848','849'] +2014-06-01 8 85 8545 854.5 7 850 854 [854.5,858.1] ['850','851','852','853','854','855','856','857','858','859'] +2014-06-01 8 86 8645 864.5 7 860 861 [864.5,868.1] ['860','861','862','863','864','865','866','867','868','869'] +2014-06-01 8 87 8745 874.5 7 870 875 [874.5,878.1] ['870','871','872','873','874','875','876','877','878','879'] +2014-06-01 8 88 8845 884.5 7 880 882 [884.5,888.1] ['880','881','882','883','884','885','886','887','888','889'] +2014-06-01 8 89 
8945 894.5 7 890 896 [894.5,898.1] ['890','891','892','893','894','895','896','897','898','899'] +2014-06-01 9 90 9045 904.5 7 900 903 [904.5,908.1] ['900','901','902','903','904','905','906','907','908','909'] +2014-06-01 9 91 9145 914.5 7 910 910 [914.5,918.1] ['910','911','912','913','914','915','916','917','918','919'] +2014-06-01 9 92 9245 924.5 7 920 924 [924.5,928.1] ['920','921','922','923','924','925','926','927','928','929'] 2014-06-01 9 93 9345 934.5 7 930 931 [934.5,938.1] ['930','931','932','933','934','935','936','937','938','939'] 2014-06-01 9 94 9445 944.5 7 940 945 [944.5,948.1] ['940','941','942','943','944','945','946','947','948','949'] 2014-06-01 9 95 9545 954.5 7 950 952 [954.5,958.1] ['950','951','952','953','954','955','956','957','958','959'] @@ -98,7 +98,7 @@ 2014-06-01 9 97 9745 974.5 7 970 973 [974.5,978.1] ['970','971','972','973','974','975','976','977','978','979'] 2014-06-01 9 98 9845 984.5 7 980 980 [984.5,988.1] ['980','981','982','983','984','985','986','987','988','989'] 2014-06-01 9 99 9945 994.5 7 990 994 [994.5,998.1] ['990','991','992','993','994','995','996','997','998','999'] -2014-06-01 0 4950 49.5 7 0 0 [49.5,89.10000000000001] ['0','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50','51','52','53','54','55','56','57','58','59','60','61','62','63','64','65','66','67','68','69','70','71','72','73','74','75','76','77','78','79','80','81','82','83','84','85','86','87','88','89','90','91','92','93','94','95','96','97','98','99'] +2014-06-01 0 4950 49.5 7 0 0 [49.5,89.1] ['0','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50','51','52','53','54','55','56','57','58','59','60','61','62','63','64','65','66','67','68','69','70','71','72','73','74','75','76','77','78','79','80','81','82','83','84','85','86','87','88','89','90','91','92','93','94','95','96','97','98','99'] 2014-06-01 1 14950 149.5 7 100 105 [149.5,189.1] ['100','101','102','103','104','105','106','107','108','109','110','111','112','113','114','115','116','117','118','119','120','121','122','123','124','125','126','127','128','129','130','131','132','133','134','135','136','137','138','139','140','141','142','143','144','145','146','147','148','149','150','151','152','153','154','155','156','157','158','159','160','161','162','163','164','165','166','167','168','169','170','171','172','173','174','175','176','177','178','179','180','181','182','183','184','185','186','187','188','189','190','191','192','193','194','195','196','197','198','199'] 2014-06-01 2 24950 249.5 7 200 203 [249.5,289.1] ['200','201','202','203','204','205','206','207','208','209','210','211','212','213','214','215','216','217','218','219','220','221','222','223','224','225','226','227','228','229','230','231','232','233','234','235','236','237','238','239','240','241','242','243','244','245','246','247','248','249','250','251','252','253','254','255','256','257','258','259','260','261','262','263','264','265','266','267','268','269','270','271','272','273','274','275','276','277','278','279','280','281','282','283','284','285','286','287','288','289','290','291','292','293','294','295','296','297','298','299'] 2014-06-01 3 34950 349.5 7 300 301 
[349.5,389.1] ['300','301','302','303','304','305','306','307','308','309','310','311','312','313','314','315','316','317','318','319','320','321','322','323','324','325','326','327','328','329','330','331','332','333','334','335','336','337','338','339','340','341','342','343','344','345','346','347','348','349','350','351','352','353','354','355','356','357','358','359','360','361','362','363','364','365','366','367','368','369','370','371','372','373','374','375','376','377','378','379','380','381','382','383','384','385','386','387','388','389','390','391','392','393','394','395','396','397','398','399'] diff --git a/tests/queries/0_stateless/00047_stored_aggregates_complex.sql b/tests/queries/0_stateless/00047_stored_aggregates_complex.sql index cee13c34e6e..63728f131b0 100644 --- a/tests/queries/0_stateless/00047_stored_aggregates_complex.sql +++ b/tests/queries/0_stateless/00047_stored_aggregates_complex.sql @@ -37,7 +37,7 @@ ORDER BY d, k1, k2; SELECT d, k1, k2, sumMerge(Sum), avgMerge(Avg), uniqMerge(Uniq), anyMerge(Any), anyIfMerge(AnyIf), - quantilesMerge(0.5, 0.9)(Quantiles), + arrayMap(x -> round(x, 6), quantilesMerge(0.5, 0.9)(Quantiles)), groupArrayMerge(GroupArray) FROM stored_aggregates GROUP BY d, k1, k2 @@ -46,7 +46,7 @@ ORDER BY d, k1, k2; SELECT d, k1, sumMerge(Sum), avgMerge(Avg), uniqMerge(Uniq), anyMerge(Any), anyIfMerge(AnyIf), - quantilesMerge(0.5, 0.9)(Quantiles), + arrayMap(x -> round(x, 6), quantilesMerge(0.5, 0.9)(Quantiles)), groupArrayMerge(GroupArray) FROM stored_aggregates GROUP BY d, k1 @@ -55,7 +55,7 @@ ORDER BY d, k1; SELECT d, sumMerge(Sum), avgMerge(Avg), uniqMerge(Uniq), anyMerge(Any), anyIfMerge(AnyIf), - quantilesMerge(0.5, 0.9)(Quantiles), + arrayMap(x -> round(x, 6), quantilesMerge(0.5, 0.9)(Quantiles)), groupArrayMerge(GroupArray) FROM stored_aggregates GROUP BY d diff --git a/tests/queries/0_stateless/00273_quantiles.reference b/tests/queries/0_stateless/00273_quantiles.reference index ac9db31bae4..55f7d871fe6 100644 --- a/tests/queries/0_stateless/00273_quantiles.reference +++ b/tests/queries/0_stateless/00273_quantiles.reference @@ -9,30 +9,30 @@ 1 333334 [699140.3,835642,967430.8] [699999,833333,966666] 2 266667 [426546,536239,638933.4] [426665,533332,639999] 3 114285 [296938,342324,388778] [297142,342856,388570] -4 63492 [228369.80000000002,254011,279351.6] [228571,253968,279364] +4 63492 [228369.8,254011,279351.6] [228571,253968,279364] 5 40404 [185602.3,202005,218108.5] [185858,202020,218181] -6 27972 [156598.6,167864,179118.40000000002] [156643,167832,179020] +6 27972 [156598.6,167864,179118.4] [156643,167832,179020] 7 20513 [135400.8,143550,151792.6] [135384,143589,151794] -8 15686 [119239.20000000001,125463,131772.40000000002] [119215,125490,131764] -9 12384 [106509.79999999999,111538,116415.8] [106501,111455,116408] +8 15686 [119239.2,125463,131772.4] [119215,125490,131764] +9 12384 [106509.8,111538,116415.8] [106501,111455,116408] 10 10025 [96223.2,100346,104288.7] [96240,100250,104260] -11 8282 [87732.70000000001,91035,94408.6] [87784,91097,94409] +11 8282 [87732.7,91035,94408.6] [87784,91097,94409] 12 6957 [80694.6,83477,86259.4] [80694,83477,86260] -13 5925 [74666.40000000001,77036,79405.6] [74666,77036,79406] +13 5925 [74666.4,77036,79405.6] [74666,77036,79406] 14 5109 [69475.8,71519,73562.2] [69475,71519,73563] 15 4449 [64960.8,66740,68519.2] [64960,66740,68520] -16 3910 [60996.9,62560.5,64124.100000000006] [60997,62561,64125] -17 3464 [57488.299999999996,58873.5,60258.7] [57488,58874,60259] +16 3910 [60996.9,62560.5,64124.1] 
[60997,62561,64125] +17 3464 [57488.3,58873.5,60258.7] [57488,58874,60259] 18 3088 [54362.7,55597.5,56832.3] [54362,55598,56833] 19 2772 [51559.1,52667.5,53775.9] [51559,52668,53776] 20 2502 [49030.1,50030.5,51030.9] [49030,50031,51031] -21 2269 [46737.8,47645,48552.200000000004] [46737,47645,48553] +21 2269 [46737.8,47645,48552.2] [46737,47645,48553] 22 2067 [44650.6,45477,46303.4] [44650,45477,46304] 23 1891 [42742,43498,44254] [42742,43498,44254] 24 1737 [40989.6,41684,42378.4] [40989,41684,42379] 25 1601 [39375,40015,40655] [39375,40015,40655] 26 1480 [37882.9,38474.5,39066.1] [37883,38475,39067] -27 1372 [36500.100000000006,37048.5,37596.9] [36500,37049,37597] +27 1372 [36500.1,37048.5,37596.9] [36500,37049,37597] 28 1276 [35214.5,35724.5,36234.5] [35214,35725,36235] 29 1189 [34016.8,34492,34967.2] [34016,34492,34968] 30 1112 [32897.1,33341.5,33785.9] [32897,33342,33786] @@ -43,15 +43,15 @@ 35 816 [28250.5,28576.5,28902.5] [28250,28577,28903] 36 772 [27474.1,27782.5,28090.9] [27474,27783,28091] 37 731 [26739,27031,27323] [26739,27031,27323] -38 692 [26043.100000000002,26319.5,26595.899999999998] [26043,26320,26596] -39 658 [25381.699999999997,25644.5,25907.300000000003] [25381,25645,25908] +38 692 [26043.1,26319.5,26595.9] [26043,26320,26596] +39 658 [25381.7,25644.5,25907.3] [25381,25645,25908] 40 625 [24753.4,25003,25252.6] [24753,25003,25253] 41 595 [24155.4,24393,24630.6] [24155,24393,24631] 42 567 [23585.6,23812,24038.4] [23585,23812,24039] 43 541 [23042,23258,23474] [23042,23258,23474] 44 517 [22522.6,22729,22935.4] [22522,22729,22936] -45 493 [22027.199999999997,22224,22420.8] [22027,22224,22421] -46 473 [21552.2,21741,21929.800000000003] [21552,21741,21930] +45 493 [22027.2,22224,22420.8] [22027,22224,22421] +46 473 [21552.2,21741,21929.8] [21552,21741,21930] 47 453 [21097.2,21278,21458.8] [21097,21278,21459] 48 434 [20661.3,20834.5,21007.7] [20661,20835,21008] 49 416 [20243.5,20409.5,20575.5] [20243,20410,20576] @@ -62,46 +62,46 @@ 54 343 [18382.2,18519,18655.8] [18382,18519,18656] 55 330 [18050.9,18182.5,18314.1] [18051,18183,18315] 56 319 [17730.8,17858,17985.2] [17730,17858,17986] -57 308 [17421.699999999997,17544.5,17667.3] [17421,17545,17668] -58 297 [17123.6,17242,17360.399999999998] [17123,17242,17361] +57 308 [17421.7,17544.5,17667.3] [17421,17545,17668] +58 297 [17123.6,17242,17360.4] [17123,17242,17361] 59 288 [16834.7,16949.5,17064.3] [16834,16950,17065] -60 278 [16555.699999999997,16666.5,16777.3] [16555,16667,16778] +60 278 [16555.7,16666.5,16777.3] [16555,16667,16778] 61 268 [16286.7,16393.5,16500.3] [16286,16394,16501] 62 261 [16025,16129,16233] [16025,16129,16233] 63 251 [15773,15873,15973] [15773,15873,15973] -64 245 [15527.4,15625,15722.599999999999] [15527,15625,15723] +64 245 [15527.4,15625,15722.6] [15527,15625,15723] 65 236 [15290.5,15384.5,15478.5] [15290,15385,15479] -66 230 [15059.900000000001,15151.5,15243.099999999999] [15060,15152,15244] +66 230 [15059.9,15151.5,15243.1] [15060,15152,15244] 67 223 [14836.2,14925,15013.8] [14836,14925,15014] 68 216 [14619.5,14705.5,14791.5] [14619,14706,14792] -69 210 [14408.900000000001,14492.5,14576.1] [14409,14493,14577] +69 210 [14408.9,14492.5,14576.1] [14409,14493,14577] 70 204 [14204.3,14285.5,14366.7] [14204,14286,14367] -71 198 [14005.699999999999,14084.5,14163.3] [14005,14085,14164] -72 193 [13812.2,13889,13965.800000000001] [13812,13889,13966] +71 198 [14005.7,14084.5,14163.3] [14005,14085,14164] +72 193 [13812.2,13889,13965.8] [13812,13889,13966] 73 188 [13623.7,13698.5,13773.3] [13623,13699,13774] -74 
183 [13440.199999999999,13513,13585.8] [13440,13513,13586] +74 183 [13440.2,13513,13585.8] [13440,13513,13586] 75 177 [13262.6,13333,13403.4] [13262,13333,13404] -76 174 [13088.300000000001,13157.5,13226.7] [13088,13158,13227] +76 174 [13088.3,13157.5,13226.7] [13088,13158,13227] 77 168 [12919.7,12986.5,13053.3] [12919,12987,13054] -78 165 [12754.400000000001,12820,12885.6] [12754,12820,12886] +78 165 [12754.4,12820,12885.6] [12754,12820,12886] 79 160 [12593.9,12657.5,12721.1] [12594,12658,12722] 80 156 [12437.5,12499.5,12561.5] [12437,12500,12562] -81 153 [12284.199999999999,12345,12405.8] [12284,12345,12406] +81 153 [12284.2,12345,12405.8] [12284,12345,12406] 82 148 [12135.7,12194.5,12253.3] [12135,12195,12254] 83 145 [11990.4,12048,12105.6] [11990,12048,12106] -84 142 [11848.099999999999,11904.5,11960.900000000001] [11848,11905,11961] +84 142 [11848.1,11904.5,11960.9] [11848,11905,11961] 85 139 [11708.8,11764,11819.2] [11708,11764,11820] -86 135 [11573.4,11627,11680.599999999999] [11573,11627,11681] -87 132 [11441.099999999999,11493.5,11545.9] [11441,11494,11546] +86 135 [11573.4,11627,11680.6] [11573,11627,11681] +87 132 [11441.1,11493.5,11545.9] [11441,11494,11546] 88 129 [11311.8,11363,11414.2] [11311,11363,11415] 89 126 [11185.5,11235.5,11285.5] [11185,11236,11286] 90 124 [11061.3,11110.5,11159.7] [11061,11111,11160] 91 121 [10940,10988,11036] [10940,10988,11036] 92 118 [10821.7,10868.5,10915.3] [10821,10869,10916] 93 115 [10706.4,10752,10797.6] [10706,10752,10798] -94 113 [10593.2,10638,10682.800000000001] [10593,10638,10683] +94 113 [10593.2,10638,10682.8] [10593,10638,10683] 95 111 [10482,10526,10570] [10482,10526,10570] -96 109 [10372.8,10416,10459.199999999999] [10372,10416,10460] +96 109 [10372.8,10416,10459.2] [10372,10416,10460] 97 106 [10266.5,10308.5,10350.5] [10266,10309,10351] 98 104 [10162.3,10203.5,10244.7] [10162,10204,10245] 99 102 [10060.1,10100.5,10140.9] [10060,10101,10141] @@ -109,7 +109,7 @@ 101 98 [9861.7,9900.5,9939.3] [9861,9901,9940] 102 96 [9765.5,9803.5,9841.5] [9765,9804,9842] 103 95 [9670.4,9708,9745.6] [9670,9708,9746] -104 92 [9578.099999999999,9614.5,9650.9] [9578,9615,9651] +104 92 [9578.1,9614.5,9650.9] [9578,9615,9651] 105 91 [9487,9523,9559] [9487,9523,9559] 106 89 [9397.8,9433,9468.2] [9397,9433,9469] 107 87 [9310.6,9345,9379.4] [9310,9345,9380] @@ -119,9 +119,9 @@ 111 81 [8976,9008,9040] [8976,9008,9040] 112 80 [8895.9,8927.5,8959.1] [8896,8928,8960] 113 78 [8817.7,8848.5,8879.3] [8817,8849,8880] -114 77 [8740.6,8771,8801.400000000001] [8740,8771,8802] -115 75 [8665.400000000001,8695,8724.6] [8665,8695,8725] -116 75 [8590.400000000001,8620,8649.6] [8590,8620,8650] +114 77 [8740.6,8771,8801.4] [8740,8771,8802] +115 75 [8665.4,8695,8724.6] [8665,8695,8725] +116 75 [8590.4,8620,8649.6] [8590,8620,8650] 117 73 [8517.2,8546,8574.8] [8517,8546,8575] 118 72 [8445.1,8473.5,8501.9] [8445,8474,8502] 119 70 [8374.9,8402.5,8430.1] [8375,8403,8431] @@ -136,38 +136,38 @@ 128 61 [7788,7812,7836] [7788,7812,7836] 129 60 [7727.9,7751.5,7775.1] [7728,7752,7776] 130 60 [7667.9,7691.5,7715.1] [7668,7692,7716] -131 58 [7609.699999999999,7632.5,7655.299999999999] [7609,7633,7656] +131 58 [7609.7,7632.5,7655.3] [7609,7633,7656] 132 57 [7552.6,7575,7597.4] [7552,7575,7598] 133 57 [7495.6,7518,7540.4] [7495,7518,7541] 134 56 [7439.5,7461.5,7483.5] [7439,7462,7484] -135 54 [7385.299999999999,7406.5,7427.700000000001] [7385,7407,7428] -136 54 [7331.299999999999,7352.5,7373.700000000001] [7331,7353,7374] -137 54 [7277.3,7298.5,7319.700000000001] [7277,7299,7320] +135 54 
[7385.3,7406.5,7427.7] [7385,7407,7428] +136 54 [7331.3,7352.5,7373.7] [7331,7353,7374] +137 54 [7277.3,7298.5,7319.7] [7277,7299,7320] 138 52 [7225.1,7245.5,7265.9] [7225,7246,7266] 139 52 [7173.1,7193.5,7213.9] [7173,7194,7214] 140 51 [7122,7142,7162] [7122,7142,7162] -141 50 [7071.900000000001,7091.5,7111.099999999999] [7072,7092,7112] -142 50 [7021.900000000001,7041.5,7061.099999999999] [7022,7042,7062] +141 50 [7071.9,7091.5,7111.1] [7072,7092,7112] +142 50 [7021.9,7041.5,7061.1] [7022,7042,7062] 143 49 [6972.8,6992,7011.2] [6972,6992,7012] 144 48 [6924.7,6943.5,6962.3] [6924,6944,6963] -145 48 [6876.700000000001,6895.5,6914.299999999999] [6876,6896,6915] +145 48 [6876.7,6895.5,6914.3] [6876,6896,6915] 146 47 [6829.6,6848,6866.4] [6829,6848,6867] 147 46 [6783.5,6801.5,6819.5] [6783,6802,6820] 148 45 [6738.4,6756,6773.6] [6738,6756,6774] 149 46 [6692.5,6710.5,6728.5] [6692,6711,6729] 150 44 [6648.3,6665.5,6682.7] [6648,6666,6683] -151 44 [6604.3,6621.5,6638.700000000001] [6604,6622,6639] -152 43 [6561.200000000001,6578,6594.8] [6561,6578,6595] -153 43 [6518.2,6535,6551.799999999999] [6518,6535,6552] +151 44 [6604.3,6621.5,6638.7] [6604,6622,6639] +152 43 [6561.2,6578,6594.8] [6561,6578,6595] +153 43 [6518.2,6535,6551.8] [6518,6535,6552] 154 42 [6476.1,6492.5,6508.9] [6476,6493,6509] -155 42 [6434.1,6450.5,6466.900000000001] [6434,6451,6467] +155 42 [6434.1,6450.5,6466.9] [6434,6451,6467] 156 41 [6393,6409,6425] [6393,6409,6425] -157 40 [6352.900000000001,6368.5,6384.1] [6353,6369,6385] -158 40 [6312.900000000001,6328.5,6344.1] [6313,6329,6345] -159 40 [6272.900000000001,6288.5,6304.1] [6273,6289,6305] -160 39 [6233.8,6249,6264.200000000001] [6233,6249,6265] -161 39 [6194.8,6210,6225.200000000001] [6194,6210,6226] -162 38 [6156.700000000001,6171.5,6186.299999999999] [6156,6172,6187] +157 40 [6352.9,6368.5,6384.1] [6353,6369,6385] +158 40 [6312.9,6328.5,6344.1] [6313,6329,6345] +159 40 [6272.9,6288.5,6304.1] [6273,6289,6305] +160 39 [6233.8,6249,6264.2] [6233,6249,6265] +161 39 [6194.8,6210,6225.2] [6194,6210,6226] +162 38 [6156.7,6171.5,6186.3] [6156,6172,6187] 163 37 [6119.6,6134,6148.4] [6119,6134,6149] 164 37 [6082.6,6097,6111.4] [6082,6097,6112] 165 37 [6045.6,6060,6074.4] [6045,6060,6075] @@ -177,26 +177,26 @@ 169 35 [5902.4,5916,5929.6] [5902,5916,5930] 170 34 [5868.3,5881.5,5894.7] [5868,5882,5895] 171 35 [5833.4,5847,5860.6] [5833,5847,5861] -172 33 [5800.2,5813,5825.799999999999] [5800,5813,5826] +172 33 [5800.2,5813,5825.8] [5800,5813,5826] 173 34 [5766.3,5779.5,5792.7] [5766,5780,5793] 174 33 [5733.2,5746,5758.8] [5733,5746,5759] -175 32 [5701.1,5713.5,5725.900000000001] [5701,5714,5726] +175 32 [5701.1,5713.5,5725.9] [5701,5714,5726] 176 33 [5668.2,5681,5693.8] [5668,5681,5694] -177 32 [5636.1,5648.5,5660.900000000001] [5636,5649,5661] +177 32 [5636.1,5648.5,5660.9] [5636,5649,5661] 178 31 [5605,5617,5629] [5605,5617,5629] 179 31 [5574,5586,5598] [5574,5586,5598] 180 31 [5543,5555,5567] [5543,5555,5567] 181 31 [5512,5524,5536] [5512,5524,5536] -182 30 [5481.9,5493.5,5505.099999999999] [5482,5494,5506] -183 30 [5451.9,5463.5,5475.099999999999] [5452,5464,5476] +182 30 [5481.9,5493.5,5505.1] [5482,5494,5506] +183 30 [5451.9,5463.5,5475.1] [5452,5464,5476] 184 29 [5422.8,5434,5445.2] [5422,5434,5446] 185 30 [5392.9,5404.5,5416.1] [5393,5405,5417] 186 29 [5363.8,5375,5386.2] [5363,5375,5387] -187 28 [5335.7,5346.5,5357.299999999999] [5335,5347,5358] +187 28 [5335.7,5346.5,5357.3] [5335,5347,5358] 188 28 [5307.7,5318.5,5329.3] [5307,5319,5330] 189 28 [5279.7,5290.5,5301.3] 
[5279,5291,5302] 190 28 [5251.7,5262.5,5273.3] [5251,5263,5274] -191 28 [5223.700000000001,5234.5,5245.3] [5223,5235,5246] +191 28 [5223.7,5234.5,5245.3] [5223,5235,5246] 192 27 [5196.6,5207,5217.4] [5196,5207,5218] 193 27 [5169.6,5180,5190.4] [5169,5180,5191] 194 26 [5143.5,5153.5,5163.5] [5143,5154,5164] @@ -211,17 +211,17 @@ 203 24 [4916.3,4925.5,4934.7] [4916,4926,4935] 204 25 [4891.4,4901,4910.6] [4891,4901,4911] 205 23 [4868.2,4877,4885.8] [4868,4877,4886] -206 24 [4844.3,4853.5,4862.700000000001] [4844,4854,4863] +206 24 [4844.3,4853.5,4862.7] [4844,4854,4863] 207 23 [4821.2,4830,4838.8] [4821,4830,4839] 208 23 [4798.2,4807,4815.8] [4798,4807,4816] 209 23 [4775.2,4784,4792.8] [4775,4784,4793] 210 23 [4752.2,4761,4769.8] [4752,4761,4770] 211 22 [4730.1,4738.5,4746.9] [4730,4739,4747] 212 23 [4707.2,4716,4724.8] [4707,4716,4725] -213 22 [4685.1,4693.5,4701.900000000001] [4685,4694,4702] +213 22 [4685.1,4693.5,4701.9] [4685,4694,4702] 214 21 [4664,4672,4680] [4664,4672,4680] -215 22 [4642.099999999999,4650.5,4658.900000000001] [4642,4651,4659] -216 22 [4620.1,4628.5,4636.900000000001] [4620,4629,4637] +215 22 [4642.1,4650.5,4658.9] [4642,4651,4659] +216 22 [4620.1,4628.5,4636.9] [4620,4629,4637] 217 21 [4599,4607,4615] [4599,4607,4615] 218 21 [4578,4586,4594] [4578,4586,4594] 219 21 [4557,4565,4573] [4557,4565,4573] @@ -231,20 +231,20 @@ 223 20 [4475.9,4483.5,4491.1] [4476,4484,4492] 224 20 [4455.9,4463.5,4471.1] [4456,4464,4472] 225 20 [4435.9,4443.5,4451.1] [4436,4444,4452] -226 19 [4416.8,4424,4431.200000000001] [4416,4424,4432] +226 19 [4416.8,4424,4431.2] [4416,4424,4432] 227 20 [4396.9,4404.5,4412.1] [4397,4405,4413] 228 19 [4377.8,4385,4392.2] [4377,4385,4393] 229 19 [4358.8,4366,4373.2] [4358,4366,4374] 230 19 [4339.8,4347,4354.2] [4339,4347,4355] 231 19 [4320.8,4328,4335.2] [4320,4328,4336] 232 18 [4302.7,4309.5,4316.3] [4302,4310,4317] -233 19 [4283.8,4291,4298.200000000001] [4283,4291,4299] +233 19 [4283.8,4291,4298.2] [4283,4291,4299] 234 18 [4265.7,4272.5,4279.3] [4265,4273,4280] 235 18 [4247.7,4254.5,4261.3] [4247,4255,4262] 236 18 [4229.7,4236.5,4243.3] [4229,4237,4244] -237 18 [4211.7,4218.5,4225.299999999999] [4211,4219,4226] +237 18 [4211.7,4218.5,4225.3] [4211,4219,4226] 238 18 [4193.7,4200.5,4207.3] [4193,4201,4208] -239 17 [4176.6,4183,4189.400000000001] [4176,4183,4190] +239 17 [4176.6,4183,4189.4] [4176,4183,4190] 240 17 [4159.6,4166,4172.4] [4159,4166,4173] 241 18 [4141.7,4148.5,4155.3] [4141,4149,4156] 242 17 [4124.6,4131,4137.4] [4124,4131,4138] @@ -269,42 +269,42 @@ 261 14 [3825.3,3830.5,3835.7] [3825,3831,3836] 262 15 [3810.4,3816,3821.6] [3810,3816,3822] 263 14 [3796.3,3801.5,3806.7] [3796,3802,3807] -264 15 [3781.3999999999996,3787,3792.6000000000004] [3781,3787,3793] -265 14 [3767.2999999999997,3772.5,3777.7] [3767,3773,3778] +264 15 [3781.4,3787,3792.6] [3781,3787,3793] +265 14 [3767.3,3772.5,3777.7] [3767,3773,3778] 266 14 [3753.3,3758.5,3763.7] [3753,3759,3764] 267 14 [3739.3,3744.5,3749.7] [3739,3745,3750] 268 14 [3725.3,3730.5,3735.7] [3725,3731,3736] -269 14 [3711.3,3716.5,3721.7000000000003] [3711,3717,3722] -270 14 [3697.2999999999997,3702.5,3707.7] [3697,3703,3708] +269 14 [3711.3,3716.5,3721.7] [3711,3717,3722] +270 14 [3697.3,3702.5,3707.7] [3697,3703,3708] 271 13 [3684.2,3689,3693.8] [3684,3689,3694] 272 14 [3670.3,3675.5,3680.7] [3670,3676,3681] -273 13 [3657.2000000000003,3662,3666.8] [3657,3662,3667] +273 13 [3657.2,3662,3666.8] [3657,3662,3667] 274 14 [3643.3,3648.5,3653.7] [3643,3649,3654] -275 13 [3630.2000000000003,3635,3639.8] 
[3630,3635,3640] +275 13 [3630.2,3635,3639.8] [3630,3635,3640] 276 13 [3617.2,3622,3626.8] [3617,3622,3627] 277 13 [3604.2,3609,3613.8] [3604,3609,3614] 278 13 [3591.2,3596,3600.8] [3591,3596,3601] -279 13 [3578.2,3583,3587.7999999999997] [3578,3583,3588] +279 13 [3578.2,3583,3587.8] [3578,3583,3588] 280 12 [3566.1,3570.5,3574.9] [3566,3571,3575] -281 13 [3553.2,3558,3562.7999999999997] [3553,3558,3563] -282 13 [3540.2000000000003,3545,3549.8] [3540,3545,3550] -283 12 [3528.1000000000004,3532.5,3536.8999999999996] [3528,3533,3537] -284 13 [3515.2000000000003,3520,3524.8] [3515,3520,3525] -285 12 [3503.1000000000004,3507.5,3511.8999999999996] [3503,3508,3512] +281 13 [3553.2,3558,3562.8] [3553,3558,3563] +282 13 [3540.2,3545,3549.8] [3540,3545,3550] +283 12 [3528.1,3532.5,3536.9] [3528,3533,3537] +284 13 [3515.2,3520,3524.8] [3515,3520,3525] +285 12 [3503.1,3507.5,3511.9] [3503,3508,3512] 286 12 [3491.1,3495.5,3499.9] [3491,3496,3500] -287 12 [3479.1000000000004,3483.5,3487.9] [3479,3484,3488] +287 12 [3479.1,3483.5,3487.9] [3479,3484,3488] 288 12 [3467.1,3471.5,3475.9] [3467,3472,3476] 289 12 [3455.1,3459.5,3463.9] [3455,3460,3464] -290 12 [3443.1000000000004,3447.5,3451.8999999999996] [3443,3448,3452] +290 12 [3443.1,3447.5,3451.9] [3443,3448,3452] 291 12 [3431.1,3435.5,3439.9] [3431,3436,3440] -292 12 [3419.1000000000004,3423.5,3427.9] [3419,3424,3428] +292 12 [3419.1,3423.5,3427.9] [3419,3424,3428] 293 11 [3408,3412,3416] [3408,3412,3416] 294 12 [3396.1,3400.5,3404.9] [3396,3401,3405] 295 11 [3385,3389,3393] [3385,3389,3393] -296 12 [3373.1000000000004,3377.5,3381.8999999999996] [3373,3378,3382] +296 12 [3373.1,3377.5,3381.9] [3373,3378,3382] 297 11 [3362,3366,3370] [3362,3366,3370] 298 11 [3351,3355,3359] [3351,3355,3359] -299 12 [3339.1000000000004,3343.5,3347.9] [3339,3344,3348] +299 12 [3339.1,3343.5,3347.9] [3339,3344,3348] 300 11 [3328,3332,3336] [3328,3332,3336] 301 11 [3317,3321,3325] [3317,3321,3325] 302 11 [3306,3310,3314] [3306,3310,3314] @@ -312,9 +312,9 @@ 304 10 [3284.9,3288.5,3292.1] [3285,3289,3293] 305 11 [3274,3278,3282] [3274,3278,3282] 306 11 [3263,3267,3271] [3263,3267,3271] -307 10 [3252.9,3256.5,3260.1000000000004] [3253,3257,3261] +307 10 [3252.9,3256.5,3260.1] [3253,3257,3261] 308 11 [3242,3246,3250] [3242,3246,3250] -309 10 [3231.9,3235.5,3239.1000000000004] [3232,3236,3240] +309 10 [3231.9,3235.5,3239.1] [3232,3236,3240] 310 11 [3221,3225,3229] [3221,3225,3229] 311 10 [3210.9,3214.5,3218.1] [3211,3215,3219] 312 11 [3200,3204,3208] [3200,3204,3208] @@ -328,26 +328,26 @@ 320 9 [3120.8,3124,3127.2] [3120,3124,3128] 321 10 [3110.9,3114.5,3118.1] [3111,3115,3119] 322 10 [3100.9,3104.5,3108.1] [3101,3105,3109] -323 9 [3091.8,3095,3098.2000000000003] [3091,3095,3099] -324 10 [3081.9,3085.5,3089.1000000000004] [3082,3086,3090] +323 9 [3091.8,3095,3098.2] [3091,3095,3099] +324 10 [3081.9,3085.5,3089.1] [3082,3086,3090] 325 9 [3072.8,3076,3079.2] [3072,3076,3080] 326 10 [3062.9,3066.5,3070.1] [3063,3067,3071] 327 9 [3053.8,3057,3060.2] [3053,3057,3061] -328 9 [3044.7999999999997,3048,3051.2] [3044,3048,3052] +328 9 [3044.8,3048,3051.2] [3044,3048,3052] 329 10 [3034.9,3038.5,3042.1] [3035,3039,3043] 330 9 [3025.8,3029,3032.2] [3025,3029,3033] -331 9 [3016.7999999999997,3020,3023.2000000000003] [3016,3020,3024] +331 9 [3016.8,3020,3023.2] [3016,3020,3024] 332 9 [3007.8,3011,3014.2] [3007,3011,3015] 333 9 [2998.8,3002,3005.2] [2998,3002,3006] -334 9 [2989.7999999999997,2993,2996.2] [2989,2993,2997] +334 9 [2989.8,2993,2996.2] [2989,2993,2997] 335 9 [2980.8,2984,2987.2] 
[2980,2984,2988] -336 9 [2971.7999999999997,2975,2978.2000000000003] [2971,2975,2979] +336 9 [2971.8,2975,2978.2] [2971,2975,2979] 337 9 [2962.8,2966,2969.2] [2962,2966,2970] 338 8 [2954.7,2957.5,2960.3] [2954,2958,2961] 339 9 [2945.8,2949,2952.2] [2945,2949,2953] -340 9 [2936.7999999999997,2940,2943.2000000000003] [2936,2940,2944] +340 9 [2936.8,2940,2943.2] [2936,2940,2944] 341 8 [2928.7,2931.5,2934.3] [2928,2932,2935] -342 9 [2919.7999999999997,2923,2926.2] [2919,2923,2927] +342 9 [2919.8,2923,2926.2] [2919,2923,2927] 343 8 [2911.7,2914.5,2917.3] [2911,2915,2918] 344 9 [2902.8,2906,2909.2] [2902,2906,2910] 345 8 [2894.7,2897.5,2900.3] [2894,2898,2901] @@ -356,7 +356,7 @@ 348 8 [2869.7,2872.5,2875.3] [2869,2873,2876] 349 8 [2861.7,2864.5,2867.3] [2861,2865,2868] 350 8 [2853.7,2856.5,2859.3] [2853,2857,2860] -351 9 [2844.7999999999997,2848,2851.2] [2844,2848,2852] +351 9 [2844.8,2848,2851.2] [2844,2848,2852] 352 8 [2836.7,2839.5,2842.3] [2836,2840,2843] 353 8 [2828.7,2831.5,2834.3] [2828,2832,2835] 354 8 [2820.7,2823.5,2826.3] [2820,2824,2827] @@ -366,27 +366,27 @@ 358 8 [2789.7,2792.5,2795.3] [2789,2793,2796] 359 8 [2781.7,2784.5,2787.3] [2781,2785,2788] 360 8 [2773.7,2776.5,2779.3] [2773,2777,2780] -361 7 [2766.6000000000004,2769,2771.4] [2766,2769,2772] +361 7 [2766.6,2769,2771.4] [2766,2769,2772] 362 8 [2758.7,2761.5,2764.3] [2758,2762,2765] -363 7 [2751.6000000000004,2754,2756.4] [2751,2754,2757] +363 7 [2751.6,2754,2756.4] [2751,2754,2757] 364 8 [2743.7,2746.5,2749.3] [2743,2747,2750] 365 8 [2735.7,2738.5,2741.3] [2735,2739,2742] -366 7 [2728.6000000000004,2731,2733.4] [2728,2731,2734] -367 7 [2721.6000000000004,2724,2726.3999999999996] [2721,2724,2727] +366 7 [2728.6,2731,2733.4] [2728,2731,2734] +367 7 [2721.6,2724,2726.4] [2721,2724,2727] 368 8 [2713.7,2716.5,2719.3] [2713,2717,2720] -369 7 [2706.6000000000004,2709,2711.3999999999996] [2706,2709,2712] -370 7 [2699.6,2702,2704.3999999999996] [2699,2702,2705] +369 7 [2706.6,2709,2711.4] [2706,2709,2712] +370 7 [2699.6,2702,2704.4] [2699,2702,2705] 371 8 [2691.7,2694.5,2697.3] [2691,2695,2698] -372 7 [2684.6,2687,2689.3999999999996] [2684,2687,2690] +372 7 [2684.6,2687,2689.4] [2684,2687,2690] 373 7 [2677.6,2680,2682.4] [2677,2680,2683] 374 7 [2670.6,2673,2675.4] [2670,2673,2676] -375 7 [2663.6000000000004,2666,2668.4] [2663,2666,2669] -376 7 [2656.6000000000004,2659,2661.3999999999996] [2656,2659,2662] -377 7 [2649.6,2652,2654.3999999999996] [2649,2652,2655] +375 7 [2663.6,2666,2668.4] [2663,2666,2669] +376 7 [2656.6,2659,2661.4] [2656,2659,2662] +377 7 [2649.6,2652,2654.4] [2649,2652,2655] 378 7 [2642.6,2645,2647.4] [2642,2645,2648] 379 7 [2635.6,2638,2640.4] [2635,2638,2641] -380 7 [2628.6000000000004,2631,2633.4] [2628,2631,2634] -381 7 [2621.6000000000004,2624,2626.3999999999996] [2621,2624,2627] +380 7 [2628.6,2631,2633.4] [2628,2631,2634] +381 7 [2621.6,2624,2626.4] [2621,2624,2627] 382 7 [2614.6,2617,2619.4] [2614,2617,2620] 383 7 [2607.6,2610,2612.4] [2607,2610,2613] 384 7 [2600.6,2603,2605.4] [2600,2603,2606] @@ -405,14 +405,14 @@ 397 7 [2515.6,2518,2520.4] [2515,2518,2521] 398 6 [2509.5,2511.5,2513.5] [2509,2512,2514] 399 6 [2503.5,2505.5,2507.5] [2503,2506,2508] -400 7 [2496.6,2499,2501.3999999999996] [2496,2499,2502] +400 7 [2496.6,2499,2501.4] [2496,2499,2502] 401 6 [2490.5,2492.5,2494.5] [2490,2493,2495] 402 6 [2484.5,2486.5,2488.5] [2484,2487,2489] 403 6 [2478.5,2480.5,2482.5] [2478,2481,2483] 404 6 [2472.5,2474.5,2476.5] [2472,2475,2477] 405 6 [2466.5,2468.5,2470.5] [2466,2469,2471] 406 6 [2460.5,2462.5,2464.5] 
[2460,2463,2465] -407 7 [2453.6000000000004,2456,2458.3999999999996] [2453,2456,2459] +407 7 [2453.6,2456,2458.4] [2453,2456,2459] 408 6 [2447.5,2449.5,2451.5] [2447,2450,2452] 409 5 [2442.4,2444,2445.6] [2442,2444,2446] 410 6 [2436.5,2438.5,2440.5] [2436,2439,2441] @@ -422,7 +422,7 @@ 414 6 [2412.5,2414.5,2416.5] [2412,2415,2417] 415 6 [2406.5,2408.5,2410.5] [2406,2409,2411] 416 6 [2400.5,2402.5,2404.5] [2400,2403,2405] -417 5 [2395.4,2397,2398.6000000000004] [2395,2397,2399] +417 5 [2395.4,2397,2398.6] [2395,2397,2399] 418 6 [2389.5,2391.5,2393.5] [2389,2392,2394] 419 6 [2383.5,2385.5,2387.5] [2383,2386,2388] 420 5 [2378.4,2380,2381.6] [2378,2380,2382] @@ -430,9 +430,9 @@ 422 6 [2366.5,2368.5,2370.5] [2366,2369,2371] 423 5 [2361.4,2363,2364.6] [2361,2363,2365] 424 6 [2355.5,2357.5,2359.5] [2355,2358,2360] -425 5 [2350.4,2352,2353.6000000000004] [2350,2352,2354] +425 5 [2350.4,2352,2353.6] [2350,2352,2354] 426 6 [2344.5,2346.5,2348.5] [2344,2347,2349] -427 5 [2339.3999999999996,2341,2342.6] [2339,2341,2343] +427 5 [2339.4,2341,2342.6] [2339,2341,2343] 428 6 [2333.5,2335.5,2337.5] [2333,2336,2338] 429 5 [2328.4,2330,2331.6] [2328,2330,2332] 430 6 [2322.5,2324.5,2326.5] [2322,2325,2327] @@ -448,16 +448,16 @@ 440 5 [2270.4,2272,2273.6] [2270,2272,2274] 441 5 [2265.4,2267,2268.6] [2265,2267,2269] 442 6 [2259.5,2261.5,2263.5] [2259,2262,2264] -443 5 [2254.3999999999996,2256,2257.6] [2254,2256,2258] -444 5 [2249.3999999999996,2251,2252.6] [2249,2251,2253] -445 5 [2244.3999999999996,2246,2247.6] [2244,2246,2248] -446 5 [2239.3999999999996,2241,2242.6] [2239,2241,2243] -447 5 [2234.3999999999996,2236,2237.6] [2234,2236,2238] -448 5 [2229.3999999999996,2231,2232.6] [2229,2231,2233] -449 5 [2224.3999999999996,2226,2227.6] [2224,2226,2228] -450 5 [2219.3999999999996,2221,2222.6] [2219,2221,2223] -451 5 [2214.3999999999996,2216,2217.6] [2214,2216,2218] -452 5 [2209.3999999999996,2211,2212.6] [2209,2211,2213] +443 5 [2254.4,2256,2257.6] [2254,2256,2258] +444 5 [2249.4,2251,2252.6] [2249,2251,2253] +445 5 [2244.4,2246,2247.6] [2244,2246,2248] +446 5 [2239.4,2241,2242.6] [2239,2241,2243] +447 5 [2234.4,2236,2237.6] [2234,2236,2238] +448 5 [2229.4,2231,2232.6] [2229,2231,2233] +449 5 [2224.4,2226,2227.6] [2224,2226,2228] +450 5 [2219.4,2221,2222.6] [2219,2221,2223] +451 5 [2214.4,2216,2217.6] [2214,2216,2218] +452 5 [2209.4,2211,2212.6] [2209,2211,2213] 453 4 [2205.3,2206.5,2207.7] [2205,2207,2208] 454 5 [2200.4,2202,2203.6] [2200,2202,2204] 455 5 [2195.4,2197,2198.6] [2195,2197,2199] @@ -471,421 +471,421 @@ 463 5 [2157.4,2159,2160.6] [2157,2159,2161] 464 5 [2152.4,2154,2155.6] [2152,2154,2156] 465 4 [2148.3,2149.5,2150.7] [2148,2150,2151] -466 5 [2143.4,2145,2146.6000000000004] [2143,2145,2147] +466 5 [2143.4,2145,2146.6] [2143,2145,2147] 467 4 [2139.3,2140.5,2141.7] [2139,2141,2142] -468 5 [2134.3999999999996,2136,2137.6] [2134,2136,2138] -469 5 [2129.3999999999996,2131,2132.6] [2129,2131,2133] +468 5 [2134.4,2136,2137.6] [2134,2136,2138] +469 5 [2129.4,2131,2132.6] [2129,2131,2133] 470 4 [2125.3,2126.5,2127.7] [2125,2127,2128] 471 5 [2120.4,2122,2123.6] [2120,2122,2124] 472 4 [2116.3,2117.5,2118.7] [2116,2118,2119] 473 5 [2111.4,2113,2114.6] [2111,2113,2115] 474 4 [2107.3,2108.5,2109.7] [2107,2109,2110] 475 4 [2103.3,2104.5,2105.7] [2103,2105,2106] -476 5 [2098.4,2100,2101.6000000000004] [2098,2100,2102] +476 5 [2098.4,2100,2101.6] [2098,2100,2102] 477 4 [2094.3,2095.5,2096.7] [2094,2096,2097] -478 5 [2089.3999999999996,2091,2092.6] [2089,2091,2093] +478 5 [2089.4,2091,2092.6] [2089,2091,2093] 479 4 
[2085.3,2086.5,2087.7] [2085,2087,2088] 480 4 [2081.3,2082.5,2083.7] [2081,2083,2084] 481 5 [2076.4,2078,2079.6] [2076,2078,2080] 482 4 [2072.3,2073.5,2074.7] [2072,2074,2075] 483 4 [2068.3,2069.5,2070.7] [2068,2070,2071] -484 5 [2063.4,2065,2066.6000000000004] [2063,2065,2067] +484 5 [2063.4,2065,2066.6] [2063,2065,2067] 485 4 [2059.3,2060.5,2061.7] [2059,2061,2062] 486 4 [2055.3,2056.5,2057.7] [2055,2057,2058] 487 4 [2051.3,2052.5,2053.7] [2051,2053,2054] 488 4 [2047.3,2048.5,2049.7] [2047,2049,2050] 489 5 [2042.4,2044,2045.6] [2042,2044,2046] -490 4 [2038.3,2039.5,2040.6999999999998] [2038,2040,2041] -491 4 [2034.3000000000002,2035.5,2036.6999999999998] [2034,2036,2037] -492 4 [2030.3000000000002,2031.5,2032.7] [2030,2032,2033] +490 4 [2038.3,2039.5,2040.7] [2038,2040,2041] +491 4 [2034.3,2035.5,2036.7] [2034,2036,2037] +492 4 [2030.3,2031.5,2032.7] [2030,2032,2033] 493 4 [2026.3,2027.5,2028.7] [2026,2028,2029] 494 4 [2022.3,2023.5,2024.7] [2022,2024,2025] -495 4 [2018.3,2019.5,2020.6999999999998] [2018,2020,2021] -496 4 [2014.3000000000002,2015.5,2016.6999999999998] [2014,2016,2017] -497 4 [2010.3000000000002,2011.5,2012.7] [2010,2012,2013] +495 4 [2018.3,2019.5,2020.7] [2018,2020,2021] +496 4 [2014.3,2015.5,2016.7] [2014,2016,2017] +497 4 [2010.3,2011.5,2012.7] [2010,2012,2013] 498 4 [2006.3,2007.5,2008.7] [2006,2008,2009] 499 4 [2002.3,2003.5,2004.7] [2002,2004,2005] -500 4 [1998.3,1999.5,2000.6999999999998] [1998,2000,2001] -501 4 [1994.3000000000002,1995.5,1996.6999999999998] [1994,1996,1997] -502 4 [1990.3000000000002,1991.5,1992.7] [1990,1992,1993] +500 4 [1998.3,1999.5,2000.7] [1998,2000,2001] +501 4 [1994.3,1995.5,1996.7] [1994,1996,1997] +502 4 [1990.3,1991.5,1992.7] [1990,1992,1993] 503 4 [1986.3,1987.5,1988.7] [1986,1988,1989] 504 4 [1982.3,1983.5,1984.7] [1982,1984,1985] -505 4 [1978.3,1979.5,1980.6999999999998] [1978,1980,1981] -506 4 [1974.3000000000002,1975.5,1976.6999999999998] [1974,1976,1977] -507 4 [1970.3000000000002,1971.5,1972.7] [1970,1972,1973] +505 4 [1978.3,1979.5,1980.7] [1978,1980,1981] +506 4 [1974.3,1975.5,1976.7] [1974,1976,1977] +507 4 [1970.3,1971.5,1972.7] [1970,1972,1973] 508 4 [1966.3,1967.5,1968.7] [1966,1968,1969] -509 4 [1962.3,1963.5,1964.7000000000003] [1962,1964,1965] -510 4 [1958.3,1959.5,1960.6999999999998] [1958,1960,1961] -511 3 [1955.2,1956,1956.8000000000002] [1955,1956,1957] +509 4 [1962.3,1963.5,1964.7] [1962,1964,1965] +510 4 [1958.3,1959.5,1960.7] [1958,1960,1961] +511 3 [1955.2,1956,1956.8] [1955,1956,1957] 512 4 [1951.3,1952.5,1953.7] [1951,1953,1954] -513 4 [1947.3,1948.5,1949.7000000000003] [1947,1949,1950] -514 4 [1943.3,1944.5,1945.6999999999998] [1943,1945,1946] -515 4 [1939.3000000000002,1940.5,1941.6999999999998] [1939,1941,1942] -516 3 [1936.2000000000003,1937,1937.8] [1936,1937,1938] -517 4 [1932.3,1933.5,1934.7000000000003] [1932,1934,1935] -518 4 [1928.3,1929.5,1930.6999999999998] [1928,1930,1931] -519 4 [1924.3000000000002,1925.5,1926.6999999999998] [1924,1926,1927] -520 3 [1921.2000000000003,1922,1922.8] [1921,1922,1923] -521 4 [1917.3,1918.5,1919.7000000000003] [1917,1919,1920] -522 4 [1913.3,1914.5,1915.6999999999998] [1913,1915,1916] -523 3 [1910.2,1911,1911.8000000000002] [1910,1911,1912] +513 4 [1947.3,1948.5,1949.7] [1947,1949,1950] +514 4 [1943.3,1944.5,1945.7] [1943,1945,1946] +515 4 [1939.3,1940.5,1941.7] [1939,1941,1942] +516 3 [1936.2,1937,1937.8] [1936,1937,1938] +517 4 [1932.3,1933.5,1934.7] [1932,1934,1935] +518 4 [1928.3,1929.5,1930.7] [1928,1930,1931] +519 4 [1924.3,1925.5,1926.7] [1924,1926,1927] +520 3 
[1921.2,1922,1922.8] [1921,1922,1923] +521 4 [1917.3,1918.5,1919.7] [1917,1919,1920] +522 4 [1913.3,1914.5,1915.7] [1913,1915,1916] +523 3 [1910.2,1911,1911.8] [1910,1911,1912] 524 4 [1906.3,1907.5,1908.7] [1906,1908,1909] -525 4 [1902.3,1903.5,1904.6999999999998] [1902,1904,1905] -526 3 [1899.2,1900,1900.8000000000002] [1899,1900,1901] -527 4 [1895.3000000000002,1896.5,1897.7] [1895,1897,1898] -528 3 [1892.2000000000003,1893,1893.8] [1892,1893,1894] -529 4 [1888.3,1889.5,1890.6999999999998] [1888,1890,1891] -530 3 [1885.2,1886,1886.8000000000002] [1885,1886,1887] +525 4 [1902.3,1903.5,1904.7] [1902,1904,1905] +526 3 [1899.2,1900,1900.8] [1899,1900,1901] +527 4 [1895.3,1896.5,1897.7] [1895,1897,1898] +528 3 [1892.2,1893,1893.8] [1892,1893,1894] +529 4 [1888.3,1889.5,1890.7] [1888,1890,1891] +530 3 [1885.2,1886,1886.8] [1885,1886,1887] 531 4 [1881.3,1882.5,1883.7] [1881,1883,1884] -532 4 [1877.3,1878.5,1879.6999999999998] [1877,1879,1880] -533 3 [1874.2,1875,1875.8000000000002] [1874,1875,1876] -534 4 [1870.3000000000002,1871.5,1872.7] [1870,1872,1873] -535 3 [1867.2000000000003,1868,1868.8] [1867,1868,1869] -536 4 [1863.3,1864.5,1865.6999999999998] [1863,1865,1866] -537 3 [1860.2,1861,1861.8000000000002] [1860,1861,1862] -538 3 [1857.2000000000003,1858,1858.8] [1857,1858,1859] -539 4 [1853.3,1854.5,1855.6999999999998] [1853,1855,1856] -540 3 [1850.2,1851,1851.8000000000002] [1850,1851,1852] +532 4 [1877.3,1878.5,1879.7] [1877,1879,1880] +533 3 [1874.2,1875,1875.8] [1874,1875,1876] +534 4 [1870.3,1871.5,1872.7] [1870,1872,1873] +535 3 [1867.2,1868,1868.8] [1867,1868,1869] +536 4 [1863.3,1864.5,1865.7] [1863,1865,1866] +537 3 [1860.2,1861,1861.8] [1860,1861,1862] +538 3 [1857.2,1858,1858.8] [1857,1858,1859] +539 4 [1853.3,1854.5,1855.7] [1853,1855,1856] +540 3 [1850.2,1851,1851.8] [1850,1851,1852] 541 4 [1846.3,1847.5,1848.7] [1846,1848,1849] 542 3 [1843.2,1844,1844.8] [1843,1844,1845] -543 4 [1839.3000000000002,1840.5,1841.7] [1839,1841,1842] -544 3 [1836.2000000000003,1837,1837.8] [1836,1837,1838] +543 4 [1839.3,1840.5,1841.7] [1839,1841,1842] +544 3 [1836.2,1837,1837.8] [1836,1837,1838] 545 3 [1833.2,1834,1834.8] [1833,1834,1835] -546 4 [1829.3000000000002,1830.5,1831.7] [1829,1831,1832] -547 3 [1826.2000000000003,1827,1827.8] [1826,1827,1828] +546 4 [1829.3,1830.5,1831.7] [1829,1831,1832] +547 3 [1826.2,1827,1827.8] [1826,1827,1828] 548 3 [1823.2,1824,1824.8] [1823,1824,1825] -549 4 [1819.3000000000002,1820.5,1821.7] [1819,1821,1822] -550 3 [1816.2000000000003,1817,1817.8] [1816,1817,1818] +549 4 [1819.3,1820.5,1821.7] [1819,1821,1822] +550 3 [1816.2,1817,1817.8] [1816,1817,1818] 551 3 [1813.2,1814,1814.8] [1813,1814,1815] -552 4 [1809.3000000000002,1810.5,1811.7] [1809,1811,1812] -553 3 [1806.2000000000003,1807,1807.8] [1806,1807,1808] +552 4 [1809.3,1810.5,1811.7] [1809,1811,1812] +553 3 [1806.2,1807,1807.8] [1806,1807,1808] 554 3 [1803.2,1804,1804.8] [1803,1804,1805] -555 3 [1800.2,1801,1801.8000000000002] [1800,1801,1802] +555 3 [1800.2,1801,1801.8] [1800,1801,1802] 556 4 [1796.3,1797.5,1798.7] [1796,1798,1799] 557 3 [1793.2,1794,1794.8] [1793,1794,1795] -558 3 [1790.2,1791,1791.8000000000002] [1790,1791,1792] -559 3 [1787.2000000000003,1788,1788.8] [1787,1788,1789] -560 3 [1784.2,1785,1785.8000000000002] [1784,1785,1786] -561 4 [1780.3000000000002,1781.5,1782.7] [1780,1782,1783] -562 3 [1777.2000000000003,1778,1778.8] [1777,1778,1779] -563 3 [1774.2,1775,1775.8000000000002] [1774,1775,1776] -564 3 [1771.2000000000003,1772,1772.8] [1771,1772,1773] +558 3 [1790.2,1791,1791.8] 
[1790,1791,1792] +559 3 [1787.2,1788,1788.8] [1787,1788,1789] +560 3 [1784.2,1785,1785.8] [1784,1785,1786] +561 4 [1780.3,1781.5,1782.7] [1780,1782,1783] +562 3 [1777.2,1778,1778.8] [1777,1778,1779] +563 3 [1774.2,1775,1775.8] [1774,1775,1776] +564 3 [1771.2,1772,1772.8] [1771,1772,1773] 565 3 [1768.2,1769,1769.8] [1768,1769,1770] -566 3 [1765.2,1766,1766.8000000000002] [1765,1766,1767] -567 3 [1762.2000000000003,1763,1763.8] [1762,1763,1764] -568 3 [1759.2,1760,1760.8000000000002] [1759,1760,1761] -569 4 [1755.3000000000002,1756.5,1757.7] [1755,1757,1758] -570 3 [1752.2000000000003,1753,1753.8] [1752,1753,1754] -571 3 [1749.2,1750,1750.8000000000002] [1749,1750,1751] -572 3 [1746.2000000000003,1747,1747.8] [1746,1747,1748] +566 3 [1765.2,1766,1766.8] [1765,1766,1767] +567 3 [1762.2,1763,1763.8] [1762,1763,1764] +568 3 [1759.2,1760,1760.8] [1759,1760,1761] +569 4 [1755.3,1756.5,1757.7] [1755,1757,1758] +570 3 [1752.2,1753,1753.8] [1752,1753,1754] +571 3 [1749.2,1750,1750.8] [1749,1750,1751] +572 3 [1746.2,1747,1747.8] [1746,1747,1748] 573 3 [1743.2,1744,1744.8] [1743,1744,1745] -574 3 [1740.2,1741,1741.8000000000002] [1740,1741,1742] -575 3 [1737.2000000000003,1738,1738.8] [1737,1738,1739] -576 3 [1734.2,1735,1735.8000000000002] [1734,1735,1736] -577 3 [1731.2000000000003,1732,1732.8] [1731,1732,1733] +574 3 [1740.2,1741,1741.8] [1740,1741,1742] +575 3 [1737.2,1738,1738.8] [1737,1738,1739] +576 3 [1734.2,1735,1735.8] [1734,1735,1736] +577 3 [1731.2,1732,1732.8] [1731,1732,1733] 578 3 [1728.2,1729,1729.8] [1728,1729,1730] -579 3 [1725.2,1726,1726.8000000000002] [1725,1726,1727] -580 3 [1722.2000000000003,1723,1723.8] [1722,1723,1724] -581 3 [1719.2,1720,1720.8000000000002] [1719,1720,1721] -582 3 [1716.2000000000003,1717,1717.8] [1716,1717,1718] +579 3 [1725.2,1726,1726.8] [1725,1726,1727] +580 3 [1722.2,1723,1723.8] [1722,1723,1724] +581 3 [1719.2,1720,1720.8] [1719,1720,1721] +582 3 [1716.2,1717,1717.8] [1716,1717,1718] 583 3 [1713.2,1714,1714.8] [1713,1714,1715] -584 3 [1710.2,1711,1711.8000000000002] [1710,1711,1712] -585 3 [1707.2000000000003,1708,1708.8] [1707,1708,1709] +584 3 [1710.2,1711,1711.8] [1710,1711,1712] +585 3 [1707.2,1708,1708.8] [1707,1708,1709] 586 2 [1705.1,1705.5,1705.9] [1705,1706,1706] -587 3 [1702.2000000000003,1703,1703.8] [1702,1703,1704] -588 3 [1699.2,1700,1700.8000000000002] [1699,1700,1701] -589 3 [1696.2000000000003,1697,1697.8] [1696,1697,1698] +587 3 [1702.2,1703,1703.8] [1702,1703,1704] +588 3 [1699.2,1700,1700.8] [1699,1700,1701] +589 3 [1696.2,1697,1697.8] [1696,1697,1698] 590 3 [1693.2,1694,1694.8] [1693,1694,1695] -591 3 [1690.2,1691,1691.8000000000002] [1690,1691,1692] -592 3 [1687.2000000000003,1688,1688.8] [1687,1688,1689] -593 3 [1684.2,1685,1685.8000000000002] [1684,1685,1686] +591 3 [1690.2,1691,1691.8] [1690,1691,1692] +592 3 [1687.2,1688,1688.8] [1687,1688,1689] +593 3 [1684.2,1685,1685.8] [1684,1685,1686] 594 2 [1682.1,1682.5,1682.9] [1682,1683,1683] -595 3 [1679.2,1680,1680.8000000000002] [1679,1680,1681] -596 3 [1676.2000000000003,1677,1677.8] [1676,1677,1678] +595 3 [1679.2,1680,1680.8] [1679,1680,1681] +596 3 [1676.2,1677,1677.8] [1676,1677,1678] 597 3 [1673.2,1674,1674.8] [1673,1674,1675] -598 3 [1670.2,1671,1671.8000000000002] [1670,1671,1672] -599 2 [1668.1000000000001,1668.5,1668.9] [1668,1669,1669] -600 3 [1665.2,1666,1666.8000000000002] [1665,1666,1667] -601 3 [1662.2000000000003,1663,1663.8] [1662,1663,1664] -602 3 [1659.2,1660,1660.8000000000002] [1659,1660,1661] +598 3 [1670.2,1671,1671.8] [1670,1671,1672] +599 2 
[1668.1,1668.5,1668.9] [1668,1669,1669] +600 3 [1665.2,1666,1666.8] [1665,1666,1667] +601 3 [1662.2,1663,1663.8] [1662,1663,1664] +602 3 [1659.2,1660,1660.8] [1659,1660,1661] 603 2 [1657.1,1657.5,1657.9] [1657,1658,1658] -604 3 [1654.2,1655,1655.8000000000002] [1654,1655,1656] -605 3 [1651.2000000000003,1652,1652.8] [1651,1652,1653] +604 3 [1654.2,1655,1655.8] [1654,1655,1656] +605 3 [1651.2,1652,1652.8] [1651,1652,1653] 606 3 [1648.2,1649,1649.8] [1648,1649,1650] -607 2 [1646.1000000000001,1646.5,1646.8999999999999] [1646,1647,1647] +607 2 [1646.1,1646.5,1646.9] [1646,1647,1647] 608 3 [1643.2,1644,1644.8] [1643,1644,1645] -609 3 [1640.2,1641,1641.8000000000002] [1640,1641,1642] -610 2 [1638.1000000000001,1638.5,1638.9] [1638,1639,1639] -611 3 [1635.2,1636,1636.8000000000002] [1635,1636,1637] -612 3 [1632.2000000000003,1633,1633.8] [1632,1633,1634] -613 3 [1629.2,1630,1630.8000000000002] [1629,1630,1631] +609 3 [1640.2,1641,1641.8] [1640,1641,1642] +610 2 [1638.1,1638.5,1638.9] [1638,1639,1639] +611 3 [1635.2,1636,1636.8] [1635,1636,1637] +612 3 [1632.2,1633,1633.8] [1632,1633,1634] +613 3 [1629.2,1630,1630.8] [1629,1630,1631] 614 2 [1627.1,1627.5,1627.9] [1627,1628,1628] -615 3 [1624.2,1625,1625.8000000000002] [1624,1625,1626] +615 3 [1624.2,1625,1625.8] [1624,1625,1626] 616 2 [1622.1,1622.5,1622.9] [1622,1623,1623] -617 3 [1619.2,1620,1620.8000000000002] [1619,1620,1621] -618 3 [1616.2000000000003,1617,1617.8] [1616,1617,1618] -619 2 [1614.1000000000001,1614.5,1614.9] [1614,1615,1615] -620 3 [1611.2000000000003,1612,1612.8] [1611,1612,1613] -621 2 [1609.1000000000001,1609.5,1609.9] [1609,1610,1610] -622 3 [1606.2000000000003,1607,1607.8] [1606,1607,1608] +617 3 [1619.2,1620,1620.8] [1619,1620,1621] +618 3 [1616.2,1617,1617.8] [1616,1617,1618] +619 2 [1614.1,1614.5,1614.9] [1614,1615,1615] +620 3 [1611.2,1612,1612.8] [1611,1612,1613] +621 2 [1609.1,1609.5,1609.9] [1609,1610,1610] +622 3 [1606.2,1607,1607.8] [1606,1607,1608] 623 3 [1603.2,1604,1604.8] [1603,1604,1605] -624 2 [1601.1000000000001,1601.5,1601.8999999999999] [1601,1602,1602] +624 2 [1601.1,1601.5,1601.9] [1601,1602,1602] 625 3 [1598.2,1599,1599.8] [1598,1599,1600] -626 2 [1596.1000000000001,1596.5,1596.8999999999999] [1596,1597,1597] +626 2 [1596.1,1596.5,1596.9] [1596,1597,1597] 627 3 [1593.2,1594,1594.8] [1593,1594,1595] -628 2 [1591.1000000000001,1591.5,1591.8999999999999] [1591,1592,1592] +628 2 [1591.1,1591.5,1591.9] [1591,1592,1592] 629 3 [1588.2,1589,1589.8] [1588,1589,1590] -630 2 [1586.1000000000001,1586.5,1586.8999999999999] [1586,1587,1587] +630 2 [1586.1,1586.5,1586.9] [1586,1587,1587] 631 3 [1583.2,1584,1584.8] [1583,1584,1585] -632 2 [1581.1000000000001,1581.5,1581.8999999999999] [1581,1582,1582] +632 2 [1581.1,1581.5,1581.9] [1581,1582,1582] 633 3 [1578.2,1579,1579.8] [1578,1579,1580] -634 2 [1576.1000000000001,1576.5,1576.8999999999999] [1576,1577,1577] +634 2 [1576.1,1576.5,1576.9] [1576,1577,1577] 635 3 [1573.2,1574,1574.8] [1573,1574,1575] -636 2 [1571.1000000000001,1571.5,1571.8999999999999] [1571,1572,1572] +636 2 [1571.1,1571.5,1571.9] [1571,1572,1572] 637 3 [1568.2,1569,1569.8] [1568,1569,1570] -638 2 [1566.1000000000001,1566.5,1566.8999999999999] [1566,1567,1567] +638 2 [1566.1,1566.5,1566.9] [1566,1567,1567] 639 3 [1563.2,1564,1564.8] [1563,1564,1565] -640 2 [1561.1000000000001,1561.5,1561.8999999999999] [1561,1562,1562] +640 2 [1561.1,1561.5,1561.9] [1561,1562,1562] 641 3 [1558.2,1559,1559.8] [1558,1559,1560] -642 2 [1556.1000000000001,1556.5,1556.8999999999999] [1556,1557,1557] -643 2 
[1554.1000000000001,1554.5,1554.9] [1554,1555,1555] -644 3 [1551.2000000000003,1552,1552.8] [1551,1552,1553] -645 2 [1549.1000000000001,1549.5,1549.9] [1549,1550,1550] -646 3 [1546.2000000000003,1547,1547.8] [1546,1547,1548] -647 2 [1544.1000000000001,1544.5,1544.9] [1544,1545,1545] +642 2 [1556.1,1556.5,1556.9] [1556,1557,1557] +643 2 [1554.1,1554.5,1554.9] [1554,1555,1555] +644 3 [1551.2,1552,1552.8] [1551,1552,1553] +645 2 [1549.1,1549.5,1549.9] [1549,1550,1550] +646 3 [1546.2,1547,1547.8] [1546,1547,1548] +647 2 [1544.1,1544.5,1544.9] [1544,1545,1545] 648 2 [1542.1,1542.5,1542.9] [1542,1543,1543] -649 3 [1539.2,1540,1540.8000000000002] [1539,1540,1541] +649 3 [1539.2,1540,1540.8] [1539,1540,1541] 650 2 [1537.1,1537.5,1537.9] [1537,1538,1538] -651 3 [1534.2,1535,1535.8000000000002] [1534,1535,1536] +651 3 [1534.2,1535,1535.8] [1534,1535,1536] 652 2 [1532.1,1532.5,1532.9] [1532,1533,1533] 653 2 [1530.1,1530.5,1530.9] [1530,1531,1531] -654 3 [1527.2000000000003,1528,1528.8] [1527,1528,1529] +654 3 [1527.2,1528,1528.8] [1527,1528,1529] 655 2 [1525.1,1525.5,1525.9] [1525,1526,1526] -656 2 [1523.1000000000001,1523.5,1523.9] [1523,1524,1524] -657 3 [1520.2,1521,1521.8000000000002] [1520,1521,1522] -658 2 [1518.1000000000001,1518.5,1518.9] [1518,1519,1519] -659 2 [1516.1000000000001,1516.5,1516.8999999999999] [1516,1517,1517] -660 2 [1514.1000000000001,1514.5,1514.9] [1514,1515,1515] +656 2 [1523.1,1523.5,1523.9] [1523,1524,1524] +657 3 [1520.2,1521,1521.8] [1520,1521,1522] +658 2 [1518.1,1518.5,1518.9] [1518,1519,1519] +659 2 [1516.1,1516.5,1516.9] [1516,1517,1517] +660 2 [1514.1,1514.5,1514.9] [1514,1515,1515] 661 3 [1511.2,1512,1512.8] [1511,1512,1513] -662 2 [1509.1000000000001,1509.5,1509.9] [1509,1510,1510] +662 2 [1509.1,1509.5,1509.9] [1509,1510,1510] 663 2 [1507.1,1507.5,1507.9] [1507,1508,1508] 664 3 [1504.2,1505,1505.8] [1504,1505,1506] 665 2 [1502.1,1502.5,1502.9] [1502,1503,1503] 666 2 [1500.1,1500.5,1500.9] [1500,1501,1501] -667 2 [1498.1000000000001,1498.5,1498.9] [1498,1499,1499] -668 3 [1495.2,1496,1496.8000000000002] [1495,1496,1497] -669 2 [1493.1000000000001,1493.5,1493.9] [1493,1494,1494] -670 2 [1491.1000000000001,1491.5,1491.8999999999999] [1491,1492,1492] -671 2 [1489.1000000000001,1489.5,1489.9] [1489,1490,1490] +667 2 [1498.1,1498.5,1498.9] [1498,1499,1499] +668 3 [1495.2,1496,1496.8] [1495,1496,1497] +669 2 [1493.1,1493.5,1493.9] [1493,1494,1494] +670 2 [1491.1,1491.5,1491.9] [1491,1492,1492] +671 2 [1489.1,1489.5,1489.9] [1489,1490,1490] 672 3 [1486.2,1487,1487.8] [1486,1487,1488] -673 2 [1484.1000000000001,1484.5,1484.9] [1484,1485,1485] +673 2 [1484.1,1484.5,1484.9] [1484,1485,1485] 674 2 [1482.1,1482.5,1482.9] [1482,1483,1483] 675 2 [1480.1,1480.5,1480.9] [1480,1481,1481] -676 2 [1478.1000000000001,1478.5,1478.9] [1478,1479,1479] -677 2 [1476.1000000000001,1476.5,1476.8999999999999] [1476,1477,1477] +676 2 [1478.1,1478.5,1478.9] [1478,1479,1479] +677 2 [1476.1,1476.5,1476.9] [1476,1477,1477] 678 3 [1473.2,1474,1474.8] [1473,1474,1475] -679 2 [1471.1000000000001,1471.5,1471.8999999999999] [1471,1472,1472] -680 2 [1469.1000000000001,1469.5,1469.9] [1469,1470,1470] +679 2 [1471.1,1471.5,1471.9] [1471,1472,1472] +680 2 [1469.1,1469.5,1469.9] [1469,1470,1470] 681 2 [1467.1,1467.5,1467.9] [1467,1468,1468] 682 2 [1465.1,1465.5,1465.9] [1465,1466,1466] -683 2 [1463.1000000000001,1463.5,1463.9] [1463,1464,1464] -684 3 [1460.2,1461,1461.8000000000002] [1460,1461,1462] -685 2 [1458.1000000000001,1458.5,1458.9] [1458,1459,1459] -686 2 
[1456.1000000000001,1456.5,1456.8999999999999] [1456,1457,1457] -687 2 [1454.1000000000001,1454.5,1454.9] [1454,1455,1455] +683 2 [1463.1,1463.5,1463.9] [1463,1464,1464] +684 3 [1460.2,1461,1461.8] [1460,1461,1462] +685 2 [1458.1,1458.5,1458.9] [1458,1459,1459] +686 2 [1456.1,1456.5,1456.9] [1456,1457,1457] +687 2 [1454.1,1454.5,1454.9] [1454,1455,1455] 688 2 [1452.1,1452.5,1452.9] [1452,1453,1453] 689 2 [1450.1,1450.5,1450.9] [1450,1451,1451] -690 2 [1448.1000000000001,1448.5,1448.9] [1448,1449,1449] -691 2 [1446.1000000000001,1446.5,1446.8999999999999] [1446,1447,1447] -692 2 [1444.1000000000001,1444.5,1444.9] [1444,1445,1445] +690 2 [1448.1,1448.5,1448.9] [1448,1449,1449] +691 2 [1446.1,1446.5,1446.9] [1446,1447,1447] +692 2 [1444.1,1444.5,1444.9] [1444,1445,1445] 693 3 [1441.2,1442,1442.8] [1441,1442,1443] -694 2 [1439.1000000000001,1439.5,1439.9] [1439,1440,1440] +694 2 [1439.1,1439.5,1439.9] [1439,1440,1440] 695 2 [1437.1,1437.5,1437.9] [1437,1438,1438] 696 2 [1435.1,1435.5,1435.9] [1435,1436,1436] -697 2 [1433.1000000000001,1433.5,1433.9] [1433,1434,1434] -698 2 [1431.1000000000001,1431.5,1431.8999999999999] [1431,1432,1432] -699 2 [1429.1000000000001,1429.5,1429.9] [1429,1430,1430] +697 2 [1433.1,1433.5,1433.9] [1433,1434,1434] +698 2 [1431.1,1431.5,1431.9] [1431,1432,1432] +699 2 [1429.1,1429.5,1429.9] [1429,1430,1430] 700 2 [1427.1,1427.5,1427.9] [1427,1428,1428] 701 2 [1425.1,1425.5,1425.9] [1425,1426,1426] -702 2 [1423.1000000000001,1423.5,1423.9] [1423,1424,1424] -703 2 [1421.1000000000001,1421.5,1421.8999999999999] [1421,1422,1422] -704 2 [1419.1000000000001,1419.5,1419.9] [1419,1420,1420] +702 2 [1423.1,1423.5,1423.9] [1423,1424,1424] +703 2 [1421.1,1421.5,1421.9] [1421,1422,1422] +704 2 [1419.1,1419.5,1419.9] [1419,1420,1420] 705 2 [1417.1,1417.5,1417.9] [1417,1418,1418] 706 2 [1415.1,1415.5,1415.9] [1415,1416,1416] -707 2 [1413.1000000000001,1413.5,1413.9] [1413,1414,1414] -708 2 [1411.1000000000001,1411.5,1411.8999999999999] [1411,1412,1412] -709 2 [1409.1000000000001,1409.5,1409.9] [1409,1410,1410] +707 2 [1413.1,1413.5,1413.9] [1413,1414,1414] +708 2 [1411.1,1411.5,1411.9] [1411,1412,1412] +709 2 [1409.1,1409.5,1409.9] [1409,1410,1410] 710 2 [1407.1,1407.5,1407.9] [1407,1408,1408] 711 2 [1405.1,1405.5,1405.9] [1405,1406,1406] -712 2 [1403.1000000000001,1403.5,1403.9] [1403,1404,1404] -713 2 [1401.1000000000001,1401.5,1401.8999999999999] [1401,1402,1402] -714 2 [1399.1000000000001,1399.5,1399.9] [1399,1400,1400] +712 2 [1403.1,1403.5,1403.9] [1403,1404,1404] +713 2 [1401.1,1401.5,1401.9] [1401,1402,1402] +714 2 [1399.1,1399.5,1399.9] [1399,1400,1400] 715 2 [1397.1,1397.5,1397.9] [1397,1398,1398] 716 2 [1395.1,1395.5,1395.9] [1395,1396,1396] -717 2 [1393.1000000000001,1393.5,1393.9] [1393,1394,1394] -718 2 [1391.1000000000001,1391.5,1391.8999999999999] [1391,1392,1392] -719 2 [1389.1000000000001,1389.5,1389.9] [1389,1390,1390] +717 2 [1393.1,1393.5,1393.9] [1393,1394,1394] +718 2 [1391.1,1391.5,1391.9] [1391,1392,1392] +719 2 [1389.1,1389.5,1389.9] [1389,1390,1390] 720 2 [1387.1,1387.5,1387.9] [1387,1388,1388] 721 1 [1386,1386,1386] [1386,1386,1386] -722 2 [1384.1000000000001,1384.5,1384.9] [1384,1385,1385] +722 2 [1384.1,1384.5,1384.9] [1384,1385,1385] 723 2 [1382.1,1382.5,1382.9] [1382,1383,1383] 724 2 [1380.1,1380.5,1380.9] [1380,1381,1381] -725 2 [1378.1000000000001,1378.5,1378.9] [1378,1379,1379] -726 2 [1376.1000000000001,1376.5,1376.8999999999999] [1376,1377,1377] -727 2 [1374.1000000000001,1374.5,1374.9] [1374,1375,1375] +725 2 [1378.1,1378.5,1378.9] 
[1378,1379,1379] +726 2 [1376.1,1376.5,1376.9] [1376,1377,1377] +727 2 [1374.1,1374.5,1374.9] [1374,1375,1375] 728 2 [1372.1,1372.5,1372.9] [1372,1373,1373] 729 2 [1370.1,1370.5,1370.9] [1370,1371,1371] -730 2 [1368.1000000000001,1368.5,1368.9] [1368,1369,1369] +730 2 [1368.1,1368.5,1368.9] [1368,1369,1369] 731 1 [1367,1367,1367] [1367,1367,1367] 732 2 [1365.1,1365.5,1365.9] [1365,1366,1366] -733 2 [1363.1000000000001,1363.5,1363.9] [1363,1364,1364] -734 2 [1361.1000000000001,1361.5,1361.8999999999999] [1361,1362,1362] -735 2 [1359.1000000000001,1359.5,1359.9] [1359,1360,1360] +733 2 [1363.1,1363.5,1363.9] [1363,1364,1364] +734 2 [1361.1,1361.5,1361.9] [1361,1362,1362] +735 2 [1359.1,1359.5,1359.9] [1359,1360,1360] 736 2 [1357.1,1357.5,1357.9] [1357,1358,1358] 737 2 [1355.1,1355.5,1355.9] [1355,1356,1356] 738 1 [1354,1354,1354] [1354,1354,1354] 739 2 [1352.1,1352.5,1352.9] [1352,1353,1353] 740 2 [1350.1,1350.5,1350.9] [1350,1351,1351] -741 2 [1348.1000000000001,1348.5,1348.9] [1348,1349,1349] -742 2 [1346.1000000000001,1346.5,1346.8999999999999] [1346,1347,1347] -743 2 [1344.1000000000001,1344.5,1344.9] [1344,1345,1345] +741 2 [1348.1,1348.5,1348.9] [1348,1349,1349] +742 2 [1346.1,1346.5,1346.9] [1346,1347,1347] +743 2 [1344.1,1344.5,1344.9] [1344,1345,1345] 744 1 [1343,1343,1343] [1343,1343,1343] -745 2 [1341.1000000000001,1341.5,1341.8999999999999] [1341,1342,1342] -746 2 [1339.1000000000001,1339.5,1339.9] [1339,1340,1340] +745 2 [1341.1,1341.5,1341.9] [1341,1342,1342] +746 2 [1339.1,1339.5,1339.9] [1339,1340,1340] 747 2 [1337.1,1337.5,1337.9] [1337,1338,1338] 748 1 [1336,1336,1336] [1336,1336,1336] -749 2 [1334.1000000000001,1334.5,1334.9] [1334,1335,1335] +749 2 [1334.1,1334.5,1334.9] [1334,1335,1335] 750 2 [1332.1,1332.5,1332.9] [1332,1333,1333] 751 2 [1330.1,1330.5,1330.9] [1330,1331,1331] -752 2 [1328.1000000000001,1328.5,1328.9] [1328,1329,1329] +752 2 [1328.1,1328.5,1328.9] [1328,1329,1329] 753 1 [1327,1327,1327] [1327,1327,1327] 754 2 [1325.1,1325.5,1325.9] [1325,1326,1326] -755 2 [1323.1000000000001,1323.5,1323.9] [1323,1324,1324] -756 2 [1321.1000000000001,1321.5,1321.8999999999999] [1321,1322,1322] +755 2 [1323.1,1323.5,1323.9] [1323,1324,1324] +756 2 [1321.1,1321.5,1321.9] [1321,1322,1322] 757 1 [1320,1320,1320] [1320,1320,1320] -758 2 [1318.1000000000001,1318.5,1318.9] [1318,1319,1319] -759 2 [1316.1000000000001,1316.5,1316.8999999999999] [1316,1317,1317] -760 2 [1314.1000000000001,1314.5,1314.9] [1314,1315,1315] +758 2 [1318.1,1318.5,1318.9] [1318,1319,1319] +759 2 [1316.1,1316.5,1316.9] [1316,1317,1317] +760 2 [1314.1,1314.5,1314.9] [1314,1315,1315] 761 1 [1313,1313,1313] [1313,1313,1313] -762 2 [1311.1000000000001,1311.5,1311.8999999999999] [1311,1312,1312] -763 2 [1309.1000000000001,1309.5,1309.9] [1309,1310,1310] +762 2 [1311.1,1311.5,1311.9] [1311,1312,1312] +763 2 [1309.1,1309.5,1309.9] [1309,1310,1310] 764 1 [1308,1308,1308] [1308,1308,1308] -765 2 [1306.1000000000001,1306.5,1306.8999999999999] [1306,1307,1307] -766 2 [1304.1000000000001,1304.5,1304.9] [1304,1305,1305] +765 2 [1306.1,1306.5,1306.9] [1306,1307,1307] +766 2 [1304.1,1304.5,1304.9] [1304,1305,1305] 767 2 [1302.1,1302.5,1302.9] [1302,1303,1303] 768 1 [1301,1301,1301] [1301,1301,1301] -769 2 [1299.1000000000001,1299.5,1299.9] [1299,1300,1300] +769 2 [1299.1,1299.5,1299.9] [1299,1300,1300] 770 2 [1297.1,1297.5,1297.9] [1297,1298,1298] 771 1 [1296,1296,1296] [1296,1296,1296] -772 2 [1294.1000000000001,1294.5,1294.9] [1294,1295,1295] +772 2 [1294.1,1294.5,1294.9] [1294,1295,1295] 773 2 [1292.1,1292.5,1292.9] 
[1292,1293,1293] 774 1 [1291,1291,1291] [1291,1291,1291] -775 2 [1289.1000000000001,1289.5,1289.9] [1289,1290,1290] +775 2 [1289.1,1289.5,1289.9] [1289,1290,1290] 776 2 [1287.1,1287.5,1287.9] [1287,1288,1288] 777 1 [1286,1286,1286] [1286,1286,1286] -778 2 [1284.1000000000001,1284.5,1284.9] [1284,1285,1285] +778 2 [1284.1,1284.5,1284.9] [1284,1285,1285] 779 2 [1282.1,1282.5,1282.9] [1282,1283,1283] 780 1 [1281,1281,1281] [1281,1281,1281] -781 2 [1279.1000000000001,1279.5,1279.9] [1279,1280,1280] +781 2 [1279.1,1279.5,1279.9] [1279,1280,1280] 782 2 [1277.1,1277.5,1277.9] [1277,1278,1278] 783 1 [1276,1276,1276] [1276,1276,1276] -784 2 [1274.1000000000001,1274.5,1274.9] [1274,1275,1275] +784 2 [1274.1,1274.5,1274.9] [1274,1275,1275] 785 1 [1273,1273,1273] [1273,1273,1273] -786 2 [1271.1000000000001,1271.5,1271.8999999999999] [1271,1272,1272] -787 2 [1269.1000000000001,1269.5,1269.9] [1269,1270,1270] +786 2 [1271.1,1271.5,1271.9] [1271,1272,1272] +787 2 [1269.1,1269.5,1269.9] [1269,1270,1270] 788 1 [1268,1268,1268] [1268,1268,1268] -789 2 [1266.1000000000001,1266.5,1266.8999999999999] [1266,1267,1267] +789 2 [1266.1,1266.5,1266.9] [1266,1267,1267] 790 1 [1265,1265,1265] [1265,1265,1265] -791 2 [1263.1000000000001,1263.5,1263.9] [1263,1264,1264] -792 2 [1261.1000000000001,1261.5,1261.8999999999999] [1261,1262,1262] +791 2 [1263.1,1263.5,1263.9] [1263,1264,1264] +792 2 [1261.1,1261.5,1261.9] [1261,1262,1262] 793 1 [1260,1260,1260] [1260,1260,1260] -794 2 [1258.1000000000001,1258.5,1258.9] [1258,1259,1259] +794 2 [1258.1,1258.5,1258.9] [1258,1259,1259] 795 1 [1257,1257,1257] [1257,1257,1257] 796 2 [1255.1,1255.5,1255.9] [1255,1256,1256] -797 2 [1253.1000000000001,1253.5,1253.9] [1253,1254,1254] +797 2 [1253.1,1253.5,1253.9] [1253,1254,1254] 798 1 [1252,1252,1252] [1252,1252,1252] 799 2 [1250.1,1250.5,1250.9] [1250,1251,1251] 800 1 [1249,1249,1249] [1249,1249,1249] 801 2 [1247.1,1247.5,1247.9] [1247,1248,1248] 802 1 [1246,1246,1246] [1246,1246,1246] -803 2 [1244.1000000000001,1244.5,1244.9] [1244,1245,1245] +803 2 [1244.1,1244.5,1244.9] [1244,1245,1245] 804 1 [1243,1243,1243] [1243,1243,1243] -805 2 [1241.1000000000001,1241.5,1241.8999999999999] [1241,1242,1242] -806 2 [1239.1000000000001,1239.5,1239.9] [1239,1240,1240] +805 2 [1241.1,1241.5,1241.9] [1241,1242,1242] +806 2 [1239.1,1239.5,1239.9] [1239,1240,1240] 807 1 [1238,1238,1238] [1238,1238,1238] -808 2 [1236.1000000000001,1236.5,1236.8999999999999] [1236,1237,1237] +808 2 [1236.1,1236.5,1236.9] [1236,1237,1237] 809 1 [1235,1235,1235] [1235,1235,1235] -810 2 [1233.1000000000001,1233.5,1233.9] [1233,1234,1234] +810 2 [1233.1,1233.5,1233.9] [1233,1234,1234] 811 1 [1232,1232,1232] [1232,1232,1232] 812 2 [1230.1,1230.5,1230.9] [1230,1231,1231] 813 1 [1229,1229,1229] [1229,1229,1229] 814 2 [1227.1,1227.5,1227.9] [1227,1228,1228] 815 1 [1226,1226,1226] [1226,1226,1226] -816 2 [1224.1000000000001,1224.5,1224.9] [1224,1225,1225] +816 2 [1224.1,1224.5,1224.9] [1224,1225,1225] 817 1 [1223,1223,1223] [1223,1223,1223] -818 2 [1221.1000000000001,1221.5,1221.8999999999999] [1221,1222,1222] +818 2 [1221.1,1221.5,1221.9] [1221,1222,1222] 819 1 [1220,1220,1220] [1220,1220,1220] -820 2 [1218.1000000000001,1218.5,1218.9] [1218,1219,1219] +820 2 [1218.1,1218.5,1218.9] [1218,1219,1219] 821 1 [1217,1217,1217] [1217,1217,1217] 822 2 [1215.1,1215.5,1215.9] [1215,1216,1216] 823 1 [1214,1214,1214] [1214,1214,1214] 824 2 [1212.1,1212.5,1212.9] [1212,1213,1213] 825 1 [1211,1211,1211] [1211,1211,1211] -826 2 [1209.1000000000001,1209.5,1209.9] [1209,1210,1210] +826 2 
[1209.1,1209.5,1209.9] [1209,1210,1210] 827 1 [1208,1208,1208] [1208,1208,1208] 828 1 [1207,1207,1207] [1207,1207,1207] 829 2 [1205.1,1205.5,1205.9] [1205,1206,1206] 830 1 [1204,1204,1204] [1204,1204,1204] 831 2 [1202.1,1202.5,1202.9] [1202,1203,1203] 832 1 [1201,1201,1201] [1201,1201,1201] -833 2 [1199.1000000000001,1199.5,1199.9] [1199,1200,1200] +833 2 [1199.1,1199.5,1199.9] [1199,1200,1200] 834 1 [1198,1198,1198] [1198,1198,1198] -835 2 [1196.1000000000001,1196.5,1196.8999999999999] [1196,1197,1197] +835 2 [1196.1,1196.5,1196.9] [1196,1197,1197] 836 1 [1195,1195,1195] [1195,1195,1195] 837 1 [1194,1194,1194] [1194,1194,1194] 838 2 [1192.1,1192.5,1192.9] [1192,1193,1193] 839 1 [1191,1191,1191] [1191,1191,1191] -840 2 [1189.1000000000001,1189.5,1189.9] [1189,1190,1190] +840 2 [1189.1,1189.5,1189.9] [1189,1190,1190] 841 1 [1188,1188,1188] [1188,1188,1188] -842 2 [1186.1000000000001,1186.5,1186.8999999999999] [1186,1187,1187] +842 2 [1186.1,1186.5,1186.9] [1186,1187,1187] 843 1 [1185,1185,1185] [1185,1185,1185] 844 1 [1184,1184,1184] [1184,1184,1184] 845 2 [1182.1,1182.5,1182.9] [1182,1183,1183] 846 1 [1181,1181,1181] [1181,1181,1181] -847 2 [1179.1000000000001,1179.5,1179.9] [1179,1180,1180] +847 2 [1179.1,1179.5,1179.9] [1179,1180,1180] 848 1 [1178,1178,1178] [1178,1178,1178] 849 1 [1177,1177,1177] [1177,1177,1177] 850 2 [1175.1,1175.5,1175.9] [1175,1176,1176] 851 1 [1174,1174,1174] [1174,1174,1174] 852 1 [1173,1173,1173] [1173,1173,1173] -853 2 [1171.1000000000001,1171.5,1171.8999999999999] [1171,1172,1172] +853 2 [1171.1,1171.5,1171.9] [1171,1172,1172] 854 1 [1170,1170,1170] [1170,1170,1170] -855 2 [1168.1000000000001,1168.5,1168.9] [1168,1169,1169] +855 2 [1168.1,1168.5,1168.9] [1168,1169,1169] 856 1 [1167,1167,1167] [1167,1167,1167] 857 1 [1166,1166,1166] [1166,1166,1166] -858 2 [1164.1000000000001,1164.5,1164.9] [1164,1165,1165] +858 2 [1164.1,1164.5,1164.9] [1164,1165,1165] 859 1 [1163,1163,1163] [1163,1163,1163] 860 1 [1162,1162,1162] [1162,1162,1162] 861 2 [1160.1,1160.5,1160.9] [1160,1161,1161] 862 1 [1159,1159,1159] [1159,1159,1159] 863 1 [1158,1158,1158] [1158,1158,1158] -864 2 [1156.1000000000001,1156.5,1156.8999999999999] [1156,1157,1157] +864 2 [1156.1,1156.5,1156.9] [1156,1157,1157] 865 1 [1155,1155,1155] [1155,1155,1155] 866 1 [1154,1154,1154] [1154,1154,1154] 867 2 [1152.1,1152.5,1152.9] [1152,1153,1153] 868 1 [1151,1151,1151] [1151,1151,1151] 869 1 [1150,1150,1150] [1150,1150,1150] -870 2 [1148.1000000000001,1148.5,1148.9] [1148,1149,1149] +870 2 [1148.1,1148.5,1148.9] [1148,1149,1149] 871 1 [1147,1147,1147] [1147,1147,1147] 872 1 [1146,1146,1146] [1146,1146,1146] -873 2 [1144.1000000000001,1144.5,1144.9] [1144,1145,1145] +873 2 [1144.1,1144.5,1144.9] [1144,1145,1145] 874 1 [1143,1143,1143] [1143,1143,1143] 875 1 [1142,1142,1142] [1142,1142,1142] 876 2 [1140.1,1140.5,1140.9] [1140,1141,1141] 877 1 [1139,1139,1139] [1139,1139,1139] 878 1 [1138,1138,1138] [1138,1138,1138] 879 1 [1137,1137,1137] [1137,1137,1137] -880 2 [1135.1,1135.5,1135.8999999999999] [1135,1136,1136] +880 2 [1135.1,1135.5,1135.9] [1135,1136,1136] 881 1 [1134,1134,1134] [1134,1134,1134] 882 1 [1133,1133,1133] [1133,1133,1133] 883 2 [1131.1,1131.5,1131.9] [1131,1132,1132] @@ -895,15 +895,15 @@ 887 2 [1126.1,1126.5,1126.9] [1126,1127,1127] 888 1 [1125,1125,1125] [1125,1125,1125] 889 1 [1124,1124,1124] [1124,1124,1124] -890 2 [1122.1000000000001,1122.5,1122.9] [1122,1123,1123] +890 2 [1122.1,1122.5,1122.9] [1122,1123,1123] 891 1 [1121,1121,1121] [1121,1121,1121] 892 1 [1120,1120,1120] [1120,1120,1120] 893 1 
[1119,1119,1119] [1119,1119,1119]
-894 2 [1117.1000000000001,1117.5,1117.9] [1117,1118,1118]
+894 2 [1117.1,1117.5,1117.9] [1117,1118,1118]
 895 1 [1116,1116,1116] [1116,1116,1116]
 896 1 [1115,1115,1115] [1115,1115,1115]
 897 1 [1114,1114,1114] [1114,1114,1114]
-898 2 [1112.1000000000001,1112.5,1112.9] [1112,1113,1113]
+898 2 [1112.1,1112.5,1112.9] [1112,1113,1113]
 899 1 [1111,1111,1111] [1111,1111,1111]
 900 1 [1110,1110,1110] [1110,1110,1110]
 901 1 [1109,1109,1109] [1109,1109,1109]
@@ -917,7 +917,7 @@
 909 1 [1099,1099,1099] [1099,1099,1099]
 910 1 [1098,1098,1098] [1098,1098,1098]
 911 1 [1097,1097,1097] [1097,1097,1097]
-912 2 [1095.1,1095.5,1095.8999999999999] [1095,1096,1096]
+912 2 [1095.1,1095.5,1095.9] [1095,1096,1096]
 913 1 [1094,1094,1094] [1094,1094,1094]
 914 1 [1093,1093,1093] [1093,1093,1093]
 915 1 [1092,1092,1092] [1092,1092,1092]
@@ -928,19 +928,19 @@
 920 1 [1086,1086,1086] [1086,1086,1086]
 921 1 [1085,1085,1085] [1085,1085,1085]
 922 1 [1084,1084,1084] [1084,1084,1084]
-923 2 [1082.1000000000001,1082.5,1082.9] [1082,1083,1083]
+923 2 [1082.1,1082.5,1082.9] [1082,1083,1083]
 924 1 [1081,1081,1081] [1081,1081,1081]
 925 1 [1080,1080,1080] [1080,1080,1080]
 926 1 [1079,1079,1079] [1079,1079,1079]
 927 1 [1078,1078,1078] [1078,1078,1078]
 928 1 [1077,1077,1077] [1077,1077,1077]
-929 2 [1075.1,1075.5,1075.8999999999999] [1075,1076,1076]
+929 2 [1075.1,1075.5,1075.9] [1075,1076,1076]
 930 1 [1074,1074,1074] [1074,1074,1074]
 931 1 [1073,1073,1073] [1073,1073,1073]
 932 1 [1072,1072,1072] [1072,1072,1072]
 933 1 [1071,1071,1071] [1071,1071,1071]
 934 1 [1070,1070,1070] [1070,1070,1070]
-935 2 [1068.1000000000001,1068.5,1068.9] [1068,1069,1069]
+935 2 [1068.1,1068.5,1068.9] [1068,1069,1069]
 936 1 [1067,1067,1067] [1067,1067,1067]
 937 1 [1066,1066,1066] [1066,1066,1066]
 938 1 [1065,1065,1065] [1065,1065,1065]
@@ -956,7 +956,7 @@
 948 1 [1054,1054,1054] [1054,1054,1054]
 949 1 [1053,1053,1053] [1053,1053,1053]
 950 1 [1052,1052,1052] [1052,1052,1052]
-951 2 [1050.1,1050.5,1050.8999999999999] [1050,1051,1051]
+951 2 [1050.1,1050.5,1050.9] [1050,1051,1051]
 952 1 [1049,1049,1049] [1049,1049,1049]
 953 1 [1048,1048,1048] [1048,1048,1048]
 954 1 [1047,1047,1047] [1047,1047,1047]
@@ -967,7 +967,7 @@
 959 1 [1042,1042,1042] [1042,1042,1042]
 960 1 [1041,1041,1041] [1041,1041,1041]
 961 1 [1040,1040,1040] [1040,1040,1040]
-962 2 [1038.1000000000001,1038.5,1038.9] [1038,1039,1039]
+962 2 [1038.1,1038.5,1038.9] [1038,1039,1039]
 963 1 [1037,1037,1037] [1037,1037,1037]
 964 1 [1036,1036,1036] [1036,1036,1036]
 965 1 [1035,1035,1035] [1035,1035,1035]
@@ -983,7 +983,7 @@
 975 1 [1025,1025,1025] [1025,1025,1025]
 976 1 [1024,1024,1024] [1024,1024,1024]
 977 1 [1023,1023,1023] [1023,1023,1023]
-978 2 [1021.1,1021.5,1021.9000000000001] [1021,1022,1022]
+978 2 [1021.1,1021.5,1021.9] [1021,1022,1022]
 979 1 [1020,1020,1020] [1020,1020,1020]
 980 1 [1019,1019,1019] [1019,1019,1019]
 981 1 [1018,1018,1018] [1018,1018,1018]
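The reference hunks above track a query-side change: triples such as [1117.1000000000001,1117.5,1117.9] are Float64 medians printed in shortest-round-trip form, and the 00273_quantiles.sql hunk below now rounds them before output so the reference stays stable. A minimal standalone sketch of that technique, assuming clickhouse-client and the numbers() table function (the quantile levels and row count here are illustrative, not taken from the test):

    -- quantilesDeterministic returns Array(Float64); applying round(x, 6)
    -- element-wise with arrayMap strips representation noise such as
    -- 1117.1000000000001 before the result is formatted for the .reference file.
    SELECT arrayMap(x -> round(x, 6), quantilesDeterministic(0.1, 0.5, 0.9)(number, intHash64(number))) AS q
    FROM numbers(1000);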
diff --git a/tests/queries/0_stateless/00273_quantiles.sql b/tests/queries/0_stateless/00273_quantiles.sql
index a3609834e3c..9fef1f63057 100644
--- a/tests/queries/0_stateless/00273_quantiles.sql
+++ b/tests/queries/0_stateless/00273_quantiles.sql
@@ -8,4 +8,4 @@ SELECT quantilesExact(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0
 SELECT quantilesTDigest(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001);
 SELECT quantilesDeterministic(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x, x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001);
-SELECT round(1000000 / (number + 1)) AS k, count() AS c, quantilesDeterministic(0.1, 0.5, 0.9)(number, intHash64(number)) AS q1, quantilesExact(0.1, 0.5, 0.9)(number) AS q2 FROM (SELECT number FROM system.numbers LIMIT 1000000) GROUP BY k ORDER BY k;
+SELECT round(1000000 / (number + 1)) AS k, count() AS c, arrayMap(x -> round(x, 6), quantilesDeterministic(0.1, 0.5, 0.9)(number, intHash64(number))) AS q1, quantilesExact(0.1, 0.5, 0.9)(number) AS q2 FROM (SELECT number FROM system.numbers LIMIT 1000000) GROUP BY k ORDER BY k;
diff --git a/tests/queries/0_stateless/00900_orc_load.sh b/tests/queries/0_stateless/00900_orc_load.sh
index b3f2c39e5d2..62149fa554e 100755
--- a/tests/queries/0_stateless/00900_orc_load.sh
+++ b/tests/queries/0_stateless/00900_orc_load.sh
@@ -5,16 +5,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
-DATA_FILE=$CUR_DIR/data_orc/test.orc
-
 ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS orc_load"
 ${CLICKHOUSE_CLIENT} --query="CREATE TABLE orc_load (int Int32, smallint Int8, bigint Int64, float Float32, double Float64, date Date, y String, datetime64 DateTime64(3)) ENGINE = Memory"
 ${CLICKHOUSE_CLIENT} --query="insert into orc_load values (0, 0, 0, 0, 0, '2019-01-01', 'test1', toDateTime64('2019-01-01 02:03:04.567', 3)), (2147483647, -1, 9223372036854775806, 123.345345, 345345.3453451212, '2019-01-01', 'test2', toDateTime64('2019-01-01 02:03:04.567', 3))"
-${CLICKHOUSE_CLIENT} --query="select * from orc_load FORMAT ORC" > $DATA_FILE
+${CLICKHOUSE_CLIENT} --query="select * from orc_load FORMAT ORC" > "${CLICKHOUSE_TMP}"/test.orc
 ${CLICKHOUSE_CLIENT} --query="truncate table orc_load"
-cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC"
-timeout 3 ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC" < $DATA_FILE
+cat "${CLICKHOUSE_TMP}"/test.orc | ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC"
+timeout 3 ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC" < "${CLICKHOUSE_TMP}"/test.orc
 ${CLICKHOUSE_CLIENT} --query="select * from orc_load"
 ${CLICKHOUSE_CLIENT} --query="drop table orc_load"
-rm -rf "$DATA_FILE"
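In the 00908_bloom_filter_index.sh hunk below, every LIKE pattern is unescaped twice before it reaches the server: once by the shell inside double quotes and once by the SQL string literal, so \\\\ in the script arrives at the LIKE operator as a single backslash, which is a dangling escape when it is the last character of the pattern. Doubling it to \\\\\\\\ delivers \\ at the LIKE level, an escaped literal backslash. A hedged illustration of just the SQL layer (the sample strings are invented, not from the test):

    -- Inside a ClickHouse string literal, \\ denotes one backslash; LIKE then
    -- uses \ as its escape character, so a pattern ending in \\ matches a value
    -- that ends in a literal backslash.
    SELECT 'sample\\' LIKE '%\\\\'; -- 1: the value ends with a backslash
    SELECT 'sample' LIKE '%\\\\';   -- 0: it does not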
k FORMAT JSON" | grep "rows_read" +$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE s LIKE '%_\\\\%2\\\\__\\\\\\\\' ORDER BY k" +$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE s LIKE '%_\\\\%2\\\\__\\\\\\\\' ORDER BY k FORMAT JSON" | grep "rows_read" -$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE s LIKE '2\\\\_2\\\\%2_2\\\\' ORDER BY k" -$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE s LIKE '2\\\\_2\\\\%2_2\\\\' ORDER BY k FORMAT JSON" | grep "rows_read" +$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE s LIKE '2\\\\_2\\\\%2_2\\\\\\\\' ORDER BY k" +$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE s LIKE '2\\\\_2\\\\%2_2\\\\\\\\' ORDER BY k FORMAT JSON" | grep "rows_read" $CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE s LIKE '2\\\\_2\\\\%2_2_' ORDER BY k" $CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE s LIKE '2\\\\_2\\\\%2_2_' ORDER BY k FORMAT JSON" | grep "rows_read" diff --git a/tests/queries/0_stateless/01043_categorical_iv.sql b/tests/queries/0_stateless/01043_categorical_iv.sql index 263b245680d..049070a69ab 100644 --- a/tests/queries/0_stateless/01043_categorical_iv.sql +++ b/tests/queries/0_stateless/01043_categorical_iv.sql @@ -24,7 +24,7 @@ FROM ( -- single category SELECT - categoricalInformationValue(x.1, x.2) + arrayMap(x -> x = 0 ? 0 : x, categoricalInformationValue(x.1, x.2)) -- remove negative zeros FROM ( SELECT arrayJoin([(1, 0), (1, 0), (1, 0), (1, 1), (1, 1)]) as x diff --git a/tests/queries/0_stateless/01085_window_view_attach.reference b/tests/queries/0_stateless/01085_window_view_attach.reference new file mode 100644 index 00000000000..66b9aa91493 --- /dev/null +++ b/tests/queries/0_stateless/01085_window_view_attach.reference @@ -0,0 +1,13 @@ +.inner.target.wv +.inner.wv +mt +wv +mt +.inner.target.wv +.inner.wv +mt +.inner.target.wv +.inner.wv +mt +wv +mt diff --git a/tests/queries/0_stateless/01085_window_view_attach.sql b/tests/queries/0_stateless/01085_window_view_attach.sql new file mode 100644 index 00000000000..604bf5dd198 --- /dev/null +++ b/tests/queries/0_stateless/01085_window_view_attach.sql @@ -0,0 +1,28 @@ +-- Tags: no-parallel + +SET allow_experimental_window_view = 1; + +DROP DATABASE IF EXISTS test_01085; +CREATE DATABASE test_01085 ENGINE=Ordinary; + +DROP TABLE IF EXISTS test_01085.mt; +DROP TABLE IF EXISTS test_01085.wv; + +CREATE TABLE test_01085.mt(a Int32, market Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple(); +CREATE WINDOW VIEW test_01085.wv ENGINE Memory WATERMARK=ASCENDING AS SELECT count(a) AS count, market, tumbleEnd(wid) AS w_end FROM test_01085.mt GROUP BY tumble(timestamp, INTERVAL '5' SECOND) AS wid, market; + +SHOW tables FROM test_01085; + +DROP TABLE test_01085.wv NO DELAY; +SHOW tables FROM test_01085; + +CREATE WINDOW VIEW test_01085.wv ENGINE Memory WATERMARK=ASCENDING AS SELECT count(a) AS count, market, tumbleEnd(wid) AS w_end FROM test_01085.mt GROUP BY tumble(timestamp, INTERVAL '5' SECOND) AS wid, market; + +DETACH TABLE test_01085.wv; +SHOW tables FROM test_01085; + +ATTACH TABLE test_01085.wv; +SHOW tables FROM test_01085; + +DROP TABLE test_01085.wv NO DELAY; +SHOW tables FROM test_01085; diff --git a/tests/queries/0_stateless/01132_max_rows_to_read.reference 
b/tests/queries/0_stateless/01132_max_rows_to_read.reference index 5087d15b87c..d5ec7c6771b 100644 --- a/tests/queries/0_stateless/01132_max_rows_to_read.reference +++ b/tests/queries/0_stateless/01132_max_rows_to_read.reference @@ -37,6 +37,17 @@ 8 9 10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 0 1 2 @@ -55,3 +66,12 @@ 15 16 17 +18 +19 +20 +21 +22 +23 +24 +25 +26 diff --git a/tests/queries/0_stateless/01132_max_rows_to_read.sql b/tests/queries/0_stateless/01132_max_rows_to_read.sql index b7923a27d04..8127befa83c 100644 --- a/tests/queries/0_stateless/01132_max_rows_to_read.sql +++ b/tests/queries/0_stateless/01132_max_rows_to_read.sql @@ -23,7 +23,6 @@ SELECT count() FROM numbers(31); SELECT * FROM numbers(30); -- the same for uneven block sizes --- NOTE: currently it outputs less amount of data; it will be better to output the latest block also SET max_block_size = 11; SELECT * FROM numbers(30); SET max_block_size = 9; diff --git a/tests/queries/0_stateless/01304_polygons_sym_difference.reference b/tests/queries/0_stateless/01304_polygons_sym_difference.reference index 7d16848ac2e..9344410f192 100644 --- a/tests/queries/0_stateless/01304_polygons_sym_difference.reference +++ b/tests/queries/0_stateless/01304_polygons_sym_difference.reference @@ -1,7 +1,7 @@ [[[(1,2.9),(1,1),(2.9,1),(3,0),(0,0),(0,3),(1,2.9)]],[[(1,2.9),(1,4),(4,4),(4,1),(2.9,1),(2.6,2),(2,2.6),(1,2.9)]]] -------- MultiPolygon with Polygon -MULTIPOLYGON(((36.9725 59.0149,35.5408 58.9593,37.2817 59.9768,38.7325 59.9465,36.9725 59.0149)),((36.9725 59.0149,37.3119 59.0258,37.8553 58.9075,36.5949 58.1673,36.0123 58.2869,37.191 58.6819,36.4989 58.7512,36.9725 59.0149)),((36.151 54.791,37.7653 55.1891,37.06 55.3843,37.2824 55.5258,38.0373 55.6523,37.6238 55.7402,38.1319 56.0534,38.2186 56.0594,38.1688 56.0758,38.4339 56.2361,38.944 56.0594,38.1884 55.8564,38.4907 55.5327,37.7955 55.3956,38.2609 55.1775,38.1601 55.1091,36.7074 54.6506,37.0035 54.2999,36.6985 54.0791,36.0472 54.7217,36.151 54.791)),((36.151 54.791,36.0123 54.7554,36.0472 54.7217,34.9611 53.9765,34.894 54.1226,35.6193 54.4929,34.9706 54.9262,35.2275 55.0993,36.4354 55.3441,35.7505 55.4454,35.9817 55.5958,36.5563 55.6352,36.193 55.7319,37.2281 56.3799,38.1688 56.0758,38.1319 56.0534,36.647 55.9411,37.6238 55.7402,37.2824 55.5258,36.8283 55.4471,37.06 55.3843,36.151 54.791)),((36.5334 56.6753,38.2312 56.9795,37.565 56.5843,37.463 56.5623,37.5054 56.5484,37.2281 56.3799,36.4446 56.6242,36.5334 56.6753)),((36.5334 56.6753,36.375 56.6455,36.4446 56.6242,36.0233 56.3789,35.4083 56.5254,36.1999 57.0022,36.9794 57.0751,36.4587 57.1544,38.0535 58.0542,38.3395 57.9356,37.4328 57.7103,38.0744 57.5312,37.9669 57.4734,37.1608 57.2554,37.4489 57.1909,36.5334 56.6753)),((36.8709 53.2765,37.135 53.4711,37.8559 52.9188,38.0214 52.8989,37.1608 52.2393,35.4682 52.2022,36.5022 53.0008,37.4328 52.9552,36.8709 53.2765)),((36.8709 53.2765,36.5022 53.0008,35.3776 53.0462,35.3645 53.076,36.1528 53.6763,36.8709 53.2765)),((36.6985 54.0791,36.919 53.8561,36.3552 53.8269,36.6985 54.0791)),((35.5408 58.9593,35.3712 58.8556,34.6522 58.9167,35.5408 58.9593)),((36.0848 57.855,36.3932 58.0447,36.4354 58.0478,36.403 58.0507,36.5949 58.1673,37.1608 58.0478,36.0848 57.855)),((36.0848 57.855,35.9179 57.7512,35.7402 57.7909,36.0848 57.855)),((37.135 53.4711,36.9794 53.5878,37.3119 53.9273,37.0035 54.2999,38.1601 55.1091,38.3093 55.1546,38.2609 55.1775,39.8102 56.1914,39.8205 56.0763,40.425 56.1942,40.5716 55.8007,40.5504 55.7875,39.7601 55.7544,39.8151 55.3187,37.135 53.4711)),((38.2312 
56.9795,38.2699 57.0021,38.3093 56.9929,38.2312 56.9795)),((36.4989 58.7512,36.1498 58.553,34.9952 58.6226,35.3712 58.8556,36.4989 58.7512)),((36.4587 57.1544,36.1999 57.0022,34.4816 56.8232,34.8098 57.0409,36.0727 57.0915,35.0338 57.1875,35.4682 57.4674,36.1936 57.4998,35.613 57.5595,35.9179 57.7512,37.0097 57.4998,35.7705 57.2554,36.4587 57.1544)),((38.0535 58.0542,37.4026 58.3187,38.5813 58.7446,37.8553 58.9075,39.7299 59.9314,44.4751 59.81,44.4146 55.3097,40.0925 52.1652,38.3395 52.1652,39.1456 52.7573,39.5787 52.6996,39.2704 52.8471,39.9877 53.3534,40.0019 53.354,39.9942 53.358,43.0243 55.3269,43.0243 56.2614,40.2143 54.467,39.5485 54.5631,39.5485 54.8773,40.3948 54.8773,40.3948 55.2408,39.8205 55.2753,39.8151 55.3187,40.5504 55.7875,40.5761 55.7884,40.5716 55.8007,43.0243 57.2554,43.0243 58.0797,40.4543 56.5923,40.3343 56.9599,39.7903 56.9929,39.7863 57.025,42.5105 58.477,41.6944 58.8542,40.1389 58.048,39.6392 58.0478,39.6392 58.3427,39.7184 58.3823,40.3343 58.3821,40.4136 58.7241,41.2108 59.1035,40.6366 59.3817,39.8163 58.9766,38.5209 59.119,39.4085 58.7696,38.7465 58.4255,38.3698 58.2869,38.432 58.2584,38.0535 58.0542)),((34.4996 55.9565,33.5244 56.1686,33.7222 56.3063,34.5917 56.2949,35.0485 56.303,34.744 56.1118,34.7126 56.11,34.7331 56.1049,34.4996 55.9565)),((34.4996 55.9565,35.0954 55.822,34.9721 55.7463,34.2598 55.8023,34.4996 55.9565)),((31.6069 56.3194,31.5088 55.9411,31.7782 55.7778,30.2092 54.6331,30.2394 53.6774,31.7439 54.8677,31.4182 54.4227,31.8748 54.1736,29.3931 52.2763,29.4536 59.7796,30.5719 59.9919,30.4812 58.8542,32.3249 59.9465,33.6548 59.9465,30.179 57.9196,30.179 56.9764,32.2175 58.3664,32.1738 58.0318,31.5088 57.4998,31.6514 57.1258,30.3301 56.1942,30.2394 55.2753,31.6069 56.3194)),((31.6069 56.3194,31.7506 56.8609,31.6514 57.1258,34.0496 58.6717,34.9952 58.6226,34.6028 58.3749,33.6245 58.271,34.3593 58.2189,33.7581 57.8255,33.2316 57.7748,33.6325 57.7419,31.6069 56.3194)),((33.5244 56.1686,33.1204 55.8832,32.748 55.9072,32.9547 55.7645,31.7439 54.8677,31.8413 54.9989,32.204 55.5156,31.7782 55.7778,33.3418 56.8364,33.8361 56.6953,34.1885 56.6259,33.7222 56.3063,32.8387 56.3117,33.5244 56.1686)),((33.1204 55.8832,34.2598 55.8023,33.6125 55.3778,33.5036 55.3785,32.9547 55.7645,33.1204 55.8832)),((35.3188 55.9582,36.193 55.7319,35.9817 55.5958,35.1358 55.5327,35.7505 55.4454,35.2275 55.0993,34.8335 55.0162,34.9706 54.9262,34.7231 54.7576,34.2593 54.9642,35.0149 55.3613,34.3709 55.3709,34.9721 55.7463,35.6798 55.6863,35.0954 55.822,35.3188 55.9582)),((35.3188 55.9582,34.7331 56.1049,34.744 56.1118,35.6571 56.1619,35.3188 55.9582)),((33.3418 56.8364,32.9596 56.9434,33.5602 56.9781,33.3418 56.8364)),((33.4048 52.8423,34.7731 52.9188,34.7731 53.7847,34.7279 53.8116,34.9611 53.9765,35.3645 53.076,34.2895 52.2208,32.5969 52.2208,33.4048 52.8423)),((33.4048 52.8423,33.1712 52.8276,32.5275 53.1741,34.7231 54.7576,35.0753 54.5981,34.1081 54.1757,34.7279 53.8116,33.4048 52.8423)),((32.2523 53.964,32.476 53.8383,32.0831 53.408,32.5275 53.1741,31.2368 52.1652,29.7861 52.1466,32.2523 53.964)),((32.2523 53.964,31.8748 54.1736,33.6125 55.3778,34.3709 55.3709,32.2523 53.964)),((36.3552 53.8269,36.1528 53.6763,35.9216 53.8026,36.3552 53.8269)),((32.5691 58.5924,34.8637 59.9768,36.2843 59.9616,34.0496 58.6717,33.8361 58.6819,34.7428 59.5659,33.4734 58.8542,32.5691 58.5924)),((32.5691 58.5924,32.2175 58.3664,32.2342 58.4928,32.5691 58.5924)),((33.5602 56.9781,34.0208 57.2724,35.0338 57.1875,34.8098 57.0409,33.5602 56.9781)),((36.3932 58.0447,35.1134 57.9454,35.4314 58.1349,36.403 
58.0507,36.3932 58.0447)),((35.1134 57.9454,34.6332 57.6538,33.6325 57.7419,33.7581 57.8255,35.1134 57.9454)),((35.4314 58.1349,34.3593 58.2189,34.6028 58.3749,36.0877 58.5174,35.4314 58.1349)),((35.4682 57.4674,34.2274 57.4023,34.6332 57.6538,35.613 57.5595,35.4682 57.4674)),((34.4816 56.8232,34.3867 56.7596,34.229 56.7948,34.4816 56.8232)),((34.1885 56.6259,34.3867 56.7596,35.4083 56.5254,35.2273 56.414,34.1885 56.6259)),((34.2274 57.4023,34.0208 57.2724,33.1712 57.337,34.2274 57.4023)),((35.0485 56.303,35.2273 56.414,35.71 56.3117,35.0485 56.303)),((35.6571 56.1619,36.0233 56.3789,36.7074 56.211,35.6571 56.1619)),((36.1498 58.553,36.3447 58.5402,36.0877 58.5174,36.1498 58.553)),((40.2143 54.467,40.3948 54.4403,40.6064 54.034,39.9716 53.9807,40.2437 53.5878,39.5485 53.5878,39.9942 53.358,39.9877 53.3534,38.5511 53.2922,40.2143 54.467)),((39.8102 56.1914,39.7903 56.4121,40.2529 56.4682,39.8102 56.1914)),((38.0214 52.8989,38.4609 53.226,39.2704 52.8471,39.1456 52.7573,38.0214 52.8989)),((38.5511 53.2922,38.4609 53.226,38.3395 53.2817,38.5511 53.2922)),((40.4543 56.5923,40.4855 56.4957,40.2529 56.4682,40.4543 56.5923)),((40.1389 58.048,40.2437 58.0478,40.3343 57.4673,39.7299 57.4673,39.7863 57.025,38.4339 56.2361,37.5054 56.5484,37.565 56.5843,38.9742 56.8774,38.4915 57.1308,40.1389 58.048)),((40.4136 58.7241,39.7184 58.3823,39.6392 58.3821,39.6392 58.3427,38.3737 57.6908,38.3395 57.7103,38.8533 58.0638,38.432 58.2584,38.7465 58.4255,39.5485 58.7133,39.4085 58.7696,39.8163 58.9766,40.4552 58.9011,40.4136 58.7241)),((38.3737 57.6908,38.7325 57.4835,38.2186 57.2717,38.4915 57.1308,38.2699 57.0021,37.4489 57.1909,37.9669 57.4734,38.128 57.516,38.0744 57.5312,38.3737 57.6908))) +MULTIPOLYGON(((-20 -10.3067,-20 -20,-10 -20.8791,-10 -40,-40 -40,-40 -10,-20 -10.3067)),((20 10.3067,20 -20,-10 -20.8791,-10 -10,-20 -10.3067,-20 20,10 20.8791,10 10,20 10.3067)),((20 10.3067,20 20,10 20.8791,10 40,40 40,40 10,20 10.3067))) -------- MultiPolygon with Polygon with Holes -MULTIPOLYGON(((24.3677 61.4598,26.6528 61.1008,26.8726 61.7107,30.564 61.0583,31.3989 62.0215,36.0132 61.1432,36.8921 62.0009,42.6489 60.6301,43.5718 61.3757,47.0435 59.8889,49.5923 60.0868,49.1528 58.1707,51.9214 57.9148,50.2515 56.1455,52.6685 55.826,51.6577 54.2909,52.8882 53.9302,50.647 53.0148,51.394 52.4828,48.0542 51.1793,49.2847 50.5414,47.1753 49.153,43.9233 49.8096,42.561 48.7779,36.936 49.6676,35.2661 48.7489,32.8052 49.5252,27.2241 48.9802,26.1255 50.4015,21.2036 50.205,20.0171 51.5634,17.4683 53.0148,19.4458 54.0852,19.4458 55.8753,19.5776 57.4922,19.5776 58.6769,24.3677 61.4598),(24.4556 59.4227,21.2036 58.4937,21.3354 56.897,21.5991 55.9246,25.2026 55.9984,28.8501 57.0646,27.0923 57.8448,28.8062 59.1759,26.2573 59.1759,24.4556 59.4227),(35.1489 56.5859,36.7074 56.211,34.7126 56.11,36.5563 55.6352,35.1358 55.5327,36.4354 55.3441,34.8335 55.0162,35.6193 54.4929,34.894 54.1226,35.3776 53.0462,37.0604 52.9744,34.9585 51.4814,36.5405 50.4015,39.6606 50.2893,39.7925 52.1335,41.77 50.6808,44.4946 51.9713,47.3071 52.5095,44.0552 53.5403,46.604 53.6967,47.6147 55.4041,45.3735 55.4041,42.8247 56.5837,40.4412 56.1511,40.425 56.1942,39.8205 56.0763,39.7903 56.4121,40.4855 56.4957,40.3343 56.9599,39.7903 56.9929,39.7379 57.4051,40.0149 57.4677,40.3343 57.4673,40.3237 57.5365,42.6929 58.0314,40.8911 59.2659,39.2792 59.0373,38.5209 59.119,38.8838 58.9777,38.0944 58.8545,37.3119 59.0258,37.2327 59.0233,37.1118 59.6677,35.1343 59.8448,31.9702 58.9727,32.25 58.4976,32.2342 58.4928,32.1738 58.0318,31.5088 57.4998,31.7506 56.8609,31.5088 
55.9411,32.204 55.5156,31.8413 54.9989,31.627 54.7093,29.5972 55.5037,29.1577 55.7518,22.5659 55.1286,22.5659 53.5403,22.0386 51.4814,26.2573 51.4266,30.1245 50.5414,32.1899 51.1793,30.1245 53.1731,32.4808 53.1989,33.1712 52.8276,34.7731 52.9188,34.7731 53.1793,35.0903 53.1731,34.7731 53.3243,34.7731 53.7847,34.1081 54.1757,35.0753 54.5981,34.2593 54.9642,35.0149 55.3613,33.5036 55.3785,32.748 55.9072,35.6798 55.6863,32.8387 56.3117,34.5917 56.2949,35.71 56.3117,33.8361 56.6953,33.7182 56.7292,35.1489 56.5859)),((35.1489 56.5859,34.229 56.7948,36.9794 57.0751,35.7705 57.2554,37.0097 57.4998,35.7402 57.7909,37.1608 58.0478,36.0123 58.2869,37.191 58.6819,34.6522 58.9167,37.2327 59.0233,37.2876 58.7226,38.0944 58.8545,38.5813 58.7446,37.4026 58.3187,38.3395 57.9356,37.4328 57.7103,38.128 57.516,37.1608 57.2554,38.3092 56.9929,38.309 56.9928,36.375 56.6455,36.8799 56.4895,36.6724 56.4139,35.1489 56.5859)),((33.1079 56.9523,32.25 58.4976,33.4734 58.8542,34.7428 59.5659,33.8361 58.6819,36.3447 58.5402,33.6245 58.271,36.4354 58.0478,33.2316 57.7748,36.1936 57.4998,33.1712 57.337,36.0727 57.0915,33.1079 56.9523)),((33.1079 56.9523,33.1392 56.8934,32.9596 56.9434,33.1079 56.9523)),((33.7182 56.7292,33.2007 56.7768,33.1392 56.8934,33.7182 56.7292)),((37.0604 52.9744,37.2165 53.0798,37.4328 52.9552,37.0604 52.9744)),((34.7731 53.3243,34.7731 53.1793,32.4808 53.1989,32.0831 53.408,32.476 53.8383,31.4182 54.4227,31.627 54.7093,33.1128 54.0852,34.7731 53.3243)),((36.9508 55.414,37.7653 55.1891,36.8822 54.975,36.5845 55.3291,36.9508 55.414)),((36.9508 55.414,36.8283 55.4471,37.9482 55.6376,36.9508 55.414)),((37.2165 53.0798,35.9216 53.8026,36.919 53.8561,36.0123 54.7554,36.8822 54.975,37.0572 54.7635,36.7074 54.6506,37.3119 53.9273,36.9794 53.5878,37.4471 53.2343,37.2165 53.0798)),((37.0572 54.7635,38.3093 55.1546,37.7955 55.3956,38.4907 55.5327,38.3184 55.7179,40.4412 56.1511,40.5761 55.7884,39.7601 55.7544,39.8205 55.2753,40.3948 55.2408,40.3948 54.8773,39.5485 54.8773,39.5485 54.5631,40.3948 54.4403,40.6064 54.034,39.9716 53.9807,40.2437 53.5878,39.5485 53.5878,40.0019 53.354,38.3395 53.2817,39.5787 52.6996,37.8559 52.9188,37.4471 53.2343,37.9907 53.5925,37.0572 54.7635)),((38.5798 57.0849,38.2186 57.2717,38.7325 57.4835,38.3395 57.7103,38.8533 58.0638,38.3698 58.2869,39.5485 58.7133,38.8838 58.9777,39.2792 59.0373,40.4552 58.9011,40.3343 58.3821,39.6392 58.3821,39.6392 58.0478,40.2437 58.0478,40.3237 57.5365,40.0149 57.4677,39.7299 57.4673,39.7379 57.4051,39.0894 57.2553,38.5798 57.0849)),((38.5798 57.0849,38.9742 56.8774,37.463 56.5623,38.944 56.0594,38.1884 55.8564,38.3184 55.7179,38.0262 55.6546,36.647 55.9411,38.2186 56.0594,36.8799 56.4895,38.309 56.9928,38.3093 56.9929,38.3092 56.9929,38.5798 57.0849)),((37.9482 55.6376,38.0262 55.6546,38.0373 55.6523,37.9482 55.6376))) +MULTIPOLYGON(((-10 -20.8791,-20 -20,-20 -10.3067,-10 -10,-10 -20.8791)),((10 20.8791,20 20,20 10.3067,10 10,10 20.8791)),((50 50,50 -50,-50 -50,-50 50,50 50),(20 10.3067,40 10,40 40,10 40,10 20.8791,-20 20,-20 -10.3067,-40 -10,-40 -40,-10 -40,-10 -20.8791,20 -20,20 10.3067))) -------- Polygon with Polygon with Holes -MULTIPOLYGON(((24.3677 61.4598,26.6528 61.1008,26.8726 61.7107,30.564 61.0583,31.3989 62.0215,36.0132 61.1432,36.8921 62.0009,42.6489 60.6301,43.5718 61.3757,47.0435 59.8889,49.5923 60.0868,49.1528 58.1707,51.9214 57.9148,50.2515 56.1455,52.6685 55.826,51.6577 54.2909,52.8882 53.9302,50.647 53.0148,51.394 52.4828,48.0542 51.1793,49.2847 50.5414,47.1753 49.153,43.9233 49.8096,42.561 48.7779,36.936 49.6676,35.2661 
48.7489,32.8052 49.5252,27.2241 48.9802,26.1255 50.4015,21.2036 50.205,20.0171 51.5634,17.4683 53.0148,19.4458 54.0852,19.4458 55.8753,19.5776 57.4922,19.5776 58.6769,24.3677 61.4598),(24.4556 59.4227,21.2036 58.4937,21.3354 56.897,21.5991 55.9246,25.2026 55.9984,28.8501 57.0646,27.0923 57.8448,28.8062 59.1759,26.2573 59.1759,24.4556 59.4227),(35.9475 59.7758,36.2843 59.9616,34.8637 59.9768,34.2247 59.6064,31.9702 58.9727,32.2964 58.4175,30.179 56.9764,30.179 57.9196,33.6548 59.9465,32.3249 59.9465,30.4812 58.8542,30.5719 59.9919,29.4536 59.7796,29.4171 55.606,29.1577 55.7518,22.5659 55.1286,22.5659 53.5403,22.0386 51.4814,26.2573 51.4266,30.1245 50.5414,32.1899 51.1793,31.1968 52.1649,31.2368 52.1652,32.5603 53.1989,33.8733 53.1922,32.5969 52.2208,34.2895 52.2208,37.2766 54.4948,37.7431 53.9104,35.4682 52.2022,35.9681 52.2157,34.9585 51.4814,36.5405 50.4015,39.6606 50.2893,39.7925 52.1335,41.77 50.6808,44.4946 51.9713,47.3071 52.5095,44.0552 53.5403,46.604 53.6967,47.6147 55.4041,45.3735 55.4041,44.4212 55.8594,44.4751 59.81,39.7299 59.9314,37.6322 58.7797,37.2876 58.7226,37.2102 59.1452,38.7325 59.9465,37.2817 59.9768,36.7912 59.6986,35.9475 59.7758)),((32.6512 57.792,32.2964 58.4175,34.2247 59.6064,35.1343 59.8448,35.9475 59.7758,32.6512 57.792)),((32.6512 57.792,32.9378 57.2699,30.2394 55.2753,30.3301 56.1942,32.6512 57.792)),((33.2446 56.7729,33.2007 56.7768,32.9378 57.2699,36.7912 59.6986,37.1118 59.6677,37.2102 59.1452,33.2446 56.7729)),((33.2446 56.7729,34.2635 56.6767,31.5682 54.7333,30.7705 55.0525,33.2446 56.7729)),((34.2635 56.6767,37.6322 58.7797,40.2079 59.1718,35.4536 56.5531,34.2635 56.6767)),((40.2079 59.1718,40.6366 59.3817,40.8804 59.2644,40.2079 59.1718)),((34.3351 53.53,35.0903 53.1731,33.8733 53.1922,34.3351 53.53)),((34.3351 53.53,33.5144 53.9057,38.1759 56.9472,39.0894 57.2553,40.9691 57.677,37.1934 55.4694,36.5845 55.3291,36.7219 55.1665,34.3351 53.53)),((32.6907 54.2663,33.1128 54.0852,33.5144 53.9057,32.5603 53.1989,31.1682 53.1903,32.6907 54.2663)),((32.6907 54.2663,32.2591 54.4483,35.4536 56.5531,36.1815 56.4715,32.6907 54.2663)),((38.1759 56.9472,36.6724 56.4139,36.1815 56.4715,41.168 59.0834,41.5887 58.8012,38.1759 56.9472)),((37.2766 54.4948,36.7219 55.1665,37.1934 55.4694,39.4328 55.9511,37.2766 54.4948)),((40.9691 57.677,42.2498 58.3455,42.6929 58.0314,40.9691 57.677)),((30.7705 55.0525,30.2092 54.6331,30.2394 53.6774,31.5682 54.7333,32.2591 54.4483,30.5408 53.1811,30.1245 53.1731,30.3098 53.0028,29.3931 52.2763,29.4171 55.606,29.5972 55.5037,30.7705 55.0525)),((30.5408 53.1811,31.1682 53.1903,30.5785 52.7531,30.3098 53.0028,30.5408 53.1811)),((30.5785 52.7531,31.1968 52.1649,29.7861 52.1466,30.5785 52.7531)),((35.9681 52.2157,37.9907 53.5925,37.7431 53.9104,41.4519 56.3413,42.8247 56.5837,44.4212 55.8594,44.4146 55.3097,40.0925 52.1652,38.3395 52.1652,43.0243 55.3269,43.0243 56.2614,37.1608 52.2393,35.9681 52.2157)),((39.4328 55.9511,43.0243 58.0797,43.0243 57.2554,41.4519 56.3413,39.4328 55.9511)),((41.168 59.0834,40.9299 59.2404,41.2108 59.1035,41.168 59.0834)),((41.5887 58.8012,41.6944 58.8542,42.5105 58.477,42.2498 58.3455,41.5887 58.8012)),((40.9299 59.2404,40.8804 59.2644,40.8911 59.2659,40.9299 59.2404))) +MULTIPOLYGON(((-20 -10.3067,-10 -10,-10 -20.8791,-20 -20,-20 -10.3067)),((10 20.8791,20 20,20 10.3067,10 10,10 20.8791)),((50 50,50 -50,-50 -50,-50 50,50 50),(20 10.3067,40 10,40 40,10 40,10 20.8791,-20 20,-20 -10.3067,-40 -10,-40 -40,-10 -40,-10 -20.8791,20 -20,20 10.3067))) diff --git 
a/tests/queries/0_stateless/01304_polygons_sym_difference.sql b/tests/queries/0_stateless/01304_polygons_sym_difference.sql index f4893dd5b33..c4129676b26 100644 --- a/tests/queries/0_stateless/01304_polygons_sym_difference.sql +++ b/tests/queries/0_stateless/01304_polygons_sym_difference.sql @@ -1,10 +1,15 @@ select polygonsSymDifferenceCartesian([[[(0, 0),(0, 3),(1, 2.9),(2, 2.6),(2.6, 2),(2.9, 1),(3, 0),(0, 0)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); - + +-- Google "draw wkt online" + select '-------- MultiPolygon with Polygon'; -select wkt(polygonsSymDifferenceSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], [[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], 
[[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], [[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]])) format TSV; +select 
wkt(polygonsSymDifferenceSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], [[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], 
[[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], [[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]])) format Null; +SELECT wkt(arraySort(polygonsSymDifferenceSpherical([[[(10., 10.), (10., 40.), (40., 40.), (40., 10.), (10., 10.)]], [[(-10., -10.), (-10., -40.), (-40., -40.), (-40., -10.), (-10., -10.)]]], [[[(-20., -20.), (-20., 20.), (20., 20.), (20., -20.), (-20., -20.)]]]))); select '-------- MultiPolygon with 
Polygon with Holes'; -select wkt(polygonsSymDifferenceSpherical([[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], [[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], 
[[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]], [[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], 
[(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]])) format TSV; +select wkt(polygonsSymDifferenceSpherical([[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], 
[[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], [[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]], 
[[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], [(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]])) format Null; +SELECT wkt(arraySort(polygonsSymDifferenceSpherical([[(50.,50.),(50.,-50.),(-50.,-50.),(-50.,50.),(50.,50.)],[(10.,10.),(10.,40.),(40.,40.),(40.,10.),(10.,10.)],[(-10.,-10.),(-10.,-40.),(-40.,-40.),(-40.,-10.),(-10.,-10.)]], [[[(-20.,-20.),(-20.,20.),(20.,20.),(20.,-20.),(-20.,-20.)]]]))); select '-------- Polygon with Polygon with Holes'; -select 
wkt(polygonsSymDifferenceSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], [[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], [(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], 
[(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]])) format TSV; +select wkt(polygonsSymDifferenceSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], 
[[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], [(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]])) format Null; +SELECT wkt(arraySort(polygonsSymDifferenceSpherical([[(50., 50.), (50., -50.), (-50., -50.), (-50., 50.), (50., 50.)], [(10., 10.), (10., 40.), (40., 40.), (40., 10.), (10., 10.)], [(-10., -10.), (-10., -40.), (-40., -40.), (-40., -10.), (-10., -10.)]], [[(-20., -20.), (-20., 20.), (20., 20.), (20., -20.), (-20., -20.)]]))); diff --git 
a/tests/queries/0_stateless/01308_polygon_area.reference b/tests/queries/0_stateless/01308_polygon_area.reference
index 56d0c4ef174..bc80145f180 100644
--- a/tests/queries/0_stateless/01308_polygon_area.reference
+++ b/tests/queries/0_stateless/01308_polygon_area.reference
@@ -1,2 +1,2 @@
 25
-9.387703638370358e-8
+9.387704e-8
diff --git a/tests/queries/0_stateless/01308_polygon_area.sql b/tests/queries/0_stateless/01308_polygon_area.sql
index e3a44ad7d51..494d0de4570 100644
--- a/tests/queries/0_stateless/01308_polygon_area.sql
+++ b/tests/queries/0_stateless/01308_polygon_area.sql
@@ -1,3 +1,3 @@
 select polygonAreaCartesian([[[(0., 0.), (0., 5.), (5., 5.), (5., 0.)]]]);
-select polygonAreaSpherical([[[(4.346693, 50.858306), (4.367945, 50.852455), (4.366227, 50.840809), (4.344961, 50.833264), (4.338074, 50.848677), (4.346693, 50.858306)]]]);
+select round(polygonAreaSpherical([[[(4.346693, 50.858306), (4.367945, 50.852455), (4.366227, 50.840809), (4.344961, 50.833264), (4.338074, 50.848677), (4.346693, 50.858306)]]]), 14);
 SELECT polygonAreaCartesian([]); -- { serverError 36 }
diff --git a/tests/queries/0_stateless/01508_explain_header.reference b/tests/queries/0_stateless/01508_explain_header.reference
index 4bfbe1c818b..7510e67c643 100644
--- a/tests/queries/0_stateless/01508_explain_header.reference
+++ b/tests/queries/0_stateless/01508_explain_header.reference
@@ -1,6 +1,4 @@
 Expression ((Projection + Before ORDER BY))
 Header: x UInt8
-  SettingQuotaAndLimits (Set limits and quota after reading from storage)
+  ReadFromStorage (SystemOne)
   Header: dummy UInt8
-    ReadFromStorage (SystemOne)
-    Header: dummy UInt8
diff --git a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference
index b6b8b04907c..1d76d9bd631 100644
--- a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference
+++ b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference
@@ -7,12 +7,11 @@ ExpressionTransform × 3
   AggregatingInOrderTransform × 3
     (Expression)
     ExpressionTransform × 3
-      (SettingQuotaAndLimits)
-        (ReadFromMergeTree)
-        ExpressionTransform × 4
-          MergeTreeInOrder 0 → 1
-          MergingSortedTransform 2 → 1
-            ExpressionTransform × 2
-              MergeTreeInOrder × 2 0 → 1
-          ExpressionTransform
-            MergeTreeInOrder 0 → 1
+      (ReadFromMergeTree)
+      ExpressionTransform × 4
+        MergeTreeInOrder 0 → 1
+        MergingSortedTransform 2 → 1
+          ExpressionTransform × 2
+            MergeTreeInOrder × 2 0 → 1
+        ExpressionTransform
+          MergeTreeInOrder 0 → 1
diff --git a/tests/queries/0_stateless/01553_settings_early_apply.reference b/tests/queries/0_stateless/01553_settings_early_apply.reference
index 3dad208be5d..e4e4738c6ab 100644
--- a/tests/queries/0_stateless/01553_settings_early_apply.reference
+++ b/tests/queries/0_stateless/01553_settings_early_apply.reference
@@ -1,9 +1,6 @@
 number
 0
-  number
-
-
 1
 0
 1
 2
diff --git a/tests/queries/0_stateless/01556_explain_select_with_union_query.reference b/tests/queries/0_stateless/01556_explain_select_with_union_query.reference
index 40c99db429d..c18e6b70b0d 100644
--- a/tests/queries/0_stateless/01556_explain_select_with_union_query.reference
+++ b/tests/queries/0_stateless/01556_explain_select_with_union_query.reference
@@ -1,252 +1,180 @@
 Union
   Expression ((Projection + Before ORDER BY))
-    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-      ReadFromStorage (SystemOne)
+    ReadFromStorage (SystemOne)
   Expression ((Projection + Before ORDER BY))
-    SettingQuotaAndLimits (Set limits
and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemOne) +Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) +Distinct + Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) +Distinct + Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) +Distinct + Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) +Distinct + Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) ReadFromStorage (SystemOne) Union Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Distinct - Union - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Distinct - Union - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Distinct - Union - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Distinct - Union - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before 
ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Union - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Distinct Union Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Distinct Union Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Distinct - Union - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Projection + Before ORDER BY)) ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) ReadFromStorage (SystemOne) Distinct Union Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Distinct - Union - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Union - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits 
(Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Projection + Before ORDER BY)) ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) +Distinct + Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) +Distinct + Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) ReadFromStorage (SystemOne) Union Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemOne) +Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) +Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) +Distinct + Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) +Distinct + Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) +Distinct + Union + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) + ReadFromStorage (SystemOne) + Expression ((Projection + Before ORDER BY)) ReadFromStorage (SystemOne) Union Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Distinct - Union - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Distinct - Union - Expression ((Projection + Before 
ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Distinct - Union - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) -Union + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) - Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Distinct Union Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Union Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Union Expression ((Projection + Before ORDER BY)) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemOne) + ReadFromStorage (SystemOne) Expression 
((Projection + Before ORDER BY))
-    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-      ReadFromStorage (SystemOne)
+    ReadFromStorage (SystemOne)
 Union
   Expression ((Projection + Before ORDER BY))
-    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-      ReadFromStorage (SystemOne)
+    ReadFromStorage (SystemOne)
   Expression ((Projection + Before ORDER BY))
-    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-      ReadFromStorage (SystemOne)
+    ReadFromStorage (SystemOne)
diff --git a/tests/queries/0_stateless/01558_ttest.reference b/tests/queries/0_stateless/01558_ttest.reference
index 5cbc86038c3..af4a33a67c7 100644
--- a/tests/queries/0_stateless/01558_ttest.reference
+++ b/tests/queries/0_stateless/01558_ttest.reference
@@ -1,16 +1,16 @@
 0.021378001462867
-0.0213780014628671
+0.021378
 0.090773324285671
-0.0907733242891952
+0.090773
 0.00339907162713746
-0.0033990715715539
+0.003399
 -0.5028215369186904 0.6152361677168877
--0.5028215369187079 0.6152361677171103
+-0.502822 0.615236
 14.971190998235835 5.898143508382202e-44
-14.971190998235837 0
-7.650530175770567 9.960200184229425
+14.971191 0
+7.65053 9.9602
 -2.610898982580138 0.00916587538237954
--2.610898982580134 0.0091658753823834
+-2.610899 0.009166
 -28.740781574102936 7.667329672103986e-133
--28.74078157410298 0
--9.625938422388245 -8.395483817611758
+-28.740782 0
+-9.625938 -8.395484
diff --git a/tests/queries/0_stateless/01558_ttest.sql b/tests/queries/0_stateless/01558_ttest.sql
index 94a48c38fcc..9eab38a552d 100644
--- a/tests/queries/0_stateless/01558_ttest.sql
+++ b/tests/queries/0_stateless/01558_ttest.sql
@@ -3,34 +3,34 @@ CREATE TABLE welch_ttest (left Float64, right UInt8) ENGINE = Memory;
 INSERT INTO welch_ttest VALUES (27.5, 0), (21.0, 0), (19.0, 0), (23.6, 0), (17.0, 0), (17.9, 0), (16.9, 0), (20.1, 0), (21.9, 0), (22.6, 0), (23.1, 0), (19.6, 0), (19.0, 0), (21.7, 0), (21.4, 0), (27.1, 1), (22.0, 1), (20.8, 1), (23.4, 1), (23.4, 1), (23.5, 1), (25.8, 1), (22.0, 1), (24.8, 1), (20.2, 1), (21.9, 1), (22.1, 1), (22.9, 1), (20.5, 1), (24.4, 1);
 SELECT '0.021378001462867';
-SELECT roundBankers(welchTTest(left, right).2, 16) from welch_ttest;
+SELECT roundBankers(welchTTest(left, right).2, 6) from welch_ttest;
 DROP TABLE IF EXISTS welch_ttest;
 CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory;
 INSERT INTO welch_ttest VALUES (30.02, 0), (29.99, 0), (30.11, 0), (29.97, 0), (30.01, 0), (29.99, 0), (29.89, 1), (29.93, 1), (29.72, 1), (29.98, 1), (30.02, 1), (29.98, 1);
 SELECT '0.090773324285671';
-SELECT roundBankers(welchTTest(left, right).2, 16) from welch_ttest;
+SELECT roundBankers(welchTTest(left, right).2, 6) from welch_ttest;
 DROP TABLE IF EXISTS welch_ttest;
 CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory;
 INSERT INTO welch_ttest VALUES (0.010268, 0), (0.000167, 0), (0.000167, 0), (0.159258, 1), (0.136278, 1), (0.122389, 1);
 SELECT '0.00339907162713746';
-SELECT roundBankers(welchTTest(left, right).2, 16) from welch_ttest;
+SELECT roundBankers(welchTTest(left, right).2, 6) from welch_ttest;
 DROP TABLE IF EXISTS welch_ttest;
 CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory;
 INSERT INTO welch_ttest VALUES (14.72789, 0), (9.61661, 0), (13.57615, 0), (3.98392, 0), (11.98889, 0), (10.99422, 0), (5.44792, 0), (20.29346, 0), (7.05926, 0), (9.22732, 0), (12.06847, 0), (13.52612, 0), (8.24597, 0), (9.35245, 0), (10.12297, 0), (15.80624, 0), (13.68613, 0), (10.72729, 0), (5.62078, 0), (6.12229, 0), (6.03801, 0),
(8.95585, 0), (24.04613, 0), (9.04757, 0), (2.68263, 0), (15.43935, 0), (2.89423, 0), (4.01423, 0), (4.30568, 0), (11.99948, 0), (8.40574, 0), (10.86642, 0), (9.4266, 0), (-8.12752, 0), (7.91634, 0), (7.3967, 0), (2.26431, 0), (14.20118, 0), (6.68233, 0), (15.46221, 0), (7.88467, 0), (11.20011, 0), (8.92027, 0), (10.27926, 0), (5.14395, 0), (5.62178, 0), (12.84383, 0), (9.98009, 0), (-0.69789, 0), (11.41386, 0), (7.76863, 0), (7.21743, 0), (1.81176, 0), (9.43762, 0), (19.22117, 0), (2.97128, 0), (14.32851, 0), (7.54959, 0), (3.81545, 0), (10.1281, 0), (2.48596, 0), (10.0461, 0), (3.59714, 0), (9.73522, 0), (18.8077, 0), (3.15148, 0), (12.26062, 0), (5.66707, 0), (6.58623, 0), (17.30902, 0), (9.91391, 0), (5.36946, 0), (15.73637, 0), (16.96281, 0), (11.54063, 0), (18.37358, 0), (11.38255, 0), (10.53256, 0), (8.08833, 0), (16.27556, 0), (2.42969, 0), (9.56127, 0), (7.32998, 0), (9.19511, 0), (9.66903, 0), (4.15029, 0), (8.83511, 0), (14.60617, 0), (14.06143, 0), (5.39556, 0), (10.11871, 0), (10.56619, 0), (14.4462, 0), (10.42106, 0), (7.75551, 0), (11.00418, 0), (4.47226, 0), (16.35461, 0), (18.55174, 0), (11.82044, 0), (7.39454, 0), (11.27767, 0), (6.83827, 0), (7.76858, 0), (15.97614, 0), (14.53781, 0), (12.99546, 0), (16.91151, 0), (9.65012, 0), (14.25487, 0), (14.03618, 0), (2.57382, 0), (2.50779, 0), (14.24787, 0), (13.34666, 0), (7.31102, 0), (10.22981, 0), (17.4435, 0), (21.2074, 0), (6.64191, 0), (18.7086, 0), (14.78686, 0), (9.85287, 0), (4.48263, 0), (14.17469, 0), (14.4342, 0), (19.2481, 0), (3.47165, 0), (8.28712, 0), (8.81657, 0), (0.92319, 0), (20.41106, 0), (6.76127, 0), (22.00242, 0), (8.66129, 0), (10.9929, 0), (17.95494, 0), (17.20996, 0), (12.18888, 0), (12.14257, 0), (15.81243, 0), (4.43362, 0), (1.17567, 0), (15.60881, 0), (9.34833, 0), (6.33513, 0), (-0.83095, 0), (12.43268, 0), (6.63207, 0), (11.96877, 0), (14.81029, 0), (21.84876, 0), (3.75896, 0), (6.91307, 0), (13.73015, 0), (8.63753, 0), (15.71679, 0), (1.74565, 0), (9.16895, 0), (5.70685, 0), (5.00117, 0), (13.06888, 0), (7.51204, 0), (15.34885, 0), (5.20264, 0), (8.59043, 0), (6.45619, 0), (14.61979, 0), (11.7075, 0), (14.04901, 0), (4.20525, 0), (15.1733, 0), (3.12934, 0), (8.08049, 0), (15.41273, 0), (16.90751, 0), (5.86893, 0), (7.1086, 0), (4.418, 0), (12.0614, 0), (7.07887, 0), (3.61585, 0), (11.73001, 0), (10.80449, 0), (8.40311, 0), (9.91276, 0), (16.4164, 0), (5.25034, 0), (15.20283, 0), (10.42909, 0), (9.53888, 0), (14.68939, 0), (6.60007, 0), (18.31058, 0), (7.01885, 0), (18.71631, 0), (10.50002, 0), (10.7517, 0), (4.23224, 0), (2.28924, 0), (8.56059, 0), (8.25095, 0), (9.15673, 0), (13.28409, 0), (8.4513, 0), (2.83911, 0), (2.79676, 0), (9.11055, 0), (7.18529, 0), (-4.1258, 0), (5.28306, 0), (6.82757, 0), (10.89035, 0), (5.24822, 0), (11.935, 0), (6.45675, 0), (10.18088, 0), (4.9932, 0), (18.09939, 0), (8.11738, 0), (5.37883, 0), (10.50339, 0), (16.64093, 0), (14.77263, 0), (13.71385, 0), (6.98746, 0), (10.74635, 0), (5.49432, 0), (13.46078, 0), (10.67565, 0), (9.0291, 0), (11.51417, 0), (13.07118, 0), (9.5049, 0), (8.50611, 0), (6.47606, 0), (13.06526, 0), (19.08658, 0), (9.49741, 0), (10.60865, 0), (2.28996, 0), (8.12846, 0), (5.62241, 0), (4.07712, 0), (17.98526, 0), (9.466, 0), (11.38904, 0), (5.91826, 0), (1.52059, 0), (18.79161, 0), (18.20669, 0), (-1.67829, 0), (18.01586, 0), (16.31577, 0), (7.88281, 0), (8.46179, 0), (10.31113, 0), (14.88377, 0), (1.31835, 0), (2.53176, 0), (9.48625, 0), (3.97936, 0), (11.52319, 0), (13.24178, 0), (7.58739, 0), (10.00959, 0), (9.73361, 0), (8.35716, 0), 
(1.65491, 0), (11.11521, 0), (6.08355, 0), (10.04582, 0), (11.58237, 0), (16.40249, 0), (1.9691, 0), (13.22776, 0), (2.67059, 0), (9.83651, 0), (2.12539, 0), (9.27114, 0), (9.0699, 0), (2.78179, 0), (12.49311, 0), (12.97662, 0), (15.06359, 0), (16.91565, 0), (5.92011, 0), (5.81304, 0), (8.46425, 0), (9.48705, 0), (4.68191, 0), (5.70028, 0), (-0.78798, 0), (10.03442, 0), (15.45433, 0), (9.43845, 0), (3.05825, 0), (6.92126, 0), (14.05905, 0), (19.71579, 0), (15.0131, 0), (4.50386, 0), (1.31061, 0), (10.81197, 0), (14.32942, 0), (9.26469, 0), (7.27679, 0), (22.69295, 0), (12.03763, 0), (7.34876, 0), (16.60689, 0), (7.48786, 0), (15.78602, 0), (17.21048, 0), (13.93482, 0), (9.69911, 0), (12.24315, 0), (10.58131, 0), (19.57006, 0), (9.8856, 0), (11.70302, 0), (7.89864, 0), (12.24831, 0), (16.93707, 0), (9.65467, 0), (4.221, 0), (15.45229, 0), (12.83088, 0), (7.58313, 0), (12.895, 0), (10.02471, 0), (13.36059, 0), (5.07864, 0), (9.72017, 0), (11.05809, 0), (15.28528, 0), (13.99834, 0), (19.26989, 0), (9.41846, 0), (11.65425, 0), (8.49638, 0), (6.38592, 0), (-4.69837, 0), (12.22061, 0), (9.41331, 0), (13.2075, 0), (12.97005, 0), (11.44352, 0), (9.79805, 0), (6.93116, 0), (10.07691, 0), (22.05892, 0), (7.80353, 0), (-2.17276, 0), (0.61509, 0), (8.35842, 0), (17.77108, 0), (14.70841, 0), (1.27992, 0), (15.62699, 0), (9.32914, 0), (15.41866, 0), (10.82009, 0), (3.29902, 0), (9.21998, 0), (7.93845, 0), (10.33344, 0), (12.06399, 0), (5.5308, 0), (8.38727, 0), (18.11104, 0), (8.86565, 0), (19.41825, 0), (9.52376, 0), (3.94552, 0), (9.37587, 0), (15.44954, 0), (15.90527, 0), (13.18927, 0), (7.01646, 0), (9.06005, 0), (9.06431, 0), (5.76006, 0), (9.18705, 0), (-3.48446, 0), (15.89817, 0), (12.94719, 0), (23.69426, 0), (17.47755, 0), (15.61528, 0), (0.54832, 0), (14.32916, 0), (9.55305, 0), (13.79891, 0), (0.82544, 0), (13.34875, 0), (9.07614, 0), (5.19621, 0), (2.1451, 0), (9.87726, 0), (8.45439, 0), (-1.41842, 0), (7.93598, 0), (11.23151, 0), (17.84458, 0), (7.02237, 0), (10.7842, 0), (4.42832, 0), (4.45044, 0), (1.50938, 0), (21.21651, 0), (6.2097, 0), (6.84354, 0), (18.53804, 0), (12.01072, 0), (4.8345, 0), (20.41587, 0), (14.48353, 0), (8.71116, 0), (12.42818, 0), (14.89244, 0), (8.03033, 0), (5.25917, 0), (2.30092, 0), (10.22504, 0), (15.37573, 0), (7.13666, 0), (4.45018, 0), (10.18405, 0), (3.91025, 0), (14.52304, 0), (13.14771, 0), (11.99219, 0), (9.21345, 0), (8.85106, 0), (12.91887, 0), (15.62308, 0), (11.88034, 0), (15.12097, 0), (11.58168, 0), (16.83051, 0), (5.25405, 0), (2.19976, 0), (4.56716, 0), (16.46053, 0), (5.61995, 0), (8.67704, 0), (5.62789, 0), (9.84815, 0), (13.05834, 0), (11.74205, 0), (3.88393, 0), (16.15321, 0), (4.83925, 0), (13.00334, 0), (4.4028, 0), (4.35794, 0), (4.47478, 0), (2.38713, 0), (4.25235, 0), (10.87509, 0), (9.82411, 0), (13.61518, 0), (10.25507, 0), (4.0335, 0), (10.69881, 0), (5.70321, 0), (6.96244, 0), (9.35874, 0), (6.28076, 0), (8.29015, 0), (6.88653, 0), (7.70687, 0), (8.2001, 0), (6.73415, 0), (3.82052, 0), (3.94469, 0), (15.82384, 0), (2.54004, 0), (10.74876, 0), (12.60517, 0), (17.7024, 0), (4.6722, 0), (13.67341, 0), (6.4565, 0), (12.95699, 0), (4.56912, 0), (5.58464, 0), (4.0638, 0), (13.05559, 0), (5.38269, 0), (0.16354, 0), (7.23962, 0), (7.38577, 0), (8.50951, 0), (13.72574, 0), (17.80421, 0), (3.01135, 0), (8.02608, 0), (14.23847, 0), (-8.65656, 1), (22.98234, 1), (23.80821, 1), (13.33939, 1), (-4.05537, 1), (23.5155, 1), (-6.45272, 1), (17.7903, 1), (11.463, 1), (5.28021, 1), (8.39157, 1), (6.02464, 1), (14.43732, 1), (15.76584, 1), (1.54391, 1), 
(1.24897, 1), (27.1507, 1), (7.71091, 1), (15.71846, 1), (32.97808, 1), (-1.79334, 1), (-9.23439, 1), (11.27838, 1), (0.72703, 1), (18.51557, 1), (9.16619, 1), (17.29624, 1), (-1.30208, 1), (-3.48018, 1), (10.12082, 1), (-8.01318, 1), (-14.22264, 1), (16.58174, 1), (-0.55975, 1), (5.61449, 1), (1.44626, 1), (7.89158, 1), (1.13369, 1), (-0.82609, 1), (12.23365, 1), (12.45443, 1), (14.46915, 1), (13.72627, 1), (18.41459, 1), (29.66702, 1), (1.51619, 1), (10.40078, 1), (3.33266, 1), (6.12036, 1), (11.86553, 1), (6.59422, 1), (22.0948, 1), (1.79623, 1), (14.29513, 1), (19.69162, 1), (-7.98033, 1), (5.48433, 1), (-2.28474, 1), (9.91876, 1), (10.64097, 1), (0.22523, 1), (17.01773, 1), (22.37388, 1), (14.04215, 1), (23.1244, 1), (18.96958, 1), (8.42663, 1), (3.7165, 1), (14.29366, 1), (23.50886, 1), (26.33722, 1), (26.72396, 1), (13.26287, 1), (12.97607, 1), (17.41838, 1), (8.63875, 1), (17.08943, 1), (23.15356, 1), (-4.4965, 1), (7.58895, 1), (26.04074, 1), (6.84245, 1), (20.56287, 1), (3.84735, 1), (-2.76304, 1), (13.1615, 1), (8.21954, 1), (-3.49943, 1), (22.12419, 1), (7.08323, 1), (16.12937, 1), (-0.32672, 1), (16.5942, 1), (7.68977, 1), (11.39484, 1), (-5.11987, 1), (20.87404, 1), (8.01007, 1), (3.26497, 1), (5.61253, 1), (20.69182, 1), (0.0296, 1), (21.904, 1), (22.46572, 1), (3.63685, 1), (-5.10846, 1), (14.86389, 1), (5.47188, 1), (18.44095, 1), (16.71368, 1), (6.36704, 1), (8.82663, 1), (14.6727, 1), (7.98383, 1), (2.65568, 1), (21.45827, 1), (11.77948, 1), (4.71979, 1), (3.17951, 1), (13.90226, 1), (15.50578, 1), (10.8026, 1), (16.91369, 1), (9.90552, 1), (13.87322, 1), (4.12366, 1), (-3.78985, 1), (1.7599, 1), (3.43715, 1), (-3.45246, 1), (23.64571, 1), (-4.96877, 1), (3.93514, 1), (1.49914, 1), (12.71519, 1), (5.11521, 1), (4.79872, 1), (20.89391, 1), (5.363, 1), (8.02765, 1), (14.30804, 1), (11.49002, 1), (14.25281, 1), (7.6573, 1), (15.49686, 1), (3.29327, 1), (2.27236, 1), (12.58104, 1), (19.19128, 1), (15.25901, 1), (6.5221, 1), (10.10965, 1), (12.75249, 1), (16.50977, 1), (-8.6697, 1), (8.28553, 1), (1.44315, 1), (4.65869, 1), (0.98149, 1), (0.16623, 1), (17.66332, 1), (4.35346, 1), (6.52742, 1), (-1.06631, 1), (-5.28454, 1), (14.25583, 1), (8.74058, 1), (1.89553, 1), (-0.92959, 1), (10.30289, 1), (-6.3744, 1), (-8.1706, 1), (10.95369, 1), (4.94384, 1), (28.40568, 1), (3.7004, 1), (2.52363, 1), (4.07997, 1), (7.8849, 1), (17.95409, 1), (16.67021, 1), (11.34377, 1), (-0.07446, 1), (22.00223, 1), (3.31778, 1), (18.50719, 1), (-3.58655, 1), (6.5394, 1), (12.40459, 1), (16.59866, 1), (7.54176, 1), (-1.51044, 1), (12.69758, 1), (2.9842, 1), (2.49187, 1), (2.04113, 1), (-2.46544, 1), (15.18368, 1), (-0.04058, 1), (-0.4127, 1), (10.5526, 1), (12.03982, 1), (12.10923, 1), (11.54954, 1), (-1.18613, 1), (11.30984, 1), (23.54105, 1), (10.67321, 1), (24.09196, 1), (7.5008, 1), (12.52233, 1), (4.30673, 1), (9.35793, 1), (4.44472, 1), (-7.00679, 1), (8.56241, 1), (23.73891, 1), (15.62708, 1), (16.09205, 1), (12.52074, 1), (14.58927, 1), (-4.80187, 1), (8.47964, 1), (7.75477, 1), (12.6893, 1), (7.14147, 1), (12.12654, 1), (12.32334, 1), (7.98909, 1), (3.26652, 1), (20.53684, 1), (32.3369, 1), (19.74911, 1), (-4.62897, 1), (8.26483, 1), (20.88451, 1), (-2.12982, 1), (25.61459, 1), (5.32091, 1), (-4.1196, 1), (7.57937, 1), (21.15847, 1), (6.46355, 1), (7.74846, 1), (19.62636, 1), (28.34629, 1), (26.73919, 1), (20.40427, 1), (3.03378, 1), (10.2537, 1), (7.47745, 1), (10.79184, 1), (3.91962, 1), (19.97973, 1), (18.87711, 1), (12.56157, 1), (11.46033, 1), (3.78661, 1), (-9.45748, 1), (12.06033, 1), 
(-0.74615, 1), (13.2815, 1), (24.78052, 1), (5.83337, 1), (17.4111, 1), (19.70331, 1), (11.78446, 1), (-1.366, 1), (1.37458, 1), (16.31483, 1), (32.63464, 1), (-3.79736, 1), (19.17984, 1), (-0.27705, 1), (-3.69456, 1), (28.38058, 1), (-1.36876, 1), (-25.63301, 1), (3.58644, 1), (-6.85667, 1), (13.42225, 1), (12.04671, 1), (28.99468, 1), (7.87662, 1), (2.61119, 1), (-3.56022, 1), (1.50022, 1), (14.55836, 1), (9.35831, 1), (16.9366, 1), (29.23126, 1), (15.31386, 1), (13.46112, 1), (7.39667, 1), (11.15599, 1), (9.80499, 1), (22.64923, 1), (8.67693, 1), (18.67335, 1), (-3.19127, 1), (22.94716, 1), (17.86834, 1), (16.98267, 1), (15.91653, 1), (11.79718, 1), (18.50208, 1), (8.90755, 1), (10.44843, 1), (4.67433, 1), (6.82287, 1), (10.82228, 1), (-4.18631, 1), (20.3872, 1), (11.84735, 1), (21.25376, 1), (10.55032, 1), (12.19023, 1), (0.63369, 1), (7.92381, 1), (17.90933, 1), (15.30781, 1), (10.01877, 1), (0.88744, 1), (22.20967, 1), (-4.23117, 1), (21.50819, 1), (11.27421, 1), (-16.23179, 1), (33.43085, 1), (5.15093, 1), (1.34505, 1), (6.027, 1), (-10.43035, 1), (27.45998, 1), (19.24886, 1), (-4.44761, 1), (5.453, 1), (12.73758, 1), (11.2897, 1), (31.032, 1), (7.39168, 1), (11.95245, 1), (26.279, 1), (-1.0255, 1), (10.36675, 1), (11.58439, 1), (27.8405, 1), (13.1707, 1), (31.39133, 1), (27.08301, 1), (-2.14368, 1), (4.08476, 1), (21.5573, 1), (16.69822, 1), (7.69955, 1), (8.32793, 1), (6.49235, 1), (-7.3284, 1), (10.58264, 1), (-6.17006, 1), (34.55782, 1), (10.93221, 1), (44.24299, 1), (14.6224, 1), (-7.42798, 1), (15.52351, 1), (11.33982, 1), (10.46716, 1), (13.0986, 1), (-4.25988, 1), (9.55316, 1), (0.75489, 1), (25.99212, 1), (-0.81401, 1), (3.49551, 1), (22.99402, 1), (10.99628, 1), (23.70223, 1), (2.71482, 1), (22.82309, 1), (31.25686, 1), (4.86318, 1), (-1.06476, 1), (15.10298, 1), (-0.61015, 1), (17.81246, 1), (-1.55788, 1), (18.09709, 1), (9.11271, 1), (9.94682, 1), (-7.33194, 1), (-4.67293, 1), (21.81717, 1), (7.16318, 1), (13.25649, 1), (13.88776, 1), (4.95793, 1), (17.65303, 1), (14.47382, 1), (13.19373, 1), (31.86093, 1), (5.73161, 1), (10.96492, 1), (6.97951, 1), (1.75136, 1), (10.96144, 1), (15.08137, 1), (9.95311, 1), (7.07729, 1), (3.08148, 1), (22.37954, 1), (8.51951, 1), (2.88746, 1), (26.73509, 1), (-2.88939, 1), (-2.82367, 1), (-0.35783, 1), (14.22076, 1), (11.50295, 1), (7.10171, 1), (8.28488, 1), (0.54178, 1), (13.8022, 1), (15.62157, 1), (10.79173, 1), (28.18946, 1), (30.43524, 1), (2.54914, 1), (9.89421, 1), (13.08631, 1), (4.68761, 1), (5.61516, 1), (22.88072, 1), (7.4735, 1), (11.27382, 1), (2.39559, 1), (-3.31889, 1), (9.61957, 1), (23.01381, 1), (-1.23467, 1), (9.07691, 1), (15.78056, 1), (12.28421, 1), (9.44888, 1), (13.16928, 1), (4.33357, 1), (2.21737, 1), (33.17833, 1), (13.25407, 1), (-2.47961, 1), (6.41401, 1), (18.8439, 1), (-4.63375, 1), (-8.2909, 1), (12.18221, 1), (-2.95356, 1), (19.61659, 1), (12.45056, 1), (-4.17198, 1), (21.9641, 1), (11.96416, 1), (12.74573, 1), (10.47873, 1), (12.73295, 1), (11.31373, 1), (9.9827, 1), (5.87138, 1), (4.24372, 1), (-23.72256, 1), (28.41337, 1), (4.88103, 1), (3.61902, 1), (8.93586, 1), (16.40759, 1), (27.84494, 1), (5.6001, 1), (14.51379, 1), (13.5576, 1), (12.92213, 1), (3.90686, 1), (17.07104, 1), (15.84268, 1), (17.38777, 1), (16.54766, 1), (5.94487, 1), (17.02804, 1), (7.66386, 1), (10.43088, 1), (6.16059, 1), (20.46178, 1), (20.02888, 1), (20.95949, 1), (6.50808, 1), (7.22366, 1), (8.06659, 1), (16.08241, 1), (13.83514, 1), (-0.33454, 1), (12.98848, 1), (12.99024, 1); SELECT '-0.5028215369186904', '0.6152361677168877'; 
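-- [Editorial sketch, not part of the original diff or test file.] The pair of
-- lines below is the pattern this change applies throughout: welchTTest returns
-- a tuple addressed positionally (.1 is the t-statistic, .2 the p-value), and
-- the precision passed to roundBankers drops from 16 to 6 decimal places so the
-- .reference output no longer depends on the last few bits of platform-specific
-- floating-point arithmetic. A minimal form of the stabilized check:
--
--   SELECT roundBankers(welchTTest(left, right).1, 6) AS t_stat,
--          roundBankers(welchTTest(left, right).2, 6) AS p_value
--   FROM welch_ttest;
--
-- roundBankers rounds half to even (banker's rounding), so values that tie at
-- the sixth decimal are not biased in one direction across runs.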
-SELECT roundBankers(welchTTest(left, right).1, 16) as t_stat, roundBankers(welchTTest(left, right).2, 16) as p_value from welch_ttest; +SELECT roundBankers(welchTTest(left, right).1, 6) as t_stat, roundBankers(welchTTest(left, right).2, 6) as p_value from welch_ttest; DROP TABLE IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (4.82025, 0), (6.13896, 0), (15.20277, 0), (14.15351, 0), (7.21338, 0), (8.55506, 0), (13.80816, 0), (11.28411, 0), (7.4612, 0), (7.43759, 0), (12.9832, 0), (-5.74783, 0), (12.47114, 0), (15.14223, 0), (3.40603, 0), (9.27323, 0), (7.88547, 0), (8.56456, 0), (4.59731, 0), (7.91213, 0), (7.33894, 0), (21.74811, 0), (11.92111, 0), (0.18828, 0), (10.47314, 0), (20.37396, 0), (11.04991, 0), (13.30083, 0), (14.28065, 0), (2.86942, 0), (24.96072, 0), (14.20164, 0), (18.28769, 0), (10.50949, 0), (9.22273, 0), (11.77608, 0), (8.56872, 0), (13.74535, 0), (11.65209, 0), (12.51894, 0), (17.76256, 0), (13.52122, 0), (8.70796, 0), (6.04749, 0), (16.33064, 0), (8.35636, 0), (14.03496, 0), (11.05834, 0), (14.49261, 0), (2.59383, 0), (8.01022, 0), (4.05458, 0), (13.26384, 0), (14.62058, 0), (10.52489, 0), (8.46357, 0), (6.4147, 0), (9.70071, 0), (12.47581, 0), (4.38333, 0), (17.54172, 0), (10.12109, 0), (7.73186, 0), (14.0279, 0), (11.6621, 0), (17.47045, 0), (15.50223, 0), (15.46034, 0), (13.39964, 0), (14.98025, 0), (15.87912, 0), (17.67374, 0), (9.64073, 0), (12.84904, 0), (7.70278, 0), (13.03156, 0), (9.04512, 0), (15.97014, 0), (8.96389, 0), (11.48009, 0), (9.71153, 0), (13.00084, 0), (12.39803, 0), (13.08188, 0), (5.82244, 0), (10.81871, 0), (8.2539, 0), (7.52114, 0), (9.11488, 0), (8.37482, 0), (14.48652, 0), (11.42152, 0), (16.03111, 0), (13.14057, 0), (-2.26351, 0), (15.50394, 0), (14.88603, 0), (13.37257, 0), (11.84026, 0), (7.66558, 0), (6.24584, 0), (3.6312, 0), (2.7018, 0), (5.63656, 0), (5.82643, 0), (10.06745, 0), (-0.5831, 0), (14.84202, 0), (9.5524, 0), (19.71713, 0), (14.23109, 0), (8.69105, 0), (5.33742, 0), (7.30372, 0), (7.93342, 0), (15.20884, 0), (7.53839, 0), (13.45311, 0), (11.04473, 0), (10.76673, 0), (15.44145, 0), (14.06596, 0), (9.14873, 0), (12.88372, 0), (8.74994, 0), (10.53263, 0), (16.16694, 0), (8.37197, 0), (3.43739, 0), (4.72799, 0), (9.08802, 0), (11.2531, 0), (5.16115, 0), (10.20895, 0), (18.70884, 0), (15.88924, 0), (3.38758, 0), (6.46449, 0), (10.21088, 0), (14.08458, 0), (15.74508, 0), (19.31896, 0), (13.19641, 0), (11.95409, 0), (10.70718, 0), (1.05245, 0), (10.04772, 0), (17.01369, 0), (10.2286, 0), (19.58323, 0), (7.02892, 0), (4.16866, 0), (8.94326, 0), (4.99854, 0), (8.88352, 0), (18.65422, 0), (17.32328, 0), (9.33492, 0), (14.94788, 0), (8.05863, 0), (14.6737, 0), (10.93801, 0), (0.54036, 0), (-0.34242, 0), (5.89076, 0), (3.15189, 0), (1.94421, 0), (6.38698, 0), (10.50654, 0), (8.95362, 0), (6.23711, 0), (11.75359, 0), (12.42155, 0), (-1.55472, 0), (4.6688, 0), (10.48087, 0), (11.74615, 0), (9.26822, 0), (7.55517, 0), (12.76005, 0), (16.47102, 0), (11.31297, 0), (14.37437, 0), (2.38799, 0), (6.44577, 0), (5.07471, 0), (11.55123, 0), (7.76795, 0), (10.60116, 0), (14.40885, 0), (11.58158, 0), (8.81648, 0), (12.92299, 0), (11.26939, 0), (17.95014, 0), (2.95002, 0), (17.41959, 0), (11.12455, 0), (8.78541, 0), (14.36413, 0), (12.98554, 0), (12.58505, 0), (15.49789, 0), (11.70999, 0), (0.65596, 0), (11.08202, 0), (14.75752, 0), (6.84385, 0), (9.27245, 0), (13.78243, 0), (17.4863, 0), (4.01777, 0), (11.82861, 0), (13.86551, 0), (6.16591, 0), (8.71589, 0), (16.77195, 
0), (17.23243, 0), (-2.12941, 0), (5.66629, 0), (12.45153, 0), (1.63971, 0), (13.84031, 0), (4.6144, 0), (5.26169, 0), (9.27769, 0), (9.14288, 0), (9.71953, 0), (9.38446, 0), (1.64788, 0), (11.72922, 0), (13.68926, 0), (9.42952, 0), (12.05574, 0), (9.09148, 0), (5.32273, 0), (20.25258, 0), (10.14599, 0), (10.82156, 0), (5.75736, 0), (7.13567, 0), (9.29746, 0), (5.1618, 0), (10.076, 0), (21.65669, 0), (13.35486, 0), (6.79957, 0), (8.76243, 0), (14.59294, 0), (16.90609, 0), (10.50337, 0), (-0.07923, 0), (13.51648, 0), (12.0676, 0), (0.86482, 0), (9.03563, 0), (5.38751, 0), (17.16866, 0), (2.78702, 0), (11.15548, 0), (12.30843, 0), (8.04897, 0), (9.95814, 0), (11.29308, 0), (14.13032, 0), (21.05877, 0), (3.57386, 0), (7.96631, 0), (3.30484, 0), (18.61856, 0), (16.35184, 0), (7.65236, 0), (18.02895, 0), (9.79458, 0), (16.7274, 0), (8.84453, 0), (13.05709, 0), (10.91447, 0), (8.40171, 0), (16.95211, 0), (11.82194, 0), (19.87978, 0), (12.88455, 0), (-0.00947, 0), (12.28109, 0), (6.96462, 0), (13.75282, 0), (14.39141, 0), (11.07193, 0), (12.88039, 0), (11.38253, 0), (21.02707, 0), (7.51955, 0), (6.31984, 0), (15.6543, 0), (14.80315, 0), (8.38024, 0), (21.7516, 0), (14.31336, 0), (15.04703, 0), (5.73787, 0), (13.16911, 0), (12.40695, 0), (9.88968, 0), (8.46703, 0), (8.70637, 0), (8.03551, 0), (5.9757, 0), (12.22951, 0), (3.14736, 0), (10.51266, 0), (18.593, 0), (10.82213, 0), (7.14216, 0), (6.81154, 0), (-0.6486, 0), (20.56136, 0), (11.35367, 0), (11.38205, 0), (17.14, 0), (14.91215, 0), (15.50207, 0), (5.93162, 0), (3.74869, 0), (14.11532, 0), (7.38954, 0), (5.45764, 0), (18.33733, 0), (9.91923, 0), (2.38991, 0), (14.16756, 0), (2.39791, 0), (6.92586, 0), (5.32474, 0), (2.28812, 0), (5.71718, 0), (5.84197, 0), (2.76206, 0), (19.05928, 0), (11.51788, 0), (6.56648, 0), (3.35735, 0), (7.55948, 0), (19.99908, 0), (13.00634, 0), (18.36886, 0), (11.14675, 0), (16.72931, 0), (12.50106, 0), (6.00605, 0), (23.06653, 0), (5.39694, 0), (9.53167, 0), (12.76944, 0), (7.20604, 0), (13.25391, 0), (13.7341, 0), (10.85292, 0), (-7.75835, 0), (10.29728, 0), (13.70099, 0), (10.17959, 0), (9.98399, 0), (12.69389, 0), (-0.28848, 0), (-2.18319, 0), (13.36378, 0), (10.09232, 0), (5.49489, 0), (5.46156, 0), (0.94225, 0), (12.79205, 0), (10.09593, 0), (6.06218, 0), (0.89463, 0), (11.88986, 0), (10.79733, 0), (1.51371, 0), (2.20967, 0), (15.45732, 0), (16.5262, 0), (5.99724, 0), (8.3613, 0), (15.68183, 0), (15.32117, 0), (14.15674, 0), (6.64553, 0), (4.20777, 0), (-0.10521, 0), (-0.88169, 0), (1.85913, 0), (9.73673, 0), (0.30926, 0), (6.17559, 0), (11.76602, 0), (5.68385, 0), (14.57088, 0), (12.81509, 0), (9.85682, 0), (12.06376, 0), (6.08874, 0), (11.63921, 0), (14.86722, 0), (10.41035, 0), (2.93794, 0), (12.21841, 0), (0.23804, 0), (3.14845, 0), (7.29748, 0), (3.06134, 0), (13.77684, 0), (16.21992, 0), (5.33511, 0), (9.68959, 0), (9.44169, 0), (18.08012, 0), (4.04224, 0), (8.77918, 0), (10.18324, 0), (9.38914, 0), (11.76995, 0), (14.19963, 0), (6.88817, 0), (16.56123, 0), (15.39885, 0), (5.21241, 0), (4.44408, 0), (17.87587, 0), (12.53337, 0), (13.60916, 0), (6.60104, 0), (7.35453, 0), (18.61572, 0), (6.10437, 0), (13.08682, 0), (12.15404, 0), (4.90789, 0), (2.13353, 0), (12.49593, 0), (11.93056, 0), (13.29408, 0), (5.70038, 0), (8.40271, 0), (5.19456, 0), (-5.51028, 0), (14.0329, 0), (10.38365, 0), (6.56812, 0), (4.21129, 0), (9.7157, 0), (9.88553, 0), (13.45346, 0), (4.97752, 0), (12.77595, 0), (8.56465, 0), (4.27703, 0), (18.12502, 0), (12.45735, 0), (12.42912, 0), (12.08125, 0), (10.85779, 0), (4.36013, 0), 
(11.85062, 0), (8.47776, 0), (9.60822, 0), (11.3069, 0), (14.25525, 0), (1.55168, 0), (14.57782, 0), (7.84786, 0), (9.87774, 0), (14.75575, 0), (3.68774, 0), (9.37667, 0), (20.28676, 0), (12.10027, 0), (8.01819, 0), (18.78158, 0), (20.85402, 0), (18.98069, 0), (16.1429, 0), (9.24047, 0), (14.12487, 0), (10.18841, 0), (-3.04478, 0), (5.7552, 0), (9.30376, 0), (11.42837, 0), (6.02364, 0), (8.86984, 0), (10.91177, 0), (10.04418, 0), (18.10774, 0), (7.49384, 0), (9.11556, 0), (9.7051, 0), (5.23268, 0), (9.04647, 0), (8.81547, 0), (2.65098, 0), (-2.69857, 1), (15.80943, 1), (7.31555, 1), (3.96517, 1), (4.77809, 1), (9.6472, 1), (-26.41717, 1), (-10.85635, 1), (-1.4376, 1), (-0.96308, 1), (2.84315, 1), (5.79467, 1), (-3.06091, 1), (-14.62902, 1), (22.08022, 1), (-2.11982, 1), (-4.84824, 1), (-10.50447, 1), (2.4891, 1), (9.90324, 1), (-22.66866, 1), (-0.97103, 1), (-16.57608, 1), (-3.78749, 1), (25.84511, 1), (5.30797, 1), (-18.19466, 1), (11.72708, 1), (0.2891, 1), (-9.83474, 1), (6.69942, 1), (18.09604, 1), (18.52651, 1), (1.38201, 1), (7.64615, 1), (17.66598, 1), (-2.44141, 1), (-9.01598, 1), (27.69142, 1), (4.06946, 1), (-15.0077, 1), (-10.49648, 1), (-4.88322, 1), (-25.09805, 1), (-4.64024, 1), (20.94434, 1), (24.12126, 1), (-14.10962, 1), (10.6512, 1), (14.50687, 1), (-19.88081, 1), (-11.55271, 1), (13.16921, 1), (16.63864, 1), (-24.08114, 1), (-9.09949, 1), (-10.54702, 1), (0.20813, 1), (8.19066, 1), (-2.70523, 1), (-0.23954, 1), (7.19398, 1), (-7.1618, 1), (-7.44322, 1), (-17.92031, 1), (-1.58146, 1), (9.18338, 1), (3.25838, 1), (-14.30234, 1), (1.84695, 1), (31.13794, 1), (-0.85067, 1), (19.02787, 1), (-3.09594, 1), (13.45584, 1), (-5.48104, 1), (-22.74928, 1), (-8.03697, 1), (17.31143, 1), (-16.65231, 1), (-18.58713, 1), (-16.52641, 1), (14.95261, 1), (12.56762, 1), (15.00188, 1), (1.85858, 1), (2.1926, 1), (-2.4095, 1), (21.56873, 1), (3.35509, 1), (-4.98672, 1), (35.08603, 1), (-10.01602, 1), (-3.85153, 1), (-6.81974, 1), (19.56525, 1), (-9.35488, 1), (0.24268, 1), (-3.51488, 1), (-0.37066, 1), (24.20888, 1), (-11.73537, 1), (0.01282, 1), (0.03963, 1), (-9.65589, 1), (-0.37429, 1), (5.61255, 1), (0.49984, 1), (-10.15066, 1), (-14.54314, 1), (16.56889, 1), (-7.73873, 1), (-3.76422, 1), (1.40722, 1), (2.28818, 1), (-13.12643, 1), (5.17082, 1), (4.79089, 1), (-17.42643, 1), (8.72548, 1), (-3.70285, 1), (16.77893, 1), (13.382, 1), (19.98418, 1), (0.00483, 1), (-4.75951, 1), (2.35391, 1), (21.65809, 1), (-9.2714, 1), (-18.38253, 1), (7.23097, 1), (14.97927, 1), (-4.02197, 1), (-29.8189, 1), (-12.8554, 1), (-7.60124, 1), (-14.90158, 1), (-3.31486, 1), (31.38144, 1), (-8.61288, 1), (15.31895, 1), (-10.19488, 1), (13.796, 1), (-0.32912, 1), (-0.0684, 1), (-30.06834, 1), (24.93912, 1), (-3.26506, 1), (-8.29751, 1), (-5.39189, 1), (-25.08603, 1), (-1.45318, 1), (16.72724, 1), (-3.38467, 1), (-26.00478, 1), (7.28369, 1), (16.96226, 1), (16.5858, 1), (10.46583, 1), (3.84345, 1), (-2.99382, 1), (1.42078, 1), (-11.0123, 1), (2.09909, 1), (1.21064, 1), (15.36079, 1), (-21.61349, 1), (22.7726, 1), (10.50512, 1), (-6.95825, 1), (9.20036, 1), (15.66902, 1), (3.28098, 1), (-9.05692, 1), (0.32882, 1), (-1.64934, 1), (-4.81406, 1), (-5.06006, 1), (19.97493, 1), (2.88646, 1), (-0.34552, 1), (7.55186, 1), (-22.96115, 1), (31.29166, 1), (6.18798, 1), (-2.52715, 1), (-11.58799, 1), (14.13596, 1), (13.45069, 1), (12.15179, 1), (3.44491, 1), (-8.78006, 1), (18.32087, 1), (11.91757, 1), (-2.00179, 1), (10.88411, 1), (9.09327, 1), (6.62484, 1), (8.87178, 1), (11.52254, 1), (-14.15988, 1), (-17.19515, 1), 
(14.03089, 1), (-2.4095, 1), (-16.83575, 1), (2.71469, 1), (4.84351, 1), (-1.17651, 1), (-3.37529, 1), (-19.92137, 1), (4.48952, 1), (-12.4906, 1), (-5.65277, 1), (8.50819, 1), (-19.61261, 1), (12.54156, 1), (11.06784, 1), (-12.59285, 1), (3.43683, 1), (-3.00325, 1), (12.49082, 1), (7.20955, 1), (17.6547, 1), (15.8619, 1), (24.3048, 1), (-8.05434, 1), (-6.06901, 1), (-15.69515, 1), (-11.13917, 1), (-3.90757, 1), (-2.57038, 1), (5.14065, 1), (17.8497, 1), (-8.64665, 1), (-18.68331, 1), (5.8567, 1), (-20.93884, 1), (4.40583, 1), (14.35985, 1), (4.18134, 1), (4.3635, 1), (9.35428, 1), (2.8908, 1), (16.01017, 1), (-1.48499, 1), (-9.97949, 1), (1.03055, 1), (-2.79697, 1), (6.85977, 1), (4.73213, 1), (2.7815, 1), (-2.46866, 1), (18.39425, 1), (-0.80378, 1), (-0.22982, 1), (-16.11608, 1), (3.0862, 1), (3.20779, 1), (10.50146, 1), (-0.21305, 1), (11.21012, 1), (-0.99825, 1), (18.39633, 1), (-3.39003, 1), (-0.64411, 1), (-1.39932, 1), (15.45319, 1), (-0.66044, 1), (-15.2223, 1), (-34.39907, 1), (-3.57836, 1), (16.82828, 1), (1.66624, 1), (15.43475, 1), (8.17776, 1), (5.50486, 1), (10.43082, 1), (-6.63332, 1), (2.28008, 1), (16.37203, 1), (5.16313, 1), (-8.85281, 1), (13.26692, 1), (-7.46842, 1), (8.43091, 1), (-13.18172, 1), (-0.72401, 1), (22.3881, 1), (10.65448, 1), (2.81289, 1), (10.92405, 1), (-8.95358, 1), (19.80653, 1), (-12.86527, 1), (5.38826, 1), (-6.83501, 1), (-15.7647, 1), (-27.67412, 1), (8.6499, 1), (-4.89542, 1), (16.76167, 1), (12.84284, 1), (-17.27324, 1), (-4.18726, 1), (-14.62366, 1), (-5.49863, 1), (-16.22846, 1), (10.60329, 1), (6.46781, 1), (1.70458, 1), (10.77448, 1), (0.8463, 1), (13.0482, 1), (-4.36264, 1), (3.22647, 1), (2.38828, 1), (6.7946, 1), (-0.25254, 1), (1.2497, 1), (1.6544, 1), (4.1019, 1), (11.27839, 1), (-5.04127, 1), (18.11674, 1), (0.51231, 1), (-0.51029, 1), (13.52556, 1), (16.10171, 1), (5.68197, 1), (-2.85904, 1), (-8.89167, 1), (6.24489, 1), (10.85319, 1), (-0.39816, 1), (3.87079, 1), (-3.1867, 1), (1.55322, 1), (16.86779, 1), (-14.60321, 1), (-1.81952, 1), (-3.11624, 1), (1.24193, 1), (10.18179, 1), (4.69796, 1), (0.69032, 1), (11.7723, 1), (7.62896, 1), (9.89741, 1), (9.11484, 1), (-3.84676, 1), (-0.4777, 1), (0.95958, 1), (-7.95056, 1), (-10.97474, 1), (-6.54861, 1), (34.74933, 1), (27.39463, 1), (4.18299, 1), (6.02476, 1), (-1.99397, 1), (1.26478, 1), (23.37106, 1), (10.49682, 1), (-11.04354, 1), (-12.22284, 1), (-9.87635, 1), (28.90511, 1), (6.77613, 1), (0.55352, 1), (0.37031, 1), (7.1418, 1), (3.24897, 1), (-1.60918, 1), (3.1675, 1), (-17.97072, 1), (-5.61743, 1), (14.1422, 1), (14.87695, 1), (-4.65961, 1), (-0.99174, 1), (-2.96623, 1), (-9.02263, 1), (-17.2088, 1), (2.78608, 1), (6.74239, 1), (4.8524, 1), (7.46731, 1), (1.04894, 1), (-12.8023, 1), (-17.18188, 1), (-5.08801, 1), (22.13942, 1), (-0.36384, 1), (17.80564, 1), (7.67504, 1), (1.59779, 1), (4.10942, 1), (0.61074, 1), (-14.40767, 1), (10.59906, 1), (16.57017, 1), (-15.17526, 1), (-6.98549, 1), (-0.64548, 1), (3.23756, 1), (14.65504, 1), (4.583, 1), (12.72378, 1), (5.26547, 1), (0.81781, 1), (9.38273, 1), (10.37636, 1), (10.70325, 1), (-0.83043, 1), (-7.53149, 1), (-9.09147, 1), (-19.51381, 1), (-28.44508, 1), (6.44392, 1), (11.10201, 1), (-2.86184, 1), (8.30673, 1), (8.8797, 1), (10.68053, 1), (15.62919, 1), (8.00579, 1), (6.4651, 1), (-4.50029, 1), (18.04514, 1), (11.12996, 1), (-5.14007, 1), (9.43857, 1), (3.13476, 1), (4.9772, 1), (-17.45782, 1), (0.05552, 1), (-1.90283, 1), (2.67908, 1), (-2.62243, 1), (-3.22767, 1), (-8.70222, 1), (-23.11605, 1), (21.6757, 1), (12.70076, 1), (4.4322, 
1), (11.69344, 1), (9.18052, 1), (-2.2549, 1), (-2.15615, 1), (20.29765, 1), (-0.29536, 1), (15.50109, 1), (8.79187, 1), (5.11533, 1), (-20.44436, 1), (-3.00909, 1), (-4.48291, 1), (21.84462, 1), (1.94225, 1), (-2.81908, 1), (17.19418, 1), (-9.33528, 1), (-0.17346, 1), (0.03958, 1), (-35.17786, 1), (8.36887, 1), (-9.02292, 1), (-10.98804, 1), (0.29335, 1), (4.29634, 1), (3.87718, 1), (-9.08532, 1), (7.13922, 1), (-7.62463, 1), (-10.5666, 1), (4.68165, 1), (-3.30172, 1), (13.04852, 1), (13.45616, 1), (2.41043, 1), (-0.36501, 1), (-15.67383, 1), (17.92217, 1), (8.42106, 1), (3.22063, 1), (-7.31753, 1), (21.99596, 1), (-36.8273, 1), (-20.46391, 1), (5.74179, 1), (-15.83178, 1), (14.90454, 1), (-8.84645, 1), (3.72036, 1), (4.6877, 1), (16.35418, 1), (3.15441, 1), (2.39907, 1), (-17.58664, 1), (-13.18269, 1); SELECT '14.971190998235835', '5.898143508382202e-44'; -SELECT roundBankers(welchTTest(left, right).1, 16) as t_stat, roundBankers(welchTTest(left, right).2, 16) as p_value from welch_ttest; -SELECT roundBankers(welchTTest(0.95)(left, right).3, 16) as t_stat, roundBankers(welchTTest(0.95)(left, right).4, 16) as p_value from welch_ttest; +SELECT roundBankers(welchTTest(left, right).1, 6) as t_stat, roundBankers(welchTTest(left, right).2, 6) as p_value from welch_ttest; +SELECT roundBankers(welchTTest(0.95)(left, right).3, 6) as t_stat, roundBankers(welchTTest(0.95)(left, right).4, 6) as p_value from welch_ttest; DROP TABLE IF EXISTS welch_ttest; @@ -42,7 +42,7 @@ DROP TABLE IF EXISTS student_ttest; CREATE TABLE student_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO student_ttest VALUES (0.88854, 0), (-5.76966, 0), (6.76618, 0), (3.55546, 0), (-9.76948, 0), (4.92323, 0), (-0.36352, 0), (0.97018, 0), (4.61656, 0), (-6.78292, 0), (4.02008, 0), (12.41838, 0), (5.14417, 0), (3.86836, 0), (-1.26199, 0), (12.44106, 0), (3.28349, 0), (1.77261, 0), (-8.94748, 0), (-1.01449, 0), (-1.26377, 0), (6.79682, 0), (6.32333, 0), (-8.21214, 0), (-1.68565, 0), (9.7557, 0), (3.66694, 0), (1.39967, 0), (-5.52035, 0), (-10.95601, 0), (0.93877, 0), (1.45933, 0), (-5.40551, 0), (-0.83857, 0), (8.50794, 0), (-6.68686, 0), (5.03099, 0), (1.56251, 0), (4.17381, 0), (-2.92644, 0), (5.11068, 0), (2.09617, 0), (11.7787, 0), (6.50336, 0), (0.62098, 0), (-7.97121, 0), (3.81902, 0), (0.33151, 0), (10.68584, 0), (0.56007, 0), (-7.38621, 0), (5.05882, 0), (2.34616, 0), (11.3806, 0), (5.95276, 0), (-3.01429, 0), (5.98169, 0), (0.96985, 0), (-1.15932, 0), (2.11547, 0), (2.49668, 0), (-12.49569, 0), (-4.94667, 0), (-3.64215, 0), (-8.35595, 0), (3.211, 0), (2.33805, 0), (2.38608, 0), (-3.2862, 0), (-0.80454, 0), (-0.53483, 0), (10.66445, 0), (-0.37619, 0), (0.48246, 0), (7.41919, 0), (0.42414, 0), (-2.32335, 0), (-0.70223, 0), (-5.9332, 0), (-1.20561, 0), (3.39865, 0), (9.61739, 0), (-0.78651, 0), (-4.00256, 0), (-7.99646, 0), (8.72923, 0), (0.71859, 0), (-1.62726, 0), (5.11234, 0), (-0.95625, 0), (-3.75573, 0), (1.03141, 0), (-3.33588, 0), (1.51804, 0), (-3.30935, 0), (-1.97507, 0), (4.06456, 0), (3.27195, 0), (-7.81761, 0), (-3.81785, 0), (-4.18311, 0), (-11.33313, 0), (-0.25221, 0), (7.2514, 0), (5.30301, 0), (2.46762, 0), (4.22716, 0), (0.33916, 0), (9.7638, 0), (-7.58684, 0), (-4.09888, 0), (4.26617, 0), (-0.56744, 0), (4.65125, 0), (-1.30301, 0), (4.53771, 0), (9.96929, 0), (3.72939, 0), (-2.29818, 0), (3.09417, 0), (0.82251, 0), (5.29975, 0), (2.8685, 0), (-5.73321, 0), (-1.85651, 0), (-1.07984, 0), (9.78342, 0), (-13.49652, 0), (3.68791, 0), (1.9998, 0), (1.11674, 0), (9.43869, 0), (4.07029, 0), (5.32715, 
0), (7.16504, 0), (6.66096, 0), (-5.7111, 0), (-0.38575, 0), (4.49165, 0), (-3.36489, 0), (7.71814, 0), (-1.58966, 0), (-1.61063, 0), (-0.91602, 0), (0.73459, 0), (-3.24463, 0), (6.3947, 0), (-2.77845, 0), (4.45899, 0), (-8.84186, 0), (2.62276, 0), (1.774, 0), (4.3692, 0), (0.05942, 0), (-1.44042, 0), (-2.53594, 0), (-2.24752, 0), (4.98874, 0), (4.05434, 0), (-2.56483, 0), (-6.79286, 0), (-2.06165, 0), (-0.26056, 0), (1.89567, 0), (-3.15145, 0), (-7.31321, 0), (0.28936, 0), (-0.63111, 0), (0.22611, 0), (-9.3377, 0), (-5.76638, 0), (3.87306, 0), (6.7011, 0), (9.03915, 0), (-1.21835, 0), (0.82892, 0), (2.80656, 0), (-1.34746, 0), (-1.99912, 0), (0.6036, 0), (-3.46117, 0), (5.23732, 0), (-1.86702, 0), (-5.86115, 0), (6.48523, 0), (-7.40158, 0), (-1.38913, 0), (4.94613, 0), (-2.07818, 0), (2.39808, 0), (4.89238, 0), (4.39481, 0), (5.20425, 0), (13.62598, 0), (-2.86293, 0), (-3.62396, 0), (-4.28695, 0), (4.66425, 0), (2.20871, 0), (1.60382, 0), (-9.87024, 0), (-7.37302, 0), (-4.17814, 0), (2.5148, 0), (3.21708, 0), (-11.48089, 0), (1.19821, 0), (-0.07436, 0), (-1.10652, 0), (4.03395, 0), (-4.35883, 0), (2.04013, 0), (0.52264, 0), (8.14004, 0), (-8.86949, 0), (-0.35807, 0), (-10.71113, 0), (-2.13755, 0), (0.50715, 0), (6.30826, 0), (2.37527, 0), (0.20872, 0), (-5.85729, 0), (-4.97217, 0), (-9.78434, 0), (-1.53277, 0), (0.14827, 0), (-1.053, 0), (1.74558, 0), (11.17194, 0), (9.35487, 0), (-9.17209, 0), (10.41814, 0), (7.41206, 0), (3.71775, 0), (-2.04674, 0), (6.18037, 0), (5.6383, 0), (-0.90058, 0), (-1.27073, 0), (-2.3473, 0), (-8.44271, 0), (2.75551, 0), (-1.15521, 0), (4.08722, 0), (-1.70399, 0), (7.24114, 0), (-8.43976, 0), (-1.53052, 0), (-0.00526, 0), (-4.04813, 0), (-2.84299, 0), (-5.201, 0), (7.75774, 0), (-2.85791, 0), (-3.86071, 0), (-1.80029, 0), (-5.26015, 0), (-3.158, 0), (7.71014, 0), (-4.84866, 0), (-8.38785, 0), (7.67021, 0), (4.96521, 0), (-0.40919, 0), (-3.25711, 0), (3.07685, 0), (2.89376, 0), (-10.47331, 0), (-3.48942, 0), (1.13906, 0), (-8.57454, 0), (-3.38963, 0), (-2.3195, 0), (-1.60694, 0), (-5.57406, 0), (-0.93075, 0), (-11.76579, 0), (10.68283, 0), (8.74324, 0), (7.66409, 0), (4.76715, 0), (0.44539, 0), (-1.35941, 0), (4.18849, 0), (-6.17097, 0), (0.27977, 0), (-1.45006, 0), (-4.81694, 0), (-3.0297, 0), (0.02145, 0), (2.46883, 0), (9.60317, 0), (-9.93898, 0), (1.05549, 0), (5.55366, 0), (-3.80722, 0), (-4.18851, 0), (1.00351, 0), (3.11385, 0), (-5.17623, 0), (-3.18396, 0), (-6.65302, 0), (-0.50832, 0), (-4.04375, 0), (4.52707, 0), (6.63124, 0), (-3.72082, 0), (5.79825, 0), (-2.0158, 0), (-2.78369, 0), (-1.91821, 0), (6.31714, 0), (-1.80869, 0), (8.55586, 0), (2.40826, 0), (-8.46361, 0), (5.04452, 0), (-0.84665, 0), (2.30903, 0), (-3.71837, 0), (-0.69419, 0), (3.6733, 0), (-1.96098, 0), (2.36747, 0), (-12.03622, 0), (4.38481, 0), (2.93955, 0), (2.16804, 0), (-0.08218, 0), (-3.97934, 0), (-7.43985, 0), (0.91666, 0), (7.23432, 0), (-6.13303, 0), (-10.23217, 0), (-6.21681, 0), (-0.80934, 0), (0.17914, 0), (2.13338, 0), (6.97656, 0), (6.90455, 0), (6.25943, 0), (-6.04019, 0), (-7.30909, 0), (1.4589, 0), (12.00208, 0), (2.22457, 0), (-2.45912, 0), (-6.92213, 0), (4.05547, 0), (0.04709, 0), (-7.70952, 0), (-1.47883, 0), (1.3701, 0), (-4.92928, 0), (-2.75872, 0), (-0.09178, 0), (2.62642, 0), (-1.14623, 0), (2.76609, 0), (4.94404, 0), (-7.01764, 0), (-10.91568, 0), (-2.49738, 0), (0.73576, 0), (2.25436, 0), (-1.72956, 0), (2.41054, 0), (5.72149, 0), (-6.41371, 0), (3.38217, 0), (1.24133, 0), (10.03634, 0), (-2.37303, 0), (-1.35543, 0), (-1.4387, 0), (-4.0976, 0), (-0.82501, 0), 
(-1.93498, 0), (5.59955, 0), (5.46656, 0), (2.43568, 0), (-0.23926, 0), (-4.9945, 0), (-4.96655, 0), (-0.59258, 0), (2.02497, 0), (0.67583, 0), (3.16522, 0), (-1.9673, 0), (-6.75319, 0), (-6.69723, 0), (0.81148, 0), (4.44531, 0), (-4.43522, 0), (-5.28602, 0), (-3.58829, 0), (-7.97395, 0), (-2.84891, 0), (-3.95112, 0), (3.54945, 0), (12.12376, 0), (-3.12347, 0), (3.65209, 0), (9.34031, 0), (-0.26348, 0), (-5.23968, 0), (2.22336, 0), (-10.70405, 0), (-4.41319, 0), (-5.94912, 0), (1.8147, 0), (7.69287, 0), (9.46125, 0), (4.72497, 0), (-0.57565, 0), (-1.12303, 0), (2.90272, 0), (-4.4584, 0), (4.28819, 0), (11.64512, 0), (-1.80395, 0), (2.51605, 0), (-3.18439, 0), (-0.70213, 0), (-7.68383, 0), (-8.32268, 0), (-8.71115, 0), (9.96933, 0), (0.95675, 0), (3.35114, 0), (-2.66008, 0), (7.75456, 0), (0.73568, 0), (0.3483, 0), (-1.09203, 0), (-7.76963, 0), (5.81902, 0), (-3.41424, 0), (-0.39209, 0), (4.67608, 0), (0.68753, 0), (5.17179, 0), (4.98983, 0), (-0.12659, 0), (3.25267, 0), (1.50184, 0), (2.94507, 0), (-0.42333, 0), (-3.66227, 0), (8.90812, 0), (4.74411, 0), (2.22018, 0), (-2.07976, 0), (4.8711, 0), (0.5023, 0), (6.31569, 0), (-4.36903, 0), (3.82146, 0), (-6.99477, 0), (3.61225, 0), (14.69335, 0), (0.58368, 0), (4.65341, 0), (-3.14272, 0), (2.67048, 0), (4.64963, 0), (-2.70828, 0), (1.42923, 0), (5.84498, 0), (-4.76568, 0), (0.19907, 0), (1.67486, 0), (5.32145, 0), (-8.03477, 0), (3.46776, 0), (4.66374, 0), (-5.37394, 0), (5.39045, 0), (-1.44756, 0), (-1.64419, 0), (3.39699, 0), (-2.94659, 0), (-2.38437, 0), (-0.23958, 0), (6.88389, 0), (-2.7172, 0), (-1.53419, 0), (7.38841, 0), (-5.44178, 0), (-0.89287, 0), (2.93546, 0), (-0.26901, 0), (-4.70044, 0), (2.25846, 0), (-9.28813, 0), (6.04268, 0), (4.41693, 0), (1.75714, 0), (-2.90702, 1), (3.61651, 1), (4.27458, 1), (4.82133, 1), (9.59483, 1), (1.00424, 1), (2.04147, 1), (-3.58214, 1), (6.59543, 1), (-1.00532, 1), (-3.59794, 1), (-2.82434, 1), (-3.13194, 1), (9.90977, 1), (0.523, 1), (4.62779, 1), (-2.56872, 1), (2.25807, 1), (1.04044, 1), (-2.35744, 1), (10.81531, 1), (-9.68469, 1), (3.80885, 1), (12.70435, 1), (-6.01112, 1), (1.89065, 1), (5.08892, 1), (3.45254, 1), (11.58151, 1), (0.85035, 1), (8.38397, 1), (1.17169, 1), (4.74621, 1), (-1.66614, 1), (4.2414, 1), (1.68765, 1), (1.85223, 1), (9.10111, 1), (-2.38085, 1), (-14.79595, 1), (-3.8938, 1), (-3.41864, 1), (-3.15282, 1), (-0.56684, 1), (12.87997, 1), (6.89115, 1), (12.921, 1), (-7.94908, 1), (2.45687, 1), (2.14957, 1), (7.55081, 1), (-3.71534, 1), (-2.41064, 1), (-0.80734, 1), (-4.75651, 1), (2.05241, 1), (-5.44523, 1), (-2.75054, 1), (-13.00131, 1), (-2.74451, 1), (-1.39004, 1), (-3.02854, 1), (7.65112, 1), (1.1245, 1), (6.74117, 1), (-0.75777, 1), (8.93451, 1), (-8.85559, 1), (-0.36405, 1), (4.02742, 1), (6.88718, 1), (-1.05124, 1), (3.04085, 1), (3.32368, 1), (1.147, 1), (3.41554, 1), (-3.47851, 1), (-0.47684, 1), (-0.55605, 1), (-0.17006, 1), (2.26218, 1), (12.45494, 1), (-1.84097, 1), (1.64934, 1), (-7.07496, 1), (-9.99462, 1), (6.09954, 1), (-1.05319, 1), (3.04757, 1), (0.93899, 1), (-4.63243, 1), (-7.43322, 1), (-7.298, 1), (-6.59016, 1), (-6.11649, 1), (0.56682, 1), (2.00661, 1), (-2.79814, 1), (2.84482, 1), (3.65348, 1), (-4.22807, 1), (-4.54336, 1), (-3.63343, 1), (2.96878, 1), (6.11661, 1), (-1.70919, 1), (-4.71133, 1), (6.09652, 1), (-6.83454, 1), (0.18006, 1), (1.51676, 1), (-5.31646, 1), (-3.21215, 1), (-5.07599, 1), (-2.36591, 1), (3.55724, 1), (4.8904, 1), (-3.22586, 1), (-1.74928, 1), (5.73458, 1), (1.41188, 1), (2.86255, 1), (2.90179, 1), (-2.19949, 1), (1.72727, 1), 
(1.76939, 1), (-0.12848, 1), (-0.52, 1), (3.48333, 1), (7.8262, 1), (0.09099, 1), (7.77017, 1), (9.49484, 1), (1.42825, 1), (1.99624, 1), (4.00419, 1), (1.07925, 1), (-0.09987, 1), (-5.48733, 1), (-1.83517, 1), (2.38059, 1), (1.42075, 1), (-1.11968, 1), (-6.46035, 1), (7.66576, 1), (4.6307, 1), (5.55989, 1), (3.16684, 1), (5.07671, 1), (-10.20566, 1), (-4.73386, 1), (1.28353, 1), (6.75679, 1), (12.09895, 1), (7.0049, 1), (7.16156, 1), (-0.64311, 1), (-0.66747, 1), (3.99996, 1), (9.07298, 1), (-4.60971, 1), (0.70744, 1), (2.56774, 1), (9.32424, 1), (3.95087, 1), (7.11372, 1), (-0.89284, 1), (8.6155, 1), (-0.14141, 1), (-4.86319, 1), (-6.95801, 1), (4.44883, 1), (4.6156, 1), (-2.3579, 1), (-5.1186, 1), (8.12819, 1), (2.78392, 1), (-4.30221, 1), (-1.47506, 1), (6.8598, 1), (0.47636, 1), (0.95383, 1), (7.79779, 1), (-2.61767, 1), (-10.5087, 1), (-2.74299, 1), (3.87369, 1), (-1.07093, 1), (4.98864, 1), (-7.50772, 1), (6.41316, 1), (1.39061, 1), (-3.1747, 1), (-2.13621, 1), (-0.02203, 1), (0.89025, 1), (-5.87746, 1), (3.60026, 1), (-0.23178, 1), (-2.1897, 1), (-5.85101, 1), (-1.6053, 1), (3.6184, 1), (-8.53795, 1), (-0.35987, 1), (2.15301, 1), (-6.60692, 1), (9.54341, 1), (1.11511, 1), (2.94025, 1), (12.05657, 1), (3.75156, 1), (7.95597, 1), (-0.99449, 1), (0.90597, 1), (-7.90627, 1), (3.50863, 1), (-1.47493, 1), (4.11671, 1), (10.06325, 1), (-1.06059, 1), (-1.37737, 1), (-0.42542, 1), (-3.90267, 1), (9.35037, 1), (-7.91219, 1), (-4.69945, 1), (3.63776, 1), (3.46492, 1), (2.84518, 1), (-3.04301, 1), (8.82764, 1), (7.80134, 1), (7.87755, 1), (7.01035, 1), (2.43271, 1), (11.36418, 1), (-6.92659, 1), (5.95541, 1), (3.59436, 1), (5.18429, 1), (4.20225, 1), (0.5029, 1), (4.03074, 1), (5.23152, 1), (10.65409, 1), (-0.69845, 1), (11.70096, 1), (5.80692, 1), (-8.1819, 1), (4.31485, 1), (5.7227, 1), (5.67398, 1), (-1.75826, 1), (7.54164, 1), (-1.79026, 1), (-1.7395, 1), (5.65042, 1), (0.38765, 1), (-4.64719, 1), (-10.22048, 1), (-2.05447, 1), (-2.43441, 1), (-5.38551, 1), (5.47764, 1), (8.26637, 1), (-3.6421, 1), (-11.66269, 1), (3.972, 1), (5.46642, 1), (-3.72304, 1), (5.75251, 1), (5.12841, 1), (0.59067, 1), (5.21138, 1), (-4.58702, 1), (-8.737, 1), (-2.12737, 1), (0.22888, 1), (-1.46448, 1), (2.40311, 1), (-5.21814, 1), (13.94749, 1), (-2.77448, 1), (-3.7867, 1), (3.4954, 1), (3.12586, 1), (-7.01485, 1), (-3.20727, 1), (6.31415, 1), (2.37521, 1), (8.13787, 1), (2.15956, 1), (-0.40842, 1), (-7.27283, 1), (4.27575, 1), (-2.89126, 1), (6.84344, 1), (7.0869, 1), (-5.18837, 1), (2.67648, 1), (-6.57021, 1), (0.60429, 1), (-1.04921, 1), (7.12873, 1), (1.68973, 1), (-2.58404, 1), (-3.83114, 1), (-7.26546, 1), (-5.07153, 1), (-0.80395, 1), (2.09455, 1), (4.33374, 1), (8.54335, 1), (0.80566, 1), (-8.38085, 1), (7.54812, 1), (8.78007, 1), (1.5857, 1), (8.43855, 1), (-1.90846, 1), (-1.2434, 1), (7.16172, 1), (-3.44129, 1), (-6.37542, 1), (-4.99486, 1), (4.99033, 1), (-1.83734, 1), (-2.83289, 1), (-4.13997, 1), (1.40163, 1), (8.57867, 1), (-1.87639, 1), (3.41667, 1), (6.31762, 1), (1.58473, 1), (1.63625, 1), (-6.93618, 1), (3.58046, 1), (-6.8097, 1), (4.69978, 1), (-1.72912, 1), (5.29491, 1), (-1.63062, 1), (5.83818, 1), (17.0769, 1), (4.54301, 1), (-1.33801, 1), (5.64339, 1), (1.26913, 1), (-1.01553, 1), (4.8316, 1), (3.08635, 1), (-2.27738, 1), (-1.13761, 1), (10.08698, 1), (5.33827, 1), (2.84345, 1), (-1.51132, 1), (13.46078, 1), (8.58965, 1), (-2.36683, 1), (-1.8217, 1), (1.96981, 1), (2.31718, 1), (3.66493, 1), (1.93104, 1), (5.20332, 1), (3.20519, 1), (3.34631, 1), (7.0087, 1), (-7.96126, 1), (-0.62182, 1), 
(-4.65227, 1), (10.6572, 1), (4.50891, 1), (9.74298, 1), (3.85707, 1), (6.41144, 1), (1.48649, 1), (2.28076, 1), (2.75342, 1), (-5.40401, 1), (7.11389, 1), (5.74368, 1), (6.78345, 1), (3.83773, 1), (0.70959, 1), (0.57434, 1), (1.5888, 1), (3.94889, 1), (5.8234, 1), (7.78366, 1), (9.08354, 1), (-7.99182, 1), (-2.77033, 1), (-10.29342, 1), (1.76251, 1), (2.09266, 1), (4.20614, 1), (-3.63064, 1), (-2.17794, 1), (-2.66225, 1), (-2.74707, 1), (-1.93431, 1), (1.38629, 1), (4.12816, 1), (-1.58902, 1), (-5.08864, 1), (-2.30491, 1), (2.64605, 1), (1.16158, 1), (2.63534, 1), (1.4956, 1), (-4.60768, 1), (0.60771, 1), (3.29549, 1), (-1.42592, 1), (0.8883, 1), (-1.10612, 1), (-2.57296, 1), (5.88085, 1), (7.40745, 1), (13.48116, 1), (5.53539, 1), (-1.46014, 1), (3.73304, 1), (3.5435, 1), (-3.89151, 1), (4.16265, 1), (2.32663, 1), (5.31735, 1), (6.33485, 1), (2.1339, 1), (0.82708, 1), (-2.95155, 1), (-6.76019, 1), (-4.20179, 1), (8.78354, 1), (1.41863, 1), (7.65689, 1), (-6.52601, 1), (-4.4426, 1), (-4.49483, 1), (-3.91479, 1), (-2.84562, 1), (2.58974, 1), (2.24424, 1), (-4.65846, 1), (8.4062, 1), (8.20262, 1), (-8.63752, 1), (4.97966, 1), (-0.35563, 1), (-4.72116, 1), (-2.95997, 1), (2.73959, 1), (-0.23956, 1), (10.13915, 1), (11.83775, 1), (-2.50332, 1), (-0.58181, 1), (-7.62836, 1), (2.26478, 1), (-3.50179, 1), (-2.08023, 1), (4.07256, 1), (-1.40826, 1), (-2.33644, 1), (3.00197, 1), (4.23668, 1), (-2.24647, 1), (1.0445, 1), (-0.31901, 1), (8.62657, 1), (3.92817, 1), (0.08462, 1), (10.15884, 1), (0.4113, 1), (4.45847, 1), (5.82941, 1), (6.59202, 1), (-3.73441, 1), (-5.86969, 1), (-4.56543, 1), (-1.32636, 1), (-0.17884, 1), (-3.56181, 1), (-0.66932, 1), (6.87538, 1), (0.73527, 1), (-0.24177, 1), (-0.8657, 1), (-0.22977, 1), (1.02095, 1), (6.16311, 1), (-5.68027, 1), (-3.7619, 1), (4.22959, 1), (-1.5249, 1); SELECT '-2.610898982580138', '0.00916587538237954'; -SELECT roundBankers(studentTTest(left, right).1, 16) as t_stat, roundBankers(studentTTest(left, right).2, 16) as p_value from student_ttest; +SELECT roundBankers(studentTTest(left, right).1, 6) as t_stat, roundBankers(studentTTest(left, right).2, 6) as p_value from student_ttest; DROP TABLE IF EXISTS student_ttest; /*Check t-stat and p-value and compare it with scipy.stat implementation @@ -52,6 +52,6 @@ CREATE TABLE student_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO student_ttest VALUES (4.52546, 0), (8.69444, 1), (3.73628, 0), (3.81414, 1), (-0.39478, 0), (12.38442, 1), (5.15633, 0), (8.9738, 1), (0.50539, 0), (9.19594, 1), (-5.34036, 0), (7.21009, 1), (0.19336, 0), (4.97743, 1), (8.35729, 0), (4.94756, 1), (6.95818, 0), (19.80911, 1), (-2.93812, 0), (13.75358, 1), (8.30807, 0), (16.56373, 1), (-3.3517, 0), (9.72882, 1), (4.16279, 0), (4.64509, 1), (-3.17231, 0), (17.76854, 1), (1.93545, 0), (4.80693, 1), (11.06606, 0), (8.79505, 1), (-4.22678, 0), (10.88868, 1), (-1.99975, 0), (6.21932, 1), (-4.51178, 0), (15.11614, 1), (-4.50711, 0), (13.24703, 1), (1.89786, 0), (14.76476, 1), (-6.19638, 0), (-0.6117, 1), (-3.70188, 0), (17.48993, 1), (5.01334, 0), (12.11847, 1), (1.79036, 0), (4.87439, 1), (2.14435, 0), (18.56479, 1), (3.0282, 0), (1.23712, 1), (2.35528, 0), (5.41596, 1), (-12.18535, 0), (4.54994, 1), (5.59709, 0), (11.37668, 1), (-12.92336, 0), (9.5982, 1), (-0.04281, 0), (6.59822, 1), (-0.16923, 0), (1.16703, 1), (0.88924, 0), (8.88418, 1), (-4.68414, 0), (10.95047, 1), (8.01099, 0), (5.52787, 1), (2.61686, 0), (-1.11647, 1), (-2.76895, 0), (14.49946, 1), (3.32165, 0), (3.27585, 1), (-0.85135, 0), (-0.42025, 1), 
(1.21368, 0), (6.37906, 1), (4.38673, 0), (2.5242, 1), (6.20964, 0), (8.1405, 1), (-1.23172, 0), (6.46732, 1), (4.65516, 0), (9.89332, 1), (-1.87143, 0), (10.4374, 1), (0.86429, 0), (-1.06465, 1), (2.51184, 0), (6.84902, 1), (-1.88822, 0), (10.96576, 1), (-1.61802, 0), (7.83319, 1), (1.93653, 0), (14.39823, 1), (-3.66631, 0), (7.02594, 1), (-1.05294, 0), (13.46629, 1), (-10.74718, 0), (10.39531, 1), (16.49295, 0), (11.27348, 1), (-7.65494, 0), (9.32187, 1), (-3.39303, 0), (12.32667, 1), (-4.89418, 0), (8.98905, 1), (3.2521, 0), (9.54757, 1), (0.05831, 0), (5.98325, 1), (-3.00409, 0), (3.47248, 1), (5.76702, 0), (9.26966, 1), (2.67674, 0), (5.77816, 1), (10.52623, 0), (6.32966, 1), (-0.54501, 0), (9.49313, 1), (-4.89835, 0), (6.21337, 1), (3.52457, 0), (10.00242, 1), (-0.0451, 0), (6.25167, 1), (-6.61226, 0), (15.64671, 1), (9.02391, 0), (2.78968, 1), (5.52571, 0), (6.55442, 1), (4.54352, 0), (3.68819, 1), (-3.8394, 0), (9.55934, 1), (-7.75295, 0), (4.166, 1), (5.91167, 0), (12.32471, 1), (1.38897, 0), (7.10969, 1), (6.24166, 0), (16.31723, 1), (5.58536, 0), (12.99482, 1), (4.7591, 0), (10.11585, 1), (-2.58336, 0), (10.29455, 1), (-1.91263, 0), (18.27524, 1), (3.31575, 0), (12.84435, 1), (5.3507, 0), (13.11954, 1), (-15.22081, 0), (12.84147, 1), (-0.84775, 0), (15.55658, 1), (-4.538, 0), (11.45329, 1), (6.71177, 0), (7.50912, 1), (0.52882, 0), (8.56226, 1), (2.0242, 0), (8.63104, 1), (5.69146, 0), (15.68026, 1), (4.63328, 0), (21.6361, 1), (0.22984, 0), (6.23925, 1), (-2.84052, 0), (8.65714, 1), (7.91867, 0), (9.9423, 1), (1.11001, 0), (12.28213, 1), (-0.11251, 0), (3.11279, 1), (-0.20905, 0), (13.58128, 1), (0.03287, 0), (16.51407, 1), (-1.59397, 0), (16.60476, 1), (-5.39405, 0), (12.02022, 1), (-7.1233, 0), (12.11035, 1), (4.51517, 0), (9.47832, 1), (-0.70967, 0), (6.40742, 1), (5.67299, 0), (8.87252, 1), (-0.33835, 0), (15.14265, 1), (-1.83047, 0), (2.23572, 1), (-0.62877, 0), (11.57144, 1), (-7.23148, 0), (18.87737, 1), (0.1802, 0), (12.1833, 1), (11.73325, 0), (11.17519, 1), (2.17603, 0), (16.80422, 1), (-0.11683, 0), (6.81423, 1), (-1.29102, 0), (12.12546, 1), (-0.23201, 0), (8.06153, 1), (-6.8643, 0), (10.97228, 1), (-6.85153, 0), (7.30596, 1), (-4.77163, 0), (15.44026, 1), (6.11721, 0), (8.00993, 1), (5.96406, 0), (12.60196, 1), (3.59135, 0), (13.96832, 1), (-0.60095, 0), (14.03207, 1), (3.11163, 0), (4.53758, 1), (-0.18831, 0), (8.08297, 1), (0.67657, 0), (4.90451, 1), (-3.16117, 0), (8.14253, 1), (0.26957, 0), (19.88605, 1), (2.18653, 0), (13.85254, 1), (-5.94611, 0), (23.01839, 1), (-4.39352, 0), (6.02084, 1), (-3.71525, 0), (9.60319, 1), (5.11103, 0), (1.90511, 1), (1.33998, 0), (10.35237, 1), (1.01629, 0), (16.27082, 1), (-3.36917, 0), (12.52379, 1), (-3.99661, 0), (11.37435, 1), (8.19336, 0), (13.61823, 1), (2.89168, 0), (15.77622, 1), (-11.10373, 0), (15.17254, 1), (11.68005, 0), (6.711, 1), (3.08282, 0), (4.74205, 1), (-6.81506, 0), (10.09812, 1), (-2.34587, 0), (6.61722, 1), (-2.68725, 0), (10.34164, 1), (0.3577, 0), (8.96602, 1), (-3.05682, 0), (12.32157, 1), (9.08062, 0), (11.75711, 1), (-0.77913, 0), (13.49499, 1), (10.35215, 0), (8.57713, 1), (6.82565, 0), (11.50313, 1), (-1.24674, 0), (1.13097, 1), (5.18822, 0), (7.83205, 1), (-3.70743, 0), (5.77957, 1), (1.40319, 0), (15.5519, 1), (5.89432, 0), (10.82676, 1), (1.43152, 0), (11.51218, 1), (6.70638, 0), (9.29779, 1), (9.76613, 0), (9.77021, 1), (4.27604, 0), (9.94114, 1), (-2.63141, 0), (15.54513, 1), (-7.8133, 0), (19.10736, 1), (-0.06668, 0), (15.04205, 1), (1.05391, 0), (9.03114, 1), (4.41797, 0), (24.0104, 1), 
(0.09337, 0), (9.94205, 1), (6.16075, 0), (2.5925, 1), (7.49413, 0), (8.82726, 1), (-3.52872, 0), (10.0209, 1), (-2.17126, 0), (8.1635, 1), (-3.87605, 0), (4.24074, 1), (3.26607, 0), (7.67291, 1), (-3.28045, 0), (5.21642, 1), (2.1429, 0), (11.2808, 1), (1.53386, 0), (6.88172, 1), (0.21169, 0), (5.98743, 1), (-0.63674, 0), (17.97249, 1), (5.84893, 0), (6.46323, 1), (-0.63498, 0), (15.37416, 1), (8.29526, 0), (2.89957, 1), (-1.08358, 0), (17.13044, 1), (-2.306, 0), (11.06355, 1), (2.86991, 0), (3.09625, 1), (-0.76074, 0), (-2.33019, 1), (5.49191, 0), (7.42675, 1), (1.82883, 0), (15.06792, 1), (-3.70497, 0), (8.81116, 1), (-0.53232, 0), (19.17446, 1), (-11.49722, 0), (18.77181, 1), (3.44877, 0), (14.06443, 1), (-1.8596, 0), (12.81241, 1), (-10.34851, 0), (2.72299, 1), (1.13093, 0), (18.67739, 1), (-10.93389, 0), (11.63275, 1), (-3.39703, 0), (2.23891, 1), (0.19749, 0), (13.01195, 1), (-3.68389, 0), (7.43402, 1), (-4.67863, 0), (8.14599, 1), (10.78916, 0), (16.65328, 1), (0.37675, 0), (1.362, 1), (3.98094, 0), (3.87957, 1), (-3.64775, 0), (11.16134, 1), (-4.8443, 0), (6.25357, 1), (1.102, 0), (4.21945, 1), (8.72112, 0), (12.50047, 1), (-1.47361, 0), (6.45486, 1), (6.24183, 0), (18.99924, 1), (6.83569, 0), (18.09508, 1), (-3.11684, 0), (13.59528, 1), (4.91306, 0), (3.39681, 1), (-0.03628, 0), (13.33157, 1), (5.1282, 0), (5.8945, 1), (-2.38558, 0), (5.61212, 1), (2.33351, 0), (8.41149, 1), (-0.97191, 0), (13.78608, 1), (-0.05588, 0), (6.08609, 1), (-4.70019, 0), (12.76962, 1), (-5.12371, 0), (3.26206, 1), (0.65606, 0), (0.25528, 1), (-0.11574, 0), (11.9083, 1), (4.4238, 0), (4.35071, 1), (6.93399, 0), (11.19855, 1), (3.68712, 0), (13.87404, 1), (-0.01187, 0), (6.87986, 1), (1.8332, 0), (8.32566, 1), (5.81322, 0), (22.51334, 1), (-4.04709, 0), (2.5226, 1), (-8.26397, 0), (16.84498, 1), (-2.11273, 0), (6.26108, 1), (5.28396, 0), (13.84824, 1), (0.73054, 0), (6.03262, 1), (6.43559, 0), (14.12668, 1), (4.35565, 0), (16.01939, 1), (-1.05545, 0), (8.19237, 1), (5.00087, 0), (18.01595, 1), (-2.72239, 0), (9.45609, 1), (7.32313, 0), (6.90459, 1), (2.11548, 0), (12.83115, 1), (-3.40953, 0), (10.603, 1), (6.97051, 0), (13.70439, 1), (-0.45567, 0), (6.1633, 1), (1.31699, 0), (4.1151, 1), (-1.49871, 0), (8.20499, 1), (7.14772, 0), (11.67903, 1), (0.79277, 0), (7.30851, 1), (6.9698, 0), (6.50941, 1), (2.08733, 0), (7.3949, 1), (-3.55962, 0), (12.80075, 1), (0.75601, 0), (5.62043, 1), (1.21, 0), (18.2542, 1), (-2.17877, 0), (17.9393, 1), (1.83206, 0), (16.4569, 1), (5.72463, 0), (8.78811, 1), (7.42257, 0), (4.85949, 1), (0.97829, 0), (-3.36394, 1), (7.54238, 0), (5.38683, 1), (9.91081, 0), (12.26083, 1), (-4.61743, 0), (10.27907, 1), (-4.40799, 0), (11.5144, 1), (9.99854, 0), (11.57335, 1), (8.53725, 0), (1.94203, 1), (3.2905, 0), (7.78228, 1), (0.38634, 0), (11.79385, 1), (-2.53374, 0), (10.18415, 1), (4.94758, 0), (14.67613, 1), (4.79624, 0), (4.70301, 1), (5.57664, 0), (12.72151, 1), (-6.44871, 0), (-3.35508, 1), (3.34431, 0), (17.63775, 1), (0.14209, 0), (2.53883, 1), (10.88431, 0), (14.01483, 1), (0.31846, 0), (12.4387, 1), (-0.54703, 0), (11.15408, 1), (-4.67791, 0), (7.74882, 1), (-5.68011, 0), (13.60956, 1), (-4.93362, 0), (7.81991, 1), (1.2271, 0), (10.90969, 1), (5.27512, 0), (8.19828, 1), (-3.84611, 0), (-1.18523, 1), (6.81706, 0), (0.5916, 1), (10.33033, 0), (0.35805, 1), (5.13979, 0), (12.98364, 1), (3.66534, 0), (11.38628, 1), (-2.07219, 0), (13.94644, 1), (10.65442, 0), (2.03781, 1), (-3.31751, 0), (10.74447, 1), (-1.82011, 0), (12.35656, 1), (-0.39886, 0), (7.08701, 1), (1.77052, 0), (2.69871, 
1), (1.29049, 0), (19.66653, 1), (7.92344, 0), (7.88636, 1), (-2.92595, 0), (10.36916, 1), (-2.67107, 0), (1.632, 1), (5.64708, 0), (11.86081, 1), (0.34639, 0), (13.47602, 1), (-3.04356, 0), (6.60204, 1), (3.98828, 0), (7.01303, 1), (-1.36695, 0), (20.19992, 1), (-8.48462, 0), (18.88249, 1), (-4.04669, 0), (11.34367, 1), (9.84561, 0), (12.97305, 1), (-6.1537, 0), (9.5776, 1), (0.82433, 0), (17.91364, 1), (1.92449, 0), (18.3247, 1), (2.51288, 0), (9.9211, 1), (0.40965, 0), (7.14257, 1), (2.89183, 0), (6.59133, 1), (3.84347, 0), (12.35274, 1), (0.66829, 0), (10.57523, 1), (-3.45094, 0), (12.12859, 1), (1.3544, 0), (9.47177, 1), (-9.85456, 0), (0.60659, 1), (5.25689, 0), (4.72996, 1), (-5.26018, 0), (4.51121, 1), (-6.16912, 0), (13.28893, 1), (-1.77163, 0), (8.09014, 1), (3.96687, 0), (8.02511, 1), (0.70893, 0), (13.85406, 1), (-5.45342, 0), (1.75412, 1), (-3.89706, 0), (6.00641, 1), (3.11868, 0), (6.35554, 1), (4.41714, 0), (7.11293, 1), (7.64841, 0), (8.30442, 1), (0.00489, 0), (12.63024, 1), (3.2263, 0), (12.38966, 1), (-5.33042, 0), (7.6801, 1), (2.52189, 0), (11.33744, 1), (-7.40308, 0), (4.67713, 1), (0.67891, 0), (7.62276, 1), (2.49343, 0), (2.14478, 1), (5.43133, 0), (15.32988, 1), (-0.67541, 0), (1.52299, 1), (-0.60299, 0), (17.00017, 1), (-6.32903, 0), (8.29701, 1), (-3.44336, 0), (10.92961, 1), (-0.23963, 0), (6.78449, 1), (6.94686, 0), (7.02698, 1), (6.59442, 0), (11.51719, 1), (-4.18532, 0), (9.97926, 1), (-1.8228, 0), (7.44251, 1), (-0.29443, 0), (7.58541, 1), (2.99821, 0), (4.76058, 1), (2.51942, 0), (12.88959, 1), (-3.49176, 0), (9.974, 1), (-0.57979, 0), (17.03689, 1), (8.69471, 0), (11.14554, 1), (-1.19427, 0), (11.7392, 1), (-3.17119, 0), (11.50029, 1), (-2.99566, 0), (19.41759, 1), (-3.34493, 0), (9.65127, 1), (-2.33826, 0), (9.87673, 1), (-5.04164, 0), (14.13485, 1), (-0.48214, 0), (9.78034, 1), (7.45097, 0), (1.57826, 1), (3.04787, 0), (3.72091, 1), (2.92632, 0), (9.4054, 1), (1.39694, 0), (23.22816, 1), (4.38686, 0), (-0.12571, 1), (3.25753, 0), (6.97343, 1), (7.14218, 0), (10.09049, 1), (-4.04341, 0), (11.78393, 1), (-9.19352, 0), (3.01909, 1), (2.78473, 0), (16.09448, 1), (0.33331, 0), (6.25485, 1), (9.89238, 0), (7.13164, 1), (6.00566, 0), (7.75879, 1), (-1.7511, 0), (9.56834, 1), (4.77815, 0), (6.14824, 1), (5.07457, 0), (13.53454, 1), (2.56132, 0), (8.26364, 1), (2.38317, 0), (8.7095, 1), (-1.63486, 0), (10.61607, 1), (-1.46871, 0), (10.64418, 1), (-5.8681, 0), (23.9106, 1), (-2.96227, 0), (11.38978, 1), (-1.90638, 0), (11.4383, 1), (-13.3052, 0), (18.41498, 1), (-2.14705, 0), (3.70959, 1), (-9.62069, 0), (19.95918, 1), (2.29313, 0), (9.53847, 1), (0.22162, 0), (14.04957, 1), (-1.83956, 0), (13.70151, 1), (4.1853, 0), (5.45046, 1), (6.05965, 0), (10.95061, 1), (-0.23737, 0), (9.55156, 1), (6.07452, 0), (17.92345, 1), (4.34629, 0), (6.23976, 1), (4.02922, 0), (8.71029, 1), (3.62622, 0), (13.58736, 1), (-3.95825, 0), (8.78527, 1), (-1.63412, 0), (11.14213, 1), (-1.25727, 0), (12.23717, 1), (5.06323, 0), (16.44557, 1), (-0.66176, 0), (0.47144, 1), (2.36606, 0), (9.7198, 1), (-5.77792, 0), (13.50981, 1), (4.535, 0), (14.27806, 1), (1.02031, 0), (13.50793, 1), (4.49345, 0), (7.47381, 1), (-4.99791, 0), (11.07844, 1), (2.46716, 0), (9.89844, 1), (3.65471, 0), (21.48548, 1), (11.2283, 0), (6.92085, 1), (6.69743, 0), (4.44074, 1), (-5.60375, 0), (19.98074, 1), (0.28683, 0), (7.92826, 1), (-0.85737, 0), (16.6313, 1), (4.26726, 0), (17.17618, 1), (-3.4322, 0), (13.80807, 1), (-2.07039, 0), (5.37083, 1), (-2.26798, 0), (9.73962, 1), (-0.99818, 0), (10.66273, 1), (0.41335, 0), 
(8.90639, 1), (5.18124, 0), (12.24596, 1), (-5.01858, 0), (16.89203, 1), (2.05561, 0), (12.69184, 1), (-0.12117, 0), (15.59077, 1), (0.99471, 0), (6.94287, 1), (6.89979, 0), (-0.1801, 1), (-4.18527, 0), (3.25318, 1), (-6.35104, 0), (8.08804, 1), (3.89734, 0), (13.78384, 1), (-1.979, 0), (0.46434, 1), (3.15404, 0), (7.78224, 1), (3.52672, 0), (9.10987, 1), (2.48372, 0), (-0.89391, 1), (-6.13089, 0), (14.3696, 1), (2.2968, 0), (3.01763, 1), (-2.74324, 0), (8.03559, 1), (-0.12876, 0), (7.24609, 1), (-1.51135, 0), (11.86271, 1), (-3.92434, 0), (6.28196, 1), (-1.71254, 0), (8.9725, 1), (-1.25878, 0), (14.46114, 1), (2.03021, 0), (9.50216, 1), (4.31726, 0), (16.30413, 1), (-3.02908, 0), (1.02795, 1), (9.7093, 0), (1.88717, 1), (-3.36284, 0), (9.80106, 1), (6.70938, 0), (4.53487, 1), (0.42762, 0), (16.34543, 1), (5.04726, 0), (7.71098, 1), (2.78386, 0), (2.74639, 1), (6.83022, 0), (6.51875, 1), (-3.02109, 0), (10.42308, 1), (-0.65382, 0), (13.57901, 1), (-15.58675, 0), (0.52784, 1), (5.89746, 0), (4.4708, 1), (-4.11598, 0), (6.39619, 1), (-1.37208, 0), (14.57666, 1), (10.08082, 0), (2.71602, 1), (5.35686, 0), (12.53905, 1), (1.93331, 0), (11.4292, 1), (10.47444, 0), (12.44641, 1), (-2.36872, 0), (14.50894, 1), (6.50752, 0), (17.64374, 1), (2.54603, 0), (11.03218, 1), (-0.4332, 0), (9.82789, 1), (5.26572, 0), (10.11104, 1), (2.09016, 0), (2.16137, 1), (1.15513, 0), (10.24054, 1), (14.95941, 0), (12.86909, 1), (-3.85505, 0), (15.22845, 1), (-2.36239, 0), (5.05411, 1), (1.64338, 0), (10.84836, 1), (-4.25074, 0), (11.15717, 1), (7.29744, 0), (0.91782, 1), (-1.18964, 0), (13.29961, 1), (5.60612, 0), (15.11314, 1), (-3.77011, 0), (11.54004, 1), (6.67642, 0), (-0.94238, 1), (-0.06862, 0), (19.32581, 1), (5.60514, 0), (10.20744, 1), (3.7341, 0), (6.54857, 1), (9.59001, 0), (8.69108, 1), (3.30093, 0), (8.2296, 1), (-2.75658, 0), (8.4474, 1), (4.71994, 0), (6.81178, 1), (0.74699, 0), (5.99415, 1), (2.91095, 0), (13.99336, 1), (-7.36829, 0), (8.7469, 1), (-5.29487, 0), (8.62349, 1), (3.31079, 0), (1.84212, 1), (1.06974, 0), (4.4762, 1), (-1.18424, 0), (9.25421, 1), (-7.415, 0), (10.44229, 1), (3.40595, 0), (12.21649, 1), (-7.63085, 0), (10.45968, 1), (1.13336, 0), (15.34722, 1), (-0.0096, 0), (5.50868, 1), (0.8928, 0), (10.93609, 1), (-0.5943, 0), (2.78631, 1), (7.48306, 0), (11.86145, 1), (10.11943, 0), (18.67385, 1), (5.60459, 0), (10.64051, 1), (4.00189, 0), (12.75565, 1), (2.35823, 0), (6.63666, 1), (0.33475, 0), (12.19343, 1), (3.47072, 0), (9.08636, 1), (-6.68867, 0), (11.67256, 1), (3.31031, 0), (20.31392, 1), (2.17159, 0), (11.66443, 1); SELECT '-28.740781574102936', '7.667329672103986e-133'; -SELECT roundBankers(studentTTest(left, right).1, 16) as t_stat, roundBankers(studentTTest(left, right).2, 16) as p_value from student_ttest; -SELECT roundBankers(studentTTest(0.95)(left, right).3, 16) as t_stat, roundBankers(studentTTest(0.95)(left, right).4, 16) as p_value from student_ttest; +SELECT roundBankers(studentTTest(left, right).1, 6) as t_stat, roundBankers(studentTTest(left, right).2, 6) as p_value from student_ttest; +SELECT roundBankers(studentTTest(0.95)(left, right).3, 6) as t_stat, roundBankers(studentTTest(0.95)(left, right).4, 6) as p_value from student_ttest; DROP TABLE IF EXISTS student_ttest; diff --git a/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference b/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference index 46aaa6e07d6..45a1a094c49 100644 --- 
a/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference
+++ b/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference
@@ -8,8 +8,7 @@ Expression (Projection)
   Limit (preliminary LIMIT (without OFFSET))
     Sorting (Sorting for ORDER BY)
       Expression (Before ORDER BY)
-        SettingQuotaAndLimits (Set limits and quota after reading from storage)
-          ReadFromMergeTree (default.test_order_by)
+        ReadFromMergeTree (default.test_order_by)
 SELECT
     timestamp,
     key
@@ -20,8 +19,7 @@ Expression (Projection)
   Limit (preliminary LIMIT (without OFFSET))
     Sorting
       Expression (Before ORDER BY)
-        SettingQuotaAndLimits (Set limits and quota after reading from storage)
-          ReadFromMergeTree (default.test_order_by)
+        ReadFromMergeTree (default.test_order_by)
 SELECT
     timestamp,
     key
@@ -34,8 +32,7 @@ Expression (Projection)
   Limit (preliminary LIMIT (without OFFSET))
     Sorting
      Expression (Before ORDER BY)
-        SettingQuotaAndLimits (Set limits and quota after reading from storage)
-          ReadFromMergeTree (default.test_order_by)
+        ReadFromMergeTree (default.test_order_by)
 SELECT
     timestamp,
     key
diff --git a/tests/queries/0_stateless/01576_alias_column_rewrite.reference b/tests/queries/0_stateless/01576_alias_column_rewrite.reference
index 678cbf7fb57..830db82274f 100644
--- a/tests/queries/0_stateless/01576_alias_column_rewrite.reference
+++ b/tests/queries/0_stateless/01576_alias_column_rewrite.reference
@@ -25,36 +25,30 @@ Expression (Projection)
   Limit (preliminary LIMIT (without OFFSET))
     Sorting (Sorting for ORDER BY)
       Expression (Before ORDER BY)
-        SettingQuotaAndLimits (Set limits and quota after reading from storage)
-          ReadFromMergeTree (default.test_table)
+        ReadFromMergeTree (default.test_table)
 Expression (Projection)
   Limit (preliminary LIMIT (without OFFSET))
     Sorting
       Expression (Before ORDER BY)
-        SettingQuotaAndLimits (Set limits and quota after reading from storage)
-          ReadFromMergeTree (default.test_table)
+        ReadFromMergeTree (default.test_table)
 Expression ((Projection + Before ORDER BY [lifted up part]))
   Limit (preliminary LIMIT (without OFFSET))
     Sorting
       Expression (Before ORDER BY)
-        SettingQuotaAndLimits (Set limits and quota after reading from storage)
-          ReadFromMergeTree (default.test_table)
+        ReadFromMergeTree (default.test_table)
 optimize_aggregation_in_order
 Expression ((Projection + Before ORDER BY))
   Aggregating
     Expression (Before GROUP BY)
-      SettingQuotaAndLimits (Set limits and quota after reading from storage)
-        ReadFromMergeTree (default.test_table)
+      ReadFromMergeTree (default.test_table)
 Expression ((Projection + Before ORDER BY))
   Aggregating
     Expression (Before GROUP BY)
-      SettingQuotaAndLimits (Set limits and quota after reading from storage)
-        ReadFromMergeTree (default.test_table)
+      ReadFromMergeTree (default.test_table)
 Expression ((Projection + Before ORDER BY))
   Aggregating
     Expression (Before GROUP BY)
-      SettingQuotaAndLimits (Set limits and quota after reading from storage)
-        ReadFromMergeTree (default.test_table)
+      ReadFromMergeTree (default.test_table)
 second-index
 1
 1
diff --git a/tests/queries/0_stateless/01576_alias_column_rewrite.sql b/tests/queries/0_stateless/01576_alias_column_rewrite.sql
index 910c95afd64..450127797cc 100644
--- a/tests/queries/0_stateless/01576_alias_column_rewrite.sql
+++ b/tests/queries/0_stateless/01576_alias_column_rewrite.sql
@@ -123,6 +123,7 @@ create table pl (dt DateTime, i int, projection p (select sum(i) group by toStar
 insert into pl values ('2020-10-24', 1);
 
+set max_rows_to_read = 2;
 select sum(i) from pd group by
dt_m settings allow_experimental_projection_optimization = 1, force_optimize_projection = 1; drop table pd; diff --git a/tests/queries/0_stateless/01591_window_functions.reference b/tests/queries/0_stateless/01591_window_functions.reference index 47da43399e6..0f21ba9b99c 100644 --- a/tests/queries/0_stateless/01591_window_functions.reference +++ b/tests/queries/0_stateless/01591_window_functions.reference @@ -912,8 +912,7 @@ Expression ((Projection + Before ORDER BY)) Window (Window step for window \'PARTITION BY p ORDER BY o ASC\') Sorting (Sorting for window \'PARTITION BY p ORDER BY o ASC\') Expression ((Before window functions + (Projection + Before ORDER BY))) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemNumbers) + ReadFromStorage (SystemNumbers) explain select count(*) over (order by o, number), count(*) over (order by number) @@ -928,8 +927,7 @@ Expression ((Projection + Before ORDER BY)) Expression ((Before window functions + (Projection + Before ORDER BY)) [lifted up part]) Sorting (Sorting for window \'ORDER BY number ASC\') Expression ((Before window functions + (Projection + Before ORDER BY))) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemNumbers) + ReadFromStorage (SystemNumbers) -- A test case for the sort comparator found by fuzzer. SELECT max(number) OVER (ORDER BY number DESC NULLS FIRST), diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 25c7c37beca..7e0a91b203f 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -1,85 +1,83 @@ - ReadFromMergeTree (default.test_index) - Indexes: - MinMax - Keys: - y - Condition: (y in [1, +Inf)) - Parts: 4/5 - Granules: 11/12 - Partition - Keys: - y - bitAnd(z, 3) - Condition: and((bitAnd(z, 3) not in [1, 1]), and((y in [1, +Inf)), (bitAnd(z, 3) not in [1, 1]))) - Parts: 3/4 - Granules: 10/11 - PrimaryKey - Keys: - x - y - Condition: and((x in [11, +Inf)), (y in [1, +Inf))) - Parts: 2/3 - Granules: 6/10 - Skip - Name: t_minmax - Description: minmax GRANULARITY 2 - Parts: 1/2 - Granules: 2/6 - Skip - Name: t_set - Description: set GRANULARITY 2 - Parts: 1/1 - Granules: 1/2 + ReadFromMergeTree (default.test_index) + Indexes: + MinMax + Keys: + y + Condition: (y in [1, +Inf)) + Parts: 4/5 + Granules: 11/12 + Partition + Keys: + y + bitAnd(z, 3) + Condition: and((bitAnd(z, 3) not in [1, 1]), and((y in [1, +Inf)), (bitAnd(z, 3) not in [1, 1]))) + Parts: 3/4 + Granules: 10/11 + PrimaryKey + Keys: + x + y + Condition: and((x in [11, +Inf)), (y in [1, +Inf))) + Parts: 2/3 + Granules: 6/10 + Skip + Name: t_minmax + Description: minmax GRANULARITY 2 + Parts: 1/2 + Granules: 2/6 + Skip + Name: t_set + Description: set GRANULARITY 2 + Parts: 1/1 + Granules: 1/2 ----------------- - "Node Type": "ReadFromMergeTree", - "Description": "default.test_index", - "Indexes": [ - { - "Type": "MinMax", - "Keys": ["y"], - "Condition": "(y in [1, +Inf))", - "Initial Parts": 5, - "Selected Parts": 4, - "Initial Granules": 12, - "Selected Granules": 11 - }, - { - "Type": "Partition", - "Keys": ["y", "bitAnd(z, 3)"], - "Condition": "and((bitAnd(z, 3) not in [1, 1]), and((y in [1, +Inf)), (bitAnd(z, 3) not in [1, 1])))", - "Initial Parts": 4, - "Selected Parts": 3, - "Initial Granules": 11, - "Selected Granules": 10 - }, - { - "Type": "PrimaryKey", - "Keys": ["x", "y"], - 
"Condition": "and((x in [11, +Inf)), (y in [1, +Inf)))", - "Initial Parts": 3, - "Selected Parts": 2, - "Initial Granules": 10, - "Selected Granules": 6 - }, - { - "Type": "Skip", - "Name": "t_minmax", - "Description": "minmax GRANULARITY 2", - "Initial Parts": 2, - "Selected Parts": 1, - "Initial Granules": 6, - "Selected Granules": 2 - }, - { - "Type": "Skip", - "Name": "t_set", - "Description": "set GRANULARITY 2", - "Initial Parts": 1, - "Selected Parts": 1, - "Initial Granules": 2, - "Selected Granules": 1 - } - ] + "Node Type": "ReadFromMergeTree", + "Description": "default.test_index", + "Indexes": [ + { + "Type": "MinMax", + "Keys": ["y"], + "Condition": "(y in [1, +Inf))", + "Initial Parts": 5, + "Selected Parts": 4, + "Initial Granules": 12, + "Selected Granules": 11 + }, + { + "Type": "Partition", + "Keys": ["y", "bitAnd(z, 3)"], + "Condition": "and((bitAnd(z, 3) not in [1, 1]), and((y in [1, +Inf)), (bitAnd(z, 3) not in [1, 1])))", + "Initial Parts": 4, + "Selected Parts": 3, + "Initial Granules": 11, + "Selected Granules": 10 + }, + { + "Type": "PrimaryKey", + "Keys": ["x", "y"], + "Condition": "and((x in [11, +Inf)), (y in [1, +Inf)))", + "Initial Parts": 3, + "Selected Parts": 2, + "Initial Granules": 10, + "Selected Granules": 6 + }, + { + "Type": "Skip", + "Name": "t_minmax", + "Description": "minmax GRANULARITY 2", + "Initial Parts": 2, + "Selected Parts": 1, + "Initial Granules": 6, + "Selected Granules": 2 + }, + { + "Type": "Skip", + "Name": "t_set", + "Description": "set GRANULARITY 2", + "Initial Parts": 1, + "Selected Parts": 1, + "Initial Granules": 2, + "Selected Granules": 1 } ] } @@ -90,21 +88,21 @@ } ] ----------------- - ReadFromMergeTree (default.test_index) - ReadType: InOrder - Parts: 1 - Granules: 3 + ReadFromMergeTree (default.test_index) + ReadType: InOrder + Parts: 1 + Granules: 3 ----------------- - ReadFromMergeTree (default.test_index) - ReadType: InReverseOrder - Parts: 1 - Granules: 3 - ReadFromMergeTree (default.idx) - Indexes: - PrimaryKey - Keys: - x - plus(x, y) - Condition: or((x in 2-element set), (plus(plus(x, y), 1) in (-Inf, 2])) - Parts: 1/1 - Granules: 1/1 + ReadFromMergeTree (default.test_index) + ReadType: InReverseOrder + Parts: 1 + Granules: 3 + ReadFromMergeTree (default.idx) + Indexes: + PrimaryKey + Keys: + x + plus(x, y) + Condition: or((x in 2-element set), (plus(plus(x, y), 1) in (-Inf, 2])) + Parts: 1/1 + Granules: 1/1 diff --git a/tests/queries/0_stateless/01823_explain_json.reference b/tests/queries/0_stateless/01823_explain_json.reference index f75cd69dbf3..9e36660204b 100644 --- a/tests/queries/0_stateless/01823_explain_json.reference +++ b/tests/queries/0_stateless/01823_explain_json.reference @@ -7,12 +7,7 @@ "Node Type": "Expression", "Plans": [ { - "Node Type": "SettingQuotaAndLimits", - "Plans": [ - { - "Node Type": "ReadFromStorage" - } - ] + "Node Type": "ReadFromStorage" } ] }, @@ -20,12 +15,7 @@ "Node Type": "Expression", "Plans": [ { - "Node Type": "SettingQuotaAndLimits", - "Plans": [ - { - "Node Type": "ReadFromStorage" - } - ] + "Node Type": "ReadFromStorage" } ] } diff --git a/tests/queries/0_stateless/01847_bad_like.reference b/tests/queries/0_stateless/01847_bad_like.reference index 06f4e8a840d..f202cb75513 100644 --- a/tests/queries/0_stateless/01847_bad_like.reference +++ b/tests/queries/0_stateless/01847_bad_like.reference @@ -22,4 +22,3 @@ 1 1 1 -1 diff --git a/tests/queries/0_stateless/01847_bad_like.sql b/tests/queries/0_stateless/01847_bad_like.sql index c7dedacc600..8eb6fd3941f 100644 --- 
a/tests/queries/0_stateless/01847_bad_like.sql +++ b/tests/queries/0_stateless/01847_bad_like.sql @@ -22,7 +22,7 @@ SELECT '\\' LIKE '%\\\\%'; SELECT '\\' LIKE '\\\\%'; SELECT '\\' LIKE '%\\\\'; SELECT '\\' LIKE '\\\\'; -SELECT '\\' LIKE '\\'; +SELECT '\\' LIKE '\\'; -- { serverError 25 } SELECT '\\xyz\\' LIKE '\\\\%\\\\'; SELECT '\\xyz\\' LIKE '\\\\___\\\\'; diff --git a/tests/queries/0_stateless/01861_explain_pipeline.reference b/tests/queries/0_stateless/01861_explain_pipeline.reference index 63ba55f5a04..2ba294d7e4d 100644 --- a/tests/queries/0_stateless/01861_explain_pipeline.reference +++ b/tests/queries/0_stateless/01861_explain_pipeline.reference @@ -1,11 +1,10 @@ (Expression) ExpressionTransform - (SettingQuotaAndLimits) - (ReadFromMergeTree) - ExpressionTransform - ReplacingSorted 2 → 1 - ExpressionTransform × 2 - MergeTreeInOrder × 2 0 → 1 + (ReadFromMergeTree) + ExpressionTransform + ReplacingSorted 2 → 1 + ExpressionTransform × 2 + MergeTreeInOrder × 2 0 → 1 0 0 1 1 2 2 @@ -15,11 +14,10 @@ ExpressionTransform 6 6 (Expression) ExpressionTransform × 2 - (SettingQuotaAndLimits) - (ReadFromMergeTree) - ExpressionTransform × 2 - ReplacingSorted × 2 2 → 1 - Copy × 2 1 → 2 - AddingSelector × 2 - ExpressionTransform × 2 - MergeTreeInOrder × 2 0 → 1 + (ReadFromMergeTree) + ExpressionTransform × 2 + ReplacingSorted × 2 2 → 1 + Copy × 2 1 → 2 + AddingSelector × 2 + ExpressionTransform × 2 + MergeTreeInOrder × 2 0 → 1 diff --git a/tests/queries/0_stateless/01883_with_grouping_sets.reference b/tests/queries/0_stateless/01883_with_grouping_sets.reference index 83fda9556e7..8fae10a05a4 100644 --- a/tests/queries/0_stateless/01883_with_grouping_sets.reference +++ b/tests/queries/0_stateless/01883_with_grouping_sets.reference @@ -13,9 +13,8 @@ ExpressionTransform Copy 1 → 2 (Expression) ExpressionTransform - (SettingQuotaAndLimits) - (ReadFromStorage) - Memory 0 → 1 + (ReadFromStorage) + Memory 0 → 1 1 0 1 4500 1 0 3 4700 1 0 5 4900 @@ -106,9 +105,8 @@ ExpressionTransform Copy × 3 1 → 2 (Expression) ExpressionTransform × 3 - (SettingQuotaAndLimits) - (ReadFromStorage) - NumbersMt × 3 0 → 1 + (ReadFromStorage) + NumbersMt × 3 0 → 1 4999500000 10000 4999510000 10000 4999520000 10000 diff --git a/tests/queries/0_stateless/01906_h3_to_geo.reference b/tests/queries/0_stateless/01906_h3_to_geo.reference index 93e8600576c..1903ce868b4 100644 --- a/tests/queries/0_stateless/01906_h3_to_geo.reference +++ b/tests/queries/0_stateless/01906_h3_to_geo.reference @@ -1,19 +1,19 @@ -(-173.6412167681162,-14.130272474941535) -(59.48137613600854,58.020407687755686) -(172.68095885060296,-83.6576608516349) -(-94.46556851304558,-69.1999982492279) -(-8.188263637093279,-55.856179102736284) -(77.25594891852249,47.39278564360122) -(135.11348004704536,36.60778126579667) -(39.28534828967223,49.07710003066973) -(124.71163478198051,-27.481172161567258) -(-147.4887686066785,76.73237945824442) -(86.63291906118863,-25.52526285188784) -(23.27751790712118,13.126101362212724) -(-70.40163237204142,-63.12562536833242) -(15.642428355535966,40.285813505163574) -(-76.53411447979884,54.5560449693637) -(8.19906334981474,67.69370966550179) +-173.641 -14.13 +59.481 58.02 +172.681 -83.658 +-94.466 -69.2 +-8.188 -55.856 +77.256 47.393 +135.113 36.608 +39.285 49.077 +124.712 -27.481 +-147.489 76.732 +86.633 -25.525 +23.278 13.126 +-70.402 -63.126 +15.642 40.286 +-76.534 54.556 +8.199 67.694 ok ok ok diff --git a/tests/queries/0_stateless/01906_h3_to_geo.sql b/tests/queries/0_stateless/01906_h3_to_geo.sql index cb0de3cbb29..b05479450e3 
100644
--- a/tests/queries/0_stateless/01906_h3_to_geo.sql
+++ b/tests/queries/0_stateless/01906_h3_to_geo.sql
@@ -25,7 +25,7 @@ INSERT INTO h3_indexes VALUES (639763125756281263);
 INSERT INTO h3_indexes VALUES (644178757620501158);
 
-SELECT h3ToGeo(h3_index) FROM h3_indexes ORDER BY h3_index;
+WITH h3ToGeo(h3_index) AS p SELECT round(p.1, 3), round(p.2, 3) FROM h3_indexes ORDER BY h3_index;
 
 DROP TABLE h3_indexes;
@@ -56,7 +56,7 @@ SELECT result
 FROM
 (
     SELECT
         (lat, lon) AS input_geo,
         h3ToGeo(geoToH3(lat, lon, res)) AS output_geo,
-        if(input_geo = output_geo, 'ok', 'fail') AS result
+        if(abs(input_geo.1 - output_geo.1) < 0.001 AND abs(input_geo.2 - output_geo.2) < 0.001, 'ok', 'fail') AS result
     FROM h3_geo
 );
diff --git a/tests/queries/0_stateless/01921_datatype_date32.reference b/tests/queries/0_stateless/01921_datatype_date32.reference
index a6b03123b33..70eebc76c01 100644
--- a/tests/queries/0_stateless/01921_datatype_date32.reference
+++ b/tests/queries/0_stateless/01921_datatype_date32.reference
@@ -283,3 +283,9 @@
 1925-01-01 \N
 1925-01-01 \N
+1925-01-01
+1969-12-31
+1970-01-01
+2149-06-06
+2149-06-07
+2283-11-11
diff --git a/tests/queries/0_stateless/01921_datatype_date32.sql b/tests/queries/0_stateless/01921_datatype_date32.sql
index 0805b94a9fe..ef6e3e5ee89 100644
--- a/tests/queries/0_stateless/01921_datatype_date32.sql
+++ b/tests/queries/0_stateless/01921_datatype_date32.sql
@@ -118,4 +118,15 @@
 select toDate32OrZero('1924-01-01'), toDate32OrNull('1924-01-01');
 select toDate32OrZero(''), toDate32OrNull('');
 select (select toDate32OrZero(''));
 select (select toDate32OrNull(''));
+SELECT toString(T.d) dateStr
+FROM
+    (
+        SELECT '1925-01-01'::Date32 d
+        UNION ALL SELECT '1969-12-31'::Date32
+        UNION ALL SELECT '1970-01-01'::Date32
+        UNION ALL SELECT '2149-06-06'::Date32
+        UNION ALL SELECT '2149-06-07'::Date32
+        UNION ALL SELECT '2283-11-11'::Date32
+    ) AS T
+ORDER BY T.d
diff --git a/tests/queries/0_stateless/01951_distributed_push_down_limit.reference b/tests/queries/0_stateless/01951_distributed_push_down_limit.reference
index d0e7a9ef15b..7f73a8c6554 100644
--- a/tests/queries/0_stateless/01951_distributed_push_down_limit.reference
+++ b/tests/queries/0_stateless/01951_distributed_push_down_limit.reference
@@ -3,26 +3,18 @@ explain select * from remote('127.{1,2}', view(select * from numbers(1e6))) orde
 Expression (Projection)
   Limit (preliminary LIMIT (without OFFSET))
     Sorting (Merge sorted streams after aggregation stage for ORDER BY)
-      SettingQuotaAndLimits (Set limits and quota after reading from storage)
-        Union
-          Sorting (Sorting for ORDER BY)
-            Expression (Before ORDER BY)
-              SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))
-                  SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                    ReadFromStorage (SystemNumbers)
-          ReadFromRemote (Read from remote replica)
+      Union
+        Sorting (Sorting for ORDER BY)
+          Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))
+            ReadFromStorage (SystemNumbers)
+        ReadFromRemote (Read from remote replica)
 explain select * from remote('127.{1,2}', view(select * from numbers(1e6))) order by number limit 10 settings distributed_push_down_limit=1;
 Expression (Projection)
   Limit (preliminary LIMIT (without OFFSET))
     Sorting (Merge sorted streams after aggregation stage for ORDER BY)
-      SettingQuotaAndLimits (Set limits and quota after reading from storage)
-        Union
-          Limit (preliminary LIMIT (with OFFSET))
-            Sorting (Sorting for ORDER BY)
-              Expression (Before ORDER BY)
-                SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                  Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))
-                    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                      ReadFromStorage (SystemNumbers)
-          ReadFromRemote (Read from remote replica)
+      Union
+        Limit (preliminary LIMIT (with OFFSET))
+          Sorting (Sorting for ORDER BY)
+            Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))
+              ReadFromStorage (SystemNumbers)
+        ReadFromRemote (Read from remote replica)
diff --git a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference
index b23631395ff..9592ed5691d 100644
--- a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference
+++ b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference
@@ -2,106 +2,74 @@ explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized
 Expression (Projection)
   Distinct
-    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-      Union
-        Distinct (Preliminary DISTINCT)
-          Expression (Before ORDER BY)
-            SettingQuotaAndLimits (Set limits and quota after reading from storage)
-              Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))
-                SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                  ReadFromStorage (SystemNumbers)
-        ReadFromRemote (Read from remote replica)
+    Union
+      Distinct (Preliminary DISTINCT)
+        Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))
+          ReadFromStorage (SystemNumbers)
+      ReadFromRemote (Read from remote replica)
 explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized
-SettingQuotaAndLimits (Set limits and quota after reading from storage)
-  Union
-    Expression (Projection)
-      Distinct
-        Distinct (Preliminary DISTINCT)
-          Expression (Before ORDER BY)
-            SettingQuotaAndLimits (Set limits and quota after reading from storage)
-              Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))
-                SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                  ReadFromStorage (SystemNumbers)
-  ReadFromRemote (Read from remote replica)
+Union
+  Expression (Projection)
+    Distinct
+      Distinct (Preliminary DISTINCT)
+        Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))
+          ReadFromStorage (SystemNumbers)
+  ReadFromRemote (Read from remote replica)
 explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized
 Expression (Projection)
   LimitBy
     Expression (Before LIMIT BY)
-      SettingQuotaAndLimits (Set limits and quota after reading from storage)
-        Union
-          LimitBy
-            Expression ((Before LIMIT BY + Before ORDER BY))
-              SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))
-                  SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                    ReadFromStorage (SystemNumbers)
-          ReadFromRemote (Read from remote replica)
+      Union
+        LimitBy
+          Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))))
+            ReadFromStorage (SystemNumbers)
+        ReadFromRemote (Read from remote replica)
 explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized
-SettingQuotaAndLimits (Set limits and quota after reading from storage)
-  Union
-    Expression (Projection)
-      LimitBy
-        Expression ((Before LIMIT BY + Before ORDER BY))
-          SettingQuotaAndLimits (Set limits and quota after reading from storage)
-            Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))
-              SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                ReadFromStorage (SystemNumbers)
-  ReadFromRemote (Read from remote replica)
+Union
+  Expression (Projection)
+    LimitBy
+      Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))))
+        ReadFromStorage (SystemNumbers)
+  ReadFromRemote (Read from remote replica)
 explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized
 Expression (Projection)
   Distinct
     Sorting (Merge sorted streams for ORDER BY, without aggregation)
-      SettingQuotaAndLimits (Set limits and quota after reading from storage)
-        Union
-          Sorting (Sorting for ORDER BY)
-            Distinct (Preliminary DISTINCT)
-              Expression (Before ORDER BY)
-                SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                  Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))
-                    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                      ReadFromStorage (SystemNumbers)
-          ReadFromRemote (Read from remote replica)
+      Union
+        Sorting (Sorting for ORDER BY)
+          Distinct (Preliminary DISTINCT)
+            Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))
+              ReadFromStorage (SystemNumbers)
+        ReadFromRemote (Read from remote replica)
 explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized
 Expression (Projection)
   Sorting (Merge sorted streams after aggregation stage for ORDER BY)
-    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-      Union
-        Distinct
-          Sorting (Sorting for ORDER BY)
-            Distinct (Preliminary DISTINCT)
-              Expression (Before ORDER BY)
-                SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                  Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))
-                    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                      ReadFromStorage (SystemNumbers)
-      ReadFromRemote (Read from remote replica)
+    Union
+      Distinct
+        Sorting (Sorting for ORDER BY)
+          Distinct (Preliminary DISTINCT)
+            Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))
+              ReadFromStorage (SystemNumbers)
+      ReadFromRemote (Read from remote replica)
 explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized
 Expression (Projection)
   LimitBy
     Expression (Before LIMIT BY)
       Sorting (Merge sorted streams for ORDER BY, without aggregation)
-        SettingQuotaAndLimits (Set limits and quota after reading from storage)
-          Union
-            LimitBy
-              Expression (Before LIMIT BY)
-                Sorting (Sorting for ORDER BY)
-                  Expression (Before ORDER BY)
-                    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                      Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))
-                        SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                          ReadFromStorage (SystemNumbers)
-            ReadFromRemote (Read from remote replica)
+        Union
+          LimitBy
+            Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) [lifted up part]))
+              Sorting (Sorting for ORDER BY)
+                Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))
+                  ReadFromStorage (SystemNumbers)
+          ReadFromRemote (Read from remote replica)
 explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized
 Expression (Projection)
   Sorting (Merge sorted streams after aggregation stage for ORDER BY)
-    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-      Union
-        LimitBy
-          Expression (Before LIMIT BY)
-            Sorting (Sorting for ORDER BY)
-              Expression (Before ORDER BY)
-                SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                  Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))
-                    SettingQuotaAndLimits (Set limits and quota after reading from storage)
-                      ReadFromStorage (SystemNumbers)
-      ReadFromRemote (Read from remote replica)
+    Union
+      LimitBy
+        Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) [lifted up part]))
+          Sorting (Sorting for ORDER BY)
+            Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))
+              ReadFromStorage (SystemNumbers)
+      ReadFromRemote (Read from remote replica)
diff --git a/tests/queries/0_stateless/02006_h3_to_geo_boundary.reference b/tests/queries/0_stateless/02006_h3_to_geo_boundary.reference
index 0d54f3895bd..96ba6e17526 100644
--- a/tests/queries/0_stateless/02006_h3_to_geo_boundary.reference
+++ b/tests/queries/0_stateless/02006_h3_to_geo_boundary.reference
@@ -1,16 +1,16 @@
-[(-25.60370257696877,-170.61932339479839),(-16.505947603561054,-161.6348206171839),(-5.762860491436932,-165.41674992858833),(-3.968796976609588,-176.05696384421356),(-11.54529597541476,175.98600155652952),(-22.19754138630238,177.51613498805204)] -[(57.15637211465772,51.15131422930827),(53.74532256646542,56.28611799875301),(54.25756495131088,64.15861768734945),(58.31910366055611,68.25632325438265),(62.114389785017146,63.44462184184533),(61.440920615681854,53.98254701603947)] -[(-82.24829137508873,167.18203088800593),(-83.41761096812805,158.00531624510785),(-84.94207431820979,162.09183616506846),(-85.00324300064887,-178.60454506450245),(-83.46691212211444,-172.41232929697492),(-82.25118471750908,179.4928586395771)] -[(-69.70201806837188,-95.63006768303532),(-69.78121889088241,-93.8329499937899),(-69.26603652285242,-92.70414199751751),(-68.6908704290193,-93.35479180342097),(-68.62037380778602,-95.0614247833063),(-69.11663254992226,-96.20491957306085)] -[(-55.64960804791368,-8.350548718066527),(-55.828830219010115,-8.612333853721543),(-56.03525612929049,-8.450494689408638),(-56.06193617137633,-8.024722968315496),(-55.88205068143165,-7.7639036247878686),(-55.67615021200244,-7.927868022867216)] -[(47.4047230189944,77.11372810022105),(47.31523866865899,77.16981222471887),(47.30324229614998,77.31177114774472),(47.380675190840634,77.39807294060385),(47.47025252565847,77.34232219743376),(47.482304118992815,77.19993524307596)] -[(36.59082706214739,135.1447773494689),(36.623412749699106,135.14466572237862),(36.640360586702336,135.11335418763244),(36.62472012921161,135.0821822076476),(36.592149268586866,135.08232217250773),(36.575204036900494,135.1136057921021)] -[(49.072491410488254,39.26829772479248),(49.06434071502539,39.28264806614126),(49.068946849412825,39.29969935764574),(49.08170687750978,39.30240475320644),(49.08985976852211,39.288050003564216),(49.0852504352169,39.27099426655672)] -[(-27.484668731117956,124.71456214331452),(-27.480255804296018,124.7164671069391),(-27.476759420067374,124.71353974495929),(-27.477675754654342,124.70870779388683),(-27.48208829554146,124.706802674375),(-27.485584887763263,124.70972966178107)] -[(76.73422062536704,-147.4905412849195),(76.73289074232657,-147.4965026703208),(76.73104961876471,-147.49472931389167),(76.730538300536,-147.48699643238956),(76.73186795003895,-147.4810352654388),(76.73370915128375,-147.4828067613433)] -[(-25.52488377871919,86.63224601066914),(-25.525614626072162,86.63222807640663),(-25.525993697439322,86.63290113101664),(-25.5256419200276,86.6335921124905),(-25.524911076278325,86.63361003835355),(-25.524532006337036,86.63293699114213)] -[(13.126242531761285,23.27727960465014),(13.125977729043674,23.277268554217677),(13.125836559860176,23.277506856149834),(13.125960192636454,23.27775620899515),(13.12622499528112,23.27776726055665),(13.126366165222441,23.277528958143787)] -[(-63.12570930185924,-70.40176049055108),(-63.12571366261393,-70.4015366058905),(-63.1256297288591,-70.40140848801948),(-63.125541434726955,-70.4015042543287),(-63.125537074078174,-70.40172813751032),(-63.125621007455614,-70.40185625586165)] -[(40.28583119436284,15.642383240826787),(40.2857926151193,15.642384471654026),(40.28577492591182,15.642429586343791),(40.28579581594201,15.642473470233714),(40.2858343951909,15.642472239444253),(40.28585208440421,15.642427124727082)] 
-[(54.55605961596273,-76.53410754061841),(54.556055751776405,-76.53413299495058),(54.5560411051761,-76.53413993412053),(54.55603032276505,-76.53412141897394),(54.55603418694863,-76.53409596465765),(54.556048833546,-76.53408902547206)] -[(67.69370622234305,8.199070817163335),(67.69370970389446,8.199075597443361),(67.69371314705361,8.199068130094206),(67.69371310866035,8.199055882463448),(67.69370962710823,8.19905110218645),(67.69370618395016,8.199058569537081)] +[(-25.6,-170.62),(-16.51,-161.63),(-5.76,-165.42),(-3.97,-176.06),(-11.55,175.99),(-22.2,177.52)] +[(57.16,51.15),(53.75,56.29),(54.26,64.16),(58.32,68.26),(62.11,63.44),(61.44,53.98)] +[(-82.25,167.18),(-83.42,158.01),(-84.94,162.09),(-85,-178.6),(-83.47,-172.41),(-82.25,179.49)] +[(-69.7,-95.63),(-69.78,-93.83),(-69.27,-92.7),(-68.69,-93.35),(-68.62,-95.06),(-69.12,-96.2)] +[(-55.65,-8.35),(-55.83,-8.61),(-56.04,-8.45),(-56.06,-8.02),(-55.88,-7.76),(-55.68,-7.93)] +[(47.4,77.11),(47.32,77.17),(47.3,77.31),(47.38,77.4),(47.47,77.34),(47.48,77.2)] +[(36.59,135.14),(36.62,135.14),(36.64,135.11),(36.62,135.08),(36.59,135.08),(36.58,135.11)] +[(49.07,39.27),(49.06,39.28),(49.07,39.3),(49.08,39.3),(49.09,39.29),(49.09,39.27)] +[(-27.48,124.71),(-27.48,124.72),(-27.48,124.71),(-27.48,124.71),(-27.48,124.71),(-27.49,124.71)] +[(76.73,-147.49),(76.73,-147.5),(76.73,-147.49),(76.73,-147.49),(76.73,-147.48),(76.73,-147.48)] +[(-25.52,86.63),(-25.53,86.63),(-25.53,86.63),(-25.53,86.63),(-25.52,86.63),(-25.52,86.63)] +[(13.13,23.28),(13.13,23.28),(13.13,23.28),(13.13,23.28),(13.13,23.28),(13.13,23.28)] +[(-63.13,-70.4),(-63.13,-70.4),(-63.13,-70.4),(-63.13,-70.4),(-63.13,-70.4),(-63.13,-70.4)] +[(40.29,15.64),(40.29,15.64),(40.29,15.64),(40.29,15.64),(40.29,15.64),(40.29,15.64)] +[(54.56,-76.53),(54.56,-76.53),(54.56,-76.53),(54.56,-76.53),(54.56,-76.53),(54.56,-76.53)] +[(67.69,8.2),(67.69,8.2),(67.69,8.2),(67.69,8.2),(67.69,8.2),(67.69,8.2)] diff --git a/tests/queries/0_stateless/02006_h3_to_geo_boundary.sql b/tests/queries/0_stateless/02006_h3_to_geo_boundary.sql index 906df97b4fd..003429c9c88 100644 --- a/tests/queries/0_stateless/02006_h3_to_geo_boundary.sql +++ b/tests/queries/0_stateless/02006_h3_to_geo_boundary.sql @@ -23,6 +23,6 @@ INSERT INTO h3_indexes VALUES (635544851677385791); INSERT INTO h3_indexes VALUES (639763125756281263); INSERT INTO h3_indexes VALUES (644178757620501158); -SELECT h3ToGeoBoundary(h3_index) FROM h3_indexes ORDER BY h3_index; +SELECT arrayMap(p -> (round(p.1, 2), round(p.2, 2)), h3ToGeoBoundary(h3_index)) FROM h3_indexes ORDER BY h3_index; DROP TABLE h3_indexes; diff --git a/tests/queries/0_stateless/02013_lc_nullable_and_infinity.sql b/tests/queries/0_stateless/02013_lc_nullable_and_infinity.sql index c1c8a9c00b1..8cca4aa4e59 100644 --- a/tests/queries/0_stateless/02013_lc_nullable_and_infinity.sql +++ b/tests/queries/0_stateless/02013_lc_nullable_and_infinity.sql @@ -1,3 +1,3 @@ -set receive_timeout = '10', receive_data_timeout_ms = '10000', extremes = '1', allow_suspicious_low_cardinality_types = '1', force_primary_key = '1', join_use_nulls = '1', max_rows_to_read = '1', join_algorithm = 'partial_merge'; +set receive_timeout = '10', receive_data_timeout_ms = '10000', extremes = '1', allow_suspicious_low_cardinality_types = '1', force_primary_key = '1', join_use_nulls = '1', max_rows_to_read = '2', join_algorithm = 'partial_merge'; SELECT * FROM (SELECT dummy AS val FROM system.one) AS s1 ANY LEFT JOIN (SELECT toLowCardinality(dummy) AS rval FROM system.one) AS s2 ON (val + 9223372036854775806) = (rval * 
1); diff --git a/tests/queries/0_stateless/02020_exponential_smoothing.reference b/tests/queries/0_stateless/02020_exponential_smoothing.reference index b3c23420678..07ae7cf0010 100644 --- a/tests/queries/0_stateless/02020_exponential_smoothing.reference +++ b/tests/queries/0_stateless/02020_exponential_smoothing.reference @@ -1,13 +1,13 @@ 1 0 0.5 0 1 0.25 0 2 0.125 -0 3 0.0625 -0 4 0.03125 -0 5 0.015625 -0 6 0.0078125 -0 7 0.00390625 -0 8 0.001953125 -0 9 0.0009765625 +0 3 0.062 +0 4 0.031 +0 5 0.016 +0 6 0.008 +0 7 0.004 +0 8 0.002 +0 9 0.001 1 0 0.067 0 1 0.062 0 2 0.058 @@ -22,12 +22,12 @@ 1 1 0.5 2 2 1.25 3 3 2.125 -4 4 3.0625 -5 5 4.03125 -6 6 5.015625 -7 7 6.0078125 -8 8 7.00390625 -9 9 8.001953125 +4 4 3.062 +5 5 4.031 +6 6 5.016 +7 7 6.008 +8 8 7.004 +9 9 8.002 1 0 0.067 ███▎ 0 1 0.062 ███ 0 2 0.058 ██▊ @@ -129,15 +129,15 @@ 0 48 0.065 ███▏ 0 49 0.032 █▌ 1 0 1 -0 1 0.36787944117144233 -0 2 0.1353352832366127 -0 3 0.04978706836786395 -0 4 0.018315638888734186 -0 5 0.00673794699908547 -0 6 0.0024787521766663594 -0 7 0.0009118819655545166 -0 8 0.00033546262790251196 -0 9 0.0001234098040866796 +0 1 0.368 +0 2 0.135 +0 3 0.05 +0 4 0.018 +0 5 0.007 +0 6 0.002 +0 7 0.001 +0 8 0 +0 9 0 1 0 1 0 1 0.905 0 2 0.819 @@ -150,14 +150,14 @@ 0 9 0.407 0 0 0 1 1 1 -2 2 2.3678794411714423 -3 3 3.8710941655794975 -4 4 5.424095958355417 -5 5 6.99541339002007 -6 6 8.573468768683808 -7 7 10.154002899524214 -8 8 11.735448912330172 -9 9 13.317230387764035 +2 2 2.368 +3 3 3.871 +4 4 5.424 +5 5 6.995 +6 6 8.573 +7 7 10.154 +8 8 11.735 +9 9 13.317 1 0 1 █████ 0 1 0.905 ████▌ 0 2 0.819 ████ @@ -259,15 +259,15 @@ 0 48 0.05 ██▌ 0 49 0.018 ▊ 1 0 1 -0 1 0.36787944117144233 -0 2 0.1353352832366127 -0 3 0.04978706836786395 -0 4 0.018315638888734186 -0 5 0.00673794699908547 -0 6 0.0024787521766663594 -0 7 0.0009118819655545166 -0 8 0.00033546262790251196 -0 9 0.0001234098040866796 +0 1 0.368 +0 2 0.135 +0 3 0.05 +0 4 0.018 +0 5 0.007 +0 6 0.002 +0 7 0.001 +0 8 0 +0 9 0 1 0 1 0 1 0.905 0 2 0.819 @@ -389,15 +389,15 @@ 0 48 0.05 ██▍ 0 49 0.018 ▊ 1 0 1 -0 1 1.3678794411714423 -0 2 1.5032147244080551 -0 3 1.553001792775919 -0 4 1.5713174316646532 -0 5 1.5780553786637386 -0 6 1.5805341308404048 -0 7 1.5814460128059595 -0 8 1.581781475433862 -0 9 1.5819048852379487 +0 1 1.368 +0 2 1.503 +0 3 1.553 +0 4 1.571 +0 5 1.578 +0 6 1.581 +0 7 1.581 +0 8 1.582 +0 9 1.582 1 0 1 0 1 1.905 0 2 2.724 @@ -409,15 +409,15 @@ 0 8 6.236 0 9 6.643 0 0 1 -1 1 1.3678794411714423 -2 2 1.5032147244080551 -3 3 1.553001792775919 -4 4 1.5713174316646532 -5 5 1.5780553786637386 -6 6 1.5805341308404048 -7 7 1.5814460128059595 -8 8 1.581781475433862 -9 9 1.5819048852379487 +1 1 1.368 +2 2 1.503 +3 3 1.553 +4 4 1.571 +5 5 1.578 +6 6 1.581 +7 7 1.581 +8 8 1.582 +9 9 1.582 1 0 1 █████ 0 1 1.819 █████████ 0 2 2.489 ████████████▍ @@ -519,15 +519,15 @@ 0 48 10.43 ██████████████████████████ 0 49 10.438 ██████████████████████████ 1 0 1 -0 1 0.2689414213699951 -0 2 0.09003057317038046 -0 3 0.032058603280084995 -0 4 0.01165623095603961 -0 5 0.004269778545282112 -0 6 0.0015683003158864733 -0 7 0.000576612769687006 -0 8 0.00021207899644323433 -0 9 0.00007801341612780745 +0 1 0.269 +0 2 0.09 +0 3 0.032 +0 4 0.012 +0 5 0.004 +0 6 0.002 +0 7 0.001 +0 8 0 +0 9 0 1 0 1 0 1 0.475 0 2 0.301 @@ -539,15 +539,15 @@ 0 8 0.072 0 9 0.061 0 0 0 -1 1 0.7310585786300049 -2 2 1.5752103826044412 -3 3 2.4926527345857696 -4 4 3.4519415676621947 -5 5 4.432932763071741 -6 6 5.424412292903226 -7 7 6.420707894737403 -8 8 7.419134118454189 -9 9 8.41847731304077 +1 1 0.731 +2 2 1.575 +3 
3 2.493 +4 4 3.452 +5 5 4.433 +6 6 5.424 +7 7 6.421 +8 8 7.419 +9 9 8.418 1 0 1 ██████████ 0 1 0.475 ████▋ 0 2 0.301 ███ diff --git a/tests/queries/0_stateless/02020_exponential_smoothing.sql b/tests/queries/0_stateless/02020_exponential_smoothing.sql index a39b09a883d..1e51b86856d 100644 --- a/tests/queries/0_stateless/02020_exponential_smoothing.sql +++ b/tests/queries/0_stateless/02020_exponential_smoothing.sql @@ -1,8 +1,7 @@ -- exponentialMovingAverage -SELECT number = 0 AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialMovingAverage(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); - -SELECT number AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); SELECT value, @@ -33,10 +32,9 @@ FROM ); -- exponentialTimeDecayedSum -SELECT number = 0 AS value, number AS time, exponentialTimeDecayedSum(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedSum(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedSum(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); - -SELECT number AS value, number AS time, exponentialTimeDecayedSum(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number AS value, number AS time, exponentialTimeDecayedSum(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); SELECT value, @@ -67,10 +65,9 @@ FROM ); -- exponentialTimeDecayedMax -SELECT number = 0 AS value, number AS time, exponentialTimeDecayedMax(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedMax(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedMax(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); - -SELECT number AS value, number AS time, exponentialTimeDecayedMax(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number AS value, number AS time, exponentialTimeDecayedMax(1)(value, time) OVER 
(ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); SELECT value, @@ -101,10 +98,9 @@ FROM ); -- exponentialTimeDecayedCount -SELECT number = 0 AS value, number AS time, exponentialTimeDecayedCount(1)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedCount(1)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedCount(10)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); - -SELECT number AS value, number AS time, exponentialTimeDecayedCount(1)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number AS value, number AS time, exponentialTimeDecayedCount(1)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); SELECT value, @@ -135,10 +131,9 @@ FROM ); -- exponentialTimeDecayedAvg -SELECT number = 0 AS value, number AS time, exponentialTimeDecayedAvg(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedAvg(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedAvg(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); - -SELECT number AS value, number AS time, exponentialTimeDecayedAvg(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number AS value, number AS time, exponentialTimeDecayedAvg(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); SELECT value, diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference index 9e24b7c6ea6..437b934c28c 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference @@ -11,9 +11,8 @@ ExpressionTransform MergingSortedTransform 2 → 1 (Expression) ExpressionTransform × 2 - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder × 2 0 → 1 + (ReadFromMergeTree) + MergeTreeInOrder × 2 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -27,12 +26,11 @@ ExpressionTransform MergingSortedTransform 2 → 1 (Expression) ExpressionTransform × 2 - (SettingQuotaAndLimits) - (ReadFromMergeTree) - ReverseTransform - MergeTreeReverse 0 → 1 - ReverseTransform - MergeTreeReverse 0 → 1 + (ReadFromMergeTree) + ReverseTransform + MergeTreeReverse 0 → 1 + ReverseTransform + MergeTreeReverse 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -48,9 +46,8 @@ ExpressionTransform MergingSortedTransform 2 → 1 (Expression) ExpressionTransform × 2 - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder × 2 0 → 1 + (ReadFromMergeTree) + MergeTreeInOrder × 2 0 → 1 2020-10-11 0 2020-10-11 0 2020-10-11 0 @@ -65,9 +62,8 @@ 
ExpressionTransform ExpressionTransform (Filter) FilterTransform - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder 0 → 1 + (ReadFromMergeTree) + MergeTreeInOrder 0 → 1 2020-10-11 0 0 2020-10-11 0 10 2020-10-11 0 20 @@ -84,9 +80,8 @@ ExpressionTransform ExpressionTransform (Filter) FilterTransform - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder 0 → 1 + (ReadFromMergeTree) + MergeTreeInOrder 0 → 1 2020-10-12 0 2020-10-12 1 2020-10-12 2 @@ -101,10 +96,9 @@ ExpressionTransform ExpressionTransform (Filter) FilterTransform - (SettingQuotaAndLimits) - (ReadFromMergeTree) - ReverseTransform - MergeTreeReverse 0 → 1 + (ReadFromMergeTree) + ReverseTransform + MergeTreeReverse 0 → 1 2020-10-12 99999 2020-10-12 99998 2020-10-12 99997 @@ -130,9 +124,8 @@ ExpressionTransform (Sorting) (Expression) ExpressionTransform - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder 0 → 1 + (ReadFromMergeTree) + MergeTreeInOrder 0 → 1 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 @@ -145,9 +138,8 @@ ExpressionTransform (Sorting) (Expression) ExpressionTransform - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder 0 → 1 + (ReadFromMergeTree) + MergeTreeInOrder 0 → 1 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 diff --git a/tests/queries/0_stateless/02160_h3_cell_area_m2.reference b/tests/queries/0_stateless/02160_h3_cell_area_m2.reference index e8727e05cf9..96b0fa8d739 100644 --- a/tests/queries/0_stateless/02160_h3_cell_area_m2.reference +++ b/tests/queries/0_stateless/02160_h3_cell_area_m2.reference @@ -1,16 +1,16 @@ -4106166334463.9233 -666617118882.2277 -85294486110.07852 -12781831077.715292 -1730585103.2965515 -302748289.6422262 -30296673.089799587 -4984621.68910725 -644257.1047199412 -113498.17901913072 -16692.536464980716 -2335.8824226249617 -324.4496823479308 -48.63220901355471 -7.442732649761864 -0.5977527784258132 +4106166334463.92 +666617118882.23 +85294486110.08 +12781831077.72 +1730585103.3 +302748289.64 +30296673.09 +4984621.69 +644257.1 +113498.18 +16692.54 +2335.88 +324.45 +48.63 +7.44 +0.6 diff --git a/tests/queries/0_stateless/02160_h3_cell_area_m2.sql b/tests/queries/0_stateless/02160_h3_cell_area_m2.sql index 55c6ef45542..bad06c21d18 100644 --- a/tests/queries/0_stateless/02160_h3_cell_area_m2.sql +++ b/tests/queries/0_stateless/02160_h3_cell_area_m2.sql @@ -24,7 +24,6 @@ INSERT INTO h3_indexes VALUES (635544851677385791); INSERT INTO h3_indexes VALUES (639763125756281263); INSERT INTO h3_indexes VALUES (644178757620501158); - -SELECT h3CellAreaM2(h3_index) FROM h3_indexes ORDER BY h3_index; +SELECT round(h3CellAreaM2(h3_index), 2) FROM h3_indexes ORDER BY h3_index; DROP TABLE h3_indexes; diff --git a/tests/queries/0_stateless/02160_h3_cell_area_rads2.reference b/tests/queries/0_stateless/02160_h3_cell_area_rads2.reference index d74c3f77f97..0a5610490a8 100644 --- a/tests/queries/0_stateless/02160_h3_cell_area_rads2.reference +++ b/tests/queries/0_stateless/02160_h3_cell_area_rads2.reference @@ -1,16 +1,16 @@ -0.10116268528089567 -0.01642329421346843 -0.002101380838405832 -0.00031490306268786255 -0.000042636031250655976 -0.000007458740696242262 -7.464122383736096e-7 -1.2280498988731694e-7 -1.587241563444197e-8 -2.7962288004989136e-9 -4.112502211061015e-10 -5.754860352096175e-11 -7.99339296836726e-12 -1.1981406631437076e-12 -1.8336491007639705e-13 -1.4726699133479243e-14 +0.10116268 +0.01642329 +0.00210138 +0.00031490 +0.00004263 +0.00000745 +7.46412238 +1.22804989 +1.58724156 
+2.79622880 +4.11250221 +5.75486035 +7.99339296 +1.19814066 +1.83364910 +1.47266991 diff --git a/tests/queries/0_stateless/02160_h3_cell_area_rads2.sql b/tests/queries/0_stateless/02160_h3_cell_area_rads2.sql index 038a0cabd50..17d4a7e732d 100644 --- a/tests/queries/0_stateless/02160_h3_cell_area_rads2.sql +++ b/tests/queries/0_stateless/02160_h3_cell_area_rads2.sql @@ -24,7 +24,6 @@ INSERT INTO h3_indexes VALUES (635544851677385791); INSERT INTO h3_indexes VALUES (639763125756281263); INSERT INTO h3_indexes VALUES (644178757620501158); - -SELECT h3CellAreaRads2(h3_index) FROM h3_indexes ORDER BY h3_index; +SELECT substring(h3CellAreaRads2(h3_index)::String, 1, 10) FROM h3_indexes ORDER BY h3_index; DROP TABLE h3_indexes; diff --git a/tests/queries/0_stateless/02165_h3_exact_edge_length_Km.reference b/tests/queries/0_stateless/02165_h3_exact_edge_length_Km.reference index 1e44981f9eb..050c5db2a90 100644 --- a/tests/queries/0_stateless/02165_h3_exact_edge_length_Km.reference +++ b/tests/queries/0_stateless/02165_h3_exact_edge_length_Km.reference @@ -1,16 +1,16 @@ -489.55559989912314 -192.39078306095627 -66.91913220366439 -1263.6096633631134 -480.7440319163875 -195.44963163407317 -1263.6096633631118 -461.80697194406935 -190.08769842412468 -1263.6096633631123 -465.41972260404145 -64.81970466298482 -1263.6096633631116 -69.63641611246636 -195.6274718146093 -67.66085681290775 +489.56 +192.39 +66.92 +1263.61 +480.74 +195.45 +1263.61 +461.81 +190.09 +1263.61 +465.42 +64.82 +1263.61 +69.64 +195.63 +67.66 diff --git a/tests/queries/0_stateless/02165_h3_exact_edge_length_Km.sql b/tests/queries/0_stateless/02165_h3_exact_edge_length_Km.sql index 26607227484..6a3a288cce4 100644 --- a/tests/queries/0_stateless/02165_h3_exact_edge_length_Km.sql +++ b/tests/queries/0_stateless/02165_h3_exact_edge_length_Km.sql @@ -23,7 +23,6 @@ INSERT INTO h3_indexes VALUES (1530240222715969535); INSERT INTO h3_indexes VALUES (1602297816753897471); INSERT INTO h3_indexes VALUES (1242009915283734527); -SELECT h3ExactEdgeLengthKm(h3_index) FROM h3_indexes ORDER BY h3_index; +SELECT round(h3ExactEdgeLengthKm(h3_index), 2) FROM h3_indexes ORDER BY h3_index; DROP TABLE h3_indexes; - diff --git a/tests/queries/0_stateless/02165_h3_exact_edge_length_m.reference b/tests/queries/0_stateless/02165_h3_exact_edge_length_m.reference index 52dcaaf8548..4852c8d5bda 100644 --- a/tests/queries/0_stateless/02165_h3_exact_edge_length_m.reference +++ b/tests/queries/0_stateless/02165_h3_exact_edge_length_m.reference @@ -1,16 +1,16 @@ -489555.59989912313 -192390.78306095628 -66919.13220366438 -1263609.6633631135 -480744.0319163875 -195449.63163407316 -1263609.663363112 -461806.9719440694 -190087.69842412468 -1263609.6633631124 -465419.72260404145 -64819.70466298482 -1263609.6633631117 -69636.41611246637 -195627.4718146093 -67660.85681290775 +489555.6 +192390.78 +66919.13 +1263609.66 +480744.03 +195449.63 +1263609.66 +461806.97 +190087.7 +1263609.66 +465419.72 +64819.7 +1263609.66 +69636.42 +195627.47 +67660.86 diff --git a/tests/queries/0_stateless/02165_h3_exact_edge_length_m.sql b/tests/queries/0_stateless/02165_h3_exact_edge_length_m.sql index 093ab1dd2d2..06b50670c29 100644 --- a/tests/queries/0_stateless/02165_h3_exact_edge_length_m.sql +++ b/tests/queries/0_stateless/02165_h3_exact_edge_length_m.sql @@ -23,7 +23,6 @@ INSERT INTO h3_indexes VALUES (1530240222715969535); INSERT INTO h3_indexes VALUES (1602297816753897471); INSERT INTO h3_indexes VALUES (1242009915283734527); -SELECT h3ExactEdgeLengthM(h3_index) FROM h3_indexes ORDER BY 
h3_index; +SELECT round(h3ExactEdgeLengthM(h3_index), 2) FROM h3_indexes ORDER BY h3_index; DROP TABLE h3_indexes; - diff --git a/tests/queries/0_stateless/02165_h3_exact_edge_length_rads.reference b/tests/queries/0_stateless/02165_h3_exact_edge_length_rads.reference index cec63f72b07..c2cdc1e628e 100644 --- a/tests/queries/0_stateless/02165_h3_exact_edge_length_rads.reference +++ b/tests/queries/0_stateless/02165_h3_exact_edge_length_rads.reference @@ -1,16 +1,16 @@ -0.07684116278590451 -0.03019786002394998 -0.010503697500779932 -0.19833750417794152 -0.07545808979092708 -0.030677980118976447 -0.19833750417794127 -0.0724857089044268 -0.029836365432681984 -0.19833750417794133 -0.07305277005463119 -0.010174169141909536 -0.19833750417794122 -0.010930205246202099 -0.030705894101096694 -0.010620119376973209 +0.07684 +0.0302 +0.0105 +0.19834 +0.07546 +0.03068 +0.19834 +0.07249 +0.02984 +0.19834 +0.07305 +0.01017 +0.19834 +0.01093 +0.03071 +0.01062 diff --git a/tests/queries/0_stateless/02165_h3_exact_edge_length_rads.sql b/tests/queries/0_stateless/02165_h3_exact_edge_length_rads.sql index d618e69f032..b03d527156a 100644 --- a/tests/queries/0_stateless/02165_h3_exact_edge_length_rads.sql +++ b/tests/queries/0_stateless/02165_h3_exact_edge_length_rads.sql @@ -23,7 +23,6 @@ INSERT INTO h3_indexes VALUES (1530240222715969535); INSERT INTO h3_indexes VALUES (1602297816753897471); INSERT INTO h3_indexes VALUES (1242009915283734527); -SELECT h3ExactEdgeLengthRads(h3_index) FROM h3_indexes ORDER BY h3_index; +SELECT round(h3ExactEdgeLengthRads(h3_index), 5) FROM h3_indexes ORDER BY h3_index; DROP TABLE h3_indexes; - diff --git a/tests/queries/0_stateless/02210_processors_profile_log.reference b/tests/queries/0_stateless/02210_processors_profile_log.reference index a056b445bbd..1a7dd64d657 100644 --- a/tests/queries/0_stateless/02210_processors_profile_log.reference +++ b/tests/queries/0_stateless/02210_processors_profile_log.reference @@ -2,9 +2,8 @@ EXPLAIN PIPELINE SELECT sleep(1); (Expression) ExpressionTransform - (SettingQuotaAndLimits) - (ReadFromStorage) - SourceFromSingleChunk 0 → 1 + (ReadFromStorage) + SourceFromSingleChunk 0 → 1 SELECT sleep(1) SETTINGS log_processors_profiles=true, log_queries=1, log_queries_min_type='QUERY_FINISH'; 0 SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/02212_h3_point_dist.reference b/tests/queries/0_stateless/02212_h3_point_dist.reference index 00d316ab508..17ea61d3209 100644 --- a/tests/queries/0_stateless/02212_h3_point_dist.reference +++ b/tests/queries/0_stateless/02212_h3_point_dist.reference @@ -1,42 +1,42 @@ -- select h3PointDistM(lat1, lon1,lat2, lon2) AS k from table1 order by k; -111195.05197522961 -111228.91103262542 -111901.0753744776 -111901.07537447763 -111901.07537447763 -157225.60925091387 -157249.55851177874 -222457.78082261496 -223528.36944466401 -223528.36944466401 -400900.66882205213 -2223901.039504589 +111195.05 +111228.91 +111901.08 +111901.08 +111901.08 +157225.61 +157249.56 +222457.78 +223528.37 +223528.37 +400900.67 +2223901.04 -- select h3PointDistKm(lat1, lon1,lat2, lon2) AS k from table1 order by k; -111.1950519752296 -111.22891103262542 -111.9010753744776 -111.90107537447763 -111.90107537447763 -157.22560925091386 -157.24955851177873 -222.45778082261498 -223.528369444664 -223.528369444664 -400.9006688220521 -2223.901039504589 +111.2 +111.23 +111.9 +111.9 +111.9 +157.23 +157.25 +222.46 +223.53 +223.53 +400.9 +2223.9 -- select h3PointDistRads(lat1, lon1,lat2, lon2) AS k from table1 order by k; -0.01745329251994332 
-0.017458607073268143 -0.017564110696598745 -0.01756411069659875 -0.01756411069659875 -0.024678297290546682 -0.02468205639176644 -0.034917207673048706 -0.03508524839120321 -0.03508524839120321 -0.06292579139178688 -0.3490658503988659 +0.01745 +0.01746 +0.01756 +0.01756 +0.01756 +0.02468 +0.02468 +0.03492 +0.03509 +0.03509 +0.06293 +0.34907 -- test for non const cols -0.3490658503988659 -0.3490658503988659 +0.34907 +0.34907 diff --git a/tests/queries/0_stateless/02212_h3_point_dist.sql b/tests/queries/0_stateless/02212_h3_point_dist.sql index ccc806db75e..bcba4be04e1 100644 --- a/tests/queries/0_stateless/02212_h3_point_dist.sql +++ b/tests/queries/0_stateless/02212_h3_point_dist.sql @@ -18,15 +18,15 @@ INSERT INTO table1 VALUES(-84, 181, -83, 182); INSERT INTO table1 VALUES(-87, 0, -85, 3); select '-- select h3PointDistM(lat1, lon1,lat2, lon2) AS k from table1 order by k;'; -select h3PointDistM(lat1, lon1,lat2, lon2) AS k from table1 order by k; +select round(h3PointDistM(lat1, lon1,lat2, lon2), 2) AS k from table1 order by k; select '-- select h3PointDistKm(lat1, lon1,lat2, lon2) AS k from table1 order by k;'; -select h3PointDistKm(lat1, lon1,lat2, lon2) AS k from table1 order by k; +select round(h3PointDistKm(lat1, lon1,lat2, lon2), 2) AS k from table1 order by k; select '-- select h3PointDistRads(lat1, lon1,lat2, lon2) AS k from table1 order by k;'; -select h3PointDistRads(lat1, lon1,lat2, lon2) AS k from table1 order by k; +select round(h3PointDistRads(lat1, lon1,lat2, lon2), 5) AS k from table1 order by k; DROP TABLE table1; -- tests for const columns select '-- test for non const cols'; -select h3PointDistRads(-10.0 ,0.0, 10.0, arrayJoin([0.0])) as h3PointDistRads; -select h3PointDistRads(-10.0 ,0.0, 10.0, toFloat64(0)) as h3PointDistRads; +select round(h3PointDistRads(-10.0, 0.0, 10.0, arrayJoin([0.0])), 5) as h3PointDistRads; +select round(h3PointDistRads(-10.0, 0.0, 10.0, toFloat64(0)), 5) as h3PointDistRads; diff --git a/tests/queries/0_stateless/02223_h3_test_const_columns.reference b/tests/queries/0_stateless/02223_h3_test_const_columns.reference index 1e8bcea904b..46dedd44e45 100644 --- a/tests/queries/0_stateless/02223_h3_test_const_columns.reference +++ b/tests/queries/0_stateless/02223_h3_test_const_columns.reference @@ -4,32 +4,32 @@ 587531185127686143 607221000000 86745854035 -607220.9782 -86745.85403 -4106166334463.9233 -85294486110.07852 -12781831077.715292 -0.10116268528089567 -0.002101380838405832 -0.00031490306268786255 +607220.98 +86745.85 +4106166334463.92 +85294486110.08 +12781831077.72 +0.1 +0 +0 0 2 3 -9.961887434044831 -3.7652395323603707 -1.4231267757782213 -1107712.591 -418676.0055 -158244.6558 -1107.712591 -418.6760055 -158.2446558 -(-173.6412167681162,-14.130272474941535) -(172.68095885060296,-83.6576608516349) -(-94.46556851304558,-69.1999982492279) -[(-25.60370257696877,-170.61932339479839),(-16.505947603561054,-161.6348206171839),(-5.762860491436932,-165.41674992858833),(-3.968796976609588,-176.05696384421356),(-11.54529597541476,175.98600155652952),(-22.19754138630238,177.51613498805204)] -[(-82.24829137508873,167.18203088800593),(-83.41761096812805,158.00531624510785),(-84.94207431820979,162.09183616506846),(-85.00324300064887,-178.60454506450245),(-83.46691212211444,-172.41232929697492),(-82.25118471750908,179.4928586395771)] 
-[(-69.70201806837188,-95.63006768303532),(-69.78121889088241,-93.8329499937899),(-69.26603652285242,-92.70414199751751),(-68.6908704290193,-93.35479180342097),(-68.62037380778602,-95.0614247833063),(-69.11663254992226,-96.20491957306085)] +9.96 +3.77 +1.42 +1107712.59 +418676.01 +158244.66 +1107.71 +418.68 +158.24 +-173.64 -14.13 +172.68 -83.66 +-94.47 -69.2 +[(-25.6,-170.62),(-16.51,-161.63),(-5.76,-165.42),(-3.97,-176.06),(-11.55,175.99),(-22.2,177.52)] +[(-82.25,167.18),(-83.42,158.01),(-84.94,162.09),(-85,-178.6),(-83.47,-172.41),(-82.25,179.49)] +[(-69.7,-95.63),(-69.78,-93.83),(-69.27,-92.7),(-68.69,-93.35),(-68.62,-95.06),(-69.12,-96.2)] [579275502070530047,579768083279773695,578888473977552895,579662530163507199,579205133326352383,578466261512486911,578712552117108735] [578888473977552895,580225480116928511,579979189512306687,578114417791598591,578079233419509759,578712552117108735,579310686442618879,578606999000842239,578571814628753407,579205133326352383,579275502070530047,578466261512486911,579240317698441215,577727389698621439,579838452023951359,579662530163507199,579768083279773695,580331033233195007] [577375545977733119,578431077140398079,579099580210085887,579732898907684863,580612508209905663,579275502070530047,580190295744839679,577094071001022463,578606999000842239,579029211465908223,577727389698621439,579240317698441215,579662530163507199,578571814628753407,580331033233195007,580295848861106175,579205133326352383,577903311559065599,578114417791598591,579838452023951359,577445914721910783,577868127186976767,578079233419509759,579592161419329535,578501445884575743,578712552117108735,580225480116928511,580471770721550335,580647692581994495,578466261512486911,579768083279773695,578888473977552895,579979189512306687,579310686442618879] @@ -63,18 +63,18 @@ 581496515558637567 585996266895310847 590499385486344191 -1263609.6633631135 -1263609.663363112 -1263609.6633631124 -1263609.6633631117 -1263.6096633631134 -1263.6096633631118 -1263.6096633631123 -1263.6096633631116 -0.19833750417794152 -0.19833750417794127 -0.19833750417794133 -0.19833750417794122 +1263609.66 +1263609.66 +1263609.66 +1263609.66 +1263.61 +1263.61 +1263.61 +1263.61 +0.2 +0.2 +0.2 +0.2 842 5882 41162 diff --git a/tests/queries/0_stateless/02223_h3_test_const_columns.sql b/tests/queries/0_stateless/02223_h3_test_const_columns.sql index 42d5e677b09..50ccfaaf173 100644 --- a/tests/queries/0_stateless/02223_h3_test_const_columns.sql +++ b/tests/queries/0_stateless/02223_h3_test_const_columns.sql @@ -1,19 +1,19 @@ -- Tags: no-fasttest -select geoToH3(toFloat64(0),toFloat64(1),arrayJoin([1,2])); +select round(geoToH3(toFloat64(0),toFloat64(1),arrayJoin([1,2])), 2); select h3ToParent(641573946153969375, arrayJoin([1,2])); -SELECT h3HexAreaM2(arrayJoin([1,2])); -SELECT h3HexAreaKm2(arrayJoin([1,2])); -SELECT h3CellAreaM2(arrayJoin([579205133326352383,589753847883235327,594082350283882495])); +SELECT round(h3HexAreaM2(arrayJoin([1,2])), 2); +SELECT round(h3HexAreaKm2(arrayJoin([1,2])), 2); +SELECT round(h3CellAreaM2(arrayJoin([579205133326352383,589753847883235327,594082350283882495])), 2); SELECT NULL, toFloat64('-1'), -2147483648, h3CellAreaM2(arrayJoin([9223372036854775807, 65535, NULL])); -- { serverError 117 } -SELECT h3CellAreaRads2(arrayJoin([579205133326352383,589753847883235327,594082350283882495])); +SELECT round(h3CellAreaRads2(arrayJoin([579205133326352383,589753847883235327,594082350283882495])), 2); SELECT NULL, toFloat64('-1'), -2147483648, h3CellAreaRads2(arrayJoin([9223372036854775807, 65535, NULL])); 
-- { serverError 117 } SELECT h3GetResolution(arrayJoin([579205133326352383,589753847883235327,594082350283882495])); -SELECT h3EdgeAngle(arrayJoin([0,1,2])); -SELECT h3EdgeLengthM(arrayJoin([0,1,2])); -SELECT h3EdgeLengthKm(arrayJoin([0,1,2])); -SELECT h3ToGeo(arrayJoin([579205133326352383,589753847883235327,594082350283882495])); -SELECT h3ToGeoBoundary(arrayJoin([579205133326352383,589753847883235327,594082350283882495])); +SELECT round(h3EdgeAngle(arrayJoin([0,1,2])), 2); +SELECT round(h3EdgeLengthM(arrayJoin([0,1,2])), 2); +SELECT round(h3EdgeLengthKm(arrayJoin([0,1,2])), 2); +WITH h3ToGeo(arrayJoin([579205133326352383,589753847883235327,594082350283882495])) AS p SELECT round(p.1, 2), round(p.2, 2); +SELECT arrayMap(p -> (round(p.1, 2), round(p.2, 2)), h3ToGeoBoundary(arrayJoin([579205133326352383,589753847883235327,594082350283882495]))); SELECT h3kRing(arrayJoin([579205133326352383]), arrayJoin([toUInt16(1),toUInt16(2),toUInt16(3)])); SELECT h3GetBaseCell(arrayJoin([579205133326352383,589753847883235327,594082350283882495])); SELECT h3IndexesAreNeighbors(617420388351344639, arrayJoin([617420388352655359, 617420388351344639, 617420388352917503])); @@ -25,9 +25,9 @@ SELECT h3IsResClassIII(arrayJoin([579205133326352383,589753847883235327,59408235 SELECT h3IsPentagon(arrayJoin([stringToH3('8f28308280f18f2'),stringToH3('821c07fffffffff'),stringToH3('0x8f28308280f18f2L'),stringToH3('0x821c07fffffffffL')])); SELECT h3GetFaces(arrayJoin([stringToH3('8f28308280f18f2'),stringToH3('821c07fffffffff'),stringToH3('0x8f28308280f18f2L'),stringToH3('0x821c07fffffffffL')])); SELECT h3ToCenterChild(577023702256844799, arrayJoin([1,2,3])); -SELECT h3ExactEdgeLengthM(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783])); -SELECT h3ExactEdgeLengthKm(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783])); -SELECT h3ExactEdgeLengthRads(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783])); +SELECT round(h3ExactEdgeLengthM(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783])), 2); +SELECT round(h3ExactEdgeLengthKm(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783])), 2); +SELECT round(h3ExactEdgeLengthRads(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783])), 2); SELECT h3NumHexagons(arrayJoin([1,2,3])); SELECT h3Line(arrayJoin([stringToH3('85283473fffffff')]), arrayJoin([stringToH3('8528342bfffffff')])); SELECT h3HexRing(arrayJoin([579205133326352383]), arrayJoin([toUInt16(1),toUInt16(2),toUInt16(3)])); -- { serverError 117 } diff --git a/tests/queries/0_stateless/02227_union_match_by_name.reference b/tests/queries/0_stateless/02227_union_match_by_name.reference index 72c4987a3d2..847c5acd18d 100644 --- a/tests/queries/0_stateless/02227_union_match_by_name.reference +++ b/tests/queries/0_stateless/02227_union_match_by_name.reference @@ -22,10 +22,8 @@ Header: avgWeighted(x, y) Nullable(Float64) Header: 255 UInt8 1 UInt8 dummy UInt8 - SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemOne) Header: dummy UInt8 - ReadFromStorage (SystemOne) - Header: dummy UInt8 Expression (Conversion before UNION) Header: x Nullable(UInt8) y UInt8 @@ -36,9 +34,7 @@ Header: avgWeighted(x, y) Nullable(Float64) Header: NULL Nullable(Nothing) 1 UInt8 dummy UInt8 - SettingQuotaAndLimits (Set limits and quota after reading from storage) + 
ReadFromStorage (SystemOne) Header: dummy UInt8 - ReadFromStorage (SystemOne) - Header: dummy UInt8 SELECT avgWeighted(x, y) FROM (SELECT NULL, 255 AS x, 1 AS y UNION ALL SELECT y, NULL AS x, 1 AS y); 255 diff --git a/tests/queries/0_stateless/02233_optimize_aggregation_in_order_prefix.reference b/tests/queries/0_stateless/02233_optimize_aggregation_in_order_prefix.reference index 6d18398aa3c..9d252c9f396 100644 --- a/tests/queries/0_stateless/02233_optimize_aggregation_in_order_prefix.reference +++ b/tests/queries/0_stateless/02233_optimize_aggregation_in_order_prefix.reference @@ -18,9 +18,8 @@ ExpressionTransform × 2 AggregatingInOrderTransform (Expression) ExpressionTransform - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder 0 → 1 + (ReadFromMergeTree) + MergeTreeInOrder 0 → 1 explain pipeline select parent_key, child_key, count() from data_02233 group by parent_key, child_key with totals order by parent_key, child_key settings max_threads=1; (Expression) ExpressionTransform × 2 @@ -36,9 +35,8 @@ ExpressionTransform × 2 AggregatingTransform (Expression) ExpressionTransform - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder 0 → 1 + (ReadFromMergeTree) + MergeTreeInOrder 0 → 1 select parent_key, child_key, count() from data_02233 group by parent_key, child_key with totals order by parent_key, child_key settings max_threads=1, optimize_aggregation_in_order=1; 0 0 4 0 1 3 diff --git a/tests/queries/0_stateless/02236_explain_pipeline_join.reference b/tests/queries/0_stateless/02236_explain_pipeline_join.reference index ed993e2a1e7..5d7a7bfc488 100644 --- a/tests/queries/0_stateless/02236_explain_pipeline_join.reference +++ b/tests/queries/0_stateless/02236_explain_pipeline_join.reference @@ -4,16 +4,14 @@ ExpressionTransform JoiningTransform 2 → 1 (Expression) ExpressionTransform - (SettingQuotaAndLimits) + (Limit) + Limit + (ReadFromStorage) + Numbers 0 → 1 + (Expression) + FillingRightJoinSide + ExpressionTransform (Limit) Limit (ReadFromStorage) Numbers 0 → 1 - (Expression) - FillingRightJoinSide - ExpressionTransform - (SettingQuotaAndLimits) - (Limit) - Limit - (ReadFromStorage) - Numbers 0 → 1 diff --git a/tests/queries/0_stateless/02293_h3_line.reference b/tests/queries/0_stateless/02293_h3_line.reference index 1211776b7f1..c02779b2c93 100644 --- a/tests/queries/0_stateless/02293_h3_line.reference +++ b/tests/queries/0_stateless/02293_h3_line.reference @@ -1,28 +1,28 @@ -[590080540275638271,590080471556161535,590080883873021951,590106516237844479,590104385934065663,590103630019821567,590103561300344831] -[590080540275638271,590080471556161535,590080608995115007,590104454653542399,590104385934065663,590104523373019135,590103767458775039] -[590080540275638271,590080471556161535,590080608995115007,590104454653542399,590104111056158719,590104523373019135,590105554165170175] -[590080540275638271,590080677714591743,590080608995115007,590104179775635455,590104317214588927,590104248495112191,590105279287263231] -[590080540275638271,590077585338138623,590077310460231679,590079097166626815,590078822288719871,590079028447150079,590094009293078527] -[590080540275638271,590077585338138623,590077310460231679,590079097166626815,590079165886103551,590078891008196607,590092978500927487] -[590080540275638271,590077585338138623,590077173021278207,590077379179708415,590079165886103551,590077860216045567,590092841061974015] -[590080540275638271,590080815153545215,590079784361394175,590096483194241023,590096758072147967,590095727279996927,590094833926799359] 
-[590080540275638271,590080815153545215,590096620633194495,590096414474764287,590096758072147967,590094971365752831,590094765207322623] -[590080540275638271,590080815153545215,590096620633194495,590096414474764287,590096689352671231,590094902646276095,590095177524183039] -[590080540275638271,590080815153545215,590096620633194495,590096414474764287,590096826791624703,590095933438427135,590096208316334079,590098269900636159] -[590000619524194303,590000344646287359,590000413365764095,589998351781462015,590000894402101247,589998832817799167,589998901537275903,589998626659368959,589972994294546431] -[590000619524194303,590000207207333887,590000413365764095,590001169280008191,590000894402101247,590000963121577983,589975330756755455,589975055878848511,589975124598325247] -[590000619524194303,590000207207333887,590000413365764095,590001169280008191,590000756963147775,590000825682624511,589975330756755455,589974918439895039,589974987159371775] -[590000619524194303,590000207207333887,590000275926810623,590001031841054719,590000756963147775,590000825682624511,589975193317801983,589975262037278719,589973956367220735] -[590000619524194303,590000207207333887,590000275926810623,590001031841054719,590001100560531455,589990998797451263,589991067516927999,589974231245127679,589973818928267263] -[590000619524194303,590000207207333887,590000275926810623,590001031841054719,589990517761114111,589991273675358207,589990861358497791,589990930077974527,589974093806174207] -[590000619524194303,590000482085240831,590277902612824063,590278177490731007,589992648064892927,589992716784369663,589992579345416191,589991548553265151,589991411114311679] -[590000619524194303,590000482085240831,590277902612824063,590278177490731007,589992648064892927,589992510625939455,589992854223323135,589991823431172095,589991685992218623] -[590000619524194303,590000482085240831,590277902612824063,590278177490731007,590278108771254271,589992922942799871,589992785503846399,590126170008190975,590126444886097919] -[590000550804717567,590000207207333887,590000619524194303,590001650316345343,590001581596868607,590001719035822079,590259760670965759,590260654024163327,590260379146256383,590257561647710207,590258455000907775,590259485793058815,590259210915151871] -[590000550804717567,590000207207333887,590000619524194303,590001650316345343,590001306718961663,590001719035822079,590260516585209855,590260241707302911,590260379146256383,590258317561954303,590258042684047359,590258180123000831,590168226327953407] -[590000550804717567,590000207207333887,590000619524194303,590001650316345343,590001306718961663,590001444157915135,590260516585209855,590260241707302911,590260447865733119,590258386281431039,590258042684047359,590258248842477567,590167951450046463] -[590000550804717567,590000207207333887,590000344646287359,590001650316345343,590001306718961663,590001444157915135,590260585304686591,590260310426779647,590172005899173887,590258386281431039,590258111403524095,590173105410801663,590173242849755135] -[590000550804717567,590000207207333887,590000344646287359,589998283061985279,589998420500938751,589999451293089791,589999107695706111,589999313854136319,590172555654987775,590172693093941247,590169875595395071,590169600717488127,590169738156441599] -[590000550804717567,590000207207333887,590000413365764095,589998283061985279,589998420500938751,589998145623031807,589999176415182847,589999313854136319,590172624374464511,590172280777080831,590172418216034303,590170356631732223,590170494070685695] 
-[590000550804717567,590000207207333887,590000413365764095,589998283061985279,589998008184078335,589998145623031807,589999176415182847,590000069768380415,590172624374464511,590172349496557567,590172486935511039,590170425351208959,590170081753825279] -[590000550804717567,590000207207333887,590000413365764095,589998283061985279,589998008184078335,589998145623031807,589999932329426943,590000069768380415,589999794890473471,590172349496557567,589984126849777663,590170425351208959,590170150473302015] +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +8 +9 +9 +9 +9 +9 +9 +9 +9 +9 +13 +13 +13 +13 +13 +13 +13 +13 diff --git a/tests/queries/0_stateless/02293_h3_line.sql b/tests/queries/0_stateless/02293_h3_line.sql index 01b1a84ef22..476587ebe7c 100644 --- a/tests/queries/0_stateless/02293_h3_line.sql +++ b/tests/queries/0_stateless/02293_h3_line.sql @@ -35,10 +35,22 @@ INSERT INTO h3_indexes VALUES (26, '8301a5fffffffff','830b4efffffffff'); INSERT INTO h3_indexes VALUES (27, '8301a5fffffffff','830b48fffffffff'); INSERT INTO h3_indexes VALUES (28, '8301a5fffffffff','830b49fffffffff'); +/* +Given two H3 indexes, return the line of indexes between them (inclusive). +This function may fail to find the line between two indexes, for example if they are very far apart. +It may also fail when finding distances for indexes on opposite sides of a pentagon. -SELECT h3Line(stringToH3(start), stringToH3(end)) FROM h3_indexes ORDER BY id; +Notes: + The specific output of this function should not be considered stable across library versions. + The only guarantees the library provides are that the line length will be h3Distance(start, end) + 1 + and that every index in the line will be a neighbor of the preceding index. + Lines are drawn in grid space, and may not correspond exactly to either Cartesian lines or great arcs. 
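+
+ As an illustration only (a sketch, not one of this test's checks), the length guarantee
+ could be verified directly, assuming the companion h3Distance function is available:
+   SELECT length(h3Line(stringToH3('85283473fffffff'), stringToH3('8528342bfffffff')))
+       = h3Distance(stringToH3('85283473fffffff'), stringToH3('8528342bfffffff')) + 1;
+ The comparison should return 1 whenever h3Line succeeds.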
+ +https://h3geo.org/docs/api/traversal + */ + +SELECT length(h3Line(stringToH3(start), stringToH3(end))) FROM h3_indexes ORDER BY id; SELECT h3Line(0xffffffffffffff, 0xffffffffffffff); -- { serverError 117 } DROP TABLE h3_indexes; - diff --git a/tests/queries/0_stateless/02303_query_kind.reference b/tests/queries/0_stateless/02303_query_kind.reference index 51addfdb857..b899b9f5b45 100644 --- a/tests/queries/0_stateless/02303_query_kind.reference +++ b/tests/queries/0_stateless/02303_query_kind.reference @@ -5,10 +5,8 @@ Header: dummy String Header: toString(dummy) String Expression (Before GROUP BY) Header: toString(dummy) String - SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemOne) Header: dummy UInt8 - ReadFromStorage (SystemOne) - Header: dummy UInt8 clickhouse-local --query_kind secondary_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy Expression ((Projection + Before ORDER BY)) Header: dummy String @@ -16,10 +14,8 @@ Header: dummy String Header: toString(dummy) String Expression (Before GROUP BY) Header: toString(dummy) String - SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemOne) Header: dummy UInt8 - ReadFromStorage (SystemOne) - Header: dummy UInt8 clickhouse-client --query_kind initial_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy Expression ((Projection + Before ORDER BY)) Header: dummy String @@ -27,10 +23,8 @@ Header: dummy String Header: dummy UInt8 Expression (Before GROUP BY) Header: dummy UInt8 - SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemOne) Header: dummy UInt8 - ReadFromStorage (SystemOne) - Header: dummy UInt8 clickhouse-local --query_kind initial_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy Expression ((Projection + Before ORDER BY)) Header: dummy String @@ -38,7 +32,5 @@ Header: dummy String Header: dummy UInt8 Expression (Before GROUP BY) Header: dummy UInt8 - SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemOne) Header: dummy UInt8 - ReadFromStorage (SystemOne) - Header: dummy UInt8 diff --git a/tests/queries/0_stateless/02311_system_zookeeper_insert.reference b/tests/queries/0_stateless/02311_system_zookeeper_insert.reference new file mode 100644 index 00000000000..d8a35d57e07 --- /dev/null +++ b/tests/queries/0_stateless/02311_system_zookeeper_insert.reference @@ -0,0 +1,27 @@ +/default/1-insert-testc c +/default/1-insert-testc/c c +/default/1-insert-testc/c/c c +/default/1-insert-testc/c/c d +/default/1-insert-testc/c/c e +/default/1-insert-testc/c/c f +/default/1-insert-testc/c/c kk +/default/1-insert-testc/c/c/c c +/default/1-insert-testc/c/c/c/c c +/default/1-insert-testc/c/c/c/c/c c +/default/1-insert-testc/c/c/c/c/c/c c 9 +/default/1-insert-testc/c/c/c/c/c/c/c c 10 +/default/1-insert-testc/c/c/d e 10 +/default/1-insert-testc/c/c/d f 11 +/default/1-insert-testc/c/c/d g 12 +/default/1-insert-testc/c/c/e g 13 +/default/1-insert-testc/c/c/f g 14 +/default/1-insert-testc/c/c/kk g 14 +------------------------- +/default/2-insert-testx testb z +/default/2-insert-testx testc x +/default/2-insert-testx testz y +/default/2-insert-testz c +/default/2-insert-testz/c cd +/default/2-insert-testz/c/cd dd +/default/2-insert-testz/c/cd testc +/default/2-insert-testz/c/cd/dd testc y diff --git 
a/tests/queries/0_stateless/02311_system_zookeeper_insert.sql b/tests/queries/0_stateless/02311_system_zookeeper_insert.sql new file mode 100644 index 00000000000..e1c42278086 --- /dev/null +++ b/tests/queries/0_stateless/02311_system_zookeeper_insert.sql @@ -0,0 +1,43 @@ +-- Tags: zookeeper + +set allow_unrestricted_reads_from_keeper = 'true'; + +drop table if exists test_zkinsert; + +create table test_zkinsert ( + name String, + path String, + value String +) ENGINE Memory; + +-- test recursive create and big transaction +insert into test_zkinsert (name, path, value) values ('c', '/1-insert-testc/c/c/c/c/c/c', 11), ('e', '/1-insert-testc/c/c/d', 10), ('c', '/1-insert-testc/c/c/c/c/c/c/c', 10), ('c', '/1-insert-testc/c/c/c/c/c/c', 9), ('f', '/1-insert-testc/c/c/d', 11), ('g', '/1-insert-testc/c/c/d', 12), ('g', '/1-insert-testc/c/c/e', 13), ('g', '/1-insert-testc/c/c/f', 14), ('g', '/1-insert-testc/c/c/kk', 14); +-- insert the same values again; this is supposed to have no side effects +insert into system.zookeeper (name, path, value) SELECT name, '/' || currentDatabase() || path, value from test_zkinsert; + +SELECT * FROM (SELECT path, name, value FROM system.zookeeper ORDER BY path, name) WHERE path LIKE '/' || currentDatabase() || '/1-insert-test%'; + +SELECT '-------------------------'; + +-- test inserting into root path +insert into test_zkinsert (name, path, value) values ('testc', '/2-insert-testx', 'x'); +insert into test_zkinsert (name, path, value) values ('testz', '/2-insert-testx', 'y'); +insert into test_zkinsert (name, path, value) values ('testc', '/2-insert-testz//c/cd/dd//', 'y'); +insert into test_zkinsert (name, path) values ('testc', '/2-insert-testz//c/cd/'); +insert into test_zkinsert (name, value, path) values ('testb', 'z', '/2-insert-testx'); + +insert into system.zookeeper (name, path, value) SELECT name, '/' || currentDatabase() || path, value from test_zkinsert; + +SELECT * FROM (SELECT path, name, value FROM system.zookeeper ORDER BY path, name) WHERE path LIKE '/' || currentDatabase() || '/2-insert-test%'; + +-- test exceptions
insert into system.zookeeper (name, value) values ('abc', 'y'); -- { serverError 36 } +insert into system.zookeeper (path, value) values ('a/b/c', 'y'); -- { serverError 36 } +insert into system.zookeeper (name, version) values ('abc', 111); -- { serverError 44 } +insert into system.zookeeper (name, versionxyz) values ('abc', 111); -- { serverError 16 } +insert into system.zookeeper (name, path, value) values ('a/b/c', '/', 'y'); -- { serverError 36 } +insert into system.zookeeper (name, path, value) values ('/', '/a/b/c', 'z'); -- { serverError 36 } +insert into system.zookeeper (name, path, value) values ('', '/', 'y'); -- { serverError 36 } +insert into system.zookeeper (name, path, value) values ('abc', 
'/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/a
bc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc', 'y'); -- { serverError 36 } + +drop table if exists test_zkinsert; diff --git a/tests/queries/0_stateless/02311_system_zookeeper_insert_priv.reference b/tests/queries/0_stateless/02311_system_zookeeper_insert_priv.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/02311_system_zookeeper_insert_priv.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02311_system_zookeeper_insert_priv.sh b/tests/queries/0_stateless/02311_system_zookeeper_insert_priv.sh new file mode 100755 index 00000000000..24a1f7e7c39 --- /dev/null +++ b/tests/queries/0_stateless/02311_system_zookeeper_insert_priv.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Tags: no-parallel + + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +${CLICKHOUSE_CLIENT_BINARY} --query "drop user if exists u_02311" +${CLICKHOUSE_CLIENT_BINARY} --query "create user u_02311" +error="$(${CLICKHOUSE_CLIENT_BINARY} --user=u_02311 --query "insert into system.zookeeper (path, name, value) values ('//3-insert-testc/c/c/kk', 'kk', '11')" 2>&1 > /dev/null)" +echo "${error}" | grep -Fc "ACCESS_DENIED" + +${CLICKHOUSE_CLIENT_BINARY} --query "drop user u_02311" diff --git a/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference b/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference new file mode 100644 index 00000000000..fa7f1799c31 --- /dev/null +++ b/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference @@ -0,0 +1 @@ +Array(LowCardinality(String)), Const(size = 1, Array(size = 1, UInt64(size = 1), ColumnLowCardinality(size = 2, UInt8(size = 2), ColumnUnique(size = 3, String(size = 3))))) diff --git a/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.sql b/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.sql new file mode 100644 index 00000000000..66ce6184fae --- /dev/null +++ b/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.sql @@ -0,0 +1 @@ +SELECT dumpColumnStructure(['Hello', 'World']::Array(LowCardinality(String))); diff --git a/tests/queries/0_stateless/02313_multiple_limits.reference b/tests/queries/0_stateless/02313_multiple_limits.reference new file mode 100644 index 00000000000..a5d4ffe0f33 --- /dev/null +++ b/tests/queries/0_stateless/02313_multiple_limits.reference @@ -0,0 +1,6 @@ +45 +45 +1 1 +1 +1 1 +1 diff --git a/tests/queries/0_stateless/02313_multiple_limits.sql b/tests/queries/0_stateless/02313_multiple_limits.sql new file mode 100644 index 00000000000..2924bd3aebc --- /dev/null +++ b/tests/queries/0_stateless/02313_multiple_limits.sql @@ -0,0 +1,106 @@ +SELECT sum(x) +FROM +( + SELECT x + FROM + ( + SELECT number AS x + FROM system.numbers + SETTINGS max_rows_to_read = 10, read_overflow_mode = 'break', max_block_size = 2 + ) + SETTINGS max_rows_to_read = 20, read_overflow_mode = 'break', max_block_size = 2 
+); + +SELECT sum(x) +FROM +( + SELECT x + FROM + ( + SELECT number AS x + FROM system.numbers + SETTINGS max_rows_to_read = 20, read_overflow_mode = 'break', max_block_size = 2 + ) + SETTINGS max_rows_to_read = 10, read_overflow_mode = 'break', max_block_size = 2 +); + + +SELECT count() >= 20, count() <= 22 +FROM +( + SELECT x + FROM + ( + SELECT zero AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 10, read_overflow_mode = 'break' + ) + UNION ALL + SELECT x + FROM + ( + SELECT zero + 1 AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 20, read_overflow_mode = 'break' + ) +); + +SELECT sum(x) >= 10 +FROM +( + SELECT x + FROM + ( + SELECT zero AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 10, read_overflow_mode = 'break' + ) + UNION ALL + SELECT x + FROM + ( + SELECT zero + 1 AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 20, read_overflow_mode = 'break' + ) +); + +SELECT count() >= 20, count() <= 22 +FROM +( + SELECT x + FROM + ( + SELECT zero AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 20, read_overflow_mode = 'break' + ) + UNION ALL + SELECT x + FROM + ( + SELECT zero + 1 AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 10, read_overflow_mode = 'break' + ) +); + +SELECT sum(x) <= 10 +FROM +( + SELECT x + FROM + ( + SELECT zero AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 20, read_overflow_mode = 'break' + ) + UNION ALL + SELECT x + FROM + ( + SELECT zero + 1 AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 10, read_overflow_mode = 'break' + ) +); diff --git a/tests/queries/0_stateless/02317_like_with_trailing_escape.reference b/tests/queries/0_stateless/02317_like_with_trailing_escape.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02317_like_with_trailing_escape.sql b/tests/queries/0_stateless/02317_like_with_trailing_escape.sql new file mode 100644 index 00000000000..a5017e920c2 --- /dev/null +++ b/tests/queries/0_stateless/02317_like_with_trailing_escape.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (haystack String, pattern String) engine = MergeTree() ORDER BY haystack; + +INSERT INTO tab VALUES ('haystack', 'pattern\\'); + +-- const pattern +SELECT haystack LIKE 'pattern\\' from tab; -- { serverError 25 } + +-- non-const pattern +SELECT haystack LIKE pattern from tab; -- { serverError 25 } + +DROP TABLE IF EXISTS tab; diff --git a/tests/queries/1_stateful/00172_early_constant_folding.reference b/tests/queries/1_stateful/00172_early_constant_folding.reference index da2d9a690ee..27cd6b545e0 100644 --- a/tests/queries/1_stateful/00172_early_constant_folding.reference +++ b/tests/queries/1_stateful/00172_early_constant_folding.reference @@ -1,8 +1,7 @@ (Expression) ExpressionTransform - (SettingQuotaAndLimits) - (ReadFromStorage) - AggregatingTransform - StrictResize - ExpressionTransform - SourceFromSingleChunk 0 → 1 + (ReadFromStorage) + AggregatingTransform + StrictResize + ExpressionTransform + SourceFromSingleChunk 0 → 1